% (removed non-LaTeX extraction artifact: "text / stringlengths 9 / 7.94M / | / ---|")
\begin{document}
\title[Vanishing cohomology]{Vanishing cohomology and Betti bounds for complex projective hypersurfaces}
\author[L. Maxim ]{Lauren\c{t}iu Maxim} \address{L. Maxim : Department of Mathematics, University of Wisconsin-Madison, 480 Lincoln Drive, Madison WI 53706-1388, USA} \email {[email protected]} \thanks{L. Maxim is partially supported by the Simons Foundation Collaboration (Grant \#567077) and by the Romanian Ministry of National Education (CNCS-UEFISCDI grant PN-III-P4-ID-PCE-2020-0029).}
\author[L. P\u{a}unescu ]{Lauren\c{t}iu P\u{a}unescu} \address{L. P\u{a}unescu: Department of Mathematics, University of Sydney, Sydney, NSW, 2006, Australia} \email {[email protected]}
\author[M. Tib\u{a}r]{Mihai Tib\u{a}r} \address{M. Tib\u{a}r : Universit\'e de Lille, CNRS, UMR 8524 -- Laboratoire Paul Painlev\'e, F-59000 Lille, France} \email {[email protected]} \thanks{M. Tib\u{a}r acknowledges the support of the Labex CEMPI (ANR-11-LABX-0007). }
\keywords{singular projective hypersurface, vanishing cycles, vanishing cohomology, Betti numbers, Milnor fiber, Lefschetz hyperplane theorem}
\subjclass[2010]{32S30, 32S50, 55R55, 58K60}
\date{\today}
\begin{abstract} We employ the formalism of vanishing cycles and perverse sheaves to introduce and study the vanishing cohomology of complex projective hypersurfaces. As a consequence, we give upper bounds for the Betti numbers of projective hypersurfaces, generalizing those obtained by different methods by Dimca in the isolated singularities case, and by Siersma-Tib\u{a}r in the case of hypersurfaces with a $1$-dimensional singular locus. We also prove a supplement to the Lefschetz hyperplane theorem for hypersurfaces, which takes the dimension of the singular locus into account, and we use it to give a new proof of a result of Kato. \end{abstract}
\maketitle
\section{Introduction. Results}\label{intro} Let $V=\{f=0\} \subset {\mathbb C} P^{n+1}$ be a reduced complex projective hypersurface of degree $d$, with $n \geq 1$. By the classical Lefschetz Theorem, the inclusion map $j:V \hookrightarrow {\mathbb C} P^{n+1}$ induces cohomology isomorphisms \begin{equation}\label{one} j^k:H^k( {\mathbb C} P^{n+1};{\mathbb Z}) \overset{\cong}\longrightarrow H^k(V;{\mathbb Z}) \ \ \text{for all} \ \ k<n, \end{equation}
and a primitive monomorphism for $k=n$ (e.g., see \cite[Theorem 5.2.6]{Di}). Moreover, if $s=\dim V_{\rm sing}<n$ is the complex dimension of the singular locus of $V$ (with $\dim \emptyset=-1$), then Kato \cite{Ka} showed that (see also \cite[Theorem 5.2.11]{Di}) \begin{equation}\label{two} H^k(V;{\mathbb Z}) \cong H^k( {\mathbb C} P^{n+1};{\mathbb Z}) \ \ \text{for all} \ \ n+s+2\leq k\leq 2n, \end{equation} and the homomorphism $j^k$ induced by inclusion is given in this range (and for $k$ even) by multiplication by $d=\deg(V)$. It therefore remains to study the cohomology groups $H^k(V;{\mathbb Z})$ for $n \leq k \leq n+s+1$.
In the case when $V\subset {\mathbb C} P^{n+1}$ is a {\it smooth} degree $d$ hypersurface, the above discussion yields that $H^k(V;{\mathbb Z}) \cong H^k( {\mathbb C} P^{n};{\mathbb Z})$ for all $k \neq n$. This is in fact the only information we take as an input in this note (it also suffices to work with \eqref{one}, its homology counterpart, and Poincar\'e duality). The Universal Coefficient Theorem also yields in this case that $H^n(V;{\mathbb Z})$ is free abelian, and its rank $b_n(V)$ can be easily deduced from the formula for the Euler characteristic of $V$ (e.g., see \cite[Proposition 10.4.1]{M}): \begin{equation}\label{chi} \chi(V)=(n+2)-\frac{1}{d} \big[1+(-1)^{n+1}(d-1)^{n+2}\big]. \end{equation} Specifically, if $V\subset {\mathbb C} P^{n+1}$ is a smooth degree $d$ projective hypersurface, one has: \begin{equation}\label{bsm} b_n(V)=\frac{(d-1)^{n+2}+(-1)^{n+1}}{d}+\frac{3(-1)^n+1}{2}. \end{equation}
The case when $V$ has only isolated singularities was studied by Dimca \cite{Di0,Di}, (see also \cite{Mi} and \cite{ST}) while projective hypersurfaces with a one-dimensional singular locus have been more recently considered by Siersma-Tib\u{a}r \cite{ST}.
In the singular case, let us fix a Whitney stratification ${\mathcal V}$ of $V$ and consider a one-parameter smoothing of degree $d$, namely $$V_t:=\{f_t=f-tg=0\}\subset {\mathbb C} P^{n+1} \ \ (t \in {\mathbb C}),$$ for $g$ a general polynomial of degree $d$. Here, the meaning of ``general'' is that the hypersurface $W:=\{g=0\}$ is smooth and transverse to all strata in the stratification ${\mathcal V}$ of $V$.
Then, for $t \neq 0$ small enough, all the $V_t$ are smooth and transverse to the stratification ${\mathcal V}$. Let $$B=\{f=g=0\}$$ be the base locus (axis) of the pencil. Consider the incidence variety $$V_D:=\{(x,t)\in {\mathbb C} P^{n+1} \times D \mid x \in V_t \}$$ with $D$ a small disc centered at $0 \in {\mathbb C}$ so that $V_t$ is smooth for all $t \in D^*:=D\setminus \{0\}$. Denote by $\pi:V_D \to D$ the proper projection map, and note that $V=V_0=\pi^{-1}(0)$ and $V_t=\pi^{-1}(t)$ for all $t \in D^*$. In what follows we write $V$ for $V_0$ and use $V_t$ for a smoothing of $V$ (i.e., with $t \in D^*$). In this setup, one can define the Deligne vanishing cycle complex of the family $\pi$, see \cite[Section 10.3]{M} for a quick introduction. More precisely, one has a bounded constructible complex $$\varphi_\pi \underline{{\mathbb Z}}_{V_D} \in D^b_c(V)$$ on the hypersurface $V$, whose hypercohomology groups fit into a long exact sequence (called the {\it specialization sequence}): \begin{equation}\label{spec} \cdots \longrightarrow H^k(V;{\mathbb Z}) \overset{sp^k}{\longrightarrow} H^k(V_t;{\mathbb Z}) \overset{\alpha^k}{\longrightarrow} {\mathbb H}^k(V; \varphi_\pi \underline{{\mathbb Z}}_{V_D}) \longrightarrow H^{k+1}(V;{\mathbb Z}) \overset{sp^{k+1}}{\longrightarrow} \cdots \end{equation} The maps $sp^k$ are called the {\it specialization} morphisms, while the $\alpha^k$'s are usually referred to as the {\it canonical} maps. For any integer $k$, we define $$H^k_\varphi(V):={\mathbb H}^k(V; \varphi_\pi \underline{{\mathbb Z}}_{V_D})$$ and call it the {\it $k$-th vanishing cohomology group of $V$}. This is an invariant of $V$, i.e., it does not depend on the choice of a particular smoothing of degree $d$ (since all smooth hypersurfaces of a fixed degree are diffeomorphic). By its very definition, the vanishing cohomology measures the difference between the topology of a given projective hypersurface $V$ and that of a smooth hypersurface of the same degree.
\begin{rem}\label{r1} Since the incidence variety $V_D=\pi^{-1}(D)$ deformation retracts to $V=\pi^{-1}(0)$, and the {\it specialization map} $sp^k:H^k(V;{\mathbb Z}) \to H^k(V_t;{\mathbb Z})$ of \eqref{spec} factorizes as $$H^k(V;{\mathbb Z}) \overset{\cong}{\longrightarrow} H^k(V_D;{\mathbb Z}) \longrightarrow H^k(V_t;{\mathbb Z})$$ with $H^k(V_D;{\mathbb Z}) \to H^k(V_t;{\mathbb Z})$ induced by the inclusion map, it follows readily that the vanishing cohomology of $V$ can be identified with the relative cohomology of the pair $(V_D,V_t)$, i.e., \begin{equation} H^k_\varphi(V) \cong H^{k+1}(V_D,V_t;{\mathbb Z}).\end{equation} In particular, the groups $H^k_\varphi(V)$ are the cohomological version of the {\it vanishing homology groups} $$H_k^{\curlyvee}(V):=H_{k}(V_D,V_t;{\mathbb Z})$$ introduced and studied in \cite{ST} in special situations. For the purpose of computing Betti numbers of projective hypersurfaces, the two ``vanishing'' theories yield the same answer, but additional care is needed to handle torsion when computing the actual integral cohomology groups. \end{rem}
Our first result gives the concentration degrees of the vanishing cohomology of a projective hypersurface in terms of the dimension of the singular locus. \begin{thm}\label{th1} Let $V \subset {\mathbb C} P^{n+1}$ be a reduced complex projective hypersurface with $s=\dim V_{\rm sing}$ the complex dimension of its singular locus. Then \begin{equation} H^k_\varphi(V) \cong 0 \ \ \text{ for all integers} \ \ k \notin [n, n+s]. \end{equation} Moreover, $H^n_\varphi(V)$ is a free abelian group. \end{thm}
In view of Remark \ref{r1}, one gets by Theorem \ref{th1} and the Universal Coefficient Theorem the concentration degrees of the vanishing homology groups $H_k^{\curlyvee}(V)$ of a projective hypersurface in terms of the dimension of its singular locus: \begin{cor}\label{corh} With the above notations and assumptions, we have that \begin{equation} H_k^{\curlyvee}(V) \cong 0 \ \ \text{ for all integers} \ \ k \notin [n+1, n+s+1]. \end{equation} Moreover, $H_{n+s+1}^{\curlyvee}(V)$ is free. \end{cor}
\begin{rem} In the case when the projective hypersurface $V \subset {\mathbb C} P^{n+1}$ has a $1$-dimensional singular locus, it was shown in \cite[Theorem 4.1]{ST} that $H_k^{\curlyvee}(V) \cong 0$ for all $k \neq n+1, n+2$. Moreover, Theorem 6.1 of \cite{ST} shows that in this case one also has that $H_{n+2}^{\curlyvee}(V)$ is free. So, Corollary \ref{corh} provides a generalization of the results of \cite{ST} to projective hypersurfaces with arbitrary singularities. Nevertheless, the methods used in its proof are fundamentally different from those in \cite{ST}. \end{rem}
As a consequence of Theorem \ref{th1}, the specialization sequence \eqref{spec} together with the fact that the integral cohomology of a smooth projective hypersurface is free, yield the following result on the integral cohomology of a complex projective hypersurface (where the estimate on the $n$-th Betti number uses formula \eqref{bsm}): \begin{cor}\label{corgen} Let $V \subset {\mathbb C} P^{n+1}$ be a degree $d$ reduced projective hypersurface with a singular locus $V_{\rm sing}$ of complex dimension $s$. Then: \begin{itemize} \item[(i)] $H^k(V;{\mathbb Z}) \cong H^k(V_t;{\mathbb Z}) \cong H^k({\mathbb C} P^n;{\mathbb Z})$ \ for all integers $k \notin [n, n+s+1]$. \item[(ii)] $H^n (V;{\mathbb Z}) \cong \ker(\alpha^n)$ is free. \item[(iii)] $H^{n+s+1} (V;{\mathbb Z}) \cong H^{n+s+1} ({\mathbb C} P^n;{\mathbb Z}) \oplus \mathop{{\mathrm{coker}}}\nolimits(\alpha^{n+s})$. \item[(iv)] $H^k(V;{\mathbb Z}) \cong \ker(\alpha^k) \oplus \mathop{{\mathrm{coker}}}\nolimits(\alpha^{k-1})$ for all integers $k \in [n+1,n+s]$, $s\ge 1$.
\end{itemize} In particular, $$b_n(V) \leq b_n(V_t)=\frac{(d-1)^{n+2}+(-1)^{n+1}}{d}+\frac{3(-1)^n+1}{2},$$ and $$b_{k}(V) \leq \mathop{{\mathrm{rank}}}\nolimits \ H^{k-1}_{\varphi}(V) + b_{k}({\mathbb C} P^{n}) \ \ \text{ for all integers} \ k \in [n+1,n+s+1], \ s\ge 0.$$ \end{cor}
The homological version of the specialization sequence \eqref{spec} identifies with the long exact sequence of the pair $(V_{D}, V_{t})$, namely: \[ \cdots \to H_{k+1}(V_{t};{\mathbb Z}) \to H_{k+1}(V_{D};{\mathbb Z}) \to H_{k+1}^{\curlyvee}(V;{\mathbb Z}) \ \overset{\alpha_{k}}{\longrightarrow} \ H_{k}(V_{t};{\mathbb Z}) \to \cdots \] The inclusions $V_{t}\hookrightarrow V_{D}\hookrightarrow {\mathbb C} P^{n+1}\times D$ induce in homology a commutative triangle, where $H_{k}(V_{t};{\mathbb Z}) \to H_{k}({\mathbb C} P^{n+1}\times D;{\mathbb Z})$ is injective for $k\not= n$ (by the Lefschetz Theorem for $k<n$, and it is multiplication by $d$ for $k>n$, see e.g. Remark \ref{Katoh} for the homological version of the proof of Theorem \ref{Kato}). This shows that the morphism $H_{k}(V_{t};{\mathbb Z}) \to H_{k}(V_{D};{\mathbb Z})$ is also injective for all $k\not= n$, and therefore $\alpha_{k} =0$ for $k\not= n$. Consequently, the above long exact sequence splits into a $5$-term exact sequence, and short exact sequences: \begin{equation}\label{sp1} \begin{split}
0 \to H_{n+1}(V_{t};{\mathbb Z}) \to H_{n+1}(V;{\mathbb Z}) \to H_{n+1}^{\curlyvee}(V;{\mathbb Z}) \stackrel{\alpha_{n}}{\to}
H_{n}(V_{t};{\mathbb Z}) \rightarrow H_{n}(V;{\mathbb Z}) \to 0. \\
0 \to H_{k}(V_{t};{\mathbb Z}) \to H_{k}(V_{D};{\mathbb Z}) \to H_{k}^{\curlyvee}(V;{\mathbb Z}) \to 0 \ \ \ \ \ \ \ \ \mbox{ for } \ k\ge n+1. \end{split} \end{equation} We then get the following homological version of Corollary \ref{corgen}(i-iv), with the same upper bounds for Betti numbers, but with an interesting improvement for (iii) and (iv) showing more explicitly the dependence of the homology of $V$ on the vanishing homology groups:
\begin{cor}\label{corgenhom} Let $V \subset {\mathbb C} P^{n+1}$ be a degree $d$ reduced projective hypersurface with a singular locus $V_{\rm sing}$ of complex dimension $s$. Then:
\begin{itemize} \item[(i')] $H_{k}(V;{\mathbb Z}) \cong H_{k}(V_t;{\mathbb Z}) \cong H_{k}({\mathbb C} P^n;{\mathbb Z})$ \ for all $k\le n-1$ and all $k\ge n+s+2$. \item[(ii')] $H_{n}(V;{\mathbb Z}) \cong \mathop{{\mathrm{coker}}}\nolimits(\alpha_{n})$. \item[(iii')] $H_{n+1}(V;{\mathbb Z}) \cong \ker(\alpha_{n}) \oplus H_{n+1}({\mathbb C} P^n;{\mathbb Z})$. \item[(iv')] $H_{k}(V;{\mathbb Z}) \cong H_k^{\curlyvee}(V;{\mathbb Z}) \oplus H_{k}({\mathbb C} P^n;{\mathbb Z})$, for all $ n+2 \le k \le n+s+1$, whenever $s\ge 1$, \\ and $H_{n+s+1}(V;{\mathbb Z})$ is free. \end{itemize} \end{cor}
The ranks of the (possibly non-trivial) vanishing (co)homology groups can be estimated in terms of the local topology of singular strata and of their generic transversal types by making use of the hypercohomology spectral sequence. Such estimates can be made precise for hypersurfaces with low-dimensional singular loci.
Concretely, as special cases of Corollaries \ref{corgen} and \ref{corgenhom}, in Section \ref{bounds} we recast Siersma-Tib\u{a}r's \cite{ST} result for $s\le 1$, and in particular Dimca's \cite{Di0,Di} computation for $s=0$. Concerning the estimation of the rank of the highest interesting (co)homology group, we prove the following general result:
\begin{thm}\label{th2} Let $V \subset {\mathbb C} P^{n+1}$ be a degree $d$ reduced projective hypersurface with a singular locus $V_{\rm sing}$ of complex dimension $s$. For each connected stratum $S_i \subseteq V_{\rm sing}$ of top dimension $s$ in a Whitney stratification of $V$, let $F_i^\pitchfork$ denote its transversal Milnor fiber with corresponding Milnor number $\mu_i^\pitchfork$. Then: \begin{equation}\label{bt} b_{n+s+1}(V) \leq 1+ \sum_i \mu_i^\pitchfork, \end{equation}
and the inequality is strict for $n+s$ even. \end{thm}
In fact, the inequality in \eqref{bt} is deduced from
\begin{equation}\label{btb} b_{n+s+1}(V) \leq 1+\mathop{{\mathrm{rank}}}\nolimits \ H^{n+s}_{\varphi}(V),\end{equation} together with \begin{equation} \mathop{{\mathrm{rank}}}\nolimits \ H^{n+s}_{\varphi}(V) \leq \sum_i \mu_i^\pitchfork, \end{equation} and the inequality \eqref{btb} is strict for $n+s$ even. For further refinements of Theorem \ref{th2}, see Remark \ref{rem31}. Note also that if $s=0$, i.e., $V$ has only isolated singularities, then $\mu_i^\pitchfork$ is just the usual Milnor number of such a singularity of $V$.
Let us remark that if the projective hypersurface $V \subset {\mathbb C} P^{n+1}$ has singularities in codimension $1$, i.e., $s=n-1$, then $b_{n+s+1}(V)=b_{2n}(V)=r$, where $r$ denotes the number of irreducible components of $V$. Indeed, in this case, one has (e.g., see \cite[(5.2.9)]{Di}): \begin{equation}\label{top} H^{2n}(V;{\mathbb Z}) \cong {\mathbb Z}^{r}.\end{equation} In particular, Theorem \ref{th2} yields the following generalization of \cite[Corollary 7.6]{ST}: \begin{cor} If the reduced projective hypersurface $V \subset {\mathbb C} P^{n+1}$ has singularities in codimension $1$, then the number $r$ of irreducible components of $V$ satisfies the inequality: \begin{equation} r \leq 1+\sum_i \mu_i^\pitchfork. \end{equation} \end{cor}
\begin{rem}\label{fr} Note that if the projective hypersurface $V \subset {\mathbb C} P^{n+1}$ is a rational homology manifold, then the Lefschetz isomorphism \eqref{one} and Poincar\'e duality over the rationals yield that $b_i(V)=b_i({\mathbb C} P^n)$ for all $i \neq n$. Moreover, $b_n(V)$ can be deduced by computing the Euler characteristic of $V$, e.g., as in \cite[Section 10.4]{M}.\end{rem}
The computation of Betti numbers of a projective hypersurface which is a rational homology manifold can be deduced without appealing to Poincar\'e duality by using the vanishing cohomology instead, as the next result shows: \begin{prop}\label{p1} If the projective hypersurface $V \subset {\mathbb C} P^{n+1}$ is a ${\mathbb Q}$-homology manifold, then $H^k_{\varphi}(V) \otimes {\mathbb Q} \cong 0$ for all $k \neq n$. In particular, in this case one gets: $b_i(V)=b_i(V_t)=b_i({\mathbb C} P^n)$ for all $i \neq n$, and $b_n(V)=b_n(V_t)+\mathop{{\mathrm{rank}}}\nolimits H^n_{\varphi}(V)$. \end{prop}
At this point, we note that Corollary \ref{corgen}(i) reproves Kato's isomorphism \eqref{two} about the integral cohomology of $V$, by using only the integral cohomology of a smooth hypersurface (for this it suffices to rely only on the Lefschetz isomorphism \eqref{one}, its homological version, and Poincar\'e duality). In Section \ref{supLHT}, we give a new proof of Kato's result (see Theorem \ref{Kato}), which relies on the following supplement to the Lefschetz hyperplane section theorem for hypersurfaces, which may be of independent interest: \begin{thm}\label{thapi} Let $V \subset {\mathbb C} P^{n+1}$ be a reduced complex projective hypersurface with $s=\dim V_{\rm sing}$ the complex dimension of its singular locus. (By convention, we set $s=-1$ if $V$ is nonsingular.) Let $H \subset {\mathbb C} P^{n+1}$ be a generic hyperplane. Then \begin{equation}\label{34api} H^k(V,V\cap H; {\mathbb Z})=0 \ \ \text{for} \ \ k < n \ \ \text{and} \ \ n+s+1 < k < 2n. \end{equation} Moreover, $H^{2n}(V,V \cap H; {\mathbb Z})\cong{\mathbb Z}^r$, where $r$ is the number of irreducible components of $V$, and $H^{n}(V,V \cap H; {\mathbb Z})$ is (torsion-)free. \end{thm}
Note that the vanishing \eqref{34api} for $k<n$ is equivalent to the classical Lefschetz hyperplane section theorem. The proof of \eqref{34api} for $n+s+1 < k < 2n$ reduces to understanding the homotopy type of the complement of a smooth affine hypersurface transversal to the hyperplane at infinity; see \cite[Corollary 1.2]{Lib} for such a description. Homological counterparts of Theorem \ref{thapi} and of Kato's result are also explained in Section \ref{supLHT}, see Corollary \ref{corap} and Remark \ref{Katoh}.
Finally, let us note that similar techniques apply to the study of Milnor fiber cohomology of complex hypersurface singularity germs. This is addressed by the authors in the follow-up paper \cite{MPT} (see also \cite{ST0} for the case of $1$-dimensional singularities).
\noindent{\bf Acknowledgements.} L. Maxim thanks the Sydney Mathematical Research Institute (SMRI) for support and hospitality, and J\"org Sch\"urmann for useful discussions.
\section{Concentration degrees of vanishing cohomology}
The proof of Theorem \ref{th1} makes use of the formalism of perverse sheaves and their relation to vanishing cycles, see \cite{Di1,M} for a brief introduction.
\subsection{Proof of Theorem \ref{th1}} By definition, the incidence variety $V_D$ is a complete intersection of pure complex dimension $n+1$. It is non-singular if $V=V_0$ has only isolated singularities, but otherwise it has singularities where the base locus $B=V\cap W$ of the pencil $\{f_t\}_{t\in D}$ intersects the singular locus $\Sigma:=V_{\rm sing}$ of $V$.
If $\underline{{\mathbb Z}}_{V_D}$ denotes the constant sheaf with stalk ${\mathbb Z}$ on the complete intersection $V_D$, a result of L\^e \cite{Le} implies that the complex $\underline{{\mathbb Z}}_{V_D}[n+1]$ is a perverse sheaf on $V_D$. It then follows that $\varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]$ is a ${\mathbb Z}$-perverse sheaf on $\pi^{-1}(0)=V$ (see, e.g., \cite[Theorem 10.3.13]{M} and the references therein).
Recall that the stalks of the cohomology sheaves of $\varphi_\pi \underline{{\mathbb Z}}_{V_D}$ at a point $x \in V$ are computed by (e.g., see \cite[(10.20)]{M}): \begin{equation} {\mathcal H}^j(\varphi_\pi \underline{{\mathbb Z}}_{V_D})_x \cong H^{j+1}(B_{x}, B_{x}\cap V_t;{\mathbb Z}), \end{equation} where $B_{x}$ denotes the intersection of $V_D$ with a sufficiently small ball in some chosen affine chart ${\mathbb C}^{n+1} \times D$ of the ambient space ${\mathbb C} P^{n+1} \times D$ (hence $B_x$ is contractible). Here $B_{x}\cap V_t=F_{\pi,x}$ is the Milnor fiber of $\pi$ at $x$. Let us now consider the function $$h=f/g:{\mathbb C} P^{n+1} \setminus W \to {\mathbb C}$$ where $W:=\{g=0\}$, and note that $h^{-1}(0)=V\setminus B$ with $B=V\cap W$ the base locus of the pencil. If $x \in V \setminus B$, then in a neighborhood of $x$ one can describe $V_t$ ($t \in D^*$) as $$\{x \mid f_t(x)=0\}=\{x \mid h(x)=t\},$$ i.e., as the Milnor fiber of $h$ at $x$. Note also that $h$ defines $V$ in a neighborhood of $x \notin B$. Since the Milnor fiber of a complex hypersurface singularity germ does not depend on the choice of a local equation (e.g., see \cite[Remark 3.1.8]{Di}), we can therefore use $h$ or a local representative of $f$ when considering Milnor fibers (of $\pi$) at points in $V \setminus B$. From here on we will use the notation $F_x$ for the Milnor fiber of the hypersurface singularity germ $(V,x)$, and we note for future reference that the above discussion also yields that $F_x$ is a manifold, which moreover is contractible if $x \in V \setminus B$ is a smooth point.
It was shown in \cite[Proposition 5.1]{PP} (see also \cite[Proposition 4.1]{MSS} or \cite[Lemma 4.2]{ST}) that there are no vanishing cycles along the base locus $B$, i.e., \begin{equation} \varphi_\pi \underline{{\mathbb Z}}_{V_D} \vert_B \simeq 0.\end{equation} Therefore, if $u:V\setminus B \hookrightarrow V$ is the open inclusion, we get that \begin{equation}\label{s6} \varphi_\pi \underline{{\mathbb Z}}_{V_D} \simeq u_! u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}. \end{equation} Since pullback to open subvarieties preserves perverse sheaves, we note that $u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]$ is a perverse sheaf on the {\it affine} variety $V \setminus B$. Artin's vanishing theorem for perverse sheaves (e.g., \cite[Corollary 6.0.4]{Sc}) then implies that:
\begingroup \allowdisplaybreaks \begin{equation}\label{s1} \begin{split}
H^k_\varphi(V)
& := {\mathbb H}^{k}(V; \varphi_\pi \underline{{\mathbb Z}}_{V_D}) \\
& \cong {\mathbb H}^{k-n}(V; \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]) \\
& \cong {\mathbb H}^{k-n}(V; u_! u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]) \\
& \cong {\mathbb H}_c^{k-n}(V \setminus B; u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]) \\ & \cong 0 \end{split} \end{equation} \endgroup
for all $k-n<0$, or equivalently, for all $k<n$.
Contractibility of Milnor fibers at smooth points of $V \setminus B$ implies that the support of $\varphi_\pi \underline{{\mathbb Z}}_{V_D}$ is in fact contained in $\Sigma \setminus B$, with $\Sigma$ denoting as before the singular locus of $V$. In particular, if $v:\Sigma \setminus B \hookrightarrow V\setminus B$ is the closed inclusion, then \begin{equation}\label{s5} u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D} \simeq v_!v^*u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}.\end{equation} Next, consider the composition of inclusion maps $$\Sigma \setminus B \overset{q}{\hookrightarrow}\Sigma \overset{p}{\hookrightarrow} V$$ with $p\circ q=u \circ v$. By using \eqref{s6} and \eqref{s5}, we get: \begingroup \allowdisplaybreaks \begin{equation}\label{s3} \begin{split} \varphi_\pi \underline{{\mathbb Z}}_{V_D} & \simeq u_!v_!v^* u^*\varphi_\pi \underline{{\mathbb Z}}_{V_D} \\ & \simeq (u\circ v)_! (u\circ v)^* \varphi_\pi \underline{{\mathbb Z}}_{V_D} \\ & \simeq (p\circ q)_! (p\circ q)^* \varphi_\pi \underline{{\mathbb Z}}_{V_D} \\ & \simeq p_!q_!q^*p^* \varphi_\pi \underline{{\mathbb Z}}_{V_D} \\ &\simeq p_*p^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}, \end{split} \end{equation} \endgroup where the last isomorphism uses the fact that $p^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}$ is supported on $\Sigma \setminus B$, hence $p^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}\simeq q_!q^*p^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}$. Since the support of the perverse sheaf $\varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]$ on $V$ is contained in the closed subset $\Sigma$, we get that $p^*\varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]$ is a perverse sheaf on $\Sigma$ (e.g., see \cite[Corollary 8.2.10]{M}). Since the complex dimension of $\Sigma$ is $s$, the support condition for perverse sheaves together with the hypercohomology spectral sequence yield that $${\mathbb H}^{\ell}(\Sigma; p^*\varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]) \cong 0$$ for all $\ell \notin [-s,s]$. 
This implies by \eqref{s3} that \begin{equation}\label{s2} H^k_\varphi(V)={\mathbb H}^{k-n}(V; \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]) \cong {\mathbb H}^{k-n}(\Sigma;p^*\varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]) \cong 0\end{equation} for all $k \notin [n-s,n+s]$.
The desired concentration degrees for the vanishing cohomology is now obtained by combining \eqref{s1} and \eqref{s2}.
Let us finally show that $H^n_\varphi(V)$ is free. Fix a Whitney stratification ${\mathcal V}$ of $V$, so that $V \setminus \Sigma$ is the top stratum. (Note that together with $\pi^{-1}(D^*)$, this also yields a Whitney stratification of $V_D$.) Since $W$ intersects $V$ transversally (i.e., $W$ intersects each stratum $S$ in ${\mathcal V}$ transversally in ${\mathbb C} P^{n+1}$), we can assume without any loss of generality that the base locus $B=V \cap W$ is a closed union of strata of ${\mathcal V}$. Next, we have by \eqref{s1}
that $$ H^n_\varphi(V) \cong {\mathbb H}_c^{0}(V \setminus B; u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]),$$
with $${\mathcal P}:=u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n]$$ a ${\mathbb Z}$-perverse sheaf on the affine variety $V \setminus B$ and $u:V \setminus B \hookrightarrow V$ the open inclusion. In particular, this implies that if $S \in {\mathcal V}$ is any stratum in $V \setminus B$ with inclusion $i_S:S \hookrightarrow V \setminus B$ then ${\mathcal H}^k(i_S^!{\mathcal P}) \simeq 0$ for all integers $k<-\dim_{{\mathbb C}} S$. By the Artin-Grothendieck type result of \cite[Corollary 6.0.4]{Sc}, in order to show that ${\mathbb H}^0_c(V\setminus B;{\mathcal P})$ is free it suffices to check that the perverse sheaf ${\mathcal P}$ satisfies the following costalk condition (see \cite[Example 6.0.2(3)]{Sc}):\footnote{We thank J\"org Sch\"urmann for indicating the relevant references to us.} \begin{equation}\label{co1} {\mathcal H}^{-\dim_{{\mathbb C}} S}(i_S^!{\mathcal P})_x \ \text{ is free } \end{equation} for any point $x$ in any stratum $S$ in $V \setminus B$ with inclusion $i_S:S \hookrightarrow V \setminus B$. Let us now fix a stratum $S \in {\mathcal V}$ contained in $V \setminus B$ and let $x \in S$ be a point with inclusion map $k_x:\{x\} \hookrightarrow S$. Consider the composition $i_x:=i_S \circ k_x: \{x\} \hookrightarrow V \setminus B$. Using the fact that $$k_x^*i_S^! \simeq k_x^!i_S^! [2 \dim_{{\mathbb C}} S] \simeq i_x^! [2 \dim_{{\mathbb C}} S]$$ (e.g., see \cite[Remark 6.0.2(1)]{Sc}), the condition \eqref{co1} for $x \in S$ is equivalent to the following: \begin{equation}\label{co2} {\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal P}) \ \text{ is free}. \end{equation} In fact, the above discussion applies to any algebraically constructible complex ${\mathcal F}^\centerdot \in {^pD}^{\geq 0}$, with $({^pD}^{\leq 0}, {^pD}^{\geq 0})$ denoting the perverse t-structure on $D^b_c(V \setminus B)$. 
Furthermore, in our setup (i.e., working with PID coefficients and having finitely generated stalk cohomology) ${\mathcal F}^\centerdot \in {^pD}^{\geq 0}$ satisfies the additional costalk condition \eqref{co1} (or, equivalently, \eqref{co2}) if and only if the Verdier dual ${\mathcal D}{\mathcal F}^\centerdot $ satisfies ${\mathcal D}{\mathcal F}^\centerdot \in {^pD}^{\leq 0}$.
Let $i:V=V_0 \hookrightarrow V_D$ denote the closed inclusion, and consider the following {\it variation triangle} for the projection map $\pi:V_D \to D$: \begin{equation}\label{var} i^![1] \longrightarrow \varphi_\pi \overset{var}{\longrightarrow} \psi_\pi \overset{[1]}{\longrightarrow} \end{equation} with $\psi_\pi $ denoting the corresponding nearby cycle functor for $\pi$ (e.g., see \cite[(5.90)]{Sc}). Apply the functor $u^!=u^*$ to the triangle \eqref{var}, and then apply the resulting triangle of functors to the complex $\underline{{\mathbb Z}}_{V_D}[n]$ to get the following triangle of constructible complexes on $V\setminus B$: \begin{equation}\label{var2} {\mathcal Z}:=u^!i^!\underline{{\mathbb Z}}_{V_D}[n+1] \longrightarrow {\mathcal P}:=u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}[n] \longrightarrow {\mathcal R}:=u^* \psi_\pi \underline{{\mathbb Z}}_{V_D}[n] \overset{[1]}{\longrightarrow} \end{equation} Let $x \in S$ be a point in a stratum of $V \setminus B$ with inclusion map $i_x:\{x\} \hookrightarrow V \setminus B$ as before, and apply the functor $i_x^!$ to the triangle \eqref{var2} to get the triangle: \begin{equation}\label{var3} i_x^!{\mathcal Z} \longrightarrow i_x^!{\mathcal P} \longrightarrow i_x^!{\mathcal R} \overset{[1]}{\longrightarrow} \end{equation} The cohomology long exact sequence associated to \eqref{var3} contains the terms $$\cdots \longrightarrow {\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal Z}) \longrightarrow {\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal P}) \longrightarrow {\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal R}) \longrightarrow \cdots$$ Since the category of (torsion-)free abelian groups is closed under extensions, in order to prove \eqref{co2} it suffices to check that ${\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal Z})$ and ${\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal R})$ are (torsion-)free. (Note that, in fact, all costalks in question are finitely generated.)
Let us first show that ${\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal Z})$ is free. Regard the stratum $S$ containing $x$ as a stratum in $V_D$, and let $r_x:\{x\} \to V_D$ be the point inclusion, i.e., $r_x=i\circ u \circ i_x$. So $i_x^!{\mathcal Z}=r_x^!\underline{{\mathbb Z}}_{V_D}[n+1]$. Recall that $\underline{{\mathbb Z}}_{V_D}[n+1]$ is a ${\mathbb Z}$-perverse sheaf on $V_D$, i.e., $\underline{{\mathbb Z}}_{V_D}[n+1] \in {^pD}^{\leq 0}(V_D) \cap{^pD}^{\geq 0}(V_D)$. As already indicated above, in order to show that ${\mathcal H}^{\dim_{{\mathbb C}} S}(r_x^!\underline{{\mathbb Z}}_{V_D}[n+1])$ is free it suffices to verify that ${\mathcal D}(\underline{{\mathbb Z}}_{V_D}[n+1]) \in {^pD}^{\leq 0}(V_D)$, or equivalently, ${\mathcal D}\underline{{\mathbb Z}}_{V_D} \in {^pD}^{\leq -n-1}(V_D)$. This fact is a consequence of \cite[Definition 6.0.4, Example 6.0.11]{Sc}, where it is shown that the complete intersection $V_D$ has a {\it rectified homological depth} equal to its complex dimension $n+1$.
Next note that, due to the local product structure, the Milnor fiber $F_x$ of the hypersurface singularity germ $(V,x)$ with $x \in S$ has the homotopy type of a finite CW complex of real dimension $n-\dim_{{\mathbb C}} S$. In particular, $H_{n-\dim_{{\mathbb C}}S}(F_x;{\mathbb Z})$ is free. Since by the costalk calculation (cf. \cite[(5.92)]{Sc}) and Poincar\'e duality we have for $x\in S$ that \begin{equation} {\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal R}) \cong H_c^{n+\dim_{{\mathbb C}}S}(F_x;{\mathbb Z}) \cong H_{n-\dim_{{\mathbb C}}S}(F_x;{\mathbb Z}), \end{equation} it follows that ${\mathcal H}^{\dim_{{\mathbb C}} S}(i_x^!{\mathcal R})$ is free. This completes the proof of Theorem \ref{th1}.
\subsection{Proof of Proposition \ref{p1}} Since $V$ is a ${\mathbb Q}$-homology manifold, it follows by standard arguments involving the Hamm fibration (e.g., see \cite[Theorem 3.2.12]{Di}) that $V_D$ is also a ${\mathbb Q}$-homology manifold (with boundary). Thus $\underline{{\mathbb Q}}_{V_D}[n+1]$ is a self-dual ${\mathbb Q}$-perverse sheaf on $V_D$. Moreover, since $\varphi_\pi[-1]$ commutes with the Verdier dualizing functor (see \cite[Theorem 3.1]{Ma} and the references therein), we get that ${\mathcal Q}:=\varphi_\pi\underline{{\mathbb Q}}_{V_D}[n]$ is a Verdier self-dual perverse sheaf on $V$. Using the Universal Coefficients Theorem, we obtain: $$H^k_{\varphi}(V) \otimes {\mathbb Q}={\mathbb H}^{k-n}(V;{\mathcal Q})\cong {\mathbb H}^{k-n}(V;{\mathcal D}{\mathcal Q}) \cong {\mathbb H}^{n-k}(V;{\mathcal Q})^\vee = (H^{2n-k}_{\varphi}(V)\otimes {\mathbb Q})^\vee.$$ The desired vanishing follows now from Theorem \ref{th1}.
\section{Bounds on Betti numbers of projective hypersurfaces}\label{bounds} In this section, we prove Theorem \ref{th2} and specialize it, along with Corollary \ref{corgen}, in the case when the complex dimension $s$ of the singular locus is $\leq 1$.
\subsection{Proof of Theorem \ref{th2}} Let $\Sigma:=V_{\rm sing}$ be the singular locus of $V$, of complex dimension $s$, and fix a Whitney stratification ${\mathcal V}$ of $V$ so that $V \setminus \Sigma$ is the top open stratum. We have by Corollary \ref{corgen} (or by the specialization sequence \eqref{spec}) that $$b_{n+s+1}(V) \leq 1+\mathop{{\mathrm{rank}}}\nolimits \ H^{n+s}_{\varphi}(V).$$ So it suffices to show that \begin{equation}\label{rvg} \mathop{{\mathrm{rank}}}\nolimits \ H^{n+s}_{\varphi}(V) \leq \sum_i \mu_i^\pitchfork, \end{equation} where the summation on the right-hand side runs over the top $s$-dimensional connected strata $S_i$ of $\Sigma$, and $\mu_i^\pitchfork$ denotes the corresponding transversal Milnor number for such a stratum $S_i$.
If $s=0$, an easy computation shows that \eqref{rvg} is in fact an equality, see \eqref{nee} below. Let us next investigate the case when $s\geq 1$.
For any $\ell \leq s$, denote by $\Sigma_{\ell}$ the union of strata in $\Sigma$ of complex dimension $\leq \ell$. In particular, we can filter $\Sigma$ by closed (possibly empty) subsets $$\Sigma=\Sigma_s \supset \Sigma_{s-1} \supset \cdots \supset \Sigma_0 \supset \Sigma_{-1}=\emptyset.$$ Let $$U_\ell:=\Sigma_{\ell} \setminus \Sigma_{\ell-1}$$ be the union of $\ell$-dimensional strata, so $\Sigma_\ell=\sqcup_{k\leq \ell} U_k$. (Here, $\sqcup$ denotes disjoint union.) Recall that the smooth hypersurface $W=\{g=0\}$ was chosen so that it intersects each stratum in $\Sigma$ transversally.
In the notations of the proof of Theorem \ref {th1} , it follows from equations \eqref{s1} and \eqref{s5} that: $$ H^{n+s}_\varphi(V) \cong {\mathbb H}_c^{n+s}(V \setminus B; u^* \varphi_\pi \underline{{\mathbb Z}}_{V_D}) \cong {\mathbb H}_c^{n+s}(\Sigma \setminus B; v^* u^*\varphi_\pi \underline{{\mathbb Z}}_{V_D}),$$ with $B=V \cap W$ the axis of the pencil, and with $v:\Sigma\setminus B \hookrightarrow V \setminus B$ and $u:V \setminus B \hookrightarrow V$ the inclusion maps. We also noted that either $h$ or a local representative of $f$ can be used when considering Milnor fibers of $\pi$ at points in $V \setminus B$. For simplicity, let us use the notation $${\mathcal R} := v^* u^*\varphi_\pi \underline{{\mathbb Z}}_{V_D} \in D^b_c(\Sigma \setminus B),$$ and consider the part of the long exact sequence for the compactly supported hypercohomology of ${\mathcal R}$ associated to the disjoint union $$\Sigma \setminus B= (U_s \setminus B) \sqcup (\Sigma_{s-1} \setminus B)$$ involving $H^{n+s}_{\varphi}(V)$, namely: $$\cdots \to {\mathbb H}^{n+s}_c(U_s \setminus B; {\mathcal R}) \to H^{n+s}_{\varphi}(V) \to {\mathbb H}^{n+s}_c(\Sigma_{s-1} \setminus B; {\mathcal R}) \to \cdots$$
We claim that
\begin{equation}\label{cl1}
{\mathbb H}^{n+s}_c(\Sigma_{s-1} \setminus B; {\mathcal R}) \cong 0,
\end{equation}
so, in particular, there is an epimorphism:
\begin{equation}\label{nee2}{\mathbb H}^{n+s}_c(U_s \setminus B; {\mathcal R}) \twoheadrightarrow H^{n+s}_{\varphi}(V).\end{equation}
In order to prove \eqref{cl1}, consider
the part of the long exact sequence for the compactly supported hypercohomology of ${\mathcal R}$ associated to the disjoint union $$\Sigma_{s-1} \setminus B= (U_{s-1} \setminus B) \sqcup (\Sigma_{s-2} \setminus B)$$ involving $ {\mathbb H}^{n+s}_c(\Sigma_{s-1} \setminus B; {\mathcal R})$, namely: $$\cdots \to {\mathbb H}^{n+s}_c(U_{s-1} \setminus B; {\mathcal R}) \to {\mathbb H}^{n+s}_c(\Sigma_{s-1} \setminus B; {\mathcal R}) \to {\mathbb H}^{n+s}_c(\Sigma_{s-2} \setminus B; {\mathcal R}) \to \cdots$$ We first show that \begin{equation}\label{cl2}{\mathbb H}^{n+s}_c(U_{s-1} \setminus B; {\mathcal R}) \cong 0.\end{equation} Indeed, the $(p,q)$-entry in the $E_2$-term of the hypercohomology spectral sequence computing ${\mathbb H}^{n+s}_c(U_{s-1} \setminus B; {\mathcal R})$ is given by $$E^{p,q}_2=H^p_c(U_{s-1} \setminus B; {\mathcal H}^q( {\mathcal R})),$$ and we are interested in those pairs of integers $(p,q)$ with $p+q=n+s$. Since a point in a $(s-1)$-dimensional stratum of $V$ has a Milnor fiber which has the homotopy type of a finite CW complex of real dimension $n-s+1$, it follows that $${\mathcal H}^q( {\mathcal R}) \vert_{U_{s-1} \setminus B}\simeq 0 \ \ \text{ for any } \ q>n-s+1.$$ Also, by reasons of dimension, we have that $E_2^{p,q}=0$ if $p> 2s-2$. In particular, the only possibly non-trivial entries on the $E_2$-page of the above spectral sequence are those corresponding to pairs $(p,q)$ with $p\leq 2s-2$ and $q\leq n-s+1$, none of which add up to $n+s$. This proves \eqref{cl2}. If $s=1$, this completes the proof of \eqref{cl1} since $\Sigma_{-1}=\emptyset$. 
If $s>1$, the long exact sequences for the compactly supported hypercohomology of ${\mathcal R}$ associated to the disjoint union $$\Sigma_{\ell} \setminus B= (U_{\ell} \setminus B) \sqcup (\Sigma_{\ell-1} \setminus B),$$ $0 \leq \ell \leq s-1$, can be employed to reduce the proof of \eqref{cl1} to showing that \begin{equation}\label{cl3}{\mathbb H}^{n+s}_c(U_{\ell} \setminus B; {\mathcal R}) \cong 0\end{equation} for all $0 \leq \ell \leq s-1$. To prove \eqref{cl3}, we make use of the hypercohomology spectral sequence whose $E_2$-term is computed by $$E^{p,q}_2=H^p_c(U_{\ell} \setminus B; {\mathcal H}^q( {\mathcal R})),$$ and we are interested again in those pairs of integers $(p,q)$ with $p+q=n+s$. Since a point in an $\ell$-dimensional stratum of $V$ has a Milnor fiber which has the homotopy type of a finite CW complex of real dimension $n-\ell$, it follows that $${\mathcal H}^q( {\mathcal R}) \vert_{U_{\ell} \setminus B}\simeq 0 \ \ \text{ for any } \ q>n-\ell.$$ Moreover, by reasons of dimension, $E_2^{p,q}=0$ if $p> 2\ell$. So the only possibly non-trivial entries on the $E_2$-page are those corresponding to pairs $(p,q)$ with $p\leq 2\ell$ and $q\leq n-\ell$, none of which add up to $n+s$. This proves \eqref{cl3}, and completes the proof of \eqref{cl1} in the general case.
In order to prove \eqref{rvg}, we make use of the epimorphism \eqref{nee2} as follows. Recall that, in our notations, $U_s \setminus B$ is a disjoint union of connected strata $S_i \setminus B$ of complex dimension $s$. Each $S_i \setminus B$ has a generic transversal Milnor fiber $F_i^\pitchfork$, which has the homotopy type of a bouquet of $\mu_i^\pitchfork$ $(n-s)$-dimensional spheres. So the integral cohomology of $F_i^\pitchfork$ is concentrated in degree $n-s$. Moreover, for each $i$, there is a local system ${\mathcal L}_i^\pitchfork$ on $S_i \setminus B$ with stalk $\widetilde{H}^{n-s}(F_i^\pitchfork;{\mathbb Z})$, whose monodromy is usually referred to as the {\it vertical monodromy}. This is exactly the restriction of the constructible sheaf ${\mathcal H}^{n-s}( {\mathcal R})$ to $S_i \setminus B$. It then follows from the hypercohomology spectral sequence computing ${\mathbb H}^{n+s}_c(U_s \setminus B; {\mathcal R}) $ and by Poincar\'e duality that \begin{equation}\label{last}{\mathbb H}^{n+s}_c(U_s \setminus B; {\mathcal R}) \cong \bigoplus_i \ H^{2s}_c(S_i \setminus B;{\mathcal L}_i^\pitchfork) \cong \bigoplus_i \ H_0(S_i \setminus B;{\mathcal L}_i^\pitchfork)\end{equation} which readily gives \eqref{rvg}. $\square$
\begin{rem}\label{rem31} Note that the upper bound on $b_{n+s+1}(V)$ can be formulated entirely in terms of coinvariants of vertical monodromies along the top dimensional singular strata of $V$. Indeed, if in the notations of the above proof we further let $h_i^v$ denote the vertical monodromy along $S_i \setminus B$, then each term on the right-hand side of \eqref{last} is computed by the coinvariants of $h_i^v$, i.e., $H_0(S_i \setminus B;{\mathcal L}_i^\pitchfork) \cong \widetilde{H}^{n-s}(F_i^\pitchfork;{\mathbb Z})_{h^v_i}.$ Note that the latter statement, when combined with \eqref{nee2}, yields an epimorphism \begin{equation}\label{nee2b} \bigoplus_i \widetilde{H}^{n-s}(F_i^\pitchfork;{\mathbb Z})_{h^v_i} \twoheadrightarrow H^{n+s}_{\varphi}(V),\end{equation} the summation on the left hand side being over the top dimensional singular strata of $V$. One can, moreover, proceed like in \cite{MPT} and give a more precise dependence of all (possibly non-trivial) vanishing cohomology groups $H^{k}_{\varphi}(V)$, $n \leq k \leq n+s$, in terms of the singular strata of $V$. We leave the details to the interested reader. \end{rem}
\subsection{Isolated singularities} Assume that the projective hypersurface $V \subset {\mathbb C} P^{n+1}$ has only isolated singularities (i.e., $s=0$). Then the incidence variety $V_D$ is smooth since the pencil has an empty base locus, and the projection $\pi:V_D \to D$ has isolated singularities exactly at the singular points of $V$.
The only non-trivial vanishing homology group, $H_{n+1}^{\curlyvee}(V)$, is free, and is computed as: \begin{equation}\label{neehom} H_{n+1}^{\curlyvee}(V) \cong \bigoplus_{x \in V_{\rm sing}} \widetilde{H}_{n}(F_x;{\mathbb Z}) \cong \bigoplus_{x \in V_{\rm sing}} {\mathbb Z}^{\mu_x}, \end{equation} where $F_x$ denotes the Milnor fiber of the isolated hypersurface singularity germ $(V,x)$, with corresponding Milnor number $\mu_x$. The second isomorphism follows from the fact that $F_x$ has the homotopy type of a bouquet of $\mu_x$ $n$-spheres.
The $5$-term exact sequence \eqref{sp1} then reads as: \begin{equation}\label{speciso} 0 \to H_{n+1}(V_t;{\mathbb Z}) \to H_{n+1}(V;{\mathbb Z}) {\to} \bigoplus_{x \in V_{\rm sing}} \widetilde{H}_{n}(F_x;{\mathbb Z}) \overset{\alpha_{n}}{\to} H_{n}(V_t;{\mathbb Z}) \to H_{n}(V;{\mathbb Z}) \to 0. \end{equation}
Therefore Corollary \ref{corgen}(i)--(iii), together with the following bound via Theorem \ref{th2}:
$$b_{n+1}(V) \leq 1+\sum_{x \in V_{\rm sing}} \mu_x,$$
recover \cite[Proposition 2.2]{ST}, which in turn is a homology counterpart of Dimca's result \cite[Theorem 5.4.3]{Di}.
In fact, Dimca's result was formulated in cohomology, and it is a direct
consequence of the specialization sequence \eqref{spec} via Theorem \ref{th1}, together with the observation that the only non-trivial vanishing cohomology group, $H^n_{\varphi}(V)$, is computed as: \begin{equation}\label{nee} H^n_{\varphi}(V) \cong \bigoplus_{x \in V_{\rm sing}} \widetilde{H}^n(F_x;{\mathbb Z}).\end{equation}
\begin{rem}\label{rem3.3} Let us recall here that if $V\subset {\mathbb C} P^{n+1}$ is a degree $d$ reduced projective hypersurface with only isolated singularities, then its Euler characteristic is computed by the formula (e.g., see \cite[Exercise 5.3.7(i) and Corollary 5.4.4]{Di} or \cite[Proposition 10.4.2]{M}): \begin{equation}\label{chii} \chi(V)=(n+2)-\frac{1}{d} \big[1+(-1)^{n+1}(d-1)^{n+2}\big] +(-1)^{n+1}\sum_{x \in V_{\rm sing}} \mu_x, \end{equation} with $\mu_x$ denoting as before the Milnor number of the isolated hypersurface singularity germ $(V,x)$. In particular, if $V$ is a projective {\it curve} (i.e., $n=1$), then $H_{0}(V;{\mathbb Z}) \cong {\mathbb Z}$, $H_{2}(V;{\mathbb Z})\cong {\mathbb Z}^r$, with $r$ denoting the number of irreducible components of $V$, and $H_{1}(V;{\mathbb Z})$ is a free group whose rank is computed from \eqref{chii} by the formula: \begin{equation}\label{b1}b_1(V)=r+1+d^2-3d-\sum_{x \in V_{\rm sing}} \mu_x.\end{equation} \end{rem}
\subsection{$1$-dimensional singular locus}
This particular case was treated in homology in \cite[Proposition 7.7]{ST}. Let us recall the preliminaries, in order to point out once more that in this paper we have transposed them to a fully general setting.
One starts with $V \subset {\mathbb C} P^{n+1}$, a degree $d$ projective hypersurface with a singular locus $\Sigma:=V_{\rm sing}$ of complex dimension $1$. The singular locus $\Sigma$ consists of a union of irreducible projective curves $\Sigma_i$ and a finite set $I$ of isolated singular points. Each curve $\Sigma_i$ has a generic transversal type of transversal Milnor fiber $F_i^\pitchfork \simeq \bigvee_{\mu_i^\pitchfork} S^{n-1}$ with corresponding transversal Milnor number $\mu_i^\pitchfork$. Each $\Sigma_i$ also contains a finite set $S_i$ of special points of non-generic transversal type. One endows $V$ with the Whitney stratification whose strata are: \begin{itemize} \item the isolated singular points in $I$, \item the special points in $S=\bigcup_i S_i$, \item the (top) one-dimensional components of $\Sigma \setminus S$, \item the open stratum $V \setminus \Sigma$. \end{itemize} The genericity of the pencil $\{V_{t}\}_{t\in D}$ implies that the base locus $B$ intersects each $\Sigma_i$ in a finite set $B_i$ of general points, which are not contained in $I \cup S_i$. The total space $V_D$ of the pencil has in this case only isolated singularities (corresponding to the points where $B$ intersects $\Sigma$), and the projection $\pi:V_D \to D$ has a $1$-dimensional singular locus $\Sigma \times \{0\}$.
With the above specified landscape, the Siersma-Tib\u ar result \cite[Proposition 7.7]{ST} reads now as the specialisation for $s=1$ of Corollary \ref{corgenhom}, together with the bound provided by Theorem \ref{th2}.
\section{Examples} In this section we work out a few specific examples. In particular, in \S\ref{quad} we show that the upper bound given by Theorem \ref{th2} is sharp, \S\ref{rathom} deals with a hypersurface which is a rational homology manifold, while \S\ref{projc} discusses the case of a projective cone on a singular curve. However, as pointed out in \cite{Di0} already in the case of isolated singularities, it is difficult in general to compute the integral cohomology of a hypersurface by means of Corollary \ref{corgen}. It is therefore important to also develop alternative methods for exact calculations of cohomology and/or Betti numbers, e.g., see \cite{Di} for special situations.
\subsection{Singular quadrics}\label{quad} Let $n$ and $q$ be integers satisfying $4 \leq q \leq n+1$, and let $$f_q(x_0,\ldots x_{n+1})=\sum_{0\leq i,j \leq n+1} q_{ij} x_i x_j$$ be a quadric of rank $q:=\mathop{{\mathrm{rank}}}\nolimits (Q)$ with $Q=(q_{ij})$. The singular locus $\Sigma$ of the quadric hypersurface $V_q=\{f_q=0\} \subset {\mathbb C} P^{n+1}$ is a linear space of complex dimension $s=n+1-q$ satisfying $0 \leq s \leq n-3$. The generic transversal type for $\Sigma={\mathbb C} P^s$ is an $A_1$-singularity, so $\mu^\pitchfork=1$. Theorem \ref{th2} yields that \begin{equation}\label{ub2} b_{n+s+1}(V_q) \leq 2.\end{equation} In what follows, we show that if the rank $q$ is even (i.e., $n+s+1$ is even), the upper bound on $ b_{n+s+1}(V_q)$ given in \eqref{ub2} is sharp. Indeed, in our notation, the quadric $V_q$ is a projective cone with vertex $\Sigma$ over a smooth quadric $W_q \subset {\mathbb C} P^{n-s}$. Moreover, since $n-s\geq 3$, the homotopy version of the Lefschetz hyperplane theorem yields that $W_q$ is simply-connected (see, e.g., \cite[Theorem 1.6.5]{Di}). Let $U=V_q \setminus \Sigma$ and consider the long exact sequence $$ \cdots \to H^k_c(U;{\mathbb Z}) \to H^k(V_q;{\mathbb Z}) \to H^k(\Sigma;{\mathbb Z}) \to H^{k+1}_c(U;{\mathbb Z}) \to \cdots $$ Note that projecting from $\Sigma$ gives $U$ the structure of a vector bundle of rank $s+1$ over $W_q$. Let $p:U \to W_q$ denote the bundle map. Then $$H^k_c(U;{\mathbb Z})\cong H^k(W_q;Rp_!\underline{{\mathbb Z}}_U)$$ can be computed by the corresponding hypercohomology spectral sequence (i.e., the compactly supported Leray-Serre spectral sequence of the map $p$), with $E^{a,b}_2=H^a(W_q;R^bp_!\underline{{\mathbb Z}}_U)$. Since $\pi_1(W_q)=0$, the local system $R^bp_!\underline{{\mathbb Z}}_U$ is constant on $W_q$ with stalk $H^b_c({\mathbb C}^{s+1};{\mathbb Z})$. 
Since the latter is ${\mathbb Z}$ if $b=2s+2$ and $0$ otherwise, the above spectral sequence yields isomorphisms $H^k_c(U;{\mathbb Z}) \cong H^{k-2-2s}(W_q;{\mathbb Z})$ if $k \geq 2s+2$ and $H^k_c(U;{\mathbb Z}) \cong 0$ if $k < 2s+2$. On the other hand, $H^k(\Sigma;{\mathbb Z})=0$ if $k>2s$, so the above long exact sequence yields: \begin{equation} H^k(V_q;{\mathbb Z})\cong \begin{cases} H^k(\Sigma;{\mathbb Z}) & 0 \leq k \leq 2s \\ 0 & k=2s+1 \\ H^{k-2-2s}(W_q;{\mathbb Z}) & 2s+2 \leq k \leq 2n. \end{cases} \end{equation} Since $W_q$ is a smooth quadric, its integral cohomology is known from \eqref{one}, \eqref{two} and \eqref{bsm}. Altogether, this gives: \begin{equation} H^k(V_q;{\mathbb Z})\cong \begin{cases} 0 & k \text{ odd} \\ {\mathbb Z} & k \text{ even}, \ k \neq n+s+1 \\ {\mathbb Z}^2 & k= n+s+1 \text{ even}. \end{cases} \end{equation}
\subsection{One-dimensional singular locus with a two-step filtration}\label{rathom} Let $V=\{f=0\}\subset {\mathbb C} P^4$ be the $3$-fold in homogeneous coordinates $[x:y:z:t:v]$, defined by $$f=y^2z+x^3+tx^2+v^3.$$ The singular locus of $V$ is the projective line $\Sigma=\{[0:0:z:t:0] \mid z,t \in {\mathbb C}\}$. By \eqref{one}, we get: $b_0(V)=1$, $b_1(V)=0$, $b_2(V)=1$. Since $V$ is irreducible, \eqref{top} yields: $b_6(V)=1$. We are therefore interested in understanding the Betti numbers $b_3(V)$, $b_4(V)$ and $b_5(V)$.
It was shown in \cite[Example 6.1]{M0} that $V$ has a Whitney stratification with strata: $$S_3:=V \setminus \Sigma, \ \ S_1:=\Sigma \setminus [0:0:0:1:0], \ \ S_0:=[0:0:0:1:0],$$ giving $V$ a two-step filtration $V \supset \Sigma \supset [0:0:0:1:0].$
The transversal singularity for the top singular stratum $S_1$ is the Brieskorn type singularity $y^2+x^3+v^3=0$ at the origin of ${\mathbb C}^3$ (in a normal slice to $S_1$), with corresponding transversal Milnor number $\mu_1^\pitchfork =4$. So Theorem \ref{th2} yields that $b_5(V) \leq 5$, while Corollary \ref{corgen} gives $b_3(V) \leq 10$. As we will indicate below, the actual values of $b_3(V)$ and $b_5(V)$ are zero.
It was shown in \cite[Example 6.1]{M0} that the hypersurface $V$ is in fact a ${\mathbb Q}$-homology manifold, so it satisfies Poincar\'e duality over the rationals. In particular, $b_5(V)=b_1(V)=0$ and $b_4(V)=b_2(V)=1$. To determine $b_3(V)$, it suffices to compute the Euler characteristic of $V$, since $\chi(V)=4-b_3(V)$. Let us denote by $Y\subset {\mathbb C} P^4$ a smooth $3$-fold which intersects the Whitney stratification of $V$ transversally. Then \eqref{chi} yields that $\chi(Y)=-6$ and we have by \cite[(10.40)]{M} that \begin{equation}\label{plug} \chi(V)=\chi(Y)-\chi(S_1 \setminus Y) \cdot \mu_1^\pitchfork -\chi(S_0) \cdot (\chi(F_0)-1), \end{equation} where $F_0$ denotes the Milnor fiber of $V$ at the singular point $S_0$. As shown in \cite[Example 6.1]{M0}, $F_0 \simeq S^3 \vee S^3$. So, using the fact that the general $3$-fold $Y$ intersects $S_1$ at $3$ points, we get from \eqref{plug} that $\chi(V)=4$. Therefore, $b_3(V)=0$, as claimed. Moreover, since $H^3(V;{\mathbb Z})$ is free, this also shows that in fact $H^3(V;{\mathbb Z})\cong 0$.
\begin{rem} Note that the hypersurface of the previous example has the same Betti numbers as ${\mathbb C} P^3$. This fact can also be checked directly, by noting that the monodromy operator acting on the reduced homology of the Milnor fiber of $f$ at the origin in ${\mathbb C}^5$ has no eigenvalue equal to $1$ (see \cite[Corollary 5.2.22]{Di}).
More generally, consider a degree $d$ homogeneous polynomial $g(x_0,\ldots, x_n)$ with associated Milnor fiber $F_g$ such that the monodromy operator $h_*$ acting on $\widetilde{H}_*(F_g;{\mathbb Q})$ is the identity. Then the hypersurface $V=\{g(x_0,\ldots, x_n)+x_{n+1}^{d}=0\}\subset {\mathbb C} P^{n+1}$ has the same ${\mathbb Q}$-(co)homology as ${\mathbb C} P^n$. For example, the hypersurface $V_n=\{x_0x_1\ldots x_n+x_{n+1}^{n+1}=0\}$ has singularities in codimension $2$, but the same ${\mathbb Q}$-(co)homology as ${\mathbb C} P^n$. However, $V_n$ does not have in general the ${\mathbb Z}$-(co)homology of ${\mathbb C} P^n$; indeed, $H^3(V_2;{\mathbb Z})$ contains $3$-torsion (cf. \cite[Proposition 5.4.8]{Di}). \end{rem}
\subsection{Projective cone on a curve}\label{projc} The projective curve $C=\{xyz=0\}\subset {\mathbb C} P^2$ has three irreducible components and three singularities of type $A_1$ (each having a corresponding Milnor number equal to $1$). Therefore, by Remark \ref{rem3.3} and formula \eqref{b1}, the integral cohomology of $C$ is given by: $$H^0(C;{\mathbb Z})\cong {\mathbb Z}, \ H^1(C;{\mathbb Z}) \cong {\mathbb Z}, \ H^2(C;{\mathbb Z})\cong {\mathbb Z}^3.$$ The projective cone on $C$ is the surface $V=\{xyz=0\}\subset {\mathbb C} P^3$. The singular locus of $V$ consists of three projective lines intersecting at the point $[0:0:0:1]$, each having a (generic) transversal singularity of type $A_1$, i.e., with corresponding transversal Milnor number equal to $1$. By \cite[(5.4.18)]{Di}, we have that $$H^k(V;{\mathbb Z}) \cong H^{k-2}(C;{\mathbb Z}), \ \ \text{for all} \ k \geq 2.$$ Together with \eqref{one}, this yields: \begin{equation}\label{comp1} H^0(V;{\mathbb Z})\cong {\mathbb Z}, \ H^1(V;{\mathbb Z})\cong 0, \ H^2(V;{\mathbb Z})\cong {\mathbb Z}, \ H^3(V;{\mathbb Z}) \cong {\mathbb Z}, \ H^4(V;{\mathbb Z})\cong {\mathbb Z}^3. \end{equation} By Theorem \ref{th1}, the only non-trivial vanishing cohomology groups of $V$ are $H^2_{\varphi}(V)$, which is free, and $H^3_{\varphi}(V)$. These can be explicitly computed by using \eqref{bsm}, \eqref{sp1} and \eqref{comp1}, to get: $$H^2_{\varphi}(V)\cong {\mathbb Z}^7, \ H^3_{\varphi}(V)\cong {\mathbb Z}^2$$ (compare with \cite[Example 7.5]{ST}).
\section{Supplement to the Lefschetz hyperplane theorem and applications}\label{supLHT} In this section, we give a new proof of Kato's result mentioned in the Introduction. Our proof is different from that of \cite[Theorem 5.2.11]{Di}, and it relies on a supplement to the Lefschetz hyperplane section theorem (Theorem \ref{thapi}), which is proved in Theorem \ref{thap} below.
\subsection{A supplement to the Lefschetz hyperplane theorem} In this section, we prove the following result of Lefschetz type: \begin{thm}\label{thap} Let $V \subset {\mathbb C} P^{n+1}$ be a reduced complex projective hypersurface with $s=\dim V_{\rm sing}$ the complex dimension of its singular locus. (By convention, we set $s=-1$ if $V$ is nonsingular.) Let $H \subset {\mathbb C} P^{n+1}$ be a generic hyperplane (i.e., transversal to a Whitney stratification of $V$), and denote by $V_H:=V\cap H$ the corresponding hyperplane section of $V$. Then \begin{equation}\label{34ap} H^k(V,V_H; {\mathbb Z})=0 \ \ \text{for} \ \ k < n \ \ \text{and} \ \ n+s+1 < k < 2n. \end{equation} Moreover, $H^{2n}(V,V_H; {\mathbb Z})\cong{\mathbb Z}^r$, where $r$ is the number of irreducible components of $V$, and $H^{n}(V,V_H; {\mathbb Z})$ is (torsion-)free. \end{thm} \begin{proof} Let us first note that the long exact sequence for the cohomology of the pair $(V,V_H)$ together with \eqref{top} yield that: $$H^{2n}(V,V_H; {\mathbb Z})\cong H^{2n}(V;{\mathbb Z})\cong{\mathbb Z}^r.$$ Moreover, we have isomorphisms: $$H^k(V,V_H; {\mathbb Z}) \cong H^k_c(V^a;{\mathbb Z}),$$ where $V^a:=V\setminus V_H$. Therefore, the vanishing in \eqref{34ap} for $k<n$ is a consequence of the Artin vanishing theorem (e.g., see \cite[Corollary 6.0.4]{Sc}) for the perverse sheaf $\underline{{\mathbb Z}}_{V^a}[n]$ (cf. \cite{Le}) on the affine hypersurface $V^a$ obtained from $V$ by removing the hyperplane section $V_H$. Indeed, $$H^k_c(V^a;{\mathbb Z})={\mathbb H}^{k-n}_c(V^a;\underline{{\mathbb Z}}_{V^a}[n]) \cong 0$$ for all $k-n<0$. (Note that vanishing in this range is equivalent to the classical Lefschetz hyperplane section theorem.)
Since $V$ is reduced, we have that $s<n$. If $n=s+1$ then $n+s+1=2n$ and there is nothing else to prove in \eqref{34ap}. So let us now assume that $n>s+1$. For $n+s+1<k<2n$, we have the following sequence of isomorphisms: \begin{equation}\label{35} \begin{split} H^k(V, V_H; {\mathbb Z}) &\cong H^k(V \cup H, H; {\mathbb Z}) \\ &\cong H_{2n+2-k} ({\mathbb C} P^{n+1}\setminus H, {\mathbb C} P^{n+1} \setminus (V \cup H); {\mathbb Z}) \\ &\cong H_{2n+1-k}({\mathbb C} P^{n+1}\setminus (V \cup H); {\mathbb Z}), \end{split} \end{equation} where the first isomorphism follows by excision, the second is an application of the Poincar\'e-Alexander-Lefschetz duality, and the third follows from the homology long exact sequence of the pair, using that ${\mathbb C} P^{n+1}\setminus H$ is contractible. Set $$U={\mathbb C} P^{n+1}\setminus (V \cup H),$$ and let $L = {\mathbb C} P^{n-s}$ be a generic linear subspace (i.e., transversal to both $V$ and $H$). Then, by transversality, $L \cap V$ is a nonsingular hypersurface in $L$, transversal to the hyperplane at infinity $L \cap H$ in $L$. Therefore, $U \cap L=L\setminus ((V \cup H) \cap L)$ has the homotopy type of a wedge $$U \cap L \simeq S^1 \vee S^{n-s} \vee \ldots \vee S^{n-s},$$ e.g., see \cite[Corollary 1.2]{Lib}. Thus, by the Lefschetz hyperplane section theorem (applied $s+1$ times), we obtain: $$H_i(U;{\mathbb Z}) \cong H_i(U \cap L;{\mathbb Z}) \cong 0$$ for all integers $i$ in the range $1 < i < n-s$. Substituting $i = 2n+1-k$ in \eqref{35}, we get that $H^k(V, V_H; {\mathbb Z})\cong 0$ for all integers $k$ in the range $n+s+1 < k < 2n$.
It remains to show that $H^{n}(V,V_H; {\mathbb Z})\cong H^n_c(V^a;{\mathbb Z})\cong {\mathbb H}^{0}_c(V^a;\underline{{\mathbb Z}}_{V^a}[n])$ is (torsion-)free. This follows as in the proof of Theorem \ref{th1} since the affine hypersurface $V^a$ has rectified homological depth equal to its complex dimension $n$. This completes the proof of the theorem. \end{proof}
Theorem \ref{thap} and the Universal Coefficient Theorem now yield the following consequence: \begin{cor}\label{corap} In the notations of Theorem \ref{thap} we have that: \begin{equation}\label{36ap} H_k(V,V_H; {\mathbb Z})=0 \ \ \text{for} \ \ k < n \ \ \text{and} \ \ n+s+1 < k < 2n. \end{equation} Moreover, $H_{2n}(V,V_H; {\mathbb Z})\cong{\mathbb Z}^r$, where $r$ is the number of irreducible components of $V$. \end{cor}
\subsection{Kato's theorem for hypersurfaces} The isomorphism \eqref{two} from the introduction was originally proved by Kato \cite{Ka}, and it holds more generally for complete intersections. We derive it here as a consequence of Theorem \ref{thap}.
\begin{thm}[Kato]\label{Kato} Let $V\subset {\mathbb C} P^{n+1}$ be a reduced degree $d$ complex projective hypersurface with $s=\dim V_{\rm sing}$ the complex dimension of its singular locus. (By convention, we set $s=-1$ if $V$ is nonsingular.) Then \begin{equation}\label{twoap} H^k(V;{\mathbb Z}) \cong H^k( {\mathbb C} P^{n+1};{\mathbb Z}) \ \ \text{for all} \ \ n+s+2\leq k\leq 2n. \end{equation} Moreover, if $j:V \hookrightarrow {\mathbb C} P^{n+1}$ denotes the inclusion, the induced cohomology homomorphisms \begin{equation}\label{threeap} j^k:H^k( {\mathbb C} P^{n+1};{\mathbb Z}) \longrightarrow H^k(V;{\mathbb Z}), \ \ n+s+2\leq k\leq 2n, \end{equation} are given by multiplication by $d$ if $k$ is even. \end{thm}
\begin{proof} The statement of the theorem is valid only if $n\geq s+2$, so in particular we can assume that $V$ is irreducible and hence $H^{2n}(V;{\mathbb Z})\cong {\mathbb Z}$. Moreover, the fact that $j^{2n}$ is multiplication by $d=\deg(V)$ is true regardless of the dimension of singular locus, see \cite[(5.2.10)]{Di}. If $n=s+2$ there is nothing else to prove, so we may assume (without any loss of generality) that $n \geq s+3$.
We next proceed by induction on $s$.
If $V$ is nonsingular (i.e., $s=-1$), the assertions are well-known for any $n \geq 1$. We include here a proof for completeness. The isomorphism \eqref{twoap} can be obtained in this case from the Lefschetz isomorphism \eqref{one}, its homology analogue, and Poincar\'e duality. The statement about $j^k$ can also be deduced from \eqref{one} and Poincar\'e duality, but we include here a different argument inspired by \cite{Di}. Consider the isolated singularity at the origin for the affine cone $CV \subset {\mathbb C}^{n+2}$ on $V$, and the corresponding link $L_V:=S^{2n+3} \cap CV$, for $S^{2n+3}$ a small enough sphere at the origin in ${\mathbb C}^{n+2}$. Then $L_V$ is a $(n-1)$-connected closed oriented manifold of real dimension $2n+1$, so its only possibly nontrivial integral (co)homology appears in degrees $0$, $n$, $n+1$ and $2n+1$. The Hopf fibration $S^1 \hookrightarrow S^{2n+3} \longrightarrow {\mathbb C} P^{n+1}$ induces by restriction to $CV$ a corresponding Hopf fibration for $V$, namely $S^1 \hookrightarrow L_V \longrightarrow V$. Then for any $n+1 \leq k \leq 2n-2$, the cohomology Gysin sequences for the diagram of fibrations $$\xymatrix{ S^{2n+3} \ar[r] & {\mathbb C} P^{n+1}\\ L_V \ar[u] \ar[r] & V \ar[u]. }$$ yield commutative diagrams (with ${\mathbb Z}$-coefficients): \begin{equation}\label{Gys} \CD 0=H^{k+1}(S^{2n+3}) @>>> H^k({\mathbb C} P^{n+1}) @>{\psi}>{\cong}> H^{k+2}({\mathbb C} P^{n+1}) @>>> H^{k+2}(S^{2n+3})=0\\ @VVV @V {j^k}VV @V {j^{k+2}}VV @VVV \\ 0=H^{k+1}(L_V) @>>> H^k(V) @>{\psi_V}>{\cong}> H^{k+2}(V) @>>> H^{k+2}(L_V)=0\\ \endCD \end{equation} Here, if $k=2\ell$ is even, the isomorphism $\psi$ is the cup product with the cohomology generator $a\in H^2({\mathbb C} P^{n+1};{\mathbb Z})$, and similarly, $\psi_V$ is the cup product with $j^2(a)$. The assertion about $j^k$ follows now from \eqref{Gys} by decreasing induction on $\ell$, using the fact mentioned at the beginning of the proof that $j^{2n}$ is given by multiplication by $d$.
Let us next choose a generic hyperplane $H \subset {\mathbb C} P^{n+1}$ (i.e., $H$ is transversal to a Whitney stratification of $V$), and set as before $V_H=V \cap H$. It then follows from Theorem \ref{thap} and the cohomology long exact sequence of the pair $(V,V_H)$ that $H^{2n-1}(V;{\mathbb Z}) \cong 0$. It therefore remains to prove \eqref{twoap} and the corresponding assertion about $j^k$ for $k$ in the range for $n+s+2\leq k\leq 2n-2$. Let us consider the commuting square $$\CD V_H @> {\delta} >> H={\mathbb C} P^n\\ @V {\gamma}VV @VVV\\\ V @>> {j} > {\mathbb C} P^{n+1} \endCD$$ and the induced commutative diagram in cohomology: \begin{equation}\label{di} \CD H^k({\mathbb C} P^{n+1};{\mathbb Z}) @> {j^k} >> H^k(V;{\mathbb Z}) \\ @V {\cong}VV @VV{\gamma^k}V\\\ H^k({\mathbb C} P^{n};{\mathbb Z}) @>> {\delta^k} > H^k(V_H;{\mathbb Z}) \endCD \end{equation} By Theorem \ref{thap} and the cohomology long exact sequence of the pair $(V,V_H)$ we get that $\gamma^k$ is an isomorphism for all integers $k$ in the range $n+s+2\leq k\leq 2n-2$. Moreover, since $V_H \subset {\mathbb C} P^n$ is a degree $d$ reduced projective hypersurface with a $(s-1)$-dimensional singular locus (by transversality), the induction hypothesis yields that $H^k(V_H;{\mathbb Z}) \cong H^k({\mathbb C} P^n;{\mathbb Z})$ for $n+s \leq k \leq 2n-2$ and that, in the same range and for $k$ even, the homomorphism $\delta^k$ is given by multiplication by $d$. The commutativity of the above diagram \eqref{di} then yields \eqref{twoap} for all integers $k$ satisfying $n+s+2\leq k\leq 2n-2$, and the corresponding assertion about the induced homomorphism $j^k$ for $k$ even in the same range. This completes the proof of the theorem. \end{proof}
\begin{rem} Let us remark here that the proof of Kato's theorem in \cite[Theorem 5.2.11]{Di} relies on the Kato-Matsumoto result \cite{KM} on the connectivity of the Milnor fiber of the singularity at the origin of the affine cone $CV \subset {\mathbb C}^{n+2}$. \end{rem}
\begin{rem}\label{Katoh}
One can prove the homological version of Theorem \ref{Kato} in a similar manner, namely by using Corollary \ref{corap} instead of Theorem \ref{thap}. This yields the isomorphisms: \begin{equation} H_k(V;{\mathbb Z}) \cong H_k( {\mathbb C} P^{n+1};{\mathbb Z}) \ \ \text{for all} \ \ n+s+2\leq k\leq 2n, \end{equation} and the homomorphisms induced by the inclusion $j:V\hookrightarrow {\mathbb C} P^{n+1}$ in homology are given in this range (and for $k$ even) by multiplication by $d=\deg(V)$. \end{rem}
\begin{rem} We already noted that Theorem \ref{th1} yields the isomorphism \eqref{two} of Kato's theorem (see Corollary \ref{corgen}(i)). On the other hand, Kato's Theorem \ref{Kato} may be used to obtain a weaker version of Theorem \ref{th1} by more elementary means. Indeed, in the notations from the Introduction consider the diagram: $$ \CD H^k({\mathbb C} P^{n+1};{\mathbb Z}) @> {\cong}>> H^k({\mathbb C} P^{n+1} \times D;{\mathbb Z}) @> {b^k} >> H^k(V_D;{\mathbb Z}) @> {c^k} >> H^k(V_t;{\mathbb Z})\\
@. @. @V {\cong}VV @. \\ @. @. H^k(V;{\mathbb Z}) \endCD $$ and let $a^k:=c^k \circ b^k$. By Theorem \ref{Kato}, we have that: \begin{itemize} \item[(i)] $a^k$ is the multiplication by $d$ if $k>n$ even and an isomorphism for $k<n$; \item[(ii)] $b^k$ is the multiplication by $d$ if $n+s+2\leq k \leq 2n$ ($k$ even) and an isomorphism for $k<n$. \end{itemize} Therefore, $c^k$ is an isomorphism if $n+s+2 \leq k \leq 2n$ or $k<n$. The cohomology long exact sequence of the pair $(V_D,V_t)$ then yields that $H^k_{\varphi}(V)\cong H^{k+1}(V_D,V_t;{\mathbb Z})\cong 0$ for all integers $k \notin [n-1,n+s+1]$. \end{rem}
\end{document} |
\begin{document}
\title{Measurement of the short-range attractive force between Ge plates using a torsion balance}
\author{W. J. Kim} \affiliation{Yale University, Department of Physics, P.O. Box 208120, New Haven, CT 06520, USA}
\author{A. O. Sushkov} \affiliation{Yale University, Department of Physics, P.O. Box 208120, New Haven, CT 06520, USA}
\author{D. A. R. Dalvit} \affiliation{Theoretical Division, MS B213, Los Alamos National Laboratory, Los Alamos, NM 87545, USA}
\author{S. K. Lamoreaux} \affiliation{Yale University, Department of Physics, P.O. Box 208120, New Haven, CT 06520, USA}
\date{\today}
\begin{abstract} We have measured the short-range attractive force between crystalline Ge plates, and found contributions from both the Casimir force and an electrical force possibly generated by surface patch potentials. Using a model of surface patch effects that generates an additional force due to a distance dependence of the apparent contact potential, the electrical force was parameterized using data at distances where the Casimir force is relatively small. Extrapolating this model, to provide a correction to the measured force at distances less than 5 $\mu$m, shows a residual force that is in agreement, within experimental uncertainty, with five models that have been used to calculate the Casimir force.
\end{abstract}
\pacs{12.20.Fv, 11.10.Wx, 73.40.Cg, 04.80.Cc}
\maketitle
{\it Introduction.-} The Casimir force has been a subject of great interest, both theoretically and experimentally, because it is a macroscopic manifestation of quantum vacuum effects \cite{Casimir,Milonni,Bordag}, and it can have significant effects in nanomechanical systems. Despite a number of successful measurements celebrated over the last decade \cite{Steve,Casexp}, early investigations of short-range forces \cite{Nancy,Stipe} report the possible systematic effects due to residual electrostatic forces. In particular, the observed variation in the effective contact potential, recently reported in \cite{KimPRARC} and later confirmed in \cite{Sven}, presents a problem of fundamental importance when setting limits to predicted submicron corrections to Newtonian gravity in a Casimir force measurement \cite{Roberto}. The optical response of a particular sample under study must also be carefully considered \cite{Svetovoy}, as the accuracy of data on the optical properties of materials typically limits calculational accuracy to no better than 5\%. In principle, both electric and optical studies of a given sample are subject to a combination of various surface effects of electric origin, and it is important to understand these issues in order to accurately characterize fundamental interactions, such as the Casimir force and non-Newtonian gravity.
{\it Our torsion balance set-up.-} In this Letter, we present results of force measurements between crystalline Ge plates \cite{ispoptics} in a sphere-plane geometry. Our apparatus, shown schematically in Fig. \ref{fig1}, is based on the design presented in \cite{Lamoreaux2} and improves on the apparatus described in \cite{Steve,fan}. On one side of a torsion pendulum a flat Ge plate is mounted, and approached by a Ge plate with a spherical surface, with radius of curvature $R=(15.10\pm0.05)$ cm, mounted on a Thorlabs T25 XYZ motion stage (8 nm resolution). When a force exists between these plates, the torsion body rotates and thereby generates an imbalance in capacitance on the other side of the pendulum, which carries a flat plate, situated in between two fixed ``compensator plates", that are attached to the support frame. An AC voltage is applied to the compensator plates, and the capacitance imbalance creates an AC voltage that is amplified and sent to a phase sensitive detector (PSD), providing an error signal to a proportional-integral-differential (PID) feedback circuit. A small correction voltage ($S_{\rm{PID}}$) is applied to the compensator plates keeping the system in equilibrium. The correction voltage is added to a large constant voltage $V_0 (\approx 9$V) to linearize the restoring force, $F\propto (S_{\rm{PID}}+V_0)^2\approx V_0^2+2V_0S_{\rm PID}$. This correction voltage provides a measure of the force between the Casimir plates and is recorded during the measurement.
\begin{figure}
\caption{Experimental setup of torsion balance (top-view). A pendulum body of length 15 cm hangs from a tungsten wire connected to a motorized rotation stage via the pivot point that is mounted on a support frame. The wire diameter is 25 $\mu$m, with length 2.5 cm, shorter than the previous experiment (66 cm) \cite{Steve} in order to minimize effects of tilt of the apparatus. At the bottom of the pendulum body (not shown in the figure) is a NdFeB magnet to damp the swinging modes of the pendulum at a natural frequency of 3 Hz. The mechanical assembly is covered by a glass bell jar (vacuum $5\times 10^{-7}$ torr) and is supported on a vibration isolation slab that has its foundation separate from the laboratory building.}
\label{fig1}
\end{figure}
The measured signal $S_{\rm{PID}}$ has contributions from several sources: \begin{equation} \label{PID} S_{\rm{PID}}(d,V_{\rm a})=S_{\rm{DC}}(d\rightarrow\infty)+S_{r}(d)+S_{a}(d,V_{\rm{a}}), \end{equation} where $S_{\rm{DC}}$ is the force-free component of the signal at large distances, $S_{r}$ is the residual signal due to distance-dependent forces, such as the Casimir-Lifshitz force, and $S_{a}$ is the signal due to the electrostatic force in response to an applied external voltage $V_{\rm{a}}$. For the sphere-plane geometry, this latter signal can be written in the proximity force approximation (PFA), valid when $d \ll R$, as $S_{a}(d,V_{\rm{a}})=\pi\epsilon_0 R(V_{\rm{a}}-V_{\rm{m}})^2/\beta d$, where $\beta$ is a calibration factor that converts $S_{\rm{PID}}$ in units of voltage to the actual units of force. The electrostatic signal is minimized ($S_{a}=0$) when $V_{\rm{a}}=V_{\rm{m}}$, and the electrostatic minimizing potential $V_{\rm{m}}$ is then defined to be the contact potential between the plates.
A range of plate voltages $V_{\rm{a}}$ is applied, and at a given separation the response $S_{\rm{PID}}$ is fitted to a parabola \begin{equation} S_{\rm{PID}}(d,V_{\rm{a}})=S_0+k(V_{\rm{a}}-V_{\rm{m}})^2. \label{para} \end{equation} The first two terms in Eq. (\ref{PID}) are absorbed in $S_0$ and represent the minimized signal when $V_{\rm{a}}=V_{\rm{m}}$. Repeating the parabola measurements shown in Fig. 2a, sequentially moving from the farthest to closest plate separations, enables us to inspect the $d$ dependence of the fitting parameters $k(d)$, $V_{\rm{m}}(d)$, and $S_0(d)$. The procedure outlined here was first implemented as a calibration routine in \cite{Iannuzzi} and more recently in \cite{KimPRARC} in an effort to detect a distance dependence of $V_{\rm{m}}$.
As the gap between the plates is reduced, the parabola curvature $k$ rapidly increases as shown in Fig. \ref{fig2}b. These curvature values are fitted to $k(d)=\alpha/d$, the expected dependence for the plane-sphere geometry, where the absolute distance $d\equiv d_0-d_{\rm{r}}$ is defined in terms of the asymptotic limit $d_0$ and the relative distance $d_r$ recorded during a parabola measurement. The conversion factor $\beta$ is then obtained through $\alpha\equiv\pi\epsilon_0R/\beta$. Obviously, $\alpha$ can be also used to determine the absolute distance through $d=\alpha/k$, implying a significant correlation of $\alpha$ with $d_0$. Consistency between these two methods of distance determination reflects validity of the use of the $1/d$ power law as implied by a value of $\chi^2_0$ close to unity for our data set. Fig. \ref{fig2}c shows the electric potentials $V_{\rm{m}}$ at minima of the parabola curvatures plotted versus $d$, indicating the distance-dependent minimizing potential $V_{\rm{m}}(d)$, a behavior that has been observed in other experiments \cite{KimPRARC,Sven}.
\begin{figure}\label{fig2}
\end{figure}
To see the trend in $V_{\rm m}(d)$ more clearly and to determine short-range forces with higher statistical accuracy, we have repeated 200 times the experimental sequence described in Fig. \ref{fig2}, yielding a total of 5800 data points. Each group of five data points taken at a given fixed distance with varying applied potential is used to determine the three parabola parameters discussed above, in addition to the force and distance. The mean value of the calibration factor after analyzing all data is $\beta=(1.35\pm0.04)\times 10^{-7}$~N/V. Both the asymptotic limit $d_0$, shown in Fig.~\ref{fig2}b, and the DC offset of the PID signals $S_{\rm{DC}}$ drift slightly during a run. The uncertainty in position is roughly 10 \% at a given distance and about 50 nm at the typical closest gap separation, consistent with the actuator minimum displacement of 40 nm. The DC offset drift has been corrected by monitoring $S_{\rm{PID}}$ before and after each consecutive run and applying a linear correction.
{\it Varying minimizing potential.-} An outstanding feature of our data is the distance variation of the applied voltage $V_{\rm{m}}$ that minimizes the force, as clearly shown in Fig. \ref{fig3}. It must be recognized that this variation can lead to an extra force of electrical origin, as demonstrated in \cite{Lamoreauxcont}. However, the model used in \cite{Lamoreauxcont} assumes that the variation in the minimizing potential is due to a varying contact potential, specifically modeled as a voltage source in series with the plates. The varying minimizing potential observed in our data is more likely due to large-scale gradients in the contact potential across the surface of the plates, due to, for example, polishing stresses or the curvature of the spherical surface plate changing the crystal plane orientation at the surface. Such variations have been observed for many materials \cite{Robertson,INFN}, with typical large scale fluctuations on the order of a few mV. The variation in the apparent contact potential is due to the effective averaging area changing as the curved and flat surfaces of the plates are brought together. A numerical analysis \cite{Note} of a wide range of surface potential variations shows that the variation of $V_{\rm{m}}(d)$ leads to an electrostatic force of the form $F^{\rm el}_{r_1}(d)= \pi R\epsilon_0[V_{\rm{m}}(d)+V_1]^2 / d$, where $V_1$ is a constant offset parameter at large distances that can be determined from the experimental data. The origin of this effect is due to the plate curvature, together with large scale variations in the surface contact potential. Note that although the parabola measurement {\it minimizes} the electrostatic force across the plates, it does not necessarily {\it nullify} all the electric forces that possibly exist. \begin{figure}
\caption{The value of the force minimizing potential as a function of plate separation. The red points are the average of the data. Our measurement reveals a slow rise of the minimizing potential as the plates approach each other, of order of 6 mV over 100 $\mu$m. This variation in our data set shows a similar trend observed in a recent measurement \cite{Sven} where the variation of 6 mV over 1 $\mu$m is reported between Au coated samples, significantly larger than the value found in our study.}
\label{fig3}
\end{figure}
{\it Random small-scale patches.-} In addition to large scale gradients in the surface potential, there can be small-scale (i.e., much smaller than the plate diameter) random fluctuations in the surface potential associated with strains, irregularities, and impurities. It is straightforward to show that the electrostatic energy per unit area between two flat plates with random patch voltages is \cite{Speake,Note} \begin{equation} E_{\rm patch}(d)=\frac{\epsilon_0 \pi V_{\rm rms}^2}{4 d} \int_0^\infty du S(u/d) \frac{e^{-2u}}{\sinh^2(u)}, \label{patch_residual} \end{equation} where $V_{\rm rms}$ is the rms value of the random patch voltages. For simplicity, we have assumed isotropic patches with surface correlation functions $\langle V_{k,i} V_{k',j} \rangle = V^2_{\rm rms} S(k) \delta_{i,j} \delta(k-k')$, where $i,j=1,2$ denote the plates, and $S(k)$ is the unity-normalized spectral density. The residual electrostatic force between the sphere and the plane due to these patches can be obtained from PFA as $F^{\rm el}_{r_2}(d)=2 \pi R E_{\rm patch}(d)$. For example, for random-voltage patches of radius $\lambda$ uniformly distributed on the surfaces, the spectral density is $S(k) \approx \sin k \lambda / \pi^2 k^2$. It is easy to see that in the limit $\lambda \gg d$ the residual patch force in the sphere-plane geometry scales as $F^{\rm el}_{r_2}(d)=\pi R\epsilon_0 V^2_{\rm{rms}}/d$ \cite{Note}.
{\it Electrostatic residual force.-} We fit the data of the residual force at the minimizing potential (Fig 2.d) with a force of electric origin $F^{\rm el}_r = F_0+F^{\rm el}_{r_1} + F^{\rm el}_{r_2} =F_0+\pi R \epsilon_0 \{[V_{\rm{m}}(d)+V_1]^2 + V^2_{\rm rms} \}/d$, where $F_0$ is an offset parameter at large distances. A least-squares fit of the observed force using data for $d>5$ $\mu$m (a regime where the Casimir force should be vanishingly small) and the measured $V_{\rm{m}}(d)$, while leaving $F_0$, $V_1$, and $V_{\rm{rms}}$ as adjustable fit parameters, yields an excellent description of the observed large distance force \cite{powerlaw}, as shown plotted with the data in Fig. 4. Including data at shorter distances ($d<5$ $\mu$m) causes a significant fit deviation, indicating an interference with the actual Casimir force which is highly nonlinear at short distances. A similar long-range force has been previously observed in the measurement of van der Waals interaction and the corresponding correction is applied to the data based on work function anisotropies and their related patch charges \cite{Nancy}.
{\it Casimir residual force.-} We have tested our data for the presence of a residual Casimir force $F_r^{\rm Cas}(d)$ between the Ge plates, which we have computed in PFA from the plane-plane Casimir-Lifshitz energy, $F^{\rm Cas}_r(d) = 2 \pi R E^{\rm Cas}_{pp}(d)$. We have calculated the corresponding reflection coefficients using five different theoretical models for the Ge plates \cite{Diego, Galina}: ideal dielectric; ideal dielectric + Drude conductivity corrections; ideal dielectric + plasma conductivity corrections; quasi-static Debye-H\"uckel screening model; and charge drift model. Fig. 4 (bottom) shows the experimental data for the residual force after subtraction of the joint (contact potential and patch potential) electrostatic forces, and the theory curves for the Casimir-Lifshitz force between Ge plates at $T=300$ K for these five theoretical models. The error bars take into account all statistical uncertainties (2-3 \%) as well as fitting uncertainties from the electrostatic force analysis (10\%). Within experimental uncertainty, our data agrees well with the theoretical predictions for the Casimir force, but it is not of sufficient accuracy to distinguish among the different models for the Ge plates. High precision measurements of the Casimir force require a careful evaluation of the electrostatic effects considered in detail for the first time in our present study. \begin{figure}\label{fig4}
\end{figure}
{\it Conclusions.-} We have performed measurements of the short-range force between Ge plates in the sphere-plane geometry, and have observed that the potential $V_{\rm{m}}$ that minimizes the electrostatic force depends on the gap between the plates. We have considered two contributions of electric origin present in the residual data for the force. The first contribution is due to large-scale variations in the contact potential along the surface of the plates, that leads to the gap-dependent minimizing potential and, as a result, to an electrostatic force proportional to $(V_{\rm{m}}(d)+V_1)^2/d$. The second contribution can be modeled as arising from potential patches on the surfaces that, in the case when they have typical sizes much smaller than the plate diameters and much larger than the plate separation, leads to a further electrostatic force proportional to $V_{\rm rms}^2/d$. We have fitted our experimental data at large distances ($d>5$ $\mu$m, where the Casimir force is expected to be negligible) with these two electrostatic force effects, and found good agreement between our model and the experimental data. Furthermore, we have subtracted these forces from the data at short separations ($d<5$ $\mu$m) and found a residual force that is in agreement with the theoretical predictions for the Casimir-Lifshitz force between Ge plates. Our measurements do not have enough accuracy to distinguish between the different theoretical models used to characterize the optical properties of the Ge plates. Future measurements are deemed necessary in light of our discussion, in particular to better understand the physical origins of the observed electrostatic forces. We are currently exploring similar surface effects in a pair of Au samples. We acknowledge support from Yale University for the construction of the experimental apparatus and data acquisition, and from the Los Alamos LDRD program. We thank G. Klimchitskaya for useful discussions.
\end{document} |
\begin{document}
\title{Colorful Subhypergraphs in Uniform Hypergraphs}
\begin{abstract} There are several topological results ensuring the existence of a large complete bipartite subgraph in any properly colored graph satisfying some special topological regularity conditions. In view of the $\mathbb{Z}_p$-Tucker lemma, Alishahi and Hajiabolhassan [{\it On the chromatic number of general Kneser hypergraphs, Journal of Combinatorial Theory, Series B, 2015}] introduced a lower bound for the chromatic number of Kneser hypergraphs ${\rm KG}^r({\mathcal H})$. Next, Meunier [{\it Colorful subhypergraphs in Kneser hypergraphs, The Electronic Journal of Combinatorics, 2014}] improved their result by proving that any properly colored general Kneser hypergraph ${\rm KG}^r({\mathcal H})$ contains a large colorful $r$-partite subhypergraph provided that $r$ is prime. In this paper, we give some new generalizations of the $\mathbb{Z}_p$-Tucker lemma, improving Meunier's result in some aspects. Some new lower bounds for the chromatic number and local chromatic number of uniform hypergraphs are presented as well. \\
\noindent{\it Keyword:} chromatic number of hypergraphs, $\mathbb{Z}_p$-Tucker-Ky~Fan lemma, colorful complete hypergraph, $\mathbb{Z}_p$-box-complex, $\mathbb{Z}_p$-hom-complex \end{abstract}
\section{\bf Introduction} \subsection{{\bf Background and Motivations}} In 1955, Kneser~\cite{MR0068536} posed a conjecture about the chromatic number of Kneser graphs. In 1978, Lov{\'a}sz~\cite{MR514625} proved this conjecture by using algebraic topology. Lov{\'a}sz's proof marked the beginning of the history of topological combinatorics. Nowadays, it is an active stream of research to study the coloring properties of graphs by using algebraic topology. There are several lower bounds for the chromatic number of graphs related to the indices of some topological spaces defined based on the structure of graphs. However, for hypergraphs, there are a few such lower bounds, see~\cite{2013arXiv1302.5394A,MR953021,Iriye20131333,MR1081939,MR2279672}.
A {\it hypergraph} ${\mathcal H}$ is a pair $(V({\mathcal H}),E({\mathcal H}))$, where $V({\mathcal H})$ is a finite set, called the vertex set of ${\mathcal H}$, and $E({\mathcal H})$ is a family of nonempty subsets of $V({\mathcal H})$, called the edge set of ${\mathcal H}$. Throughout the paper, by a nonempty hypergraph, we mean a hypergraph with at least one edge. If any edge $e\in E({\mathcal H})$ has the cardinality $r$, then the hypergraph ${\mathcal H}$ is called {\it $r$-uniform.} For a set $U\subseteq V({\mathcal H})$, the {\it induced subhypergraph on $U$,} denoted ${\mathcal H}[U]$, is a hypergraph with the vertex set $U$ and the edge set $\{e\in E({\mathcal H}):\; e\subseteq U\}$. Throughout the paper, by a {\it graph,} we mean a $2$-uniform hypergraph. Let $r\geq 2$ be a positive integer and $q\geq r$ be an integer. An $r$-uniform hypergraph ${\mathcal H}$ is called {\it $q$-partite} with parts $V_1,\ldots,V_q$ if \begin{itemize} \item $V({\mathcal H})=\displaystyle\bigcup_{i=1}^q V_i$ and \item each edge of ${\mathcal H}$ intersects each part $V_i$ in at most one vertex. \end{itemize} If ${\mathcal H}$ contains all possible edges, then we call it a {\it complete $r$-uniform $q$-partite hypergraph.}
Also, we say the hypergraph ${\mathcal H}$ is {\it balanced} if the values of $|V_j|$ for
$j =1,\ldots,q$ differ by at most one, i.e., $|V_i|-|V_j|\leq 1$ for each $i,j\in[q]$.
Let ${\mathcal H}$ be an $r$-uniform hypergraph and $U_1,\ldots, U_q$ be $q$ pairwise disjoint
subsets of $V({\mathcal H})$. The hypergraph ${\mathcal H}[U_1,\ldots, U_q]$ is a subhypergraph of ${\mathcal H}$ with the vertex set $\displaystyle\bigcup_{i=1}^q U_i$ and the edge set $$E({\mathcal H}[U_1,\ldots, U_q])=\left\{e\in E({\mathcal H}):\; e\subseteq \displaystyle\bigcup_{i=1}^q U_i\mbox{ and } |e\cap U_i|\leq 1\mbox{ for each } i\in[q]\right\}.$$ Note that ${\mathcal H}[U_1,\ldots, U_q]$ is an $r$-uniform $q$-partite hypergraph with parts $U_1,\ldots,U_q$. By the symbol ${[n]\choose r}$, we mean the family of all $r$-subsets of the set $[n]$. The hypergraph $K^r_n=\left([n],{[n]\choose r}\right)$ is called the complete $r$-uniform hypergraph with $n$ vertices. For $r=2$, we would rather use $K_n$ instead of $K_n^2$. The largest possible integer $n$ such that ${\mathcal H}$ contains $K^r_n$ as a subhypergraph is called the {\it clique number of ${\mathcal H}$}, denoted $\omega({\mathcal H})$.
A {\it proper $t$-coloring} of a hypergraph ${\mathcal H}$ is a map $c:V({\mathcal H})\longrightarrow [t]$ such that there is no monochromatic edge. The minimum possible such a $t$ is called {\it the chromatic number of ${\mathcal H}$}, denoted $\chi({\mathcal H})$. If there is no such a $t$, we define the chromatic number to be infinite. Let $c$ be a proper coloring of ${\mathcal H}$ and $U_1,\ldots, U_q$ be $q$ pairwise disjoint subsets of $V({\mathcal H})$. The hypergraph ${\mathcal H}[U_1,\ldots, U_q]$ is said to be {\it colorful } if for each $j\in[q]$, the vertices of $U_j$ get pairwise distinct colors. For a properly colored graph $G$, a subgraph is called {\it multicolored} if its vertices get pairwise distinct colors.
For a hypergraph ${\mathcal H}$, {\it the Kneser hypergraph} ${\rm KG}^r({\mathcal H})$ is an $r$-uniform hypergraph with the vertex set $E({\mathcal H})$ and whose edges are formed by $r$ pairwise vertex-disjoint edges of ${\mathcal H}$, i.e., $$E({\rm KG}^r({\mathcal H}))=\left\{ \{e_1,\ldots,e_r\}:\; e_i\cap e_j=\varnothing\mbox{ for each } i\neq j\in[r] \right\}.$$ For any graph $G$, it is known that there are several hypergraphs ${\mathcal H}$ such that ${\rm KG}^2({\mathcal H})$ and $G$ are isomorphic.
The Kneser hypergraph ${\rm KG}^r\left(K_n^k\right)$ is called the ``usual" Kneser hypergraph which is denoted by ${\rm KG}^r(n,k)$. Coloring properties of Kneser hypergraphs have been studied extensively in the literature. Lov\'asz~\cite{MR514625} (for $r=2$) and Alon, Frankl and Lov\'asz~\cite{MR857448} determined the chromatic number of ${\rm KG}^r(n,k)$. For an integer $r\geq 2$, they proved that $$\chi\left({\rm KG}^r(n,k)\right)= \left\lceil{n-r(k-1)\over r-1}\right\rceil.$$ For a hypergraph ${\mathcal H}$, the $r$-colorability defect of ${\mathcal H}$, denoted ${\rm cd}_r({\mathcal H})$, is the minimum number of vertices which should be removed such that the induced hypergraph on the remaining vertices is $r$-colorable, i.e.,
$${\rm cd}_r({\mathcal H})=\min\left\{|U|:\; {\mathcal H}[V({\mathcal H})\setminus U]\mbox{ is $r$-colorable}\right\}.$$ For a hypergraph ${\mathcal H}$, Dol'nikov~\cite{MR953021}~(for $r=2$) and K{\v{r}}{\'{\i}}{\v{z}}~\cite{MR1081939} proved that $$\chi({\rm KG}^r({\mathcal H}))\geq \left\lceil{{\rm cd}_r({\mathcal H})\over r-1}\right\rceil,$$ which is a generalization of the results by Lov\'asz~\cite{MR514625} and Alon, Frankl and Lov\'asz~\cite{MR857448}.
For a positive integer $r$, let $\mathbb{Z}_r=\{\omega,\omega^2,\ldots,\omega^r\}$ be a cyclic group of order $r$ with generator $\omega$. Consider a vector $X=(x_1,x_2,\ldots,x_n)\in(\mathbb{Z}_r\cup\{0\})^n$. An alternating subsequence of $X$ is a sequence $x_{i_1},x_{i_2},\ldots,x_{i_m}$ of nonzero terms of $X$ such that $i_1<\cdots<i_m$ and $x_{i_j}\neq x_{i_{j+1}}$ for each $j\in [m-1]$. We denote by ${\rm alt}(X)$ the maximum possible length of an alternating subsequence of $X$. For a vector $X=(x_1,x_2,\ldots,x_n)\in (\mathbb{Z}_r\cup\{0\})^n$ and for an $\epsilon\in\mathbb{Z}_r$, set $X^\epsilon=\{i\in[n]:\; x_i=\epsilon\}$. Note that, by abuse of notation, we can write $X=(X^\epsilon)_{\epsilon\in \mathbb{Z}_r}$. For two vectors $X,Y\in (\mathbb{Z}_r\cup\{0\})^n$, by $X\subseteq Y$, we mean $X^\epsilon\subseteq Y^\epsilon$ for each $\epsilon\in\mathbb{Z}_r$.
For a hypergraph ${\mathcal H}$ and a bijection $\sigma:[n]\longrightarrow V({\mathcal H})$, define $${\rm alt}_r({\mathcal H},\sigma)=\displaystyle\max\left\{{\rm alt}(X):\; X\in (\mathbb{Z}_r\cup\{0\})^n\mbox{ such that } E({\mathcal H}[\sigma(X^\epsilon)])=\varnothing\mbox{ for each } \epsilon\in\mathbb{Z}_r \right\}.$$ Also, let $${\rm alt}_r({\mathcal H})=\displaystyle\min_{\sigma} {\rm alt}_r({\mathcal H},\sigma),$$
where the minimum is taken over all bijections $\sigma:[n]\longrightarrow V({\mathcal H})$. One can readily check that for any hypergraph ${\mathcal H}$, $|V({\mathcal H})|-{\rm alt}_r({\mathcal H})\geq {\rm cd}_r({\mathcal H})$ and the inequality is often strict, see~\cite{2013arXiv1302.5394A}. Alishahi and Hajiabolhassan~\cite{2013arXiv1302.5394A} improved the Dol'nikov-K{\v{r}}{\'{\i}}{\v{z}} result by proving that
for any hypergraph ${\mathcal H}$ and for any integer $r\geq 2$, the quantity $\left\lceil{ |V({\mathcal H})|-{\rm alt}_r({\mathcal H})\over r-1}\right\rceil$ is a lower bound for the chromatic number of ${\rm KG}^r({\mathcal H})$.
Using this lower bound, the chromatic number of some families of graphs and hypergraphs are computed, see~\cite{2014arXiv1401.0138A,2014arXiv1403.4404A,2014arXiv1407.8035A,2015arXiv150708456A,2013arXiv1302.5394A,HaMe16}. There are some other lower bounds for the chromatic number of graphs which are better than the former discussed lower bounds. They are based on some topological indices of some topological spaces connected to the structure of graphs. In spite of these lower bounds being better, they are~not combinatorial and most of the times they are difficult to compute.
The existence of large colorful bipartite subgraphs in a properly colored graph has been extensively studied in the literature~\cite{2013arXiv1302.5394A,MR2971704,MR2763055,SiTaZs13,MR2279672,MR2351519}. To be more specific, there are several theorems ensuring the existence of a colorful bipartite subgraph in any properly colored graph such that the bipartite subgraph has a specific number of vertices related to some topological parameters connected to the graph. Simonyi and Tardos~\cite{MR2351519} improved Dol'nikov's lower bound and proved that in any proper coloring of a Kneser graph ${\rm KG}^2({\mathcal H})$, there is a multicolored complete bipartite graph $K_{\left\lceil{{\rm cd}_2({\mathcal H})\over 2}\right\rceil,\left\lfloor{{\rm cd}_2({\mathcal H})\over 2}\right\rfloor}$ such that the ${\rm cd}_2({\mathcal H})$ different colors occur alternating on the two parts of the bipartite graph with respect to their natural order. By a combinatorial proof, Alishahi and Hajiabolhassan~\cite{2013arXiv1302.5394A} improved this result. They proved that the result remains true if we replace ${\rm cd}_2({\mathcal H})$ by $n-{\rm alt}_2({\mathcal H})$. Also, a stronger result is proved by Simonyi, Tardif, and Zsb{\'{a}}n~\cite{SiTaZs13}. \begin{alphtheorem}{\rm (Zig-zag Theorem~\cite{SiTaZs13}).}\label{zigzag} Let $G$ be a nonempty graph which is properly colored with an arbitrary number of colors. Then $G$ contains a multicolored complete bipartite subgraph $K_{\lceil{t\over2}\rceil,\lfloor{t\over2}\rfloor}$, where ${\rm Xind}({\rm Hom}(K_2,G))+2= t$. Moreover, colors appear alternating on the two sides of the bipartite subgraph with respect to their natural ordering. \end{alphtheorem}
The quantity ${\rm Xind}({\rm Hom}(K_2,G))$ is the cross-index of hom-complex ${\rm Hom}(K^2,G)$ which will be defined in Subsection~\ref{Boxdefin}. We should mention that there are some other weaker similar results in terms of some other topological parameters, see~\cite{MR2279672,MR2351519}.
Note that prior mentioned results concern the existence of colorful bipartite subgraphs in properly colored graphs ($2$-uniform hypergraphs). In 2014, Meunier~\cite{Meunier14} found the first colorful type result for the uniform hypergraphs. He proved that for any prime number $p$, any properly colored Kneser hypergraph $\operatorname{KG}^p({\mathcal H})$ must contain a colorful balanced complete $p$-uniform $p$-partite subhypergraph with a specific number of vertices, see Theorem~\ref{colorfulhyper}.
\subsection{\bf Main Results} For a given graph $G$, there are several complexes defined based on the structure of $G$. For instance, the box-complex of $G$, denoted ${\rm B}_0(G)$, and the hom-complex of $G$, denoted ${\rm Hom}(K_2,G)$, see~\cite{MR1988723,SiTaZs13,MR2279672}. Also, there are some lower bounds for the chromatic number of graphs related to some indices of these complexes~\cite{SiTaZs13,MR2279672}. In this paper, we naturally generalize the definitions of box-complex and hom-complex of graphs to uniform hypergraphs. Also, the definition of $\mathbb{Z}_p$-cross-index of $\mathbb{Z}_p$-posets will be introduced. Using these complexes, as a first main result of this paper, we generalize Meunier's result~\cite{Meunier14} (Theorem~\ref{colorfulhyper}) to the following theorem. \begin{theorem}\label{maincolorfulindex} Let $r\geq 2$ be a positive integer and $p\geq r$ be a prime number. Assume that ${\mathcal H}$ is an $r$-uniform hypergraph and $c:V({\mathcal H})\longrightarrow[C]$ is a proper coloring of ${\mathcal H}$ {\rm (}$C$ arbitrary{\rm )}. Then we have the following assertions. \begin{itemize} \item[{\rm (i)}] There is some colorful balanced complete $r$-uniform $p$-partite subhypergraph in ${\mathcal H}$ with ${\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))+1$ vertices. In particular, $$\chi({\mathcal H})\geq {{\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))+1\over r-1}.$$ \item[{\rm (ii)}] If $p\leq \omega({\mathcal H})$, then there is some colorful balanced complete $r$-uniform $p$-partite subhypergraph in ${\mathcal H}$ with ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p$ vertices. In particular, $$\chi({\mathcal H})\geq {{\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p\over r-1}.$$ \end{itemize} \end{theorem}
Quantities ${\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))$ and ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))$ appearing in the statement of Theorem~\ref{maincolorfulindex} are respectively the $\mathbb{Z}_p$-index and $\mathbb{Z}_p$-cross-index of the $\mathbb{Z}_p$-box-complex $B_0({\mathcal H},\mathbb{Z}_p)$ and $\mathbb{Z}_p$-hom-complex ${\rm Hom}(K^r_p,{\mathcal H})$ which will be defined in Subsection~\ref{Boxdefin}. Using these complexes, we introduce some new lower bounds for the chromatic number of uniform hypergraphs. In view of Theorem~\ref{maincolorfulindex}, next theorem provides a hierarchy of lower bounds for the chromatic number of $r$-uniform hypergraphs. \begin{theorem}\label{inequalities} Let $r\geq 2$ be a positive integer and $p\geq r$ be a prime number. For any $r$-uniform hypergraph ${\mathcal H}$, we have the following inequalities.\\ {\rm (i)} If $p\leq \omega({\mathcal H})$, then $${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p \geq {\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))+1.$$ {\rm (ii)} If ${\mathcal H}={\rm KG}^r({\mathcal F})$ for some hypergraph ${\mathcal F}$, then $$
{\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))+1
\geq |V({\mathcal F})|-{\rm alt}_p({\mathcal F}) \geq {\rm cd}_p({\mathcal F}). $$ \end{theorem}
In view of Theorem~\ref{inequalities}, Theorem~\ref{maincolorfulindex} is a common extension of Theorem~\ref{zigzag} and Theorem~\ref{colorfulhyper}. Furthermore, for $r=2$, Theorem~\ref{maincolorfulindex} implies the next corollary which also is a generalization of Theorem~\ref{zigzag}. \begin{corollary}\label{cor1} Let $p$ be a prime number and let $G$ be a nonempty graph which is properly colored with arbitrary number of colors. Then there is a multicolored complete $p$-partite subgraph $K_{n_1,n_2,\ldots,n_p}$ of $G$ such that \begin{itemize} \item $\displaystyle\sum_{i=1}^pn_i={\rm ind}_{\mathbb{Z}_p}(B_0(G,\mathbb{Z}_p))+1$,
\item $|n_i-n_j|\leq 1$ for each $i,j\in[p]$. \end{itemize} Moreover, if $p\leq \omega(G)$, then ${\rm ind}_{\mathbb{Z}_p}(B_0(G,\mathbb{Z}_p))+1$ can be replaced with ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K_p,G))+p$. \end{corollary}
In view of the aforementioned results, the following question naturally arises. \begin{question} Do Theorem~\ref{maincolorfulindex} and Theorem~\ref{inequalities} remain true for non-prime $p$? \end{question}
\subsection{\bf Applications to Local Chromatic Number of Uniform Hypergraphs} For a graph $G$ and a vertex $v\in V(G)$, the {\it closed neighborhood of $v$,} denoted $N[v]$, is the set $\{v\}\cup\{u:\; uv\in E(G)\}$. The {\it local chromatic number} of $G$, denoted $\chi_l(G)$, is defined in~\cite{ERDOS198621} as follows:
$$\chi_l(G)=\displaystyle\min_c\max\{|c(N[v])|:\; v\in V(G)\}$$ where the minimum is taken over all proper colorings $c$ of $G$. Note that Theorem~\ref{zigzag} gives the following lower bound for the local chromatic number of a nonempty graph $G$: \begin{equation}\label{localloerzigzag} \chi_l(G)\geq \left\lceil{{\rm Xind}({\rm Hom}(K_2,G))+2\over 2}\right\rceil+1. \end{equation} Note that for a Kneser hypergraph $\operatorname{KG}^2({\mathcal H})$, by using the colorful result of Simonyi and Tardos~\cite{MR2351519} or the extension given by Alishahi and Hajiabolhassan~\cite{2013arXiv1302.5394A}, there are two similar lower bounds for $\chi_l(\operatorname{KG}^2({\mathcal H}))$ which respectively use $\operatorname{cd}_2({\mathcal H})$ and
$|V({\mathcal H})|-\operatorname{alt}_2({\mathcal H})$ instead of ${\rm Xind}({\rm Hom}(K_2,G))+2$. However, as it is stated in Theorem~\ref{inequalities}, the lower bound in terms of ${\rm Xind}({\rm Hom}(K_2,G))+2$ is better than these two last mentioned lower bounds. Using Corollary~\ref{cor1}, we have the following lower bound for the local chromatic number of graphs. \begin{corollary}\label{locallowerp} Let $G$ be a nonempty graph and $p$ be a prime number. Then $$\chi_l(G)\geq t-\left\lfloor{t\over p}\right\rfloor+1,$$ where $t={\rm ind}_{\mathbb{Z}_p}(B_0(G,\mathbb{Z}_p))+1$. Moreover, if $p\leq \omega(G)$, then ${\rm ind}_{\mathbb{Z}_p}(B_0(G,\mathbb{Z}_p))+1$ can be replaced with ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K_p,G))+p$. \end{corollary} Note that if we set $p=2$, then the previous corollary implies the Simonyi and Tardos lower bound for the local chromatic number. Note that, in general, this lower bound might be better than the Simonyi and Tardos lower bound. To see this, let $k\geq 2$ be a fixed integer. Consider the Kneser graph $\operatorname{KG}^2(n,k)$ and let $p=p(n)$ be a prime number such that $p=O(\ln n)$. By Theorem~\ref{inequalities}, for $n\geq pk$, we have $${\rm ind}_{\mathbb{Z}_p}(B_0(\operatorname{KG}^2(n,k),\mathbb{Z}_p))+1\geq \operatorname{cd}_p(K_n^k)=n-p(k-1).$$ Note that the lower bound for $\chi_l(\operatorname{KG}^2(n,k))$ coming from Inequality~\ref{localloerzigzag} is \begin{equation}\label{equation2} 1+\left\lceil{n-2k+2\over 2}\right\rceil={n\over 2}-O(1), \end{equation} while, in view of Corollary~\ref{locallowerp}, we have $$\chi_l(\operatorname{KG}^2(n,k))\geq n-p(k-1)-\left\lfloor{n-p(k-1)\over p}\right\rfloor+1=n-o(n),$$ which is better than the quantity in Equation~\ref{equation2} if $n$ is sufficiently large. 
However, since the induced subgraph on the neighbors of any vertex of $\operatorname{KG}(n,k)$ is isomorphic to $\operatorname{KG}(n-k,k)$, we have $$\chi_l(\operatorname{KG}(n,k))\geq n-3(k-1).$$ \begin{corollary} Let $\mathcal{F}$ be a hypergraph and $\alpha(\mathcal{F})$ be its independence number. Then for any prime number $p$, we have
$$\chi_l(\operatorname{KG}^2(\mathcal{F}))\geq \left\lceil{(p-1)|V(\mathcal{F})|\over p}\right\rceil-(p-1)\cdot\alpha(\mathcal{F})+1.$$ \end{corollary} \begin{proof} In view of Theorem~\ref{inequalities}, we have
$${\rm ind}_{\mathbb{Z}_p}(B_0(\operatorname{KG}^2(\mathcal{F}),\mathbb{Z}_p))+1\geq \operatorname{cd}_p(\mathcal{F})\geq |V(\mathcal{F})|-p\cdot\alpha(\mathcal{F}).$$ Now, Corollary~\ref{locallowerp} implies the assertion. \end{proof} Meunier~\cite{Meunier14} naturally generalized the definition of local chromatic number of graphs to uniform hypergraphs as follows. Let ${\mathcal H}$ be a uniform hypergraph. For a set $X\subseteq V({\mathcal H})$, the closed neighborhood of $X$ is the set $X\cup {\mathcal N}(X),$ where $${\mathcal N}(X)=\{v\in V({\mathcal H}):\; \exists\; e\in E({\mathcal H})\mbox{ such that } e\setminus X=\{v\}\}.$$ For a uniform hypergraph ${\mathcal H}$, the local chromatic number of ${\mathcal H}$ is defined as follows:
$$\chi_l({\mathcal H})=\displaystyle\min_c\max\{|c({\mathcal N}[e\setminus \{v\}])|:\; e\in E({\mathcal H})\mbox{ and } v\in e\},$$ where the minimum is taken over all proper colorings $c$ of ${\mathcal H}$.
Meunier~\cite{Meunier14}, by using his colorful theorem (Theorem~\ref{colorfulhyper}), generalized Simonyi and Tardos lower bound~\cite{MR2351519} for the local chromatic number of Kneser graphs to the local chromatic number of Kneser hypergraphs. He proved: $$
\chi_l(\operatorname{KG}^p({\mathcal H}))\geq \min\left(\left\lceil{|V({\mathcal H})|-\operatorname{alt}_p({\mathcal H})\over p}\right\rceil+1, \left\lceil{|V({\mathcal H})|-\operatorname{alt}_p({\mathcal H})\over p-1}\right\rceil \right)$$ for any hypergraph ${\mathcal H}$ and any prime number $p$. In what follows, we generalize this result. \begin{theorem}\label{localhyper} Let ${\mathcal H}$ be an $r$-uniform hypergraph with at least one edge and $p$ be a prime number, where $r\leq p\leq \omega({\mathcal H})$. Let $t={\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p$. If $t=ap+b$, where $a$ and $b$ are nonnegative integers and $0\leq b\leq p-1$, then $$\chi_l({\mathcal H})\geq \min\left(\left\lceil{(p-r+1)a+\min\{p-r+1,b\}\over r-1}\right\rceil+1, \left\lceil{t\over r-1}\right\rceil \right).$$ \end{theorem} \begin{proof} Let $c$ be an arbitrary proper coloring of ${\mathcal H}$ and let ${\mathcal H}[U_1,\ldots,U_p]$ be the colorful balanced complete $r$-uniform $p$-partite subhypergraph of ${\mathcal H}$ whose existence is ensured by Theorem~\ref{maincolorfulindex}. Note that $b$ of the $U_i$'s, say $U_1,\ldots,U_b$, have cardinality $\lceil{t\over p}\rceil$ while the others have cardinality $\lfloor{t\over p}\rfloor\geq 1$. Consider $U_1,\ldots,U_{p-r+1}$. Two different cases will be distinguished. \begin{itemize}
\item[{\bf Case 1.}] If $\left|\displaystyle\bigcup_{i=1}^{p-r+1} c(U_i)\right|<\left\lceil{t\over r-1}\right\rceil$, then there is a vertex $v\in \displaystyle\bigcup_{i=p-r+2}^p U_i$ whose color is not in $\displaystyle\bigcup_{i=1}^{p-r+1} c(U_i)$. Consider an edge $e$ of ${\mathcal H}[U_1,\ldots,U_p]$ containing $v$
and such that $|e\cap U_{p-r+1}|=1$ and $e\cap U_i=\varnothing$ for $i=1,\ldots, p-r$. Let $e\cap U_{p-r+1}=\{u\}$. One can check that $$\{c(v)\}\cup\displaystyle\left(\bigcup_{i=1}^{p-r+1} c(U_i)\right)\subseteq c({\mathcal N}(e\setminus \{u\})).$$ Therefore, since any color appears in at most $r-1$ of the $U_i$'s, we have
$$\left|\bigcup_{i=1}^{p-r+1} c(U_i)\right|\geq \displaystyle\left\lceil{\sum_{i=1}^{p-r+1}|U_i|\over r-1}\right\rceil,$$ and consequently,
$$|c({\mathcal N}(e\setminus \{u\}))|\geq 1+\displaystyle\left\lceil{\sum_{i=1}^{p-r+1}|U_i|\over r-1}\right\rceil=1+\left\lceil{(p-r+1)a+\min\{p-r+1,b\}\over r-1}\right\rceil,$$ which completes the proof in Case~1.
\item[{\bf Case 2.}] If $\left|\displaystyle\bigcup_{i=1}^{p-r+1} c(U_i)\right|\geq\left\lceil{t\over r-1}\right\rceil$, then consider an edge of ${\mathcal H}[U_1,\ldots,U_p]$ such that
$|e\cap U_{p-r+1}|=1$ and $e\cap U_i=\varnothing$ for $i=1,\ldots, p-r$. Let $e\cap U_{p-r+1}=\{u\}$. One can see that $$\displaystyle\bigcup_{i=1}^{p-r+1} c(U_i)\subseteq c({\mathcal N}(e\setminus \{u\})),$$ which completes the proof in Case~2. \end{itemize} \end{proof}
\begin{corollary} Let ${\mathcal H}$ be a $p$-uniform hypergraph with at least one edge, where $p$ is a prime number. Then $$\chi_l({\mathcal H})\geq \min\left(\left\lceil{{\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^p_p,{\mathcal H}))+p\over p}\right\rceil+1, \left\lceil{{\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^p_p,{\mathcal H}))+p\over p-1}\right\rceil \right).$$ \end{corollary} \begin{proof} Since ${\mathcal H}$ has at least one edge, we have $\omega({\mathcal H})\geq p$. Therefore, in view of Theorem~\ref{localhyper}, we have the assertion. \end{proof} Note that if ${\mathcal H}=\operatorname{KG}^p(\mathcal{F})$, then, in view of Theorem~\ref{inequalities}, we have
$${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^p_p,{\mathcal H}))+p\geq |V(\mathcal{F})|-\operatorname{alt}_p(\mathcal{F}).$$ This implies that the previous corollary is a generalization of Meunier's lower bound for the local chromatic number of $\operatorname{KG}^p(\mathcal{F})$.
\subsection{\bf Plan} Section~\ref{intro} contains some backgrounds and essential definitions used elsewhere in the paper. In Section~\ref{definition}, we present some new topological tools which help us for the proofs of main results. Section~\ref{sec:proofs} is devoted to the proofs of Theorem~\ref{maincolorfulindex} and Theorem~\ref{inequalities}.
\section{\bf Preliminaries}\label{intro}
\subsection{{\bf Topological Indices and Lower Bound for Chromatic Number}}
We assume basic knowledge in combinatorial algebraic topology. Here, we give a brief review of some essential notations and definitions which will be needed throughout the paper. For more details, one can see the book by Matou{\v{s}}ek~\cite{MR1988723}. Also, the definitions of box-complex, hom-complex, and cross-index will be generalized to $\mathbb{Z}_p$-box-complex, $\mathbb{Z}_p$-hom-complex, and $\mathbb{Z}_p$-cross-index, respectively.
Let $\mathbb{G}$ be a finite nontrivial group which acts on a topological space $X$. We call $X$ a {\it topological $\mathbb{G}$-space} if for each $g\in \mathbb{G}$, the map $g:X\longrightarrow X$ given by $x\mapsto g\cdot x$ is continuous. A {\it free topological $\mathbb{G}$-space $X$} is a topological $\mathbb{G}$-space such that $\mathbb{G}$ acts on it freely, i.e., for each $g\in \mathbb{G}\setminus\{e\}$, the map $g:X\longrightarrow X$ has no fixed point. For two topological $\mathbb{G}$-spaces $X$ and $Y$, a continuous map $f:X\longrightarrow Y$ is called a $\mathbb{G}$-map if $f(g\cdot x)=g\cdot f(x)$ for each $g\in \mathbb{G}$ and $x\in X$. We write $X\stackrel{\mathbb{G}}{\longrightarrow} Y$ to indicate that there is a $\mathbb{G}$-map from $X$ to $Y$. A map $f:X\longrightarrow Y$ is called a $\mathbb{G}$-equivariant map, if $f(g\cdot x)=g\cdot f(x)$ for each $g\in \mathbb{G}$ and $x\in X$.
Simplicial complexes provide a bridge between combinatorics and topology. A simplicial complex can be viewed as a combinatorial object, called abstract simplicial complex, or as a topological space, called geometric simplicial complex. Here, we just remind the definition of an abstract simplicial complex. However, we assume that the reader is familiar with the concept of how an abstract simplicial complex and its geometric realization are connected to each other. A {\it simplicial complex} is a pair $(V,K)$, where $V$ is a finite set and $K$ is a family of subsets of $V$ such that if $F\in K$ and $F'\subseteq F$, then $F'\in K$. Any set in $K$ is called a simplex. Since we may assume that $V=\bigcup_{F\in K}F$, we can write $K$ instead of $(V, K)$. The {\it dimension of $K$} is defined as follows:
$${\rm dim}(K)=\max\{|F|-1:\; F\in K\}.$$
The geometric realization of $K$ is denoted by $||K||$. For two simplicial complexes $C$ and $K$, by a {\it simplicial map $f:C\longrightarrow K$,} we mean a map from $V(C)$ to $V(K)$ such that the image of any simplex of $C$ is a simplex of $K$. For a nontrivial finite group $\mathbb{G}$, a {\it simplicial $\mathbb{G}$-complex} $K$ is a simplicial complex with a $\mathbb{G}$-action on its vertices such that each $g\in \mathbb{G}$ induces a simplicial map from $K$ to $K$, that is the map which maps $v$ to $g\cdot v$ for each $v\in V(K)$. If for each $g\in \mathbb{G}\setminus\{e\}$, there is no fixed simplex under the simplicial map made by $g$, then $K$ is called a
{\it free simplicial $\mathbb{G}$-complex.} For a simplicial $\mathbb{G}$-complex $K$, if we take the affine extension, then $K$ is free if and only if $||K||$ is free. For two simplicial $\mathbb{G}$-complexes $C$ and $K$, a simplicial map $f:C\longrightarrow K$ is called a simplicial $\mathbb{G}$-map if $f(g\cdot v)=g\cdot f(v)$ for each $g\in \mathbb{G}$ and $v\in V(C)$. We write $C\stackrel{\mathbb{G}} {\longrightarrow} K$, if there is a simplicial $\mathbb{G}$-map from $C$ to $K$. Note that if $C\stackrel{\mathbb{G}}{\longrightarrow} K$,
then $||C||\stackrel{\mathbb{G}}{\longrightarrow} ||K||$. A map $f:C\longrightarrow K$ is called a $\mathbb{G}$-equivariant map, if $f(g\cdot v)=g\cdot f(v)$ for each $g\in \mathbb{G}$ and $v\in V(C)$.
For an integer $n\geq 0$ and a nontrivial finite group $\mathbb{G}$, an {\it $E_n \mathbb{G}$ space} is a free $(n-1)$-connected $n$-dimensional simplicial $\mathbb{G}$-complex. A concrete example of an $E_n \mathbb{G}$ space is the $(n+1)$-fold join $\mathbb{G}^{*(n+1)}$. As a topological space, $\mathbb{G}^{*(n+1)}$ is the $(n+1)$-fold join of the $|\mathbb{G}|$-point discrete space $\mathbb{G}$. It is known that for any two $E_n \mathbb{G}$ spaces $X$ and $Y$, there is a $\mathbb{G}$-map from $X$ to $Y$.
For a $\mathbb{G}$-space $X$, define $${\rm ind}_{\mathbb{G}}(X)=\min\{n:\; X\stackrel{\mathbb{G}}{\longrightarrow} E_n\mathbb{G}\}.$$ Note that here $E_n \mathbb{G}$ can be any $E_n \mathbb{G}$, since there is a $\mathbb{G}$-map between any two $E_n \mathbb{G}$ spaces, see~\cite{MR1988723}. Also, for a simplicial complex $K$, by ${\rm ind}_{\mathbb{G}}(K)$, we mean
${\rm ind}_{\mathbb{G}}(||K||)$. Throughout the paper, for $\mathbb{G}=\mathbb{Z}_2$, we would rather use ${\rm ind}(-)$ instead of ${\rm ind}_{\mathbb{Z}_2}(-)$.
\noindent{\bf Properties of the $\mathbb{G}$-index.} \cite{MR1988723} Let $\mathbb{G}$ be a finite nontrivial group. \begin{itemize} \item[{\rm (i)}] ${\rm ind}_{\mathbb{G}}(X)>{\rm ind}_{\mathbb{G}}(Y)$ implies $X\stackrel{\mathbb{G}}{\centernot\longrightarrow} Y$. \item[{\rm (ii)}] ${\rm ind}_{\mathbb{G}}(E_n \mathbb{G})=n$ for any $E_n \mathbb{G}$ space. \item[{\rm (iii)}] ${\rm ind}_{\mathbb{G}}(X*Y)\leq {\rm ind}_{\mathbb{G}}(X)+{\rm ind}_{\mathbb{G}}(Y)+1$. \item[{\rm (iv)}] If $X$ is $(n-1)$-connected, then ${\rm ind}_{\mathbb{G}}(X)\geq n$. \item[{\rm (v)}] If $K$ is a free simplicial $\mathbb{G}$-complex of dimension $n$, then ${\rm ind}_{\mathbb{G}}(K)\leq n$. \end{itemize}
\subsection{{\bf $\mathbb{Z}_p$-Box-Complex, $\mathbb{Z}_p$-Poset, and $\mathbb{Z}_p$-Hom-Complex}}\label{Boxdefin} In this subsection, for any $r$-uniform hypergraph ${\mathcal H}$, we are going to define two objects: the $\mathbb{Z}_p$-box-complex of ${\mathcal H}$, which is a simplicial $\mathbb{Z}_p$-complex, and the $\mathbb{Z}_p$-hom-complex of ${\mathcal H}$, which is a $\mathbb{Z}_p$-poset. Moreover, for any $\mathbb{Z}_p$-poset $P$, we assign a combinatorial index to $P$ called the cross-index of $P$. \\
\noindent{\bf $\mathbb{Z}_p$-Box-Complex.} Let $r\geq 2$ be a positive integer and $p\geq r$ be a prime number. For an $r$-uniform hypergraph ${\mathcal H}$, define the {\it $\mathbb{Z}_p$-box-complex of ${\mathcal H}$,} denoted ${\rm B}_0({\mathcal H},{\mathbb{Z}_p})$, to be a simplicial complex with the vertex set $\displaystyle\biguplus_{i=1}^pV({\mathcal H})=\mathbb{Z}_p\times V({\mathcal H})$ and the simplex set consisting of all $\{\omega^1\}\times U_1\cup\cdots\cup \{\omega^p\}\times U_p$, where
\begin{itemize} \item $U_1,\ldots,U_p$ are pairwise disjoint subsets of $V({\mathcal H})$, \item $\displaystyle\bigcup_{i=1}^p U_i\neq\varnothing$, and \item the hypergraph ${\mathcal H}[U_1,U_2,\ldots,U_p]$ is a complete $r$-uniform $p$-partite hypergraph.
\end{itemize} Note that some of the $U_i$'s might be empty. In fact, if $U_1,\ldots,U_p$ are pairwise disjoint subsets of $V({\mathcal H})$ and the number of nonempty $U_i$'s is less than $r$, then ${\mathcal H}[U_1,U_2,\ldots,U_p]$ is a complete $r$-uniform $p$-partite hypergraph and thus $\{\omega^1\}\times U_1\cup\cdots\cup \{\omega^p\}\times U_p\in {\rm B}_0({\mathcal H},{\mathbb{Z}_p})$. For each $\epsilon\in{\mathbb{Z}_p}$ and each $(\epsilon',v)\in V({\rm B}_0({\mathcal H},{\mathbb{Z}_p}))$, define $\epsilon\cdot(\epsilon',v)=(\epsilon\cdot\epsilon',v)$. One can see that this action makes ${\rm B}_0({\mathcal H},{\mathbb{Z}_p})$ a free simplicial $\mathbb{Z}_p$-complex. It should be mentioned that the $\mathbb{Z}_2$-box-complex ${\rm B}_0({\mathcal H},\mathbb{Z}_2)$ is extensively studied in the literature, see~\cite{MR2279672,MR2351519}. In the literature, for a graph $G$, the simplicial complex ${\rm B}_0(G,{\mathbb{Z}_2})$ is denoted by $B_0(G)$. This simplicial complex is used to introduce some lower bounds for the chromatic number of a given graph $G$, see~\cite{MR2279672}. In particular, we have the following inequalities $$\chi(G)\geq {\rm ind}(B_0(G))+1\geq {\rm coind}(B_0(G))+1\geq |V({\mathcal F})|-{\rm alt}_2({\mathcal F})\geq{\rm cd}_2({\mathcal F}),$$ where ${\mathcal F}$ is any hypergraph such that ${\rm KG}^2({\mathcal F})$ and $G$ are isomorphic, see~\cite{2014arXiv1403.4404A,2013arXiv1302.5394A,MR2279672}.\\
\noindent{\bf $\mathbb{Z}_p$-Poset.} A partially ordered set, or simply a {\it poset}, is defined as an ordered pair $P=(V(P),\preceq)$, where $V(P)$ is a set called the ground set of $P$ and $\preceq$ is a partial order on $V(P)$. For two posets $P$ and $Q$, by an order-preserving map $\phi:P\longrightarrow Q$, we mean a map $\phi$ from $V(P)$ to $V(Q)$ such that for each $u,v\in V(P)$, if $u\preceq v$, then $\phi(u)\preceq \phi(v)$.
A poset $P$ is called a {\it $\mathbb{Z}_p$-poset}, if $\mathbb{Z}_p$ acts on $V(P)$
and furthermore, for each $\epsilon\in \mathbb{Z}_p$, the map $\epsilon:V(P)\longrightarrow V(P)$ given by $v\mapsto \epsilon\cdot v$ is an automorphism of $P$ (an order-preserving bijective map). If for each $\epsilon\in \mathbb{Z}_p\setminus\{e\}$, this map has no fixed point, then $P$ is called a {\it free $\mathbb{Z}_p$-poset}. For two $\mathbb{Z}_p$-posets $P$ and $Q$, by an order-preserving $\mathbb{Z}_p$-map $\phi:P\longrightarrow Q$, we mean
an order-preserving map from $V(P)$ to $V(Q)$ such that for each $v\in V(P)$ and $\epsilon\in \mathbb{Z}_p$, we have $\phi(\epsilon\cdot v)=\epsilon\cdot\phi(v)$.
If there exists such a map, we write $P\stackrel{\mathbb{Z}_p}{\longrightarrow} Q$.
For a nonnegative integer $n$ and a prime number $p$, let $Q_{n,p}$ be a free $\mathbb{Z}_p$-poset with ground set $\mathbb{Z}_p\times[n+1]$ such that for any two members $(\epsilon,i),(\epsilon',j)\in Q_{n,p}$, $(\epsilon,i)<_{Q_{n,p}}(\epsilon',j)$ if $i<j$. Clearly, $Q_{n,p}$ is a free $\mathbb{Z}_p$-poset with the action $\epsilon\cdot(\epsilon',j)=(\epsilon\cdot\epsilon',j)$ for each $\epsilon\in\mathbb{Z}_p$ and $(\epsilon',j)\in Q_{n,p}$. For a $\mathbb{Z}_p$-poset $P$, the {\it $\mathbb{Z}_p$-cross-index} of $P$, denoted ${\rm Xind}_{\mathbb{Z}_p}(P)$, is the least integer $n$ such that there is a $\mathbb{Z}_p$-map from $P$ to $Q_{n,p}$. Throughout the paper, for $p=2$, we speak about ${\rm Xind}(-)$ rather than ${\rm Xind}_{\mathbb{Z}_2}(-)$. It should be mentioned that ${\rm Xind}(-)$ is first defined in~\cite{SiTaZs13}.
Let $P$ be a poset. We can define an {\it order complex} $\Delta P$ with the vertex set same as the ground set of $P$ and simplex set consisting of all chains in $P$. One can see that if $P$ is a free $\mathbb{Z}_p$-poset, then $\Delta P$ is a free simplicial $\mathbb{Z}_p$-complex. Moreover, any order-preserving $\mathbb{Z}_p$-map $\phi:P\longrightarrow Q$ can be lifted to a simplicial $\mathbb{Z}_p$-map from $\Delta P$ to $\Delta Q$. Clearly, there is a simplicial $\mathbb{Z}_p$-map from $\Delta Q_{n,p}$ to $\mathbb{Z}_p^{*(n+1)}$ (identity map). Therefore, if ${\rm Xind}_{\mathbb{Z}_p}(P)=n$, then we have a simplicial $\mathbb{Z}_p$-map from $\Delta P$ to $\mathbb{Z}_p^{*(n+1)}$. This implies that ${\rm Xind}_{\mathbb{Z}_p}(P)\geq {\rm ind}_{\mathbb{Z}_p}(\Delta P)$. Throughout the paper, for each $(\epsilon, j)\in Q_{n,p}$, when we speak about the sign of $(\epsilon, j)$ and the absolute value of $(\epsilon, j)$, we mean $\epsilon$ and $j$, respectively.
\begin{alphtheorem}{\rm \cite{AliHajiMeu2016}}\label{altercrossindex} Let $P$ be a free ${\mathbb Z}_2$-poset and $\phi:P\longrightarrow Q_{s,2}$ be an order preserving ${\mathbb Z}_2$-map. Then $P$ contains a chain $p_1\prec_P\cdots\prec_Pp_{ k}$ such that $k= {\rm Xind}(P)+1$ and the signs of $\phi(p_i)$ and $\phi(p_{i+1})$ differ for each $i\in[k-1]$. Moreover, if $s= {\rm Xind}(P)$, then for any $(s+1)$-tuple $(\epsilon_1,\ldots,\epsilon_{s+1})\in\mathbb{Z}_2^{s+1}$, there is at least one chain $p_1\prec_P\cdots\prec_Pp_{ s+1}$ such that $\phi(p_i)=(\epsilon_i,i)$ for each $i\in[s+1]$. \end{alphtheorem}
\noindent{\bf $\mathbb{Z}_p$-Hom-Complex.} Let ${\mathcal H}$ be an $r$-uniform hypergraph. Also, let $p\geq r$ be a prime number. The {\it $\mathbb{Z}_p$-hom-complex} ${\rm Hom}(K^r_p,{\mathcal H})$ is a free $\mathbb{Z}_p$-poset with the ground set consisting of all ordered $p$-tuples $(U_1,\cdots,U_p)$, where the $U_i$'s are nonempty pairwise disjoint subsets of $V({\mathcal H})$ and ${\mathcal H}[U_1,\ldots,U_p]$ is a complete $r$-uniform $p$-partite hypergraph. For two $p$-tuples $(U_1,\cdots,U_p)$ and $(U'_1,\cdots,U'_p)$ in ${\rm Hom}(K^r_p,{\mathcal H})$, we define $(U_1,\cdots,U_p)\preceq(U'_1,\cdots,U'_p)$ if $U_i\subseteq U'_i$ for each $i\in[p]$. Also, for each $\omega^i\in \mathbb{Z}_p=\{\omega^1,\ldots,\omega^p\}$, let $\omega^i\cdot (U_1,\cdots,U_p)=(U_{1+i},\cdots,U_{p+i})$, where $U_j=U_{j-p}$ for $j>p$. Clearly, this action is a free $\mathbb{Z}_p$-action on ${\rm Hom}(K^r_p,{\mathcal H})$. Consequently, ${\rm Hom}(K^r_p,{\mathcal H})$ is a free $\mathbb{Z}_p$-poset with this $\mathbb{Z}_p$-action.
For a nonempty graph $G$ and for $p=2$, it is proved~\cite{2014arXiv1403.4404A,2013arXiv1302.5394A,SiTaZs13,MR2279672} that \begin{equation}\label{equation} \begin{array}{lll} \chi(G) &\geq & {\rm Xind}({\rm Hom}(K_2,G))+2 \geq {\rm ind}(\Delta {\rm Hom}(K_2,G))+2 \geq {\rm ind}(B_0(G))+1\\
&\geq & {\rm coind}(B_0(G))+1\geq |V({\mathcal F})|-{\rm alt}_2({\mathcal F}) \geq {\rm cd}_2({\mathcal F}), \end{array} \end{equation} where ${\mathcal F}$ is any hypergraph such that ${\rm KG}^2({\mathcal F})$ and $G$ are isomorphic.\\
\section{\bf Notations and Tools}\label{definition}
For a simplicial complex $K$, by $\operatorname{sd} K$, we mean the first barycentric subdivision of $K$. It is the simplicial complex whose vertex set is the set of nonempty simplices of $K$ and whose simplices are the collections of simplices of $K$ which are pairwise comparable by inclusion. Throughout the paper, by $\sigma_{t-1}^{r-1}$, we mean the $(t-1)$-dimensional simplicial complex with vertex set $\mathbb{Z}_r$ containing all $t$-subsets of $\mathbb{Z}_r$ as its maximal simplices. The join of two simplicial complexes $C$ and $K$, denoted $C*K$, is a simplicial complex with the vertex set $V(C)\biguplus V(K)$ and such that the set of its simplices is $\{F_1\biguplus F_2:\; F_1\in C\mbox{ and } F_2\in K\}$. Clearly, we can see $\mathbb{Z}_r$ as a $0$-dimensional simplicial complex. Note that the vertex set of simplicial complex $\operatorname{sd}\mathbb{Z}_r^{*\alpha}$ can be identified with $(\mathbb{Z}_r\cup\{0\})^\alpha\setminus\{\boldsymbol{0}\}$ and the vertex set of $(\sigma^{r-1}_{t-1})^{*n}$ is the set of all pairs $(\epsilon,i)$, where $\epsilon\in \mathbb{Z}_r$ and $i\in [n]$.
\subsection{{\bf $\mathbb{Z}_p$-Tucker-Ky Fan lemma}} The famous Borsuk-Ulam theorem has many generalizations which have been extensively used in investigating graph coloring properties. Some of these interesting generalizations are Tucker lemma~\cite{MR0020254}, $Z_p$-Tucker Lemma~\cite{MR1893009}, and Tucker-Ky Fan~\cite{MR0051506}. For more details about the Borsuk-Ulam theorem and its generalizations, we refer the reader to \cite{MR1988723}.
Actually, Tucker lemma is a combinatorial counterpart of Borsuk-Ulam theorem. There are several interesting and surprising applications of Tucker Lemma in combinatorics, including a combinatorial proof of Lov{\'a}sz-Kneser theorem by Matou{\v{s}}ek \cite{MR2057690}. \begin{alphlemma}\label{tuckeroriginal} {\rm(Tucker lemma \cite{MR0020254}).} Let $m$ and $n$ be positive integers and $\lambda:\{-1,0,+1\}^n\setminus \{(0,\ldots,0)\} \longrightarrow \{\pm 1, \pm 2,\ldots ,\pm m\}$ be a map satisfying the following properties: \begin{itemize} \item for any $X\in \{-1,0,+1\}^n\setminus \{\boldsymbol{0}\}$, we have $\lambda(-X)=-\lambda(X)$ {\rm (}a
$\mathbb{Z}_2$-equivariant map{\rm ),} \item no two signed vectors $X$ and $Y$ are such that $X\subseteq Y$ and $\lambda(X) =-\lambda(Y)$. \end{itemize} Then, we have $m \geq n$. \end{alphlemma} Another interesting generalization of the Borsuk-Ulam theorem is Ky~Fan's lemma~\cite{MR0051506}. This generalization ensures that with the same assumptions as in Lemma~\ref{tuckeroriginal}, there is an odd number of chains $X_1\subseteq X_2\subseteq \cdots \subseteq X_n$ such that $$\{\lambda(X_1),\ldots,\lambda(X_n)\}=\{+c_1,-c_2,\ldots , (- 1)^{n-1}c_n\},$$ where $1\leq c_1 < \cdots < c_n \leq m$. Ky~Fan's lemma has been used in several articles to study some coloring properties of graphs, see \cite{AliHajiMeu2016,MR2763055,MR2837625}. There are also some other generalizations of Tucker Lemma. A $\mathbb{Z}_p$ version of Tucker Lemma, called $\mathbb{Z}_p$-Tucker Lemma, was proved by Ziegler~\cite{MR1893009} and extended by Meunier~\cite{MR2793613}. In the next subsection, we present a $\mathbb{Z}_p$ version of Ky~Fan's lemma which is called $\mathbb{Z}_p$-Tucker-Ky Fan lemma.
\subsection{{\bf New Generalizations of Tucker Lemma}} Before presenting our results, we need to introduce some functions having key roles in the paper. Throughout the paper, we are going to use these functions repeatedly. Let $m$ be a positive integer. We remind that $(\sigma^{p-1}_{p-2})^{*m}$ is a free simplicial $\mathbb{Z}_p$-complex with vertex set $\mathbb{Z}_p\times [m]$. \\
\noindent{\bf The value function $l(-)$.} Let $\tau\in (\sigma^{p-1}_{p-2})^{*m}$ be a simplex. For each $\epsilon\in \mathbb{Z}_p$, define $\tau^\epsilon=\left\{(\epsilon,j):\; (\epsilon,j)\in \tau\right\}.$ Moreover, define
$$l(\tau)=\max\left\{\displaystyle|\bigcup_{\epsilon\in\mathbb{Z}_p} B^\epsilon|:\; \forall\epsilon\in\mathbb{Z}_p ,\; B^\epsilon\subseteq \tau^\epsilon\mbox{ and } \forall \epsilon_1,\epsilon_2\in\mathbb{Z}_p,\; |\;|B^{\epsilon_1}|-|B^{\epsilon_2}|\;|\leq 1 \right\}.$$
Note that if we set $h(\tau)=\displaystyle\min_{\epsilon\in \mathbb{Z}_p}|\tau^\epsilon|$, then
$$l(\tau)=p\cdot h(\tau)+|\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|>h(\tau)\}|.$$\\
\noindent{\bf The sign functions $s(-)$ and $s_0(-)$.} For an $a\in[m]$,
let $W_a$ be the set of all simplices $\tau\in (\sigma_{p-2}^{p-1})^{*m}$ such that $|\tau^\epsilon|\in\{0,a\}$ for each $\epsilon\in\mathbb{Z}_p$. Let $W=\displaystyle\bigcup_{a=1}^{m}W_a$. Choose an arbitrary $\mathbb{Z}_p$-equivariant map $s:W\longrightarrow \mathbb{Z}_p$. Also, consider a $\mathbb{Z}_p$-equivariant map $s_0:\sigma_{p-2}^{p-1}\longrightarrow \mathbb{Z}_p$. Note that since $\mathbb{Z}_p$ acts freely on both $\sigma_{p-2}^{p-1}$ and $W$, these maps can be easily built by choosing one representative in each orbit. It should be mentioned that both functions $s(-)$ and $s_0(-)$ were first introduced in~\cite{Meunier14}.
Now, we are in a position to generalize Tucker-Ky Fan lemma to $\mathbb{Z}_p$-Tucker-Ky Fan lemma. \begin{lemma}{\rm ($\mathbb{Z}_p$-Tucker-Ky Fan lemma).}\label{Z_pfanlemma} Let $m,n,p$ and $\alpha$ be nonnegative integers, where $m,n\geq 1$, $m\geq \alpha\geq 1$, and $p$ is prime. Let $$ \begin{array}{crcl} \lambda: & (\mathbb{Z}_p\cup\{0\})^n\setminus\{\boldsymbol{0}\} &\longrightarrow & \mathbb{Z}_p\times[m]\\ & X &\longmapsto & (\lambda_1(X),\lambda_2(X)) \end{array}$$ be a $\mathbb{Z}_p$-equivariant map satisfying the following conditions. \begin{itemize} \item For $X_1\subseteq X_2\in \left(\mathbb{Z}_p\cup\{0\}\right)^n\setminus\{\boldsymbol{0}\}$, if $\lambda_2(X_1)=\lambda_2(X_2)\leq \alpha$, then $\lambda_1(X_1)=\lambda_1(X_2)$. \item For $X_1\subseteq X_2\subseteq\cdots \subseteq X_p\in \left(\mathbb{Z}_p\cup\{0\}\right)^n\setminus\{\boldsymbol{0}\}$, if $\lambda_2(X_1)=\lambda_2(X_2)=\cdots=\lambda_2(X_p)\geq\alpha+1$, then
$$\left|\left\{\lambda_1(X_1),\lambda_1(X_2),\ldots,\lambda_1(X_p)\right\}\right|<p.$$ \end{itemize} Then there is a chain $$Z_1\subset Z_2\subset\cdots\subset Z_{n-\alpha}\in \left(\mathbb{Z}_p\cup\{0\}\right)^n\setminus\{\boldsymbol{0}\}$$ such that \begin{enumerate} \item for each $i\in [n-\alpha]$, $\lambda_2(Z_i)\geq \alpha+1$, \item for each $i\neq j\in [n-\alpha]$, $\lambda(Z_i)\neq \lambda(Z_j)$, and \item\label{condition3} for each $\epsilon\in\mathbb{Z}_p$,
$$\left\lfloor{n-\alpha\over p}\right\rfloor\leq \left|\left\{j:\; \lambda_1(Z_j)=\epsilon\right\}\right|\leq \left\lceil{n-\alpha\over p}\right\rceil.$$ \end{enumerate} In particular, $n-\alpha\leq (p-1)(m-\alpha)$. \end{lemma} \begin{proof} Note that the map $\lambda$ can be considered as a simplicial $\mathbb{Z}_p$-map from $\operatorname{sd} \mathbb{Z}_p^{*n}$ to $(\mathbb{Z}_p^{*\alpha})*((\sigma_{p-2}^{p-1})^{*(m-\alpha)}).$ Let $K={\rm Im}(\lambda)$. Note that each simplex in $K$ can be represented in a unique form $\sigma\cup\tau$ such that $\sigma\in \mathbb{Z}_p^{*\alpha}$ and $\tau \in (\sigma_{p-2}^{p-1})^{*m-\alpha}.$
In view of the definition of the function $l(-)$ and the properties that $\lambda$ satisfies, to prove the assertion, it suffices to show that there is a simplex $\sigma\cup\tau\in K$ such that $l(\tau)\geq n-\alpha$. For a contradiction, suppose that for each $\sigma\cup\tau\in K$, we have $l(\tau)\leq n-\alpha-1$.
Define the map $$\Gamma: \operatorname{sd} K\longrightarrow \mathbb{Z}_p^{*(n-1)}$$ such that for each vertex $\sigma\cup\tau\in V(\operatorname{sd} K)$, \begin{itemize} \item if $\tau=\varnothing$, then $\Gamma(\sigma\cup\tau)=(\epsilon, j)$, where $j$ is the maximum possible value such that $(\epsilon, j)\in\sigma$. Note that since $\sigma\in \mathbb{Z}_p^{*\alpha}$, there is only one $\epsilon\in\mathbb{Z}_p$ for which
the maximum is attained. Therefore, in this case, the function $\Gamma$ is well-defined.
\item if $\tau\neq\varnothing$. Define $h(\tau)=\displaystyle\min_{\epsilon\in \mathbb{Z}_p}|\tau^\epsilon|.$ \begin{enumerate}[label={\rm (\roman*)}] \item If $h(\tau)=0$, then define $\bar{\tau}=\{\epsilon\in \mathbb{Z}_p:\;
\tau^\epsilon= \varnothing\}\in \sigma^{p-1}_{p-2}$ and
$$\Gamma(\sigma\cup\tau)=\left(s_0(\bar\tau), \alpha+l(\tau)\right).$$
\item If $h(\tau)> 0$, then define $\bar{\tau}=\displaystyle
\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|=h(\tau)\}} \tau^\epsilon\in W$
and $$\Gamma(\sigma\cup\tau)=\left(s(\bar\tau), \alpha+l(\tau)\right).$$
\end{enumerate} \end{itemize} Now, we claim that $\Gamma$ is a simplicial $\mathbb{Z}_p$-map from $\operatorname{sd} K$ to $\mathbb{Z}_p^{*(n-1)}$. It is clear that $\Gamma$ is a $\mathbb{Z}_p$-equivariant map. For a contradiction, suppose that there are $\sigma\cup\tau,\sigma'\cup\tau' \in \operatorname{sd} K$ such that $\sigma\subseteq \sigma'$, $\tau\subseteq\tau'$, $\Gamma(\sigma\cup\tau)=(\epsilon,\beta)$, and $\Gamma(\sigma'\cup\tau')=(\epsilon',\beta)$, where $\epsilon\neq \epsilon'$. First note that in view of the definition of $\Gamma$ and the assumption $\Gamma(\sigma\cup\tau)=(\epsilon,\beta)$ and $\Gamma(\sigma'\cup\tau')=(\epsilon',\beta)$, the case $\tau=\varnothing$ and $\tau'\neq\varnothing$ is not possible. If $\tau'=\varnothing$, then $\tau=\tau'=\varnothing$ and we should have $(\epsilon,\beta),(\epsilon',\beta)\in\sigma'\in \mathbb{Z}_p^{*\alpha}$ which implies that $\epsilon=\epsilon'$, a contradiction. If $\varnothing\neq \tau\subseteq \tau'$, then in view of definition of $\Gamma$, we should have $l(\tau)=l(\tau')$. We consider three different cases.\\ \begin{enumerate}[label={\rm (\roman*)}] \item If $h(\tau)=h(\tau')=0$, then $$\epsilon=s_0(\{\epsilon\in \mathbb{Z}_p:\;
\tau^\epsilon= \varnothing\})\neq s_0(\{\epsilon\in \mathbb{Z}_p:\;
{\tau'}^\epsilon= \varnothing\})=\epsilon'.$$
Therefore, $ \{\epsilon\in \mathbb{Z}_p:\;
{\tau'}^\epsilon= \varnothing\}\subsetneq\{\epsilon\in \mathbb{Z}_p:\;
\tau^\epsilon= \varnothing\}$. This implies that
$$l(\tau')=p-|\{\epsilon\in \mathbb{Z}_p:\;
{\tau'}^\epsilon= \varnothing\}|>p-|\{\epsilon\in \mathbb{Z}_p:\;
\tau^\epsilon= \varnothing\}|=l(\tau),$$
a contradiction.\\
\item If $h(\tau)=0$ and $h(\tau')>0$, then we should have
$l(\tau)\leq p-1$ and $l(\tau')\geq p$, which contradicts the fact that $l(\tau)=l(\tau')$.\\
\item If $h(\tau)>0$ and $h(\tau')>0.$
Note that
$$ l(\tau)=p\cdot h(\tau)+|\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|>h(\tau)\}|
\mbox{ and } l(\tau')=p\cdot h(\tau')+|\{\epsilon\in\mathbb{Z}_p:\;
|{\tau'}^\epsilon|>h(\tau')\}|.$$ For this case, two different sub-cases will be distinguished.
\begin{itemize}
\item[(a)] If $h(\tau)=h(\tau')=h$, then
$$\epsilon=s(\displaystyle\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|=h\}} \tau^\epsilon)\neq s(\displaystyle\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |{\tau'}^\epsilon|=h\}} {\tau'}^\epsilon)=\epsilon'.$$
Clearly, it implies that $$\displaystyle\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|=h\}} \tau^\epsilon\neq \displaystyle\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |{\tau'}^\epsilon|=h\}} {\tau'}^\epsilon.$$
Note that $\tau\subseteq \tau'$ and $h=\displaystyle\min_{\epsilon\in \mathbb{Z}_p}|\tau^\epsilon|=\displaystyle\min_{\epsilon\in \mathbb{Z}_p}|{\tau'}^\epsilon|.$ Therefore, we should have
$$
\{\epsilon\in\mathbb{Z}_p:\; |{\tau'}^\epsilon|=h\} \subsetneq \{\epsilon\in\mathbb{Z}_p:\; |{\tau}^\epsilon|=h\}$$
and consequently $l(\tau)<l(\tau')$ which is a contradiction.
\item[(b)] If $h(\tau)<h(\tau')$, then
$$l(\tau)\leq p\cdot h(\tau)+p-1< p\cdot (h(\tau)+1)\leq l(\tau'),$$
a contradiction.
\end{itemize} \end{enumerate} Therefore, $\Gamma$ is a simplicial $\mathbb{Z}_p$-map from $\operatorname{sd} K$ to $\mathbb{Z}_p^{*(n-1)}$. Naturally, $\lambda$ can be lifted to a simplicial $\mathbb{Z}_p$-map $\bar\lambda:\operatorname{sd}^2 \mathbb{Z}_p^{*n}\longrightarrow \operatorname{sd} K$. Thus $\Gamma\circ\bar\lambda$ is a simplicial $\mathbb{Z}_p$-map from $\operatorname{sd}^2 \mathbb{Z}_p^{*n}$ to $\mathbb{Z}_p^{*(n-1)}$. In view of Dold's theorem~\cite{MR711043,MR1988723}, the dimension of $\mathbb{Z}_p^{*(n-1)}$ should be strictly larger than the connectivity of $\operatorname{sd}^2 \mathbb{Z}_p^{*n}$, that is $n-2>n-2$, which is not possible. \end{proof}
Lemma~\ref{Z_pfanlemma} provides a short and simple proof of Meunier's colorful result for Kneser hypergraphs (the next theorem) as follows. \begin{alphtheorem}{\rm \cite{Meunier14}}\label{colorfulhyper} Let ${\mathcal H}$ be a hypergraph and let $p$ be a prime number. Then any proper coloring $c:V({\rm KG}^p({\mathcal H}))\longrightarrow [C]$ {\rm(}$C$ arbitrary{\rm)} must contain a colorful balanced complete $p$-uniform $p$-partite hypergraph with
$|V({\mathcal H})|-{\rm alt}_p({\mathcal H})$ vertices. \end{alphtheorem} \begin{proof} Consider a bijection $\pi:[n]\longrightarrow V({\mathcal H})$ such that ${\rm alt}_p({\mathcal H},\pi)={\rm alt}_p({\mathcal H}).$ We are going to define a map $$\begin{array}{cccc} \lambda: & (\mathbb{Z}_p\cup\{0\})^n\setminus\{\boldsymbol{0}\} &\longrightarrow & \mathbb{Z}_p\times[m]\\ & X &\longmapsto & (\lambda_1(X),\lambda_2(X)) \end{array}$$ satisfying the conditions of Lemma~\ref{Z_pfanlemma}
and with parameters $n= |V({\mathcal H})|$, $m={\rm alt}_p({\mathcal H})+C$, and $\alpha={\rm alt}_p({\mathcal H})$. Assume that $2^{[n]}$ is equipped with a total ordering $\preceq$. For each $X\in(\mathbb{Z}_p\cup\{0\})^n\setminus\{\boldsymbol{0}\}$, define $\lambda(X)$ as follows. \begin{itemize} \item If ${\rm alt}(X)\leq {\rm alt}_p({\mathcal H},\pi)$, then let $\lambda_1(X)$ be the first nonzero coordinate of $X$ and $\lambda_2(X)={\rm alt}(X)$. \item If ${\rm alt}(X)\geq {\rm alt}_p({\mathcal H},\pi)+1$, then in view of the definition of ${\rm alt}_p({\mathcal H},\pi)$, there is some $\epsilon\in\mathbb{Z}_p$ such that $E(\pi(X^\epsilon))\neq \varnothing$. Define $$c(X)=\max\left\{c(e):\; \exists\epsilon\in\mathbb{Z}_p\mbox{ such that } e\subseteq \pi(X^\epsilon)\right\}$$ and $\lambda_2(X)={\rm alt}_p({\mathcal H},\pi)+c(X)$. Choose $\epsilon\in\mathbb{Z}_p$ such that there is at least one edge $e\subseteq\pi(X^\epsilon)$ with $c(X)=c(e)$ and such that
$X^\epsilon$ is the maximum one having this property. By the maximum, we mean
the maximum according to the total ordering $\preceq$. It is clear that $\epsilon$ is defined uniquely. Now, let $\lambda_1(X)=\epsilon$. \end{itemize} One can check that $\lambda$ satisfies the conditions of Lemma~\ref{Z_pfanlemma}. Consider the chain $Z_1\subset Z_2\subset\cdots\subset Z_{n-{\rm alt}_p({\mathcal H},\pi)}$ whose existence is ensured by Lemma~\ref{Z_pfanlemma}. Note that for each $i\in[n-{\rm alt}_p({\mathcal H},\pi)]$, we have $\lambda_2(Z_i)>{\rm alt}_p({\mathcal H},\pi)$. Consequently, $\lambda_2(Z_i)={\rm alt}_p({\mathcal H},\pi)+c(Z_i)$. Let $\lambda(Z_i)=(\epsilon_i,j_i)$. Note that for each $i$, there is at least one edge $e_{i,\epsilon_i}\subseteq \pi(Z_i^{\epsilon_i})\subseteq \pi(Z_{n-{\rm alt}_p({\mathcal H},\pi)}^{\epsilon_i})$ such that $c(e_{i,\epsilon_i})=j_i-{\rm alt}_p({\mathcal H},\pi)$. For each $\epsilon\in\mathbb{Z}_p$, define $U_\epsilon=\{e_{i,\epsilon_i}:\; \epsilon_i=\epsilon\}.$ We have the following three properties for $U_\epsilon$'s. \begin{itemize} \item Since the chain $Z_1\subset Z_2\subset\cdots\subset
Z_{n-{\rm alt}_p({\mathcal H},\pi)}$ satisfies Condition~\ref{condition3} of
Lemma~\ref{Z_pfanlemma}, we have
$\left\lfloor{n-{\rm alt}_p({\mathcal H},\pi)\over p}\right\rfloor\leq
|U_\epsilon|\leq \left\lceil{n-{\rm alt}_p({\mathcal H},\pi)\over p}\right\rceil.$ \item The edges in $U_\epsilon$ get distinct colors.
If there are two edges $e_{i,\epsilon}$ and $e_{i',\epsilon}$ in $U_\epsilon$ such that
$c(e_{i,\epsilon})=c(e_{i',\epsilon})$, then $\lambda(Z_i)=\lambda(Z_{i'})$
which is not possible. \item If $\epsilon\neq \epsilon'$, then for each $e\in U_\epsilon$ and $f\in U_{\epsilon'}$,
we have $e\cap f=\varnothing$. It is clear because
$e\subseteq\pi(Z_{n-{\rm alt}_p({\mathcal H},\pi)}^\epsilon)$,
$f\subseteq\pi(Z_{n-{\rm alt}_p({\mathcal H},\pi)}^{\epsilon'})$,
and
$$\pi(Z_{n-{\rm alt}_p({\mathcal H},\pi)}^\epsilon)\cap
\pi(Z_{n-{\rm alt}_p({\mathcal H},\pi)}^{\epsilon'})=\varnothing.$$ \end{itemize} Now, it is clear that the subhypergraph ${\rm KG}^p({\mathcal H})[U_{\omega^1},\ldots,U_{\omega^p}]$ is the desired subhypergraph. \end{proof}
The proof of the next lemma is similar to the proof of Lemma~\ref{Z_pfanlemma}. \begin{lemma}\label{genfanlemma} Let $C$ be a free simplicial $\mathbb{Z}_p$-complex such that ${\rm ind}_{\mathbb{Z}_p}(C)\geq t$ and let $\lambda:C\longrightarrow (\sigma^{p-1}_{p-2})^{*m}$ be a simplicial $\mathbb{Z}_p$-map. Then there is at least one $t$-dimensional simplex $\sigma\in C$ such that $\tau=\lambda(\sigma)$ is a $t$-dimensional simplex and for each $\epsilon\in \mathbb{Z}_p$, we have
$\lfloor{t+1\over p}\rfloor\leq |\tau^\epsilon|\leq\lceil{t+1\over p}\rceil.$ \end{lemma} \begin{proof} For simplicity of notation, let $K={\rm Im}(\lambda)$. Clearly, to prove the assertion, it is enough to show that there is a $t$-dimensional simplex $\tau\in K$
such that $l(\tau)\geq t$. Suppose, contrary to the assertion, that there is no such $t$-dimensional simplex. Therefore, for each simplex $\tau$ of $K$, we have $l(\tau)\leq t$. For each vertex $\tau\in V(\operatorname{sd} K)$, set $h(\tau)=\displaystyle\min_{\epsilon\in \mathbb{Z}_p}|\tau^\epsilon|$.
Let $\Gamma:\operatorname{sd} K\longrightarrow \mathbb{Z}_p^{*t}$ be a map such that for each vertex $\tau$ of $\operatorname{sd} K$, $\Gamma(\tau)$ is defined as follows. \begin{enumerate}[label={\rm (\roman*)}] \item If $h(\tau)=0$, then define $\bar{\tau}=\{\epsilon\in \mathbb{Z}_p:\;
\tau^\epsilon= \varnothing\}\in \sigma^{p-1}_{p-2}$ and
$$\Gamma(\tau)=\left(s_0(\bar\tau), l(\tau)\right).$$
\item If $h(\tau)> 0$, then define $\bar{\tau}=\displaystyle
\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|=h(\tau)\}} \tau^\epsilon\in W$
and $$\Gamma(\tau)=\left(s(\bar\tau), l(\tau)\right).$$ \end{enumerate} Similar to the proof of Lemma~\ref{Z_pfanlemma}, $\Gamma\circ\bar{\lambda}:\operatorname{sd} C\longrightarrow \mathbb{Z}_p^{*t}$ is a simplicial $\mathbb{Z}_p$-map. This implies that ${\rm ind}_{\mathbb{Z}_p}(C)\leq t-1$ which is not possible. \end{proof}
The next proposition is an extension of Theorem~\ref{altercrossindex}. However, we lose some properties in this extension.
\begin{proposition}\label{Xindposet} Let $P$ be a free ${\mathbb Z}_p$-poset and $$\begin{array}{rll} \psi: P & \longrightarrow & Q_{s,p}\\
p &\longmapsto & (\psi_1(p),\psi_2(p)) \end{array}$$ be an order preserving ${\mathbb Z}_p$-map. Then $P$ contains a chain $p_1\prec_P\cdots\prec_Pp_{ k}$ such that \begin{itemize} \item $k= {\rm ind}_{\mathbb{Z}_p}(\Delta P)+1$, \item for each $i\in[k-1]$, $\psi_2(p_i)< \psi_2(p_{i+1})$, and \item for each $\epsilon\in\mathbb{Z}_p$,
$$\left\lfloor{k\over p}\right\rfloor\leq \left|\left\{j:\; \psi_1(p_j)=\epsilon\right\}\right|\leq \left\lceil{k\over p}\right\rceil.$$ \end{itemize} \end{proposition} \begin{proof} Note that $\psi$ can be considered as a simplicial ${\mathbb Z}_p$-map from $\Delta P$ to $\mathbb{Z}_p^{*n}\subseteq (\sigma_{p-2}^{p-1})^{*n}$. Now, in view of Lemma~\ref{genfanlemma}, we have the assertion. \end{proof} Note that, for $p=2$, since ${\rm Xind}(P)\geq {\rm ind}(\Delta P)$,
Theorem~\ref{altercrossindex} is better than Proposition~\ref{Xindposet}. However, we cannot prove that Proposition~\ref{Xindposet} is valid if we replace ${\rm ind}(\Delta P)$ by ${\rm Xind}(P)$.
In an unpublished paper, Meunier~\cite{unpublishedMeunier} introduced a generalization of the Tucker-Ky~Fan lemma. He presented a version of the $\mathbb{Z}_q$-Fan lemma which is valid for each odd integer $q\geq 3$. To be more specific, he proved that if $q$ is an odd positive integer and $\lambda:V(T)\longrightarrow \mathbb{Z}_q\times[m]$ is a $\mathbb{Z}_q$-equivariant labeling of a $\mathbb{Z}_q$-equivariant triangulation of an $(n-1)$-connected free $\mathbb{Z}_q$-space $T$, then there is at least one simplex in $T$ whose vertices are labelled with labels $(\epsilon_0,j_0),(\epsilon_1,j_1),\ldots,(\epsilon_n,j_n)$, where $\epsilon_i\neq \epsilon_{i+1}$ and $j_i<j_{i+1}$ for all $i\in\{0,1,\ldots,n-1\}$. Also, he asked the question whether the result is true for even values of $q$. This question received a positive answer owing to the work of B.~Hanke et~al.~\cite{Hanke2009404}. In both mentioned works, the proofs of the $\mathbb{Z}_q$-Fan lemma are built on involved constructions. Here, we take the opportunity of this paper to propose the following generalization of this result with a short and simple proof because we are using similar techniques in the paper. \begin{lemma}{\rm($\mathbb{G}$-Fan lemma).}\label{Gtucker} Let $\mathbb{G}$ be a nontrivial finite group and let $T$ be a free $\mathbb{G}$-simplicial complex such that ${\rm ind}_{\mathbb{G}}(T)= n$. Assume that $\lambda:V(T)\longrightarrow \mathbb{G}\times[m]$ is a $\mathbb{G}$-equivariant labeling such that there is no edge in $T$ whose vertices are labelled with $(g,j)$ and $(g',j)$ with $g\neq g'$ and $j\in[m]$. Then there is at least one simplex in $T$ whose vertices are labelled with labels $(g_0,j_0),(g_1,j_1),\ldots,(g_n,j_n)$, where $g_i\neq g_{i+1}$ and $j_i<j_{i+1}$ for all $i\in\{0,1,\ldots,n-1\}$. In particular, $m\geq n+1$. \end{lemma} \begin{proof} Clearly, the map $\lambda$ can be considered as a $\mathbb{G}$-simplicial map from $T$ to $\mathbb{G}^{*m}$. 
Naturally, each nonempty simplex $\sigma\in \mathbb{G}^{*m}$ can be identified with a vector $X=(x_1,x_2,\ldots,x_m)\in (\mathbb{G}\cup\{0\})^m\setminus\{\boldsymbol{0}\}$. To prove the assertion, it is enough to show that there is a simplex $\sigma\in T$ such that ${\rm alt}(\lambda(\sigma))\geq n+1$. For a contradiction, suppose that, for each simplex $\sigma\in T$, we have ${\rm alt}(\lambda(\sigma))\leq n$. Define $$\begin{array}{lrll} \Gamma:&V(\operatorname{sd} T) &\longrightarrow & \mathbb{G}\times[n]\\
&\sigma&\longmapsto & \left(g,{\rm alt}(\lambda(\sigma))\right), \end{array}$$ where $g$ is the first nonzero coordinate of the vector $\lambda(\sigma)\in (\mathbb{G}\cup\{0\})^m\setminus\{\boldsymbol{0}\}.$ One can check that $\Gamma$ is a simplicial $\mathbb{G}$-map from $\operatorname{sd} T$ to $\mathbb{G}^{*n}$. Note that $\mathbb{G}^{*n}$ is an $E_{n-1} \mathbb{G}$ space. Consequently, ${\rm ind}_{\mathbb{G}}(\mathbb{G}^{*n})= n-1$. This implies that ${\rm ind}_{\mathbb{G}}(T)\leq n-1$ which is a contradiction. \end{proof}
\subsection{\bf Hierarchy of Indices} The aim of this subsection is to introduce some tools for the proof of Theorem~\ref{inequalities}.
Let $n,\alpha$, and $p$ be integers where $n\geq 1$, $n\geq\alpha\geq 0$, and $p$ is prime. Define $$\displaystyle\Sigma_p(n,\alpha)=\Delta\left\{X\in(\mathbb{Z}_p\cup\{0\})^n:\; {\rm alt}(X)\geq \alpha+1\right\}.$$ Note that $\displaystyle\Sigma_p(n,\alpha)$ is a free simplicial $\mathbb{Z}_p$-complex with the vertex set $$\left\{X\in(\mathbb{Z}_p\cup\{0\})^n:\; {\rm alt}(X)\geq \alpha+1\right\}.$$
\begin{lemma}\label{indsigma} Let $n,\alpha$, and $p$ be integers where $n\geq 1$, $n\geq\alpha\geq 0$, and $p$ is prime. Then $${\rm ind}_{\mathbb{Z}_p}(\displaystyle\Sigma_p(n,\alpha))\geq n-\alpha-1.$$ \end{lemma} \begin{proof} Define $$ \begin{array}{crcl} \lambda: & \operatorname{sd} \mathbb{Z}_p^{*n} & \longrightarrow &
(\mathbb{Z}_p^{*\alpha})*\left(\displaystyle\Sigma_p(n,\alpha)\right)\\
& X & \longmapsto &
\left
\{\begin{array}{cl}
(\epsilon,{\rm alt}(X)) & \mbox{ if ${\rm alt}(X)\leq \alpha$}\\
X & \mbox{ if ${\rm alt}(X)\geq \alpha+1$},
\end{array}
\right. \end{array}$$ where $\epsilon$ is the first nonzero term of $X$. Clearly, the map $\lambda$ is a simplicial $\mathbb{Z}_p$-map. Therefore, $$ \begin{array}{lll} n-1={\rm ind}_{\mathbb{Z}_p}( \operatorname{sd} \mathbb{Z}_p^{*n}) & \leq & {\rm ind}_{\mathbb{Z}_p}\left(\mathbb{Z}_p^{*\alpha}*\displaystyle\Sigma_p(n,\alpha)\right)\\ & \leq & {\rm ind}_{\mathbb{Z}_p}(\mathbb{Z}_p^{*\alpha})+{\rm ind}_{\mathbb{Z}_p}(\displaystyle\Sigma_p(n,\alpha))+1\\ &\leq &\alpha+{\rm ind}_{\mathbb{Z}_p}(\displaystyle\Sigma_p(n,\alpha)) \end{array} $$ which completes the proof. \end{proof} \begin{proposition}\label{inequalityI} Let ${\mathcal H}$ be a hypergraph. For any integer $r\geq 2$ and any prime number $p\geq r$, we have
$${\rm ind}_{\mathbb{Z}_p}({\rm B}_0({\rm KG}^r({\mathcal H}),\mathbb{Z}_p))+1\geq |V({\mathcal H})|-{\rm alt}_p({\mathcal H}).$$ \end{proposition} \begin{proof}
For convenience, let $|V({\mathcal H})|=n$ and $\alpha={\rm alt}_p({\mathcal H})$. Let $\pi:[n]\longrightarrow V({\mathcal H})$ be a bijection such that ${\rm alt}_p({\mathcal H},\pi)={\rm alt}_p({\mathcal H})$. Define $$ \begin{array}{lrll} \lambda:& \Sigma_p(n,\alpha)& \longrightarrow & \operatorname{sd}{\rm B}_0({\rm KG}^r({\mathcal H}),\mathbb{Z}_p)\\
& X&\longmapsto & \{\omega^1\}\times U_1\cup\cdots\cup \{\omega^p\}\times U_p, \end{array} $$ where $U_i=\{e\in E({\mathcal H}):\; e\subseteq \pi(X^{\omega^i})\}.$ One can see that $\lambda$ is a simplicial $\mathbb{Z}_p$-map. Consequently, $${\rm ind}_{\mathbb{Z}_p}({\rm B}_0({\rm KG}^r({\mathcal H}),\mathbb{Z}_p))\geq {\rm ind}_{\mathbb{Z}_p}(\Sigma_p(n,\alpha))\geq n-{\rm alt}_p({\mathcal H})-1. $$ \end{proof}
\begin{proposition}\label{inequalityII} Let ${\mathcal H}$ be an $r$-uniform hypergraph and $p\geq r$ be a prime number. Then $$ {\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p\geq {\rm ind}_{\mathbb{Z}_p}(\Delta{\rm Hom}(K^r_p,{\mathcal H}))+p\geq {\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))+1.$$ \end{proposition} \begin{proof} Since we already know ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))\geq {\rm ind}_{\mathbb{Z}_p}(\Delta{\rm Hom}(K^r_p,{\mathcal H}))$, to prove the assertion, it is enough to show that ${\rm ind}_{\mathbb{Z}_p}(\Delta{\rm Hom}(K^r_p,{\mathcal H}))+p\geq {\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))+1.$ To this end, define $$\begin{array}{llll} \lambda: & \operatorname{sd} B_0({\mathcal H},\mathbb{Z}_p) & \longrightarrow & \left(\operatorname{sd}\sigma_{p-2}^{p-1}\right)*\displaystyle\left(\Delta{\rm Hom}(K^r_p,{\mathcal H})\right)\\ \end{array}$$ such that for each vertex $\tau=\displaystyle\bigcup_{i=1}^p\left(\{\omega^i\}\times U_i\right)$ of $\operatorname{sd} B_0({\mathcal H},\mathbb{Z}_p)$, $\lambda(\tau)$ is defined as follows. \begin{itemize} \item If $U_i\neq\varnothing$ for each $i\in[p]$, then $\lambda(\tau)=\tau.$ \item If $U_i=\varnothing$ for some $i\in[p]$, then $$\lambda(\tau)=\{\omega^i\in\mathbb{Z}_p:\; U_i=\varnothing\}.$$ \end{itemize} One can check that the map $\lambda$ is a simplicial $\mathbb{Z}_p$-map. Also, since $\sigma_{p-2}^{p-1}$ is a free simplicial $\mathbb{Z}_p$-complex of dimension $p-2$, we have ${\rm ind}_{\mathbb{Z}_p}(\sigma_{p-2}^{p-1})\leq p-2$ (see properties of the $\mathbb{G}$-index in Section~\ref{intro}). This implies that $$ \begin{array}{lll}
{\rm ind}_{\mathbb{Z}_p}(B_0({\mathcal H},\mathbb{Z}_p))& \leq & {\rm ind}_{\mathbb{Z}_p}\left(\left(\operatorname{sd}\sigma_{p-2}^{p-1}\right)* \left(\Delta{\rm Hom}(K^r_p,{\mathcal H})\right)\right)\\ & \leq &{\rm ind}_{\mathbb{Z}_p}(\sigma_{p-2}^{p-1})+ {\rm ind}_{\mathbb{Z}_p}(\Delta{\rm Hom}(K^r_p,{\mathcal H}))+1\\ &\leq & p-1+{\rm ind}_{\mathbb{Z}_p}(\Delta{\rm Hom}(K^r_p,{\mathcal H})) \end{array} $$ which completes the proof. \end{proof}
\section{\bf Proofs of Theorem~\ref{maincolorfulindex} and Theorem~\ref{inequalities}}\label{sec:proofs} Now, we are ready to prove Theorem~\ref{maincolorfulindex} and Theorem~\ref{inequalities}.\\
\noindent{\bf Proof of Theorem~\ref{maincolorfulindex}: Part (i).} For convenience, let ${\rm ind}_{\mathbb{Z}_p}({\rm B}_0({\mathcal H},{\mathbb{Z}_p}))=t$. Note that $$ \begin{array}{crcl} \Gamma: &\mathbb{Z}_p\times V({\mathcal H}) & \longrightarrow & \mathbb{Z}_p\times [C]\\
& (\epsilon,v) & \longmapsto & (\epsilon,c(v)) \end{array}$$ is a simplicial $\mathbb{Z}_p$-map from ${\rm B}_0({\mathcal H},{\mathbb{Z}_p})$ to $(\sigma^{p-1}_{r-2})^{*C}$. Therefore, in view of Lemma~\ref{genfanlemma}, there is a $t$-dimensional simplex $\tau\in{\rm im}(\Gamma)$ such that, for each $\epsilon\in \mathbb{Z}_p$, we have
$\lfloor{t+1\over p}\rfloor\leq |\tau^\epsilon|\leq\lceil{t+1\over p}\rceil.$ Let $\displaystyle\bigcup_{i=1}^p(\{\omega^i\} \times U_i)$ be the minimal simplex in $\Gamma^{-1}(\tau)$. One can see that ${\mathcal H}[U_1,\ldots,U_p]$ is the desired subhypergraph. Moreover, since every color can appear in at most $r-1$ of the $U_i$'s, we have $$C\geq {{\rm ind}_{\mathbb{Z}_p}({\rm B}_0({\mathcal H},{\mathbb{Z}_p}))+1\over r-1}.$$
\noindent{\bf Part (ii).} For convenience, let ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))=t$. Define the map $$\lambda:{\rm Hom}(K^r_p,{\mathcal H})\longrightarrow \operatorname{sd}(\sigma_{r-2}^{p-1})^{*C}$$ such that for each $(U_1,\cdots,U_p)\in {\rm Hom}(K^r_p,{\mathcal H})$, $$\lambda(U_1,\cdots,U_p)=\{\omega^1\}\times c(U_1) \cup\cdots\cup \{\omega^p\}\times c(U_p).$$ {\bf Claim.} There is a $p$-tuple $(U_1,\cdots,U_p)\in {\rm Hom}(K^r_p,{\mathcal H})$ such that for $\tau=\lambda(U_1,\cdots,U_p)$, we have $l(\tau)\geq {\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p$.
\noindent{\bf Proof of Claim.} Suppose, contrary to the claim, that for each $\tau\in {\rm Im}(\lambda)$, we have $l(\tau)\leq t+p-1$. Note that $\operatorname{sd}(\sigma_{r-2}^{p-1})^{*C}$ can be considered as a free $\mathbb{Z}_p$-poset ordered by inclusion. One can readily check that $\lambda$ is an order-preserving $\mathbb{Z}_p$-map. Clearly, for each $\tau\in {\rm Im}(\lambda)$, we have $h(\tau)=\displaystyle\min_{\epsilon\in\mathbb{Z}_p}|\tau^\epsilon|\geq 1$ and consequently, $l(\tau)\geq p$. Now, define $$\bar{\tau}=\displaystyle
\bigcup_{\{\epsilon\in\mathbb{Z}_p:\; |\tau^\epsilon|=h(\tau)\}} \tau^\epsilon\in W\quad {\rm and }\quad \Gamma(\tau)=\left(s(\bar\tau), l(\tau)-p+1\right).$$ One can see that the map $\Gamma:{\rm im}(\lambda)\longrightarrow Q_{t-1,p}$ is an order-preserving $\mathbb{Z}_p$-map. Therefore, $$\Gamma\circ\lambda:{\rm Hom}(K^r_p,{\mathcal H})\longrightarrow Q_{t-1,p}$$ is an order-preserving $\mathbb{Z}_p$-map, which contradicts the fact that ${\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))=t$.
$\square$
Now, let $(U_1,\cdots,U_p)$ be a minimal $p$-tuple in ${\rm Hom}(K^r_p,{\mathcal H})$ such that for $$\tau=\lambda(U_1,\cdots,U_p)=\{\omega^1\}\times c(U_1) \cup\cdots\cup \{\omega^p\}\times c(U_p),$$ we have $l(\tau)= t+p$. One can check that ${\mathcal H}[U_1,\cdots,U_p]$ is the desired complete $r$-uniform $p$-partite subhypergraph. Similar to the proof of Part (i), since every color can appear in at most $r-1$ of the $U_i$'s, we have $$C\geq {{\rm Xind}_{\mathbb{Z}_p}({\rm Hom}(K^r_p,{\mathcal H}))+p\over r-1}.$$
$\square$
\noindent{\bf Proof of Theorem~\ref{inequalities}.} It is simple to prove that
$|V({\mathcal F})|-{\rm alt}_p({\mathcal F}) \geq {\rm cd}_p({\mathcal F})$ for any hypergraph ${\mathcal F}$. Therefore, the proof follows from Proposition~\ref{inequalityI} and Proposition~\ref{inequalityII}.
$\square$\\
\noindent{\bf Acknowledgements.} I would like to acknowledge Professor Fr\'ed\'eric~Meunier for interesting discussions about the paper and his invaluable comments. Also, I would like to thank Professor Hossein~Hajiabolhasan and Mrs~Roya~Abyazi~Sani for their useful comments.
\def$'$} \def\cprime{$'${$'$} \def$'$} \def\cprime{$'${$'$}
\end{document} |
\begin{document}
\title{The Triple Point Spectrum of closed orientable $3$-manifolds}
\author{\'Alvaro Lozano Rojo}
\address{Centro Universitario de la Defensa Zaragoza, Academia General Militar Carretera de Huesca s/n. 50090 Zaragoza, Spain --- IUMA, Universidad de Zaragoza} \email{[email protected]}
\thanks{Partially supported by the European Social Fund and Diputaci\'on General de Arag\'on (Grant E15 Geometr{\'\i}a).}
\author{Rub\'en Vigara Benito} \address{Centro Universitario de la Defensa Zaragoza, Academia General Militar Carretera de Huesca s/n. 50090 Zaragoza, Spain --- IUMA, Universidad de Zaragoza} \email{[email protected]}
\subjclass[2000]{Primary 57N10, 57N35}
\keywords{$3$-manifold, homology $3$-sphere, immersed surface, filling Dehn surface, triple points, complexity of $3$-manifolds}
\begin{abstract}
The triple point numbers and the triple point spectrum of a closed
$3$-manifold were defined in~\cite{tesis}.
They are topological invariants that give a measure of the complexity of a
$3$-manifold
using the number of triple points of minimal filling Dehn surfaces.
Basic
properties of these invariants are presented, and the triple point spectra of
$\mathbb{S}^2\times \mathbb{S}^1$ and $\mathbb{S}^3$ are computed. \end{abstract}
\maketitle
\section{Introduction}\label{sec:intro}
Through the whole paper all 3-manifolds and surfaces are assumed to be orientable and closed, that is, compact connected and without boundary, $M$ denotes a 3-manifold and $S$ a surface. All objects are assumed to be in the smooth category: manifolds have a differentiable structure and all maps are assumed to be smooth. By simplicity a genus $g$ surface, $g=0,1,2,\ldots$, is called a \emph{$g$-torus}.
Let $M$ be a 3-manifold.
A subset $\Sigma\subset M$ is a \emph{Dehn surface} in $M$~\cite{Papa} if there exists a surface $S$ and a general position immersion $f:S\rightarrow M$ such that $\Sigma=f\left( S\right)$. In this situation we say that $S$ is the \emph{domain} of $\Sigma$ and that $f$ \emph{parametrizes} $\Sigma$. If $S$ is a $2$-sphere, a torus, or a $g$-torus then $\Sigma$ is a \emph{Dehn sphere}, a \emph{Dehn torus}, or a \emph{Dehn $g$-torus} respectively. For a Dehn surface $\Sigma\subset M$, its singularities are divided into \emph{double points}, where two sheets of $\Sigma$ intersect transversely, and \emph{triple points}, where three sheets of $\Sigma$ intersect transversely, and they are arranged along \emph{double curves} (see Section~\ref{sec:Dehn-surfaces-Johansson-diagrams} below for definitions and pictures).
A Dehn surface $\Sigma\subset M$ \emph{fills} $M$~\cite{Montesinos} if it defines a cell-decomposition of $M$ in which the 0-skeleton is the set of triple points of $\Sigma$, the 1-skeleton is the set of double and triple points of $\Sigma$, and the 2-skeleton is $\Sigma$ itself. Filling Dehn spheres are introduced (following ideas of W. Haken~\cite{Haken1}) in~\cite{Montesinos}, where it is proved that every closed, orientable 3-manifold has a filling Dehn sphere. In~\cite{Montesinos} filling Dehn spheres and their Johansson diagrams (cf. Section~\ref{sec:Dehn-surfaces-Johansson-diagrams}) are proposed as a suitable way for representing all closed orientable 3-manifolds. A weaker version of filling Dehn surfaces are the \emph{quasi-filling Dehn surfaces} defined in~\cite{Amendola02}. A quasi-filling Dehn surface in $M$ is a Dehn surface whose complementary set in $M$ is a disjoint union of open $3$-balls. In~\cite{FennRourke} it is proved that every $3$-manifold has a quasi-filling Dehn sphere. As it is pointed out in~\cite{Amendola02} (see also~\cite{Funar}), filling Dehn surfaces are closely related to \emph{cubulations} of 3-manifolds: cell decompositions where the building blocks are cubes. The dual decomposition of that defined by a filling Dehn surface is a cubulation and vice versa, the dual cell decomposition of a cubulation is a filling Dehn surface (perhaps with nonconnected or nonorientable domain).
\begin{figure}
\caption{A triple point of $\Sigma$ and its triplet
in $S$}
\label{fig:triple-point-00}
\end{figure} The number of triple points of filling Dehn surfaces measures the complexity of $3$-manifolds. A filling Dehn surface $\Sigma\subset M$ with domain $S$ is \emph{minimal} if there is no filling Dehn surface in $M$ with domain $S$ with fewer triple points than $\Sigma$.
In~\cite{RHomotopies} the \emph{genus $g$ triple point number} $t_g(M)$ of $M$ is defined as the number of triple points of a minimal filling $g$-torus of $M$, and the \emph{triple point spectrum} of $M$ as the sequence of numbers \[
\mathscr{T}(M)=(t_0(M),t_1(M),t_2(M),\ldots)\,. \] The genus $0$ triple point number of $M$ is the \emph{Montesinos complexity} of $M$~\cite{racsam}. These invariants are related to the \emph{surface-complexity} of $M$, defined in~\cite{Amendola02} as the number of triple points of a quasi-filling Dehn surface of $M$ (perhaps with nonconnected or nonorientable domain).
We want to clarify how much information about $M$ is encoded in $\mathscr{T}(M)$. Since the original work~\cite{RHomotopies}, nothing is known about the triple point numbers and the triple point spectrum apart from their definitions. In this paper we introduce some properties of $\mathscr{T}(M)$ (Section~\ref{sec:spectrum-surgerysurfaces}), and we present the only two known triple point spectra
\begin{align}
\mathscr{T}(\mathbb{S}^2\times \mathbb{S}^1)&=(2,1,3,5,\ldots)\,,\label{eq:triple-point-spectrum-S2-S1}\\
\mathscr{T}(\mathbb{S}^3)&=(2,4,6,8,\ldots)\,.\label{eq:triple-point-spectrum-S3}
\end{align}
The proof of \eqref{eq:triple-point-spectrum-S2-S1} relies on two previously known examples of minimal filling Dehn surfaces of $\mathbb{S}^2\times \mathbb{S}^1$, together with the inequality (Lemma~\ref{lem:triple-point-inequality})
\begin{equation}
t_{g+1}(M)\leq t_{g}(M)+2\,,\; g=0,1,2,\ldots ,\label{eq:consecutive-triple-point-numbers-2}
\end{equation}
that arises from a simple surgery operation (the \emph{handle piping}, see Figure~\ref{Fig:handle}). This will be detailed in Section~\ref{sec:spectrum-surgerysurfaces}.
The characterization of the triple point spectrum of $\mathbb{S}^3$ needs
more work.
The identity \eqref{eq:triple-point-spectrum-S3} is proposed in~\cite{racsam}
as an open question.
Using that $t_0(\mathbb{S}^3)=2$~\cite{racsam}, the handle piping provides a filling
Dehn $g$-torus in $\mathbb{S}^3$ with $2+2g$ triple points for each $g=1,2,\ldots$.
The harder part is to prove that all these filling Dehn surfaces in $\mathbb{S}^3$ are minimal.
To do so, in Section~\ref{sec:fundamentalgroup} we introduce a presentation of the
fundamental group of a Dehn $g$-torus. This is a generalization of a presentation of
the fundamental group of Dehn spheres due to W. Haken and detailed in~\cite{tesis}.
Using it, in Section~\ref{sec:checkers-homology-spheres} we prove:
\begin{thmA}
If $M$ is a $\mathbb{Z}/2$-homology $3$-sphere, then
\begin{equation*}
t_g(M) \geq 2 + 2g.
\end{equation*} \end{thmA}
This theorem completes the proof of \eqref{eq:triple-point-spectrum-S3}.
\begin{figure}
\caption{A filling Dehn sphere in $\mathbb{S}^2\times \mathbb{S}^1$}
\label{Fig:audi}
\end{figure}
\section{Dehn surfaces and their Johansson's diagrams}\label{sec:Dehn-surfaces-Johansson-diagrams}
Let $\Sigma$ be a Dehn surface in $M$ and consider a parametrization $f:S\to M$
of $\Sigma$.
The \emph{singularities} of $\Sigma$ are the points $x\in\Sigma$ such that
$\#f^{-1}(x)>1$. The \emph{singularity set} $S(\Sigma)$ of $\Sigma$ is the set of
singularities of $\Sigma$. As $f$ is in general position, the singularities of
$\Sigma$ can be
divided into double points ($\#f^{-1}(x)=2$), and triple points
($\#f^{-1}(x)=3$). Following~\cite{Shima}, we denote by $T(\Sigma)$ the set
of triple points of $\Sigma$. The preimage under $f$ in $S$ of the
singularity set of $\Sigma$, together with the information about how its
points become identified by $f$ in $\Sigma$ is the \emph{Johansson diagram} $\mathcal{D}$
of $\Sigma$ (see~\cite{Johansson1,Montesinos}). We say that two points of $S$
are \emph{related} if they project onto the same point of $\Sigma$.
In the following a \emph{curve} in $S$, $\Sigma$ or $M$ is the image of an immersion
(a \emph{parametrization} of the curve) from $\mathbb{S}^1$ or $\mathbb{R}$
into $S$, $\Sigma$ or $M$, respectively. A \emph{path} in $S$, $\Sigma$ or $M$ is a map $\eta$
from the interval $[0,1]$ into $S$, $\Sigma$ or $M$, respectively. We say that $\eta$ \emph{joins}
$\eta (0)$ \emph{with} $\eta (1)$, or that $\eta$ \emph{starts} at
$\eta (0)$ and \emph{ends} at $\eta (1)$, and if $\eta(0)=\eta(1)$ we say that $\eta$
is a \emph{loop}.
A double curve of $\Sigma$ is a curve in $M$ contained in $S(\Sigma)$.
Because $S$ is closed, double curves are closed and there is a finite number of them. The number of triple points is also finite. Because $S$ and $M$ are orientable, the preimage under $f$ of a double curve of $\Sigma$ is the union of two different closed curves in $S$, and we will say that these two curves are \emph{sister curves} of $\mathcal{D}$. Thus, the Johansson diagram of $\Sigma$ is composed by an even number of different closed curves in $S$, and we will identify $\mathcal{D}$ with the set of different curves that compose it. For any curve $\alpha\in\mathcal{D}$ we denote by $\tau \alpha$ the sister curve of $\alpha$ in $\mathcal{D}$. This defines a free involution $\tau:\mathcal{D}\rightarrow\mathcal{D}$, the \emph{sistering} of $\mathcal{D}$, that sends each curve of $\mathcal{D}$ into its sister curve of $\mathcal{D}$.
The curves of $\mathcal{D}$ intersect with others or with themselves transversely at the \emph{crossings} of $\mathcal{D}$. The crossings of $\mathcal{D}$ are the preimage under $f$ of the triple points of $\Sigma$. If $P$ is a triple point of $\Sigma$, the three crossings of $\mathcal{D}$ in $f^{-1}(P)$ compose \emph{the triplet of} $P$ (see Figure~\ref{fig:triple-point-00}). In Section~\ref{sec:fundamentalgroup} we will consider paths contained in curves of $\mathcal{D}$ or in double curves. In this special case, we will consider that paths are also immersed, and therefore they continue ``straight ahead'' when they arrive at a crossing.
If $\Sigma$ is a Dehn surface in $M$, a connected component of $M-\Sigma$ is a \emph{region} of $\Sigma$, a connected component of $\Sigma-S(\Sigma)$ is a \emph{face} of $\Sigma$, and a connected component of $S(\Sigma)-T(\Sigma)$ is an \emph{edge} of $\Sigma$. The Dehn surface $\Sigma$ fills $M$ if and only if all its edges, faces and regions are open $1$, $2$ or $3$-dimensional disks respectively.
In Figure~\ref{Fig:audi} we have depicted (left) one of the simplest Johansson diagrams of filling Dehn spheres. This is the diagram of a Dehn sphere $\Sigma$ (right) that fills $\mathbb{S}^2\times \mathbb{S}^1$. In this figure we consider $\mathbb{S}^2\times \mathbb{S}^1$ as $\mathbb{S}^2\times [-1,1]$ where the top and bottom covers $\mathbb{S}^2\times \{+1\}$ and $\mathbb{S}^2\times \{-1\}$ are identified by the vertical projection. This example appears completely detailed in~\cite[Example 7.5.1]{tesis}.
If we are given an \emph{abstract diagram}, i.e., an even collection of curves
in $S$ coherently identified in pairs, it is possible to know if this
abstract diagram is \emph{realizable}: if it is actually the Johansson diagram
of a Dehn surface in a $3$-manifold (see~\cite{Johansson1,Johansson2,tesis}).
It is also possible to know if the abstract diagram is \emph{filling}: if it is
the Johansson diagram of a filling Dehn surface of a 3-manifold (see
~\cite{tesis}). If $\Sigma$ fills $M$, it is possible to build $M$ out of the Johansson diagram of $\Sigma$. Thus, filling Johansson diagrams represent all closed, orientable $3$-manifolds.
It must be noted that when a diagram $(\mathcal{D},\tau)$ in $S$ is not realizable, the quotient space of $S$ under the equivalence relation defined by the diagram is something very close to a Dehn surface: it is a 2-dimensional complex with simple, double and triple points, but it cannot be embedded in any $3$-manifold. We reserve the name \emph{pseudo Dehn surface} for these objects. Many constructions about Dehn surfaces, as the presentation of their fundamental group given in Section~\ref{sec:fundamentalgroup}, for example, are also valid for pseudo Dehn surfaces.
\begin{figure}
\caption{Handle piping}
\label{Fig:handle}
\end{figure}
\section{The triple point spectrum}
\label{sec:spectrum-surgerysurfaces}
In order to understand the structure of the sequence $\mathscr{T}(M)$, we need to relate the different triple point numbers of $M$. For a shorter notation we will omit the word ``filling'' in the expression ``minimal filling Dehn surface''. If we are given a filling Dehn $g$-torus $\Sigma_g$ of $M$ with $q$ triple points, we can always obtain a filling Dehn $(g+1)$-torus $\Sigma_{g+1}$ of $M$ with $q+2$ triple points by applying to $\Sigma_g$ the \emph{handle piping} modification depicted in Figure~\ref{Fig:handle} in a small neighbourhood of a triple point. If the original $\Sigma_g$ is minimal, we have:
\begin{lemma}\label{lem:triple-point-inequality}
For any $g=0,1,2,\ldots$
\begin{equation}
\pushQED{\qed}
t_{g+1}(M)\leq t_{g}(M)+2\,,\label{eq:consecutive-triple-point-numbers-3}\qedhere
\popQED
\end{equation}
\end{lemma}
This lemma suggests the following definitions.
\begin{definition}\label{def:exceptional}
A minimal Dehn $g$-torus of $M$ is \emph{exceptional} if it has fewer than $t_{g-1}(M)+2$ triple points.
\end{definition}
\begin{definition}\label{def:height}
The \emph{height} $\mathscr{H}(M)$ of $M$ is the highest genus among all exceptional Dehn $g$-tori of $M$.
If $M$ has no exceptional Dehn $g$-torus, $\mathscr{H}(M)=0$.
\end{definition}
It is clear that if $\mathscr{H}(M)$ is finite, the equality in \eqref{eq:consecutive-triple-point-numbers-3} holds for all $g\geq \mathscr{H}(M)$.
An important result for filling Dehn surfaces which is of great interest here is the following.
\begin{proposition}
\label{prop:triplets-regions}
A Dehn $g$-torus in $M$ with $q$ triple points and $r$ regions fills $M$ if and only if
\begin{equation}
r=q+(2-2g)\,.\label{eq:triplets-regions}
\end{equation}
\end{proposition}
This proposition follows from~\cite[Theorem 3.7.1]{tesis} (see also~\cite[Lemma 43]{RHomotopies}). Its proof relies on Euler's characteristic techniques and it has strong consequences in this context:
\begin{remark}\label{rmk:at-least-1+2g-triple-points}
A filling Dehn $g$-torus has at least $2g-1$ triple points.
\end{remark}
\begin{remark}\label{rmk:handle-regions-unchanged}
After a handle piping on a filling Dehn $g$-torus the number of regions remains unchanged. More generally, if $\Sigma_g$ is a filling Dehn $g$-torus of $M$ with $q$ triple points and $\Sigma_{g+1}$ is a filling Dehn $(g+1)$-torus of $M$ with $q+2$ triple points, the number of regions of $\Sigma_g$ and $\Sigma_{g+1}$ is the same.
\end{remark}
\begin{remark}\label{rmk:exceptional-reduce-regions}
Exceptional Dehn $g$-tori reduce the number of regions, that is, an exceptional Dehn $g$-torus
in $M$ has less regions than any minimal Dehn $g'$-torus in $M$ with $g'<g$.
\end{remark}
\begin{figure}
\caption{Dual curves to the curves of $\mathcal{D}$}
\label{Fig:dual-curve-flat}
\label{Fig:dual-curve}
\label{Fig:dual-curve-flat-dual-curve}
\end{figure}
As the number of regions is bounded from below, by Remark~\ref{rmk:exceptional-reduce-regions} there cannot be exceptional Dehn $g$-tori in $M$ with arbitrarily high genus. Therefore,
\begin{theorem}\label{thm:height-finite}
The height of $M$ is finite.\qed
\end{theorem}
In~\cite[Example 7.5.2]{tesis} a filling Dehn $1$-torus of $\mathbb{S}^2\times \mathbb{S}^1$ with just $1$ triple point is shown. It is clearly a minimal Dehn $1$-torus, and by Proposition~\ref{prop:triplets-regions} it has only one region. By Remark~\ref{rmk:exceptional-reduce-regions}, there cannot be an exceptional Dehn $g$-torus in $\mathbb{S}^2\times \mathbb{S}^1$ of genus $g>1$. On the other hand, a Dehn sphere in any $3$-manifold has an even number of triple points~\cite{Haken1}, and therefore the filling Dehn sphere of Figure~\ref{Fig:audi} is minimal. It turns out that $\mathscr{H}(\mathbb{S}^2\times \mathbb{S}^1)=1$ and
\begin{theorem}
$\mathscr{T}(\mathbb{S}^2\times \mathbb{S}^1)=(2,1,3,5,\ldots )\,.$\qed
\end{theorem}
\section{The fundamental group of a Dehn $g$-torus}
\label{sec:fundamentalgroup}
In this section the manifold $M$ containing $\Sigma$ is no longer needed. Although we still talk about ``Dehn surfaces'', the construction only makes use of the Johansson diagram $\mathcal{D}$, so it is valid for pseudo Dehn surfaces.
Let $f:S\to M$ be a Dehn surface in $M$ and denote by $\mathcal{D}$ its Johansson diagram.
Fix a simple point $x\notin\mathcal{D}$ as the base point of the fundamental group
$\pi_1(\Sigma)$ of $\Sigma$. We identify $x$ with its preimage
under $f$.
A path in $\Sigma$ is \emph{surfacewise} if it is a path on $S$ mapped to
$\Sigma$ through $f$. We denote by $\pi_S = f_*\pi_1(S)$ the subgroup of $\pi_1(\Sigma)$
generated by the surfacewise loops based at $x$.
In general we will use the same notation for a surfacewise path in $\Sigma$
and its preimage path in $S$.
Let $\alpha,\tau\alpha$ be two sister curves of $\mathcal{D}$, and take two paths
$\lambda_{\alpha},\lambda_{\tau\alpha}$ in $S$, starting from $x$ and
arriving to related points on $\alpha,\tau\alpha$, respectively
(see Figure~\ref{Fig:dual-curve-flat-dual-curve}).
\begin{definition}\label{def:dual-paths}
We say that the loop $\lambda_\alpha \lambda_{\tau\alpha}^{-1}$ in
$\Sigma$ based at $x$ is \emph{dual} to $\alpha$.
\end{definition}
\begin{proposition}\label{prop:surfacewise-and-duals-generate-pi1}
Surfacewise loops based at $x$ and loops dual to the curves
of $\mathcal{D}$ generate $\pi_1(\Sigma)$.
\end{proposition}
\begin{figure}
\caption{Crossing-type and corner-type intersections with $S(\Sigma)$}
\label{Fig:crossing-type}
\label{Fig:corner-type}
\label{Fig:crossing-corner-type}
\end{figure}
\begin{proof}
Consider a loop $\sigma$ in $\Sigma$ based at $x$. Up to homotopy, we can
assume that all the intersections of $\sigma$ with the singular set of $\Sigma$ are of
``crossing type'' or of ``corner type'' as in Figure~\ref{Fig:crossing-type} and~\ref{Fig:corner-type} respectively.
Therefore, $\sigma$ can be written as a product
$\sigma = b_1 b_2\cdots b_k$ of surfacewise paths
(Figure~\ref{Fig:holed_plane}). If $k=1$ or $2$, either $\sigma$ is
surfacewise or it is dual to a
curve of $\mathcal{D}$, and so there is nothing to prove.
In other case, for each $i=2,\ldots,k-1$ we choose a midpoint $x_i$ of $b_i$
and a path $c_i$ joining $x$ with $x_i$ (Figure~\ref{Fig:holed_plane}).
We write $b_i=b_i^-b_i^+$, with
$b_i^-$ ending and $b_i^+$ starting at $x_i$. The loop $\sigma$
is homotopic to
\[
b_1 b_2^- c_2^{-1} c_2 b_2^+ b_3^-
\cdots c_{k-2}b_{k-2}^+ b_{k-1}^- c_{k-1}^{-1}
c_{k-1} b_{k-1}^+ b_k\,,
\]
and this expression is a product of loops dual to curves of $\mathcal{D}$.
\end{proof}
\begin{figure}
\caption{Loops as product of dual loops}
\label{Fig:holed_plane}
\end{figure}
We know that $\pi_S$ is finitely generated because so is $\pi_1(S)$. The
following lemma allows us to find a finite set of generators for
$\pi_1(\Sigma)$.
\begin{lemma}
\label{lem:fundamental-dual-paths-surface-conjugate}
If $a,a'$ are loops on $\Sigma$ dual to $\alpha\in\mathcal{D}$, they are
\emph{surfacewise conjugate}, that is, there are two surfacewise loops
$s ,t$ based at $x$ such that $a=s a' t$.
\end{lemma}
\begin{figure}
\caption{Duals to $\alpha$ are surfacewise conjugate}
\label{Fig:holed_plane_duals-conjugate}
\end{figure}
\begin{proof}
Put $a=\lambda_\alpha \lambda_{\tau\alpha}^{-1}$, $a'=\mu_\alpha
\mu_{\tau\alpha}^{-1}$, where
$\lambda_\alpha,\lambda_{\tau\alpha},\mu_\alpha,\mu_{\tau\alpha}$ are
surfacewise paths as in Definition~\ref{def:dual-paths}. Let
$x_\alpha,x_{\tau\alpha},x'_\alpha,x'_{\tau\alpha}$ be the endpoints of
$\lambda_\alpha,\lambda_{\tau\alpha},\mu_\alpha,\mu_{\tau\alpha}$ in $\mathcal{D}$,
respectively (Figure~\ref{Fig:holed_plane_duals-conjugate}). Let $d$ be one
path in $S$ contained in $\alpha$ joining $x_\alpha$ with $x'_\alpha$, and let $\tau d$ be the
sister path of $d$ inside $\tau\alpha$ joining $x_{\tau\alpha}$ with
$x'_{\tau\alpha}$. In $\Sigma$ the paths $d$ and $\tau d$ coincide, and so we have:
$$a=\lambda_\alpha \lambda_{\tau\alpha}^{-1}=\lambda_\alpha d (\tau d)^{-1}
\lambda_{\tau\alpha}^{-1}=
\lambda_\alpha d \mu_{\alpha}^{-1}\mu_\alpha\mu_{\tau\alpha}^{-1}
\mu_{\tau\alpha} (\tau d)^{-1} \lambda_{\tau\alpha}^{-1}= s a' t\,,$$ where $s =\lambda_\alpha d \mu_{\alpha}^{-1}$ and $t=\mu_{\tau\alpha} (\tau d)^{-1} \lambda_{\tau\alpha}^{-1}$ are surfacewise loops in $\Sigma$ based at $x$. \end{proof}
From now on, we will fix a set of generators $s_1,s_2,\ldots,s_{2g}$ of $\pi_S$, and we will also fix a set of preferred dual loops to the curves of $\mathcal{D}$ as follows. For each diagram curve $\alpha\in\mathcal{D}$ we choose a \emph{basepoint} $x_\alpha$ and a joining arc $\lambda_\alpha$ from $x$ to $x_\alpha$, such that the basepoints of sister curves are related (see Figure~\ref{Fig:dual-curve-flat-dual-curve}). The preferred dual loop of $\alpha\in\mathcal{D}$ is
\[
a = \lambda_\alpha \lambda_{\tau\alpha}^{-1}\,.
\] If a curve of $\mathcal{D}$ is denoted with lowercase greek letters \[
\alpha, \beta, \gamma, \ldots,
\alpha_1, \alpha_2, \ldots, \alpha_i, \ldots, \tau\alpha\,, \] its preferred dual loop will be denoted with the corresponding lowercase roman letters \[
a, b, c,\ldots, a_1, a_2,\ldots, a_i,\ldots, \tau a\,. \]
\begin{figure}
\caption{Contractible loop near a triple point}
\label{Fig:contractible-loop}
\end{figure}
By Proposition~\ref{prop:surfacewise-and-duals-generate-pi1} and Lemma \ref{lem:fundamental-dual-paths-surface-conjugate}, we have: \begin{theorem}\label{thm:finite-set-of-generators-pi1} If $\alpha_1,\alpha_2,\ldots ,\alpha_{2k}$ are the curves of $\mathcal{D}$, then $\pi_1(\Sigma)$ is generated by \[
\pushQED{\qed}
s_1,s_2,\ldots,s_{2g},a_1,a_2,\ldots,a_{2k}\,.\qedhere
\popQED \]
\end{theorem}
\begin{figure}
\caption{Triple point relation (diagram view)}
\label{Fig:triple-point-diagram}
\end{figure}
In order to obtain a presentation of $\pi_1(\Sigma)$, we need to establish a set of relators associated to the generators of Theorem~\ref{thm:finite-set-of-generators-pi1}. These relators will be a natural extension of Haken's relators for Dehn spheres (see also~\cite{racsam,tesis}).
\begin{enumerate}[\em(R1)]
\item \emph{Dual loop relations}. By construction, if
$\alpha,\tau \alpha$ are two sister curves of $\mathcal{D}$, their dual loops
$a,\tau a$ verify $\tau a= a^{-1}$.
\item \emph{Triple point relations}. The idea behind this relation is that any small circuit around
a triple point as that of Figure~\ref{Fig:contractible-loop}
is homotopically trivial.
Assume that all the curves of $\mathcal{D}$ are oriented,
with sister curves coherently oriented.
Let $P$ be a triple point of $\Sigma$ and let $P_1,P_2,P_3$ be the
three crossings of $\mathcal{D}$ in the triplet of $P$. Label the curves of
$\mathcal{D}$ intersecting at these crossings as in
Figure~\ref{Fig:triple-point-diagram}.
Consider three paths
$\omega_1,\omega_2,\omega_3$ joining $x$ with $P_1,P_2,P_3$ respectively.
At $P_1$, consider the path $d_\alpha$ contained in $\alpha$ that travels
from $P_1$ along the branch labelled $\alpha$ in the positive
sense until it reaches the basepoint $x_\alpha$ of $\alpha$. Consider
also
the similarly defined paths $d_\beta,d_\gamma$ contained in
$\beta,\gamma$ and joining $P_2,P_3$ with $x_\beta,x_\gamma$
respectively, and the sister paths
$d_{\tau\alpha},d_{\tau\beta},d_{\tau\gamma}$ of
$d_{\alpha},d_{\beta},d_{\gamma}$ respectively, that join $P_2,P_3,P_1$
with $x_{\tau\alpha},x_{\tau\beta},x_{\tau\gamma}$ along
$\tau\alpha,\tau\beta,\tau\gamma$, respectively. If we introduce the
loops
\begin{align*}
t_\alpha&=\omega_1 d_\alpha \lambda_\alpha^{-1} &
t_\beta&=\omega_2 d_\beta \lambda_\beta^{-1} &
t_\gamma&=\omega_3 d_\gamma \lambda_\gamma^{-1} \\
t_{\tau\alpha}&=\omega_2 d_{\tau\alpha} \lambda_{\tau\alpha}^{-1} &
t_{\tau\beta}&=\omega_3 d_{\tau\beta} \lambda_{\tau\beta}^{-1} &
t_{\tau\gamma}&=\omega_1 d_{\tau\gamma} \lambda_{\tau\gamma}^{-1}
\end{align*}
we get the triple point relation around $P$:
\begin{equation}
\label{ec:3pto_relation}
t_\alpha a t_{\tau\alpha}^{-1} \,
t_\beta b t_{\tau\beta}^{-1} \,t_\gamma c t_{\tau\gamma}^{-1}=1\,.
\end{equation}
Note that the loops
$t_\alpha,t_{\tau\alpha}, t_\beta,t_{\tau\beta}, t_\gamma$ and
$t_{\tau\gamma}$ are surfacewise elements of $\Sigma$ and so they can be
expressed as words in $s_1,s_2,\ldots,s_{2g}$.
\begin{figure}
\caption{Triple point relation (Dehn surface
view)}
\label{Fig:triple-point-Sigma}
\end{figure}
\item \emph{Double curve relations}.
In~\cite{racsam,RHomotopies} these relators did not appear because in $\mathbb{S}^2$ all loops
are homotopically trivial, while in a $g$-torus this is not true.
Let $\alpha,\tau\alpha$ be a pair of sister curves
of $\mathcal{D}$. We orient both curves coherently starting from their basepoints
$x_\alpha,x_{\tau\alpha}$. The loops
$\lambda_\alpha \alpha \lambda_{\alpha}^{-1}$ and
$\lambda_{\tau\alpha} \tau\alpha \lambda_{\tau\alpha}^{-1}$ in $S$ are
related in $\Sigma$ because $\alpha$ and $\tau\alpha$ coincide in
$\Sigma$:
\[
\lambda_\alpha \alpha \lambda_{\alpha}^{-1}= \lambda_\alpha\lambda_{\tau\alpha}^{-1} \lambda_{\tau\alpha} \tau\alpha \lambda_{\tau\alpha}^{-1} \lambda_{\tau\alpha} \lambda_{\alpha}^{-1}\,.
\]
If we also denote by $\alpha$ and $\tau\alpha$ the surfacewise elements
of $\pi_1(\Sigma)$ represented by
$\lambda_\alpha \alpha \lambda_{\alpha}^{-1}$ and
$\lambda_{\tau\alpha} \tau\alpha \lambda_{\tau\alpha}^{-1}$,
respectively, this relation can be written as
\begin{equation}\label{eq:double-curve-relator}
\alpha=a \,\tau\alpha \, a^{-1}\,.
\end{equation}
\item \emph{Surface relations}. Those are the relations that the
surfacewise generators $s_1,s_2,\ldots,s_{2g}$ verify when considered as
elements of $\pi_1(S)$.
\end{enumerate}
Any relation among those specified above defines an associated
\emph{dual loop relator}, \emph{triple point relator},
\emph{double curve relator} or \emph{surface relator} in the natural way.
\begin{theorem}
\label{thm:fundamental-group-presentation}
If
$\mathcal{D}=\{\alpha_1,\alpha_2,\ldots,\alpha_{2k}\}$, the fundamental group
$\pi_{1}(\Sigma)$ is isomorphic to
\[
\pi (\mathcal{D})=\langle\, s_1,s_2,\ldots,s_{2g},a_1,a_2,\ldots,a_{2k} \mid
\text{R1, R2, R3 and R4 relators}\,\rangle\,.
\]
\end{theorem}
We need to introduce some notation and a previous result before being able
to give the proof of this theorem.
If we consider the generators of $\pi (\mathcal{D})$ as elements of $\pi_1 (\Sigma)$,
we are defining implicitly a homomorphism
$\varepsilon:\pi (\mathcal{D})\to\pi_1(\Sigma)$ which is surjective by
Theorem~\ref{thm:finite-set-of-generators-pi1}. To prove that
$\varepsilon$ is also injective we will prove first that $\pi(\mathcal{D})$ behaves
exactly as $\pi_1(\Sigma)$ when dealing with covering spaces:
the covering spaces of $\Sigma$ are completely characterized by
representations of $\pi(\mathcal{D})$ on a permutation group. For $m\in\mathbb{N}$ we
denote by $\Omega_m$ the group of permutations of the finite set
$\{1,2,\ldots, m\}$, and we denote by $\Omega_\infty$ the group of
permutations of the countable set $\mathbb{N}$.
\begin{lemma}
\label{lem:representations-factorize}
Every representation $\rho:\pi (\mathcal{D})\to \Omega_m$, with $m\in\mathbb{N}\cup\{\infty\}$, factors
through $\varepsilon$, i.e., there exists a representation
$\hat\rho:\pi_1(\Sigma)\to\Omega_m$ such that the following diagram
commutes.
\[
\xymatrix{
\pi(\mathcal{D}) \ar[d]_{\varepsilon} \ar[r]^{\rho} & \Omega_m \\
\pi_1(\Sigma) \ar[ur]_{\hat\rho}
}
\]
\end{lemma}
\begin{proof}
Fix an $m\in\mathbb{N}\cup\{\infty\}$, and consider a representation $\rho:\pi(\mathcal{D})\to \Omega_m$.
As $\pi(\mathcal{D})$ verifies the surface relations we have a
natural homomorphism
\[
\zeta:\pi_1(S)\to \pi(\mathcal{D})\,.
\]
The map $\rho_S=\rho\circ \zeta$ is a representation of $\pi_1(S)$; it is the
monodromy homomorphism of an $m$-sheeted covering map $h:\hat{S}\to S$.
Let $x^1,x^2,\ldots$ be the different points of $\hat{S}$ on the fiber of $h$
over $x$, labelled in such way that for any $s\in\pi_1(S)$ the lift of $s$
starting in $x^i$ has its end in $x^j$, where $j=\rho_S(s)(i)$.
If we lift the diagram $\mathcal{D}$ to $\hat{S}$, we obtain another diagram $\hat{\mathcal{D}}$ in $\hat{S}$. We will define a ``sistering'' $\hat{\tau}:\hat{\mathcal{D}}\to\hat{\mathcal{D}}$ between the curves of $\hat{\mathcal{D}}$, with the expected compatibility with $\tau$. For any curve $\alpha\in\mathcal{D}$, we call $\lambda_\alpha^i$ the lift of $\lambda_\alpha$ starting at $x^i$, and $x_\alpha^i$ the endpoint of $\lambda_\alpha^i$. The sistering $\hat{\tau}$ is defined as follows: $\hat{\tau}$ sends the curve of $\hat{\mathcal{D}}$ passing through $x_\alpha^i$ to the curve of $\hat{\mathcal{D}}$ passing through $x_{\tau\alpha}^j$, where $j=\rho(a)(i)$. The points of both curves are related by $\hat{\tau}$ starting from $x_\alpha^i,x_{\tau\alpha}^j$ lifting the relation among the points of $\alpha$ and $\tau\alpha$ near $x_\alpha$ and $x_{\tau\alpha}$ in the natural way. If we think about the circle $\mathbb{S}^1$ as the real numbers modulo $2\pi$, we can take parametrizations $\alpha,\tau\alpha:\mathbb{R}\to S$ of the curves $\alpha,\tau\alpha$ respectively such that $\alpha(0)=x_\alpha$, $\tau\alpha(0)=x_{\tau\alpha}$ and such that $\alpha(t)$ is related by $\tau$ with $\tau\alpha(t)$ for all $t\in\mathbb{R}$. Taking lifts $\alpha^i,\tau\alpha^j:\mathbb{R}\to \hat{S}$ of these parametrizations with $\alpha^i(0)=x^i_\alpha$, $\tau\alpha^j(0)=x^j_{\tau\alpha}$ we state that $\alpha^i(t)$ is related by $\hat{\tau}$ with $\tau\alpha^j(t)$ for all $t\in\mathbb{R}$. We want to prove that $(\hat{\mathcal{D}},\hat{\tau})$ is an abstract Johansson diagram on $\hat{S}$. \begin{claim}\label{claim:2-hat-tau-well-defined} The sistering $\hat{\tau}$ is well defined: if $x_{\alpha}^i,x_{\alpha}^{i'}$ are different lifts of $x_\alpha$ lying on the same curve $\hat{\alpha}$ of $\hat{\mathcal{D}}$, then $x_{\tau\alpha}^{j}$ and $x_{\tau\alpha}^{j'}$ lie on the same curve of $\hat{\mathcal{D}}$, where $j=\rho(a)(i)$ and $j'=\rho(a)(i')$.
\end{claim} \begin{proof}[Proof of Claim~\ref{claim:2-hat-tau-well-defined}] Assume first that $x_{\alpha}^i$ and $x_{\alpha}^{i'}$ are consecutive in $\hat{\alpha}$: we can travel along $\hat{\alpha}$ from $x_{\alpha}^i$ to $x_{\alpha}^{i'}$ without crossing any other point of $h^{-1}(x_{\alpha})$. Choosing appropriately the orientation of $\alpha$ or the indices $i,i'$, we can assume that the lift of $\alpha$ starting at $x_{\alpha}^i$ ends at $x_{\alpha}^{i'}$, and in terms of $\pi(\mathcal{D})$ this implies that $\rho(\alpha)(i)=i'$. Therefore, \begin{align*} \rho(\tau\alpha)(j)&=\rho(\tau\alpha)(\rho(a)(i))=\rho(a\tau\alpha)(i)\\ & =\rho(\alpha a)(i)= \rho(a)(\rho(\alpha)(i))=\rho(a)(i')=j'\,, \end{align*} because by the double curve relation \eqref{eq:double-curve-relator} we have $\alpha a=a \tau\alpha$. The points $x_{\tau\alpha}^j$ and $x_{\tau\alpha}^{j'}$ are consecutive in a lift of $\tau\alpha$ in $\hat{\mathcal{D}}$.
By repeating this argument, the statement is also true if $x_{\alpha}^i$ and $x_{\alpha}^{i'}$ are not consecutive in $\hat{\alpha}$. \end{proof} \begin{claim}\label{claim:1-hat-tau-involutive} The sistering $\hat{\tau}$ is an involution on $\hat{\mathcal{D}}$. \end{claim}
\begin{proof}[Proof of Claim~\ref{claim:1-hat-tau-involutive}]
We have $\tau a=a^{-1}$ in $\pi(\mathcal{D})$ by dual loop relations. Then $\rho(\tau a)=\rho(a)^{-1}$ because $\rho$ is a homomorphism. Therefore if we have $\rho(a)(i)=j$, it is also $\rho(\tau a)(j)=i$. In other words, the sister of the curve of $\hat{\mathcal{D}}$ passing through $x_\alpha^i$ is the curve of $\hat{\mathcal{D}}$ passing through $x_{\tau\alpha}^j$, and the sister of the curve of $\hat{\mathcal{D}}$ passing through $x_{\tau\alpha}^j$ is the curve of $\hat{\mathcal{D}}$ passing through $x_{\alpha}^i$. \end{proof}
The sistering $\hat{\tau}$ defines an equivalence relation into the points of $\hat{\mathcal{D}}$.
By Claims~\ref{claim:2-hat-tau-well-defined} and~\ref{claim:1-hat-tau-involutive}, each point of $\hat{\mathcal{D}}$ which is not a crossing has exactly another point related with it.
\begin{claim}
\label{claim:4-crossings-in-triples}
The crossings of $\hat{\mathcal{D}}$ are related in triplets by $\hat{\tau}$. \end{claim}
\begin{proof}[Proof of Claim~\ref{claim:4-crossings-in-triples}] Let $P$ be a triple point of $\Sigma$, and let $P_1,P_2,P_3$ be the three crossings of $\mathcal{D}$ in the triplet of $P$. We label the curves of $\mathcal{D}$ intersecting at $P_1,P_2,P_3$ as in Figure~\ref{Fig:triple-point-diagram}, and we consider the paths $$\omega_1,\omega_2,\omega_3,\quad d_{\alpha},d_{\tau\alpha},d_{\beta},d_{\tau\beta},d_{\gamma},d_{\tau\gamma},\quad t_{\alpha},t_{\tau\alpha},t_{\beta},t_{\tau\beta},t_{\gamma},t_{\tau\gamma}$$ as in the construction of the triple point relations (Figure~\ref{Fig:triple-point-diagram}).
For each $n=1,2,3$ and $i=1,2,\ldots$, let $\omega_n^i$ be the lift of $\omega_n$ starting at $x^{i}$, and let $P_n^i$ be the endpoint of $\omega_n^i$. If we choose one of the lifts $P_1^i$ of $P_1$, it is related by $\hat{\tau}$ with one lift $P_2^j$ of $P_2$ by means of the sister curve of the lift of $\alpha$ passing through $P_1^i$. Let us label $\alpha^i$ the lift of $\alpha$ passing through $P_1^i$. In order to find $P_2^j$ we need to find the sister curve of $\alpha^i$, and to do this we follow $\alpha^i$ until we reach a lift of $x_\alpha$. This is done by taking the lift of $d_\alpha$ starting at $P_1^i$, which ends at a lift $x_\alpha^{i'}$ of $x_\alpha$. The lift of $\lambda_\alpha$ ending at $x_\alpha^{i'}$ starts at $x^{i'}$, where $i'=\rho(t_\alpha)(i)$. The sister curve of $\alpha^i$ is the lift of $\tau \alpha$ passing through $x_{\tau\alpha}^{j'}$, where $j'=\rho(a)(i')$. Finally, $P_2^j$ is located at the starting point of the lift of $d_{\tau \alpha}$ ending at $x_{\tau\alpha}^{j'}$, and it satisfies $j=\rho(t_{\tau\alpha}^{-1})(j')$. In other words, it is \[ j=\rho(t_\alpha a t_{\tau\alpha}^{-1})(i)\,. \] By the same argument, we have that $P_2^j$ is related with $P_3^\ell$, with $\ell=\rho(t_\beta b t_{\tau\beta}^{-1})(j)$, and that $P_3^\ell$ is related with $P_1^{i^*}$, where $i^*=\rho(t_\gamma c t_{\tau\gamma}^{-1})(\ell)$. Triple point relation~\eqref{ec:3pto_relation} at $P$ implies that $i^*=i$, and thus there are no more points related with $P_1^i$ different from $P_2^j$ and $P_3^\ell$. This construction is valid in general and so the crossings of $\mathcal{D}$ are related in triplets by $\hat{\tau}$. \end{proof}
We have proved that $(\hat{\mathcal{D}},\hat{\tau})$ is an abstract Johansson diagram in $\hat{S}$: points in $\hat{\mathcal{D}}$ are identified in pairs with the exception of crossings, which are identified in triplets. Moreover, this equivalence relation is compatible with $h$, that is, if $\hat{Y},\hat{Z}\in\hat{S}$ are related by $\hat{\tau}$, their projections $h(\hat{Y}),h(\hat{Z})\in S$ are related by $\tau$. The quotient space $\hat{\Sigma}$ of $\hat{S}$ under the equivalence relation defined by $\hat{\tau}$ is a (pseudo) Dehn surface and we can define a map $\hat{h}:\hat{\Sigma}\to\Sigma$ making the following diagram commutative: \[
\xymatrix{
\hat{S} \ar[r]^{\hat{f}} \ar[d]_{h} & \hat\Sigma \ar[d]^{\hat{h}} \\
S \ar[r]_ {f} & \Sigma
} \] where $\hat{f}:\hat{S}\to\hat\Sigma$ is the quotient map. The map $\hat{h}$ is in fact an $m$-sheeted covering map whose monodromy homomorphism $\hat\rho$ verifies by construction $\rho=\hat\rho\circ\varepsilon$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:fundamental-group-presentation}]
Due to Cayley's Theorem~\cite[p.~9]{Hall}, every group admits an injective
representation into the group of permutations of its own elements. Because
$\pi(\mathcal{D})$ is finite or countable, the group of permutations of its elements
is isomorphic to $\Omega_m$, for an $m\in\mathbb{N}\cup\{\infty\}$. Thus, we can construct a
faithful representation $\rho:\pi(\mathcal{D})\to\Omega_m$, and by
Lemma~\ref{lem:representations-factorize}, this implies that $\varepsilon$
must be injective. \end{proof}
Hurewicz's Theorem implies that the first homology group of $\Sigma$ is isomorphic to the abelianization of its fundamental group. Let us see how to adapt the presentation of Theorem~\ref{thm:fundamental-group-presentation} to the abelian case. Following the notation introduced above: \begin{enumerate}[\em({AR}1)]
\item \emph{Dual loop relations}.\label{AR1}
These relations remain the same: if
$\alpha$ and $\tau\alpha$ are sister curves, their preferred dual loops
satisfy $a = -\tau a$.
\item \emph{Triple point relations}. The triple point relations can be
simplified using commutativity to
\[
a + b + c = w,
\]
where $w$ is a word in surfacewise generators and $a$, $b$ and $c$ are
preferred dual loops of double curves.
\item \emph{Double curve relations}. Conjugacies on commuting elements are
equalities, so the double curve relation can be written as
$\alpha = \tau\alpha$.
\item \emph{Surface relations}. The generators of the fundamental group of
the $g$-torus $S$ are related by a product of commutators. In the abelian
context, this relation is trivial.
\end{enumerate} Relations~AR\ref{AR1} allow us to select $k$ generators $a_1,a_2,\ldots,a_k$ from the set of preferred dual loops, dropping $k$ of them. Finally,
\begin{theorem}
\label{thm:homology}
The abelian group $H_1(\Sigma)$ is isomorphic to
\[
H_1(\mathcal{D}) = \langle\, s_1,s_2,\ldots,s_{2g},a_1,a_2,\ldots,a_k
\mid \text{AR2 and AR3 relations} \,\rangle.
\] \end{theorem}
\begin{remark}\label{rmk:homology-M-Sigma}
If $\Sigma$ is filling, it is the $2$-skeleton of $M$ as a CW-complex, so
$\pi_1(\Sigma)=\pi_1(M)$. Hence $H_1(\Sigma)=H_1(M)$, which is isomorphic to
the second homology group due to Poincar\'e duality. In this way,
Theorem~\ref{thm:homology} characterizes completely the homology of the
closed orientable manifold $M$. \end{remark}
\section{Checkerboards and homology spheres}\label{sec:checkers-homology-spheres}
Consider a closed orientable $3$-manifold $M$ and a Dehn surface
$S\to\Sigma\subset M$. We say \emph{$M$ is checkered by $\Sigma$} if
we can assign a
color from $\{0,1\}$ to each region of $\Sigma$ such that if two of them
share a face they have different colors. In the same way, $S$ is checkered by
the Johansson diagram $\mathcal{D}$ of $\Sigma$ if we can assign a color from $\{0,1\}$ to
each face of $S-\mathcal{D}$ such that if two of them share an edge they have
different colors. Checkerboards are a usual tool in knot theory
(see \cite{Carter-Kamada-Saito}, for example).
\begin{lemma}
\label{lem:checkered_paths}
The manifold $M$ is checkered by $\Sigma$ if and only if any loop
on $M$ meeting $\Sigma$ transversely on the faces intersects $\Sigma$ in an even
number of points.
\end{lemma}
\begin{proof}
Given the decomposition of $M$ in regions of $\Sigma$, we can build a graph $G_\Sigma$
describing it combinatorially. Each vertex corresponds to a
region and there is an edge connecting two vertices if the corresponding
regions share a face. It is clear that $M$ is checkered by $\Sigma$ if and only if
$G_\Sigma$ is $2$-colourable.
Each path $\gamma$ in $M$ meeting $\Sigma$ transversely on the faces has an associated
path $c_\gamma$ in $G_\Sigma$ that encodes the sequence of ``crossings'' of $\gamma$ between
different regions of $\Sigma$ across the faces of $\Sigma$. The number of points of $\gamma\cap\Sigma$ is just the
number of edges of $c_\gamma$, and if $\gamma$ is closed, $c_\gamma$ is closed too.
As a graph is $2$-colorable if and only if it is bipartite, the statement
of the lemma is equivalent to: $G_\Sigma$ is bipartite if and only if all
loops have an even number of edges, which is trivially true.
\end{proof}
\begin{proposition}
\label{prop:checkered}
A $\mathbb{Z}/2$-homology $3$-sphere is checkered by any
Dehn surface.
\end{proposition}
\begin{proof}
Let $M$ be a $\mathbb{Z}/2$-homology $3$-sphere and let $\Sigma$ be a Dehn surface in
$M$. Let $\gamma$ be a loop in $M$ intersecting $\Sigma$ transversely at
the faces. Both $\gamma$ and $\Sigma$ are $\mathbb{Z}/2$-nullhomologous.
Therefore the set of intersection points of $\gamma$ and $\Sigma$ must be a
$\mathbb{Z}/2$-nullhomologous $0$-cycle, and hence the number of intersection points
should be even. Lemma~\ref{lem:checkered_paths} completes the proof.
\end{proof}
If the surface $S$ is orientable, we can fix a well-defined normal field on
$\Sigma$ in $M$. We assign to each face of $\Sigma$ the color of
the region pointed by the normal vector field on the face. The faces of $S$
sharing an edge cannot also share the color, as the corresponding regions
share a face (see Figure~\ref{fig:checkers}). Therefore:
\begin{figure}
\caption{Coloring faces with region colors.}
\label{fig:checkers}
\end{figure}
\begin{lemma}\label{lem:orientable-checkered-surface-checkered-diagram}
If $M$ and $S$ are orientable and $M$ is checkered by $\Sigma$, the
surface $S$ is also checkered by $\mathcal{D}$.
\end{lemma}
The converse of this lemma does not hold. The 2-sphere $\mathbb{S}^2$ is
checkered by the diagram of Figure~\ref{Fig:audi}, but
$\mathbb{S}^2\times \mathbb{S}^1$ is not checkered by the corresponding
filling Dehn sphere. The existence of checkerboards has strong
implications on the number of triple points.
\begin{proposition}
\label{prop:checkered-even}
If $M$ is checkered by $\Sigma$, the number of triple points of $\Sigma$ is
even.
\end{proposition}
\begin{proof}
For each curve of $\mathcal{D}$, consider a \emph{neighbouring curve} running parallel to it and intersecting $\mathcal{D}$ transversely near the crossings of $\mathcal{D}$
(see~\cite[Fig. 26]{RHomotopies}). As $S$ is
checkered by $\mathcal{D}$, by a similar argument to that of the proof of
Lemma~\ref{lem:checkered_paths} each neighbouring curve must intersect $\mathcal{D}$
in an even number of points.
Because the number of curves in $\mathcal{D}$ is even, the total amount of intersection
points between neighbouring curves and diagram curves is a multiple of $4$.
Each crossing of the diagram
corresponds to two of those intersections, so the number of crossings is
even. Finally, the number of triple points is also even, as it is the
number of crossings divided by three.
\end{proof}
From Remark~\ref{rmk:at-least-1+2g-triple-points} and Proposition~\ref{prop:checkered-even} we get:
\begin{theorem}
\label{thm:spectrum_bound}
Let $\Sigma$ be a filling Dehn $g$-torus
of $M$. If $M$ is checkered by $\Sigma$, the number of
triple points of $\Sigma$ is greater than or equal to $2g$. \qed
\end{theorem}
All these results allow us to give a sharper lower bound for the
triple point numbers of $\mathbb{Z}/2$-homology $3$-spheres.
\begin{theorem}\label{thm:spectrum-homology-sphere_bound}
If $M$ is a $\mathbb{Z}/2$-homology $3$-sphere, then
\begin{equation*}
t_g(M) \geq 2 + 2g.
\end{equation*}
\end{theorem}
\begin{proof}
By Theorem~\ref{thm:spectrum_bound} it is enough to show that
the number of triple points cannot be $2g$.
Let $\Sigma$ be a Dehn $g$-torus in $M$ with exactly $2g$ triple
points. Let $S$ be the domain of $\Sigma$.
Let us compute the first homology group of $\Sigma$ with coefficients in
$\mathbb{Z}/2$. By Theorem~\ref{thm:homology} this abelian group is
generated by $s_1,s_2,\ldots,s_{2g}$, the curves generating $H_1(S)$, and the
preferred dual loops $a_1,a_2,\ldots,a_{k}$ extracted from the $k$ double curves of $\Sigma$.
The relations come from the $2g$ AR2 relations and the $k$ AR3 relations by
just taking coefficients in $\mathbb{Z}/2$. With this presentation $H_1(\Sigma;\mathbb{Z}/2)$
has the same number of generators as relators.
Consider only the $k$ AR3 relators. If $\alpha$ and $\tau\alpha$
are sister curves, as they are surfacewise curves they can be written
(up to homology, see \eqref{eq:double-curve-relator}) as
\begin{align*}
\alpha &= \sum_{i=1}^{2g} n^i_\alpha s_i, &
\tau\alpha &= \sum_{i=1}^{2g} m^i_\alpha s_i,
\end{align*}
with $n_\alpha^i,m_\alpha^i\in\mathbb{Z}/2$. Therefore, the AR3 relation
given by the pair $\alpha,\tau\alpha$ is
\[
\sum_{i=1}^{2g} q^i_\alpha s_i=0\,,
\]
with $q^i_\alpha=n^i_\alpha + m^i_\alpha$.
All this information can be written as a tableau
\[
\begin{array}{c|cccc}
& s_1 & s_2 & \cdots & s_{2g} \\[.5mm]
\hline
\alpha_1\vphantom{\raisebox{2mm}1}
& q_{\alpha_1}^1 & q_{\alpha_1}^2 & \cdots & q_{\alpha_1}^{2g}
\\[1mm]
\alpha_2 & q_{\alpha_2}^1 & q_{\alpha_2}^2 & \cdots & q_{\alpha_2}^{2g}
\\
\vdots & \vdots & \vdots & \ddots & \vdots \\[1mm]
\alpha_k & q_{\alpha_k}^1 & q_{\alpha_k}^2 & \cdots & q_{\alpha_k}^{2g}
\\[1mm]
\end{array}
\]
Rows in this tableau represent double curve relators.
Using intersection theory, it can be seen that columns count
intersections between the surfacewise generators and the diagram curves.
By Proposition~\ref{prop:checkered} and Lemma~\ref{lem:orientable-checkered-surface-checkered-diagram}
the surface $S$ is checkered by $\mathcal{D}$, so the intersection number of each curve $s_i$
with
the diagram should be even. This implies that the sum of each column is $0\pmod2$.
In other words, one of the AR3 relators can be written as a linear combination
of the rest. Therefore, the group $H_1(\Sigma;\mathbb{Z}/2)$ has
$2g+k$ generators and at most $2g+k-1$ nontrivial relations, and it cannot be trivial.
By Remark~\ref{rmk:homology-M-Sigma} this implies that $\Sigma$ cannot fill the $\mathbb{Z}/2$-homology $3$-sphere $M$.
\end{proof}
It is well known that $t_0(\mathbb{S}^3)=2$ (see~\cite{racsam,tesis}).
From Lemma~\ref{lem:triple-point-inequality} and
Theorem~\ref{thm:spectrum-homology-sphere_bound}, $\mathbb{S}^3$ cannot have exceptional Dehn $g$-tori, therefore $\mathscr{H}(\mathbb{S}^3)=0$ and finally
\begin{corollary}
$\mathscr{T}(\mathbb{S}^3)=(2,4,6,\ldots)$.\qed
\end{corollary}
\end{document} |
\begin{document}
\sloppy
\title{Path Deviations Outperform Approximate Stability in Heterogeneous Congestion Games}
\begin{abstract} We consider non-atomic network congestion games with heterogeneous players where the latencies of the paths are subject to some bounded deviations. This model encompasses several well-studied extensions of the classical Wardrop model which incorporate, for example, risk-aversion, altruism or travel time delays. Our main goal is to analyze the worst-case deterioration in social cost of a \emph{deviated\xspace Nash flow} (i.e., for the perturbed latencies) with respect to an original Nash flow.
We show that for homogeneous players deviated\xspace Nash flows coincide with approximate Nash flows and derive tight bounds on their inefficiency. In contrast, we show that for heterogeneous populations this equivalence does not hold. We derive tight bounds on the inefficiency of both deviated\xspace and approximate Nash flows for \emph{arbitrary} player sensitivity distributions.
Intuitively, our results suggest that the negative impact of path deviations (e.g., caused by risk-averse behavior or latency perturbations) is less severe than approximate stability\xspace (e.g., caused by limited responsiveness or bounded rationality).
We also obtain a tight bound on the inefficiency of deviated\xspace Nash flows for matroid congestion games and homogeneous populations if the path deviations can be decomposed into edge deviations.
In particular, this provides a tight bound on the Price of Risk-Aversion for matroid congestion games. \end{abstract}
\section{Introduction}
In 1952, Wardrop~\cite{Wardrop1952} introduced a simple model, also known as the \emph{Wardrop model}, to study outcomes of selfish route choices in traffic networks which are affected by congestion. In this model, there is a continuum of non-atomic players, each controlling an infinitesimally small amount of flow, whose goal is to choose paths in a given network to minimize their own travel times. The latency (or delay) of each edge is prescribed by a non-negative, non-decreasing latency function which depends on the total flow on that edge.
Ever since its introduction, the Wardrop model has been used extensively, both in operations research and traffic engineering studies, to investigate various aspects of selfish routing in networks.
More recently, the classical Wardrop model has been extended in various ways to capture more complex player behaviors. Examples include the incorporation of uncertainty attitudes (e.g., risk-aversion, risk-seeking), cost alterations (e.g., latency perturbations, road pricing), other-regarding dispositions (e.g., altruism, spite) and player biases (e.g., responsiveness, bounded rationality).
Several of these extensions can be viewed as defining some modified cost for each path which combines the original latency with some `deviation' (or perturbation) along that path. Such deviations are said to be \emph{$\beta$-bounded} if the total deviation along each path is at most $\beta$ times the latency of that path. The player objective then becomes to minimize the combined cost of latency and deviation along a path (possibly using different norms). An equilibrium outcome corresponds to a \emph{$\beta$-deviated\xspace Nash flow}, i.e., a Nash flow with respect to the combined cost. The deviations might be given explicitly (e.g., as in the altruism model of Chen et al.~\cite{Chen2014}) or be defined implicitly (e.g., as in the risk-aversion model of Nikolova and Stier-Moses~\cite{Nikolova2015}). Further, different fractions of players might perceive these deviations differently, i.e., players might be heterogeneous with respect to the deviations.
Another extension, which is closely related to the one above, is to incorporate different degrees of `responsiveness' of the players. For example, each player might be willing to deviate to an alternative route only if her latency decreases by at least a certain fraction. In this context, an equilibrium outcome corresponds to an \emph{$\epsilon$-approximate Nash flow} for some $\epsilon \ge 0$, i.e., for each player the latency is at most $(1+\epsilon)$ times the latency of any other path. Here, $\epsilon$ is a parameter which reflects the responsiveness of the players. An analogue definition can be given for populations with heterogeneous responsiveness parameters.
To illustrate the relation between deviated\xspace and approximate Nash flows, suppose we are given a $\beta$-deviated\xspace Nash flow $f$ for some $\beta \geq 0$, where the latency $l_P(f)$ of each path $P$ is perturbed by an arbitrary $\beta$-bounded deviation $\delta_P(f)$ satisfying $0 \leq \delta_P(f) \leq \beta l_P(f)$. Intuitively, the deviations inflate the latency on each path by at most a factor of $(1+\beta)$. Further, assume that the population is homogeneous. From the Nash flow conditions (see Section~\ref{sec:pre} for formal definitions), it follows trivially that $f$ is also an $\epsilon$-approximate Nash flow with $\epsilon = \beta$. But does the converse also hold? That is, can every $\epsilon$-approximate Nash flow be induced by a set of bounded path deviations? More generally, what about the relation between deviated\xspace and approximate Nash flows for heterogeneous populations? Can we bound the inefficiency of these flows?
In this paper, we answer these questions by investigating the relation between the two equilibrium notions. Our main goal is to quantify the inefficiency of deviated\xspace and approximate Nash flows, both for homogeneous and heterogeneous populations. To this aim, we study the (relative) worst-case deterioration in social cost of a $\beta$-deviated\xspace Nash flow with respect to an original (unaltered) Nash flow; we use the term \emph{$\beta$-deviation\xspace ratio} to refer to this ratio. This ratio has recently been studied in the context of risk aversion \cite{Lianeas2016,Nikolova2015} and in the more general context of bounded path deviations \cite{Kleer2016}. Similarly, for approximate Nash flows we are interested in bounding the \emph{$\epsilon$-stability\xspace ratio}, i.e., the worst-case deterioration in social cost of an $\epsilon$-approximate Nash flow with respect to an original Nash flow.
Note that these notions differ from the classical \emph{price of anarchy} notion~\cite{Koutsoupias1999}, which refers to the worst-case deterioration in social cost of a $\beta$-deviated\xspace (respectively, $\varepsilon$-approximate) Nash flow with respect to an \emph{optimal} flow. While the price of anarchy typically depends on the class of latency functions (see, e.g., \cite{Chen2014,Christodoulou2011,Kleer2016,Nikolova2015} for results in this context), the deviation\xspace ratio is independent of the latency functions but depends on the topology of the network (see \cite{Kleer2016,Nikolova2015}).
\paragraph{Our contributions.}
The main contributions of this paper are as follows:
\begin{enumerate}\itemsep7pt
\item We show that for homogeneous populations the set of $\beta$-deviated\xspace Nash flows coincides with the set of $\epsilon$-approximate Nash flows for $\beta = \epsilon$. Further, we derive an upper bound on the $\epsilon$-stability\xspace ratio (and thus also on the $\epsilon$-deviation\xspace ratio) which is at most $(1+\epsilon)/(1-\epsilon n)$, where $n$ is the number of nodes, for single-commodity networks. We also prove that the upper bound we obtain is tight for \emph{generalized Braess graphs}. These results are presented in Section~\ref{sec:approx}.
\item We prove that for heterogenous populations the above equivalence does not hold. We derive tight bounds for both the $\beta$-deviation\xspace ratio and the $\epsilon$-stability\xspace ratio for single-commodity instances on series-parallel graphs and arbitrary sensitivity distributions of the players. To the best of our knowledge, these are the first inefficiency results in the context of heterogenous populations which are tight for \emph{arbitrary} sensitivity distributions. Our bounds show that both ratios depend on the demands and sensitivity distribution $\gamma$ of the heterogenous players (besides the respective parameters $\beta$ and $\epsilon$). Further, it turns out that the $\beta$-deviation\xspace ratio is always at most the $\epsilon$-stability\xspace ratio for $\epsilon = \beta \gamma$. These results are given in Section~\ref{sec:het}.
\item We also derive a tight bound on the $\beta$-deviation\xspace ratio for single-commodity matroid congestion games and homogeneous populations if the path deviations can be decomposed into edge deviations. To the best of our knowledge, this is the first result in this context which goes beyond network congestion games. In particular, this gives a tight bound on the Price of Risk-Aversion \cite{Nikolova2015} for matroid congestion games. This result is of independent interest and presented in Section~\ref{sec:approx}.
\end{enumerate}
In a nutshell, our results reveal that for homogeneous populations there is no quantitative difference between the inefficiency of deviated\xspace and approximate Nash flows in the worst case. In contrast, for heterogenous populations the $\beta$-deviation\xspace ratio is always at least as good as the $\epsilon$-stability\xspace ratio with $\epsilon = \beta \gamma$. Intuitively, our results suggest that the negative impact of path deviations (e.g., caused by risk-averse behavior or latency perturbations) is less severe than approximate stability\xspace (e.g., caused by limited responsiveness or bounded rationality).
\paragraph{Related work.}
We give a brief overview of the works which are most related to our results. Christodoulou et al. \cite{Christodoulou2011} study the inefficiency of approximate equilibria in terms of the price of anarchy and price of stability (for homogeneous populations). Generalized Braess graphs were introduced by Roughgarden \cite{Roughgarden2006} and are used in many other lower bound constructions (see, e.g., \cite{Englert2008,Kleer2016,Roughgarden2006}). Chen et al. \cite{Chen2014} study an altruistic extension of the Wardrop model and, in particular, also consider heterogeneous altruistic populations. They obtain an upper bound on the ratio between an altruistic Nash flow and a social optimum for parallel graphs, which is tight for two sensitivity classes. It is mentioned that this bound is most likely not tight in general. Meir and Parkes \cite{Meir2014Arxiv} study player-specific cost functions in a smoothness framework \cite{Roughgarden2015}. Some of their inefficiency results are tight, although none of their bounds seems to be tight for arbitrary sensitivity distributions. Matroids have also received some attention in the Wardrop model. In particular, Fujishige et al. \cite{Fujishige2015} show that matroid congestion games are immune against the Braess paradox (and their analysis is tight in a certain sense). We refer the reader to \cite{Kleer2016} for additional references and relations of other models to the bounded path deviation model considered here.
\section{Preliminaries}\label{sec:pre}
Let $\mathcal{I} = (E,(l_e)_{e \in E},(\mathcal{S}_i)_{i \in [k]},(r_i)_{i \in [k]})$ be an instance of a non-atomic congestion game. Here, $E$ is the set of resources (or edges, or arcs) that are equipped with a non-negative, non-decreasing, continuous latency function $l_e: \mathbb{R}_{\ge 0} \rightarrow \mathbb{R}_{\ge 0}$. Each commodity $i \in [k]$ has a strategy set $\mathcal{S}_i \subseteq 2^E$ and demand $r_i \in \mathbb{R}_{>0}$. Note that in general the strategy set $\mathcal{S}_i$ of player $i$ is defined by arbitrary resource subsets. If each strategy $P \in \mathcal{S}_i$ corresponds to an $s_i, t_i$-path in a given directed graph, then the corresponding game is called a \emph{network} congestion game.\footnote{If a network congestion game with a single commodity is considered (i.e., $k = 1$), we omit the commodity index for ease of notation.} We slightly abuse terminology and use the term \emph{path} also to refer to a strategy $P \in \mathcal{S}_i$ of player $i$ (which does not necessarily correspond to a path in a graph); no confusion shall arise. We denote by $\mathcal{S} = \cup_i \mathcal{S}_i$ the set of all paths.
An outcome of the game is a (feasible) flow $f^i: \mathcal{S}_i \rightarrow \mathbb{R}_{\geq 0}$ satisfying $\sum_{P \in \mathcal{S}_i} f_P^i = r_i$ for every $i \in [k]$. We use $\mathcal{F}(\mathcal{S})$ to denote the set of all feasible flows $f = (f^1,\dots,f^k)$.
Given a flow $f = (f^i)_{i \in [k]} \in \mathcal{F}(\mathcal{S})$, we use $f^i_e$ to denote the total flow on resource $e \in E$ of commodity $i \in [k]$, i.e., $f_e^i = \sum_{P \in \mathcal{S}_i : e \in P} f_P^i$. The total flow on edge $e \in E$ is defined as $f_e = \sum_{i \in [k]} f_e^i$.
The latency of a path $P \in \mathcal{S}$ with respect to $f$ is defined as $l_P(f) := \sum_{e \in P} l_e(f_e)$. The cost of commodity $i$ with respect to $f$ is $C_i(f) = \sum_{P \in \mathcal{S}_i} f_P l_P(f)$. The \textit{social cost} $C(f)$ of a flow $f$ is given by its total average latency, i.e., $C(f) = \sum_{i \in [k]} C_i(f) = \sum_{e \in E} f_e l_e(f_e)$. A flow that minimizes $C(\cdot)$ is called \textit{(socially) optimal}.
If the population is heterogenous, then each commodity $i \in [k]$ is further partitioned in $h_i$ \emph{sensitivity classes}, where class $j \in [h_i]$ has demand $r_{ij}$ such that $r_i = \sum_{j \in [h_i]} r_{ij}$. Given a path $P \in \mathcal{S}_i$, we use $f_{P,j}$ to refer to the amount of flow on path $P$ of sensitivity class $j$ (so that $\sum_{j \in [h_i]} f_{P,j} = f_P$).
\begin{comment} We say that $f$ is a \emph{Nash flow} if \begin{equation} \forall i \in [k], \forall P \in \mathcal{S}_i, f_P > 0: \qquad l_{P}(f) \leq l_{P'}(f) \ \ \forall P' \in \mathcal{S}_i. \label{def:nash} \end{equation} Note that under a Nash flow $f$ all flow-carrying paths $P \in \mathcal{S}_i$ of commodity $i \in [k]$ have the same latency. \end{comment}
\paragraph{Deviated Nash flows.}
We consider a \emph{bounded deviation model} similar to the one introduced in \cite{Kleer2016}.\footnote{In fact, in \cite{Kleer2016} more general path deviations are introduced; the path deviations considered here correspond to \emph{$(0, \beta)$-path deviations} in \cite{Kleer2016}.} We use $\delta= (\delta_P)_{P \in \mathcal{S}}$ to denote some arbitrary path deviations, where $\delta_P : \mathcal{F}(\mathcal{S}) \rightarrow \mathbb{R}_{\geq 0}$ for all $P \in \mathcal{S}$. Let $\beta \geq 0$ be fixed. Define the set of \emph{$\beta$-bounded path deviations} as $
\Delta(\beta) = \{ (\delta_P)_{P \in \mathcal{S}} \ | \ 0 \leq \delta_P(f) \leq \beta l_P(f) \text{ for all } f \in \mathcal{F}(\mathcal{S}) \}. $
Every commodity $i \in [k]$ and sensitivity class $j \in [h_i]$ has a non-negative sensitivity $\gamma_{ij}$ with respect to the path deviations. The population is \emph{homogeneous} if $\gamma_{ij} = \gamma$ for all $i \in [k]$, $j \in [h_i]$ and some $\gamma \ge 0$; otherwise, it is \emph{heterogeneous}. Define the \emph{deviated latency} of a path $P \in \mathcal{S}_i$ for sensitivity class $j \in [h_i]$ as $q_{P}^j(f) = l_P(f) + \gamma_{ij} \delta_P(f)$.
We say that a flow $f$ is a \emph{$\beta$-deviated\xspace Nash flow} if there exist some $\beta$-bounded path deviations $\delta \in \Delta(\beta)$ such that \begin{equation} \forall i \in [k], \forall j \in [h_i], \forall P \in \mathcal{S}_i, f_{P,j} > 0: \qquad q_{P}^j(f) \leq q_{P'}^j(f) \ \ \forall P' \in \mathcal{S}_i. \label{eq:nash} \end{equation}
We define the \emph{$\beta$-deviation\xspace ratio} $\text{$\beta$-DR}(\mathcal{I})$ as the maximum ratio $C(f^\beta)/C(f^0)$ of an $\beta$-deviated\xspace Nash flow $f^\beta$ and an original Nash flow $f^0$. Intuitively, the deviation ratio measures the worst-case deterioration in social cost as a result of (bounded) deviations in the path latencies. Note that here the comparison is done with respect to an \emph{unaltered} Nash flow to measure the impact of these deviations.
The set $\Delta(\beta)$ can also be restricted to path deviations which are defined as a function of edge deviations along that path. Suppose every edge $e \in E$ has a deviation $\delta_e : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$ satisfying $0 \leq \delta_e(x) \leq \beta l_e(x)$ for all $x \geq 0$. For example, feasible path deviations can then be defined by the $L_1$-norm objective $\delta_P(f) = \sum_{e \in P} \delta_e(f_e)$ (as in \cite{Kleer2016,Nikolova2015}) or the $L_2$-norm objective $\delta_P(f) = \sqrt{\sum_{e \in P} \delta_e(f_e)^2}$ (as in \cite{Nikolova2015,Lianeas2016}).
The \emph{Price of Risk-Aversion} introduced by Nikolova and Stier-Moses \cite{Nikolova2015} is technically the same ratio as the deviation\xspace ratio for the $L_1$- and $L_2$-norm (see \cite{Kleer2016} for details).
\paragraph{Approximate Nash flows.}
We introduce the notion of an approximate Nash flow. Also here, each commodity $i \in [k]$ and sensitivity class $j \in [h_i]$ has a non-negative sensitivity $\epsilon_{ij}$. We say that the population is \emph{homogeneous} if $\epsilon_{ij} = \epsilon$ for all $i \in [k]$, $j \in [h_i]$ and some $\epsilon \ge 0$; otherwise, it is \emph{heterogeneous}.
A flow $f$ is an \emph{$\epsilon$-approximate Nash flow} with respect to sensitivities $\epsilon = (\epsilon_{ij})_{i \in [k], j \in [h_i]}$ if \begin{equation} \forall i \in [k], \ \forall j \in [h_i], \ \forall P \in \mathcal{S}_i, f_{P,j} > 0: \qquad l_{P}(f) \leq (1+\epsilon_{ij}) l_{P'}(f) \ \ \forall P' \in \mathcal{S}_i \label{def:epsnash} \end{equation} Note that a $0$-approximate Nash flow is simply a Nash flow. We define the \textit{$\epsilon$-stability\xspace ratio} $\text{$\epsilon$-SR}(\mathcal{I})$ as the maximum ratio $C(f^\epsilon)/C(f^0)$ of an $\epsilon$-approximate Nash flow $f^\epsilon$ and an original Nash flow $f^0$.
Some of the proofs are missing in the main text below and can be found in the appendix.
\section{Heterogeneous populations}\label{sec:het}
We first elaborate on the relation between deviated\xspace and approximate Nash flows for general congestion games with heterogeneous populations.
\begin{proposition}\label{prop:inclusion} Let $\mathcal{I}$ be a congestion game with heterogeneous players. If $f$ is a $\beta$-deviated\xspace Nash flow for $\mathcal{I}$, then $f$ is an $\epsilon$-approximate Nash flow for $\mathcal{I}$ with $\epsilon_{ij} = \beta \gamma_{ij}$ for all $i \in [k]$ and $j \in [h_i]$ (for the same demand distribution $r$). \end{proposition}
\paragraph{Discrete sensitivity distributions.}
Subsequently, we show that the converse of Proposition~\ref{prop:inclusion} does not hold.
We do this by providing tight bounds on the $\beta$-deviation\xspace ratio and the $\epsilon$-stability ratio for instances on (single-commodity) series-parallel graphs and arbitrary discrete sensitivity distributions.
\begin{theorem}\label{thm:hetero} Let $\mathcal{I}$ be a single-commodity network congestion game on a series-parallel graph with heterogeneous players, demand distribution $r = (r_i)_{i \in [h]}$ normalized to $1$, i.e., $\sum_{i \in [h]} r_i = 1$, and sensitivity distribution $\gamma = (\gamma_i)_{i \in [h]}$, with $\gamma_1 < \gamma_2 < \dots < \gamma_h$.
Let $\beta \ge 0$ be fixed and define $\epsilon = (\beta \gamma_i)_{i \in [h]}$. Then the $\epsilon$-stability ratio and the $\beta$-deviation\xspace ratio are bounded by: \begin{align} \text{$\epsilon$-SR}(\mathcal{I}) \le 1 + \beta \sum_{j = 1}^h r_j\gamma_j \quad\text{and}\quad \text{$\beta$-DR}(\mathcal{I}) \le 1 + \beta \cdot \max_{j \in [h]} \bigg\{ \gamma_j \bigg( \sum_{p = j}^h r_p \bigg) \bigg\}. \label{eq:comparison_epsilon}
\end{align} Further, both bounds are tight for all distributions $r$ and $\gamma$. \end{theorem}
It is not hard to see that the bound on the $\beta$-deviation\xspace ratio is always smaller than the bound on the $\epsilon$-stability ratio.\footnote{This follows from Markov's inequality: for a random variable $Y$, $P(Y \geq t) \leq E(Y)/t$.} Our bound on the $\beta$-deviation\xspace ratio also yields tight bounds on the \emph{Price of Risk-Aversion} \cite{Nikolova2015} for series-parallel graphs and arbitrary heterogeneous risk-averse populations, both for the $L_1$-norm and $L_2$-norm objective.\footnote{Observe that we show tightness of the bound on parallel arcs, in which case these objectives coincide.}
We need the following technical lemma for the proof of the $\beta$-deviation ratio.
\begin{lemma}\label{lem:max_seq} Let $0 \leq \tau_{k-1} \leq \dots \leq \tau_1 \leq \tau_0$ and $c_i \geq 0$ for $i = 1,\dots,k$ be given. We have $ c_1 \tau_0 + \sum_{i = 1}^{k-1} (c_{i+1} - c_i)\tau_i \leq \tau_0 \cdot \max_{i=1,\dots,k} \{c_i\}. $ \end{lemma}
\begin{proof}[Theorem~\ref{thm:hetero}, $\beta$-deviation\xspace ratio] Let $x = f^\beta$ be a $\beta$-deviated\xspace Nash flow with path deviations $(\delta_P)_{P \in \mathcal{S}} \in \Delta(\beta)$ and let $z = f^0$ be an original Nash flow. Let $X = \{a \in A : x_a > z_a\}$ and $Z = \{a \in A : z_a \geq x_a \text{ and } z_a > 0\}$ (arcs with $x_a = z_a = 0$ may be removed without loss of generality).
In order to analyze the ratio $C(x)/C(z)$ we first argue that we can assume without loss of generality that the latency function $l_a(y)$ is constant for values $y \geq x_a$ for all arcs $a \in Z$. To see this, note that we can replace the function $l_a(\cdot)$ with the function $\hat{l}_a$ defined by $\hat{l}_a(y) = l_a(x_a)$ for all $y \geq x_a$ and $\hat{l}_a(y) = l_a(y)$ for $y \leq x_a$. In particular, this implies that the flow $x$ is still a $\beta$-deviated\xspace Nash flow for the same path deviations as before. This holds since for any path $P$ the latency $l_P(x)$ remains unchanged if we replace the function $l_a$ by $\hat{l}_a$.
By definition of arcs in $Z$, we have $x_a \leq z_a$ and therefore $\hat{l}_a(z_a) = l_a(x_a) \leq l_a(z_a)$. Let $z'$ be an original Nash flow for the instance with $l_a$ replaced by $\hat{l}_a$. Then we have $C(z') \leq C(z)$ using the fact that series-parallel graphs are immune to the Braess paradox, see Milchtaich \cite[Lemma 4]{Milchtaich2006}. Note that, in particular, we find $C(x)/C(z) \leq C(x)/C(z')$. By repeating this argument, we may without loss of generality assume that all latency functions $l_a$ are constant between $x_a$ and $z_a$ for $a \in Z$. Afterwards, we can even replace the function $\hat{l}_a$ by a function that has the constant value of $l_a(x_a)$ everywhere.
In the remainder of the proof, we will denote by $P_j$ a flow-carrying path for sensitivity class $j \in [h]$ that maximizes the path latency amongst all flow-carrying paths for sensitivity class $j \in [h]$, i.e., $ P_j = \text{argmax}_{P \in \mathcal{P} : x_{P,j} > 0} \{ l_P(x) \}. $ Moreover, there also exists a path $P_0$ with the property that $z_a \geq x_a$ and $z_a > 0$ for all arcs $a \in P_0$ (see, e.g., Lemma 2 \cite{Milchtaich2006}).
For fixed $a < b \in \{1,\dots,h\}$, the Nash conditions imply that (these steps are of a similar nature as Lemma 1 \cite{Fleischer2005}) \begin{align*} l_{P_a}(x) + \gamma_{a}\cdot \delta_{P_a}(x) &\leq l_{P_b}(x) + \gamma_{a} \cdot \delta_{P_b}(x) \\ l_{P_b}(x) + \gamma_{b}\cdot \delta_{P_b}(x) &\leq l_{P_a}(x) + \gamma_{b} \cdot \delta_{P_a}(x). \end{align*} Adding up these inequalities implies that $(\gamma_b - \gamma_a) \delta_{P_b}(x) \leq (\gamma_b - \gamma_a) \delta_{P_a}(x)$, which in turn yields that $\delta_{P_b}(x) \leq \delta_{P_a}(x)$ (using that $\gamma_a < \gamma_b$ if $a < b$). Furthermore, we also have \begin{equation}\label{eq:nash1} l_{P_1}(x) + \gamma_1 \delta_{P_1}(x) \leq l_{P_0}(x) + \gamma_1 \delta_{P_0}(x), \end{equation} and $l_{P_0}(x) = l_{P_0}(z) \leq l_{P_1}(z) \leq l_{P_1}(x)$, which can be seen as follows. The equality follows from the fact that $l_a$ is constant for all $a \in Z$ and, by choice, $P_0$ only consists of arcs in $Z$. The first inequality follows from the Nash conditions of the original Nash flow $z$, since there exists a flow-decomposition in which the path $P_0$ is used (since the flow on all arcs of $P_0$ is strictly positive in $z$). The second inequality follows from the fact that $$ \sum_{e \in P_1} l_e(z_e) = \sum_{e \in P_1 \cap X} l_e(z_e) + \sum_{e \in P_1 \cap Z} l_e(z_e) \leq \sum_{e \in P_1 \cap X} l_e(x_e) + \sum_{e \in P_1 \cap Z} l_e(x_e) $$ using that $z_e \leq x_e$ for $e \in X$ and the fact that latency functions for $e \in Z$ are constant. In particular, we find that $l_{P_0}(x) \leq l_{P_1}(x)$. Adding this inequality to (\ref{eq:nash1}), we obtain $\gamma_1\delta_{P_1}(x) \leq \gamma _1 \delta_{P_0}(x)$ and therefore $\delta_{P_1}(x) \leq \delta_{P_0}(x)$. Thus $ \delta_{P_h}(x) \leq \delta_{P_{h-1}}(x) \leq \dots \leq \delta_{P_1}(x) \leq \delta_{P_0}(x). 
$ Moreover, by using induction (see appendix) it can be shown that \begin{equation}\label{eq:induction} l_{P_j}(x) \leq l_{P_0}(x) + \gamma_1 \delta_{P_0}(x) + \bigg[\sum_{g = 1}^{j-1} (\gamma_{g+1} - \gamma_g)\delta_{P_g}(x)\bigg] - \gamma_j \delta_{P_j}(x). \end{equation}
Using (\ref{eq:induction}), we then have \begin{eqnarray} C(x) &\leq & \sum_{j=1}^h r_j l_{P_j}(x) \ \ \ \ \text{(by choice of the paths $P_j$)} \nonumber \\ &\leq& \sum_{j=1}^h r_j \left(l_{P_0}(x) + \gamma_1 \delta_{P_0}(x) + \bigg[\sum_{g = 1}^{j-1} (\gamma_{g+1} - \gamma_g)\delta_{P_g}(x)\bigg] - \gamma_j \delta_{P_j}(x) \right) \nonumber \\ &=& l_{P_0}(x) + \gamma_1 \delta_{P_0}(x) + \sum_{j=1}^{h-1} (r_{j+1}+\dots+ r_h)(\gamma_{j+1} - \gamma_j)\delta_{P_j}(x) - \sum_{j=1}^{h} r_j\gamma_j \delta_{P_j}(x) \nonumber \\ &\leq & l_{P_0}(x) + \gamma_1 \delta_{P_0}(x) \nonumber \\ & & +\sum_{j=1}^{h-1} \bigg[(r_{j+1}+\dots+ r_h)\gamma_{j+1} - (r_j+r_{j+1}+\dots+ r_h)\gamma_j\bigg]\delta_{P_j}(x) \nonumber \end{eqnarray} In the last inequality, we leave out the last negative term $-r_h\gamma_h\delta_{P_h}(x)$. Note that $\gamma_1 = (r_1 + \dots + r_h)\gamma_1$ since we have normalized the demand to $1$.
We can then apply Lemma \ref{lem:max_seq} with $\tau_i = \delta_{P_i}(x)$ for $i = 0,\dots, h-1$ and $ c_i = \gamma_i \cdot \sum_{p = i}^h r_p $ for $i = 1,\dots, h$. Continuing the estimate, we get $$ C(x) \leq l_{P_0}(x) + \max_{j \in [h]} \bigg\{ \gamma_j \cdot \sum_{p = j}^h r_p \bigg\} \cdot \delta_{P_0}(x) \leq \bigg[1 + \beta \cdot \max_{j \in [h]} \bigg\{ \gamma_j \bigg( \sum_{p = j}^h r_p \bigg) \bigg\}\bigg] C(z) $$ where for the second inequality we use that $\delta_{P_0}(x) \leq \beta l_{P_0}(x)$, which holds by definition, and $l_{P_0}(x) = l_{P_0}(z) = C(z)$, which holds because $z$ is an original Nash flow and all arcs in $P_0$ have strictly positive flow in $z$ (and because all arcs in $P_0$ have constant latency functions).
To prove tightness, fix $j \in [h]$ and consider the following instance on two arcs. We take $(l_1(y),\delta_1(y)) = (1,\beta)$ and $(l_2(y),\delta_2(y))$ with $\delta_2(y) = 0$ and $l_2(y)$ a strictly increasing function satisfying $l_2(0) = 1 + \epsilon$ and $l_2(r_j+r_{j+1}+\dots+r_h) = 1 + \gamma_j \beta$, where $\epsilon < \gamma_j \beta$. The (unique) original Nash flow is given by $z = (z_1,z_2) = (1,0)$ with $C(z) = 1$. The (unique) $\beta$-deviated\xspace Nash flow $x$ is given by $x = (x_1,x_2) = (r_1 + r_2 + \dots + r_{j-1}, r_j+r_{j+1}+\dots+r_h)$ with $C(x) = 1 + \beta \cdot \gamma_j (r_j + \dots + r_h)$. Since this construction holds for all $j \in [h]$, we find the desired lower bound.\qed
\end{proof}
\paragraph{Continuous sensitivity distributions.}
We obtain a similar result for more general (not necessarily discrete) sensitivity distributions. That is, we are given a Lebesgue integrable \emph{sensitivity density function} $\psi : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$ over the total demand. Since we can normalize the demand to $1$, we have the condition that $\int_0^\infty \psi(y) dy = 1$. We then find the following natural generalizations of our upper bounds: \begin{enumerate} \item $\text{$\epsilon$-SR}(\mathcal{I}) \le 1 + \beta \int_{0}^\infty y\cdot \psi(y)dy$, and \item $\text{$\beta$-DR}(\mathcal{I}) \le 1 + \beta \cdot \sup_{t \in \mathbb{R}_{\geq 0}} \big\{ t \cdot \int_t^\infty \psi(y) dy \big\}$. \end{enumerate} These bounds are both asymptotically tight for all distributions. Details are given in Corollary \ref{cor:het} in the appendix.
\section{Homogeneous population}\label{sec:approx}
The reverse of Proposition~\ref{prop:inclusion} also holds for homogeneous players in single-commodity instances. As a consequence, the set of $\beta$-deviated\xspace Nash flows and the set of $\epsilon$-approximate Nash flows with $\epsilon = \beta \gamma$ coincide in this case.
Recall that for homogeneous players we have $\gamma_{ij} = \gamma$ for all $i \in [k]$, $j \in [h_i]$ and some $\gamma \ge 0$.
\begin{proposition}\label{prop:equiv} Let $\mathcal{I}$ be a single-commodity congestion game with homogeneous players. $f$ is an $\epsilon$-approximate Nash flow for $\mathcal{I}$ if and only if $f$ is a $\beta$-deviated\xspace Nash flow for $\mathcal{I}$ with $\epsilon = \beta\gamma$. \end{proposition}
\noindent \emph{Upper bound on the stability ratio.} Our main result in this section is an upper bound on the $\epsilon$-stability ratio. Given the above equivalence, this bound also applies to the $\beta$-deviation\xspace ratio with $\epsilon = \beta\gamma$.
The following concept of alternating paths is crucial. For single-commodity instances an alternating path always exists (see, e.g., \cite{Nikolova2015}).
\begin{definition}[Alternating path \cite{Lin2011,Nikolova2015}] Let $\mathcal{I}$ be a single-commodity network congestion game and let $x$ and $z$ be feasible flows. We partition the edges $E = X \cup Z$ such that $Z = \{a \in E : z_a \geq x_a \text{ and } z_a > 0\}$ and $X = \{a \in E : z_a < x_a \text{ or } z_a = x_a = 0\}$. We say that $\pi$
is an alternating $s,t$-path if the arcs in $\pi \cap Z$ are oriented in the direction of $t$, and the arcs in $\pi \cap X$ are oriented in the direction of $s$. We call the number of backward arcs on $\pi$ the \emph{backward length} of $\pi$ and refer to it by $q(\pi) = |\pi \cap X|$.
\label{def:alt_path} \end{definition}
\begin{theorem}\label{thm:approx_single} Let $\mathcal{I}$ be a single-commodity network congestion game. Let $\epsilon \ge 0$ be fixed and consider an arbitrary alternating path $\pi$ with backward length $q = q(\pi)$. If $\epsilon < 1/q$, then the $\epsilon$-stability ratio is bounded by $$ \text{$\epsilon$-SR}(\mathcal{I}) \le \frac{1 + \epsilon}{1 - \epsilon \cdot q} \leq \frac{1 + \epsilon}{1 - \epsilon \cdot n}. $$
\end{theorem}
Note that the restriction on $\epsilon$ stated in the theorem always holds if $\epsilon < 1/n$. In particular, for $\epsilon \ll 1/n$ we roughly get $\text{$\epsilon$-SR}(\mathcal{I}) \leq 1 + \epsilon n$. The proof of Theorem~\ref{thm:approx_single} is inspired by a technique of Nikolova and Stier-Moses \cite{Nikolova2015}, but technically more involved.
\begin{proof}
Let $x = f^\epsilon$ be an $\epsilon$-approximate Nash flow and let $z = f^0$ be an original Nash flow. Let $\pi = Z_1X_1Z_2X_2\dots Z_{\eta-1}X_{\eta-1}Z_{\eta}$ be an alternating path for $x$ and $z$, where $Z_i$ and $X_i$ are maximal sections consisting of consecutive arcs, respectively, in $Z$ and $X$ (i.e., $Z_i \subseteq Z$ and $X_i \subseteq X$ for all $i$). Furthermore, we let $q_i = |X_i|$ and write $X_i = (X_{iq_i},\dots,X_{i2},X_{i1})$, where $X_{ij}$ are the arcs in the section $X_i$. By definition, for every arc $X_{ij}$ there exists a path $C_{ij}X_{ij}D_{ij}$ that is flow-carrying for $x$.\footnote{Note that for a Nash flow one can assume that there is a flow-carrying path traversing all arcs $X_{iq_i},\dots,X_{i1}$; but this cannot be done for an approximate Nash flow.}
For convenience, we define $C_{01} = D_{\eta,0} = \emptyset$. Furthermore, we denote $P^{\max}$ as a path maximizing $l_P(x)$ over all paths $P \in \mathcal{S}$. For convenience, we will abuse notation, and write $Q = Q(x) = \sum_{a \in Q} l_{a}(x)$ for $Q \subseteq E$.
Note that for all $i,j$: \begin{equation}\label{eq:pmax_upperbound} C_{ij}(x) + X_{ij}(x) + D_{ij}(x) \leq P^{\max}(x). \end{equation}
\begin{figure}
\caption{Sketch of the situation in the proof of Theorem \ref{thm:approx_single} with $q_1 = 1$ and $q_2 = 2$.}
\label{fig:braess}
\end{figure} Fix some $i \in \{1,\dots,\eta-1\}$. Then we have $C_{i1} + X_{i1} + D_{i1} \leq (1+\epsilon)(C_{i-1,q_{i-1}} + Z_i + D_{i1})$ by definition of an $\epsilon$-approximate Nash flow. This implies that (leaving out $D_{i1}$ on both sides) \begin{eqnarray} C_{i1} + X_{i1} &\leq &(1+\epsilon)Z_i + C_{i-1,q_{i-1}} + \epsilon(C_{i-1,q_{i-1}} + D_{i1}). \nonumber
\end{eqnarray}
Furthermore, for all $j \in \{2,\dots,q_i\}$, we have $C_{ij} + X_{ij} + D_{ij} \leq (1+\epsilon)(C_{i,j-1} + D_{ij})$ which implies (again leaving out $D_{ij}$ on both sides) $$ C_{ij} + X_{ij} \leq C_{i,j-1} + \epsilon(C_{i,j-1} + D_{ij}). $$
Adding up these inequalities for $j \in\{1,\dots,q_i\}$ and subtracting $\sum_{j=1}^{q_i-1} C_{ij}$ from both sides, we obtain for all $i \in \{1,\dots,\eta-1\}$
\begin{equation}\label{eq:alt_general_i} C_{i,q_i} + \sum_{j = 1}^{q_i} X_{ij} \leq C_{i-1,q_{i-1}} + (1+\epsilon)Z_i + \epsilon \bigg( \sum_{j = 1}^{q_i} D_{ij} + C_{i-1,q_{i-1}} + \sum_{j=1}^{q_i-1}C_{ij}\bigg). \end{equation}
Moreover, we also have \begin{equation}\label{eq:alt_pmax} P^{\max} \leq (1+\epsilon)(C_{\eta-1,q_{\eta-1}} + Z_{\eta}) = C_{\eta-1,q_{\eta-1}} + (1+\epsilon)Z_{\eta} + \epsilon C_{\eta-1,q_{\eta-1}}. \end{equation} Adding up the inequalities in (\ref{eq:alt_general_i}) for all $i \in \{1,\dots,\eta-1\}$, and the inequality in (\ref{eq:alt_pmax}), we obtain $$ P^{\max} + \sum_{i=1}^{\eta-1} C_{i,q_i} + \sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} X_{ij} \leq \sum_{i=1}^{\eta-1} C_{i,q_i} + (1+\epsilon) \sum_{i = 1}^{\eta} Z_i + \epsilon \bigg(\sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} \big(C_{ij} + D_{ij}\big) \bigg) $$ which simplifies to \begin{equation}\label{eq:cum} P^{\max} + \sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} X_{ij} \leq (1+\epsilon) \sum_{i = 1}^{\eta} Z_i + \epsilon \bigg(\sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} \big(C_{ij} + D_{ij}\big) \bigg). \end{equation} Using (\ref{eq:pmax_upperbound}), we obtain $$ \sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} \big(C_{ij} + D_{ij}\big) \leq \sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} \big(P^{\max} - X_{ij}\big) = \bigg(\sum_{i=1}^{\eta-1} q_i\bigg)P^{\max} - \sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} X_{ij}. $$ Combining this with (\ref{eq:cum}), and rearranging some terms, we get \begin{align*}\label{eq:cum_final} (1 - \epsilon \cdot q)P^{\max} &\leq (1+\epsilon) \bigg[ \sum_{i = 1}^{\eta} Z_i - \sum_{i = 1}^{\eta-1} \sum_{j=1}^{q_i} X_{ij}\bigg] = (1+\epsilon) \bigg[ \sum_{e \in Z \cap \pi} l_e(x_e) - \sum_{e \in X \cap \pi} l_e(x_e)\bigg] \end{align*} where $q = q(\pi) = \sum_{i = 1}^{\eta-1} q_i$ is the backward length of $\pi$.
Similarly (see also \cite[Lemma 4.5]{Nikolova2015}), it can be shown that \begin{equation}\label{eq:z} l_Q(z) \geq \sum_{e \in Z \cap \pi} l_e(z_e) - \sum_{e \in X \cap \pi} l_e(z_e) \end{equation} for any path $Q$ with $z_Q > 0$ (these all have the same latency, since $z$ is an original Nash flow). Using a similar argument as in \cite[Theorem 4.6]{Nikolova2015}, we obtain \begin{eqnarray} (1 - \epsilon \cdot q)l_{P^{\max}}(x) &\leq & (1+\epsilon) \bigg[ \sum_{e \in Z \cap \pi} l_e(x_e) - \sum_{e \in X \cap \pi} l_e(x_e)\bigg] \nonumber \\ & \leq & (1+\epsilon) \bigg[ \sum_{e \in Z \cap \pi} l_e(z_e) - \sum_{e \in X \cap \pi} l_e(z_e)\bigg]
\nonumber \\
& \le & (1+\epsilon)l_Q(z). \nonumber
\end{eqnarray} By multiplying both sides with the demand $r$, we obtain $ (1 - \epsilon \cdot q) C(x) \leq (1 - \epsilon \cdot q) r \cdot l_{P^{\max}}(x) \leq (1+\epsilon)r \cdot l_Q(z) = (1+\epsilon) C(z) $ for $\epsilon < 1/q$, which proves the claim.
\qed \end{proof}
\paragraph{Tight bound on the stability ratio.}
In this section, we consider instances for which all backward sections of the alternating path $\pi$ consist of a single arc, i.e., $q_i = 1$ for all $i = 1,\dots,\eta-1$. We then have $q = \sum_{i=1}^{\eta-1} q_i \leq \lfloor n/2 \rfloor - 1$ since every arc in $X$ must be followed directly by an arc in $Z$ (and we can assume w.l.o.g. that the first and last arc are contained in $Z$). By Theorem \ref{thm:approx_single}, we obtain
$ \text{$\epsilon$-SR}(\mathcal{I}) \leq (1 + \epsilon)/(1 - \epsilon \cdot (\lfloor n/2 \rfloor - 1)) $ for all $\epsilon < 1/(\lfloor n/2 \rfloor - 1)$. We show that this bound is tight. Further, we show that there exist instances for which $\text{$\epsilon$-SR}(\mathcal{I})$ is unbounded for $\epsilon \geq 1/(\lfloor n/2 \rfloor - 1)$. This completely settles the case of $q_i = 1$ for all $i$.
Our construction is based on the \emph{generalized Braess graph} \cite{Roughgarden2006}.
By construction, alternating paths for these graphs satisfy $q_i = 1$ for all $i$ (see Figure \ref{fig:braess_5} in the appendix for an example and a formal definition of these graphs).
\begin{theorem}\label{thm:braess_tight} Let $n = 2m$ be fixed and let $\mathcal{B}^m$ be the set of all instances on the generalized Braess graph with $n$ nodes. Then $$ \sup_{\mathcal{I} \in \mathcal{B}^m} \text{$\epsilon$-SR}(\mathcal{I})
= \begin{cases} \frac{1 + \epsilon}{1 - \epsilon \cdot (\lfloor n/2 \rfloor - 1)} & \text{if $\epsilon < \frac{1}{\lfloor n/2 \rfloor - 1}$,} \\ \infty & \text{otherwise.} \end{cases} $$ \end{theorem}
\paragraph{Non-symmetric matroid congestion games.}\label{sec:matroid}
In the previous sections, we considered (symmetric) network congestion games only. It is interesting to consider other combinatorial strategy sets as well. In this section we make a first step in this direction by focusing on the bases of matroids as strategies.
A matroid congestion game is given by $\mathcal{J} = (E,(l_e)_{e \in E},(\mathcal{S}_i)_{i \in [k]},(r_i)_{i \in [k]})$, and matroids $\mathcal{M}_i = (E,\mathcal{I}_i)$ over the ground set $E$ for every $i \in [k]$.\footnote{A matroid over $E$ is given by a collection $\mathcal{I} \subseteq 2^E$ of subsets of $E$ (called \emph{independent sets}). The pair $\mathcal{M} = (E,\mathcal{I})$ is a \emph{matroid} if the following three properties hold: i) $\emptyset \in \mathcal{I}$; ii) If $A \in \mathcal{I}$ and $B \subseteq A$, then $B \in \mathcal{I}$. iii) If $A,B \in \mathcal{I}$ and $|A| > |B|$, then there exists an $a \in A \setminus B$ such that $B + a \in \mathcal{I}$.} The strategy set $\mathcal{S}_i$ consists of the \emph{bases} of the matroid $\mathcal{M}_i$, which are the independent sets of maximum size, e.g., spanning trees in an undirected graph. We refer the reader to Schrijver \cite{schrijver2003} for an extensive overview of matroid theory.
As for network congestion games, it can be shown that in general the $\epsilon$-stability ratio can be unbounded (see Theorem \ref{thm:matroid_unbounded} in the appendix); this also holds for general path deviations because the proof of Proposition~\ref{prop:equiv} in the appendix holds for arbitrary strategy sets. However, if we consider path deviations induced by the sum of edge deviations (as in \cite{Nikolova2015,Kleer2016}), then we can obtain a more positive result for general matroids.
Recall that for every resource $e \in E$ we have a deviation function $\delta_e : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$ satisfying $0 \leq \delta_e(x) \leq \beta l_e(x)$ for all $x \geq 0$. The deviation of a basis $B$
is then given by $\delta_B(f) = \sum_{e \in B} \delta_e(f_e)$.
\begin{theorem}\label{thm:matroid} Let $\mathcal{J} = (E,(l_e)_{e \in E},(\mathcal{S}_i)_{i \in [k]},(r_i)_{i \in [k]})$ be a matroid congestion game with homogeneous players. Let $\beta \ge 0$ be fixed and consider $\beta$-bounded basis deviations as defined above. Then the $\beta$-deviation\xspace ratio is upper bounded by $\text{$\beta$-DR}(\mathcal{J}) \le 1 + \beta$. Further, this bound is tight already for $1$-uniform matroid congestion games. \end{theorem}
\noindent \textbf{Acknowledgements.} We thank the anonymous referees for their very useful comments, and one reviewer for pointing us to Lemma 1 \cite{Fleischer2005}.
\appendix
\section{Omitted material of Section \ref{sec:het}}
\begin{rtheorem}{Proposition}{\ref{prop:inclusion}} Let $\mathcal{I}$ be a congestion game with heterogeneous players. If $f$ is a $\beta$-deviated\xspace Nash flow for $\mathcal{I}$, then $f$ is an $\epsilon$-approximate Nash flow for $\mathcal{I}$ with $\epsilon_{ij} = \beta \gamma_{ij}$ for all $i \in [k]$ and $j \in [h_i]$ (for the same demand distribution $r$). \end{rtheorem} \begin{proof} Let $f$ be a $\beta$-deviated Nash flow for some set of path deviations $\delta \in \Delta(0,\beta)$. For $P \in \mathcal{S}$, we have $$ l_P(f) \leq l_P(f) + \gamma_{ij}\delta_P(f) \leq l_{P'}(f) + \gamma_{ij} \delta_{P'}(f)\leq (1+ \gamma_{ij} \beta) l_{P'}(f) $$ where we use the non-negativity of $\delta_P(f)$ in the first inequality, the Nash condition for $f$ in the second inequality, and the fact that $\delta_{P'}(f) \leq \beta l_{P'}(f)$ in the third inequality. By definition, it now holds that $f$ is also an $\epsilon$-approximate equilibrium (for $\epsilon_{ij} = \beta \gamma_{ij}$). \qed \end{proof}
\begin{rtheorem}{Theorem}{\ref{thm:hetero}} Let $\mathcal{I}$ be a single-commodity network congestion game on a series-parallel graph with heterogeneous players, demand distribution $r = (r_i)_{i \in [h]}$ normalized to $1$, i.e., $\sum_{j \in [h]} r_j = 1$, and sensitivity distribution $\gamma = (\gamma_i)_{i \in [h]}$, with $\gamma_1 < \gamma_2 < \dots < \gamma_h$.
Let $\beta \ge 0$ be fixed and define $\epsilon = (\beta \gamma_i)_{i \in [h]}$. Then the $\epsilon$-stability ratio and the $\beta$-deviation\xspace ratio are bounded by: \begin{align} \text{$\epsilon$-SR}(\mathcal{I}) \le 1 + \beta \sum_{j = 1}^h r_j\gamma_j \quad\text{and}\quad \text{$\beta$-DR}(\mathcal{I}) \le 1 + \beta \cdot \max_{j \in [h]} \bigg\{ \gamma_j \bigg( \sum_{p = j}^h r_p \bigg) \bigg\}. \label{eq:comparison_epsilon}
\end{align} Further, both bounds are tight for all distributions $r$ and $\gamma$. \end{rtheorem}
\subsection{Proof of statement for $\epsilon$-$SR(\mathcal{I})$.} \begin{proof}[Theorem \ref{thm:hetero}, $\epsilon$-stability ratio]
The statement for $\epsilon$-approximate equilibria can be proven in almost the same way as in Kleer and Sch\"afer \cite{Kleer2016} (the bound there is used for path deviations, but the proof extends directly to approximate equilibria). For completeness, we give the argument here.
For $j \in [h]$, let $\bar{P}_j$ be a path maximizing $l_P(x)$ over all flow-carrying paths $P \in \mathcal{P}$ of type $j$. Moreover, there exists a path $\pi$ such that $x_a \leq z_a$ and $z_a > 0$ for all $a \in \pi$ (see, e.g., Milchtaich \cite{Milchtaich2006}). We then have (this is also reminiscent of an argument by Lianeas et al. \cite{Lianeas2016}): $$ l_{\bar{P}_j}(x) \leq (1+\beta \gamma_j) l_{\pi}(x) = (1 + \beta \gamma_j)\sum_{a \in \pi} l_a(x_a). $$ Note that, by definition of the alternating path $\pi$, we have $x_a \leq z_a$ for all $a \in \pi$. Continuing with the estimate, we find $l_{\bar{P}_j}(x) \leq (1 + \beta \gamma_j)\sum_{a \in \pi} l_a(z_a)$ and thus
$$ C(x) \leq \sum_{j \in [h]} r_j l_{\bar{P}_j}(x) \leq \sum_{j \in [h]} r_j (1 + \beta \gamma_j)\sum_{a \in \pi} l_a(z_a) = C(z) \bigg(\sum_{j \in [h]} r_j (1 + \beta \gamma_j)\bigg) $$ Since $\sum_{j \in [h]} r_j = 1$, we get the desired result. Note that we use $C(z) = \sum_{a \in \pi} l_a(z_a)$, which is true because there exists a flow-decomposition of $z$ in which $\pi$ is flow-carrying (here we use $z_a > 0$ for all $a \in \pi$).
Tightness follows by considering an instance with arc set $\{0,1,\dots,h\}$ where the zeroth arc has latency $l_0(y) = 1$ and the arcs $j \in \{1,\dots,h\}$ have latency $l_j(y) = 1 + \beta \gamma_j$. An original Nash flow is given by $f^0 = (z_0,z_1,\dots,z_h) = (1,0,\dots,0)$, and an $\epsilon$-approximate Nash flow is given by $f^\epsilon = (x_0,x_1,\dots,x_h) = (0, r_1, r_2, \dots, r_h)$.\qed \end{proof}
\subsection{Missing arguments for proof of $\beta$-deviation ratio.}
\begin{rtheorem}{Lemma}{\ref{lem:max_seq}} Let $0 \leq \tau_{k-1} \leq \dots \leq \tau_1 \leq \tau_0$ and $c_i \geq 0$ for $i = 1,\dots,k$ be given. Then $$ c_1 \tau_0 + \sum_{i = 1}^{k-1} (c_{i+1} - c_i)\tau_i \leq \tau_0 \cdot \max_{i=1,\dots,k} \{c_i\}. $$ \end{rtheorem} \begin{proof} The statement is clearly true for $k = 1$. Now suppose the statement is true for some $k \in \mathbb{N}$. We will prove the statement for $k + 1$. \\ \textbf{Case 1: $c_{k+1} - c_{k} \leq 0$.} Then we have \begin{eqnarray} c_1 \tau_0 + \sum_{i = 1}^{k} (c_{i+1} - c_i)\tau_i &\leq & c_1 \tau_0 + \sum_{i = 1}^{k-1} (c_{i+1} - c_i)\tau_i \text{ \ \ \ \ (using $\tau_k \geq 0$) } \nonumber \\ &\leq & \tau_0 \cdot \max_{i=1,\dots,k} \{c_i\} \text{ \ \ \ \ (using induction hypothesis) } \nonumber \\ &\leq & \tau_0 \cdot \max_{i=1,\dots,k+1} \{c_i\}\text{ \ \ \ \ (using non-negativity of $\tau_0$) } \nonumber \end{eqnarray}\\ \textbf{Case 2: $c_{k+1} - c_{k} > 0$.} \begin{eqnarray} c_1 \tau_0 + \sum_{i = 1}^{k} (c_{i+1} - c_i)\tau_i &= & c_1 \tau_0 + \bigg[\sum_{i = 1}^{k-1} (c_{i+1} - c_i)\tau_i \bigg] + (c_{k+1}-c_k)\tau_{k} \nonumber \\ &\leq & c_1 \tau_0 + \bigg[\sum_{i = 1}^{k-1} (c_{i+1} - c_i)\tau_i \bigg] + (c_{k+1}-c_k)\tau_{k-1} \text{ \ \ ($\tau_k \leq \tau_{k-1}$) } \nonumber \\ &=& c_1 \tau_0 + \bigg[\sum_{i = 1}^{k-2} (c_{i+1} - c_i)\tau_i \bigg] + (c_{k+1}-c_{k-1})\tau_{k-1} \nonumber \\ &\leq & \tau_0 \cdot \max_{i=1,\dots,k-2,k-1,k+1} \{c_i\} \text{ \ \ \ \ (using induction hypothesis) } \nonumber \\ &\leq & \tau_0 \cdot \max_{i=1,\dots,k+1} \{c_i\}\text{ \ \ \ \ (using non-negativity of $\tau_0$) } \nonumber \end{eqnarray} Note that we apply the induction hypothesis with the set $\{c_1,\dots,c_{k-1},c_{k+1}\}$ of size $k$. \qed \end{proof}
\subsubsection{Induction step to prove inequality (\ref{eq:induction}) in proof of Theorem \ref{thm:hetero}:}
The case $j = 1$ is precisely (\ref{eq:nash1}). Now suppose it holds for some $j$, then we have \begin{eqnarray} l_{P_{j+1}}(x) &\leq& l_{P_j}(x) + \gamma_{j+1}\delta_{P_j}(x) - \gamma_{j+1}\delta_{P_{j+1}}(x) \ \ \ \ \text{(Nash condition for path $P_{j+1}$)} \nonumber \\ &\leq& l_{P_0}(x) + \gamma_1 \delta_{P_0}(x) + \bigg[\sum_{g = 1}^{j-1} (\gamma_{g+1} - \gamma_g)\delta_{P_g}(x)\bigg] - \gamma_j \delta_{P_j}(x) \nonumber \\ & & + \ \gamma_{j+1}\delta_{P_j}(x) - \gamma_{j+1}\delta_{P_{j+1}}(x) \nonumber \ \ \ \text{(induction hypothesis)} \nonumber \\ & = &l_{P_0}(x) + \gamma_1 \delta_{P_0}(x) + \bigg[\sum_{g = 1}^{j} (\gamma_{g+1} - \gamma_g)\delta_{P_g}(x)\bigg] - \gamma_{j+1} \delta_{P_{j+1}}(x), \nonumber \end{eqnarray} which shows the result for $j + 1$.\qed
\subsection{Bounded deviation model for $(0,\beta)$-path deviations and general sensitivity density distributions (single-commodity case).} We use the section to explain the generalization of the bounds in Theorem \ref{thm:hetero} to more general sensitivity density distributions (see Corollary \ref{cor:het} further on). For sake of completeness, we adjust the definitions given in the description of the bounded deviation model (see Section \ref{sec:pre}).
We are given a single-commodity instance $\mathcal{I} = (E,(l_e)_{e \in E},\mathcal{S})$ and $\beta \geq 0$ fixed. We consider the set $$
\Delta(0,\beta) = \{ \delta = (\delta_P)_{P \in \mathcal{S}} \ \big| \ 0 \leq \delta_P(f) \leq \beta l_P(f) \text{ for all } f \in \mathcal{F}(\mathcal{S}) \} $$ of $(0,\beta)$-path deviation vectors $\delta= (\delta_P)_{P \in \mathcal{S}}$ where $\delta_P : \mathcal{F}(\mathcal{S}) \rightarrow \mathbb{R}_{\geq0}$ for all $P \in \mathcal{S}$. Moreover we have a Lebesgue integrable (or just continuous) density function $\psi : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$ over the demand, which is normalized to $1$, i.e., $\int_0^\infty \psi(y)dy = 1$. For a feasible flow $f$, we have an indicator function $$ \mathbb{I} : \mathcal{S} \times [0,\infty) \rightarrow \{0,1\} $$ which is $1$ if sensitivity $\gamma \in [0,\infty)$ is present on path $P$ in flow $f$, and $0$ otherwise.
Define the \emph{deviated latency} of a path $P \in \mathcal{S}$ for sensitivity $\gamma \in [0,\infty)$ as $q_{P}^{\gamma}(f) = l_P(f) + \gamma \delta_P(f)$. We say that a flow $f$ is a \emph{$\beta$-deviated\xspace Nash flow} if there exist some $(0,\beta)$-path deviations $\delta \in \Delta(0,\beta)$ such that \begin{equation} \forall \gamma \in [0,\infty), \ \forall P \in \mathcal{S} \text{ with } \mathbb{I}(P,\gamma) = 1: \qquad q_{P}^{\gamma}(f) \leq q_{P'}^{\gamma}(f) \ \ \forall P' \in \mathcal{S}. \label{eq:nash} \end{equation}
We define the \emph{$\beta$-deviation\xspace ratio} $\text{$\beta$-DR}(\mathcal{I})$ as the maximum ratio $C(f^\beta)/C(f^0)$ of a $\beta$-deviated\xspace Nash flow $f^\beta$ and an original Nash flow $f^0$.
\noindent The result of Theorem \ref{thm:hetero} for general sensitivity demand functions (as mentioned at the end of Section \ref{sec:het}) can now be stated as follows.
\begin{corollary}\label{cor:het}
Let $\mathcal{I}$ be a network congestion game on a series-parallel graph with heterogeneous players, given by a Lebesgue integrable demand density function $\psi : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$. Let $\beta \geq 0$ be fixed and let $\epsilon = (\beta\gamma_i)_{i \in [h]}$. Then the $\epsilon$-stability ratio and the $\beta$-deviation\xspace ratio are bounded by: \begin{align} \text{$\epsilon$-SR}(\mathcal{I}) \le 1 + \beta \int_{0}^\infty y\cdot \psi(y)dy \quad\text{and}\quad \text{$\beta$-DR}(\mathcal{I}) \le 1 + \beta \sup_{t \in \mathbb{R}_{\geq 0}} \bigg\{ t \cdot \int_t^\infty \psi(y) dy \bigg\}. \label{eq:comparison_epsilon_app}
\end{align} Further, both bounds are asymptotically tight for all distributions $r$ and $\gamma$.
\end{corollary} \begin{proof} We first show that we can reduce to a discrete instance as considered in Theorem \ref{thm:hetero}. For every path $P \in \mathcal{S}$ we can set the sensitivity of all the flow on that path to $$ \gamma_P^* = \inf\{ \gamma : \mathbb{I}(P,\gamma) = 1 \}. $$ In particular, this induces a demand distribution over a discrete (finite) valued sensitivity population (here we implicitly use the continuity of the latency functions). The upper bounds in Theorem \ref{thm:hetero} for the resulting sensitivity distributions will never be worse than the quantities in (\ref{eq:comparison_epsilon_app}). This concludes the proofs of the upper bounds (since the result now follows from the proof of Theorem \ref{thm:hetero}).
For tightness of the $\beta$-deviation ratio, fix $t \in [0,\infty)$, and let $T = \int_t^\infty \psi(y)dy$. Consider the following instance on two arcs. We take $(l_1(y),\delta_1(y)) = (1,\beta)$ and take $(l_2(y),\delta_2(y))$ with $\delta_2(y) = 0$ and $l_2(y)$ a strictly increasing function satisfying $l_2(0) = 1 + \epsilon'$ and $l_2(T) = 1 + t\cdot \beta$, where $\epsilon' < t \cdot \beta$. The (unique) original Nash flow is given by $z = (z_1,z_2) = (1,0)$ with $C(z) = 1$, and the (unique) deviated Nash flow $x$ is given by $x = (x_1,x_2) = (1-T,T)$ with $C(x) = 1 + \beta \cdot t \cdot T$. Since this construction holds for all $t \in [0,\infty)$, the bound in (\ref{eq:comparison_epsilon_app}) is asymptotically tight. This holds since we can get arbitrarily close to the supremum if it is finite, and otherwise we can create a sequence of instances whose ratio approaches infinity.
For tightness of the $\epsilon$-stability ratio, we create a discretized version of the construction in the proof of Theorem \ref{thm:hetero}. Fix some arbitrary $A> 0$. Choose $\alpha$ large enough so that $$ A = \int_\alpha^\infty \psi(y) dy. $$ For the density under $\alpha$, we discretize the interval $[0,\alpha]$ into the intervals $[k\cdot \epsilon', (k+1)\epsilon']$ where $k = 0,\dots,q-1$ for some $q = \alpha/\epsilon'$ (without loss of generality $q$ can be assumed to be integral, and it can be chosen as large as desired). We then consider the discrete demand distribution $r = (r_1,\dots,r_q,A)$, where $$ r_{k+1} = \int_{k\epsilon'}^{(k+1)\epsilon'} \psi(y) dy \quad \text{for } k = 0,\dots,q-1. $$ We then create an instance with $q+1$ arcs, with latency functions $l_k(y) = (1 + k\epsilon')$ for $k = 1,\dots,q$ and $l_{q+1}(y) = 1 + A$. We now can use the same construction as in the proof of statement (\ref{eq:comparison_epsilon}), and by sending $A,\epsilon' \rightarrow 0$ we can get arbitrarily close to the value $1 + \beta \int_{0}^\infty y\cdot \psi(y)dy$. Note that the number of arcs in the instance goes to infinity as $A,\epsilon' \rightarrow 0$. \qed \end{proof}
\section{Omitted material of Section \ref{sec:approx}}
\begin{rtheorem}{Proposition}{\ref{prop:equiv}}
Let $\mathcal{I}$ be a single-commodity congestion game with homogeneous players. $f$ is an $\epsilon$-approximate Nash flow for $\mathcal{I}$ if and only if $f$ is a $\beta$-deviated\xspace Nash flow for $\mathcal{I}$ with $\epsilon = \beta\gamma$. \end{rtheorem} \begin{proof} The first part of the proof is a special case of the proof of Proposition \ref{prop:inclusion}. Let $f$ be a $\beta$-deviated\xspace Nash flow for some set of path deviations $\delta \in \Delta(0,\beta)$. For $P \in \mathcal{S}$, we have $$ l_P(f) \leq l_P(f) + \gamma \delta_P(f) \leq l_{P'}(f) + \gamma \delta_{P'}(f)\leq (1+ \gamma \beta) l_{P'}(f) $$ where we use the non-negativity of $\delta_P(f)$ in the first inequality, the Nash condition for $f$ in the second inequality, and the fact that $\delta_{P'}(f) \leq \beta l_{P'}(f)$ in the third inequality. By definition, it now holds that $f$ is also an $\epsilon$-approximate equilibrium (for $\epsilon = \beta \gamma$).
Reversely, let $f$ be a $\epsilon$-approximate Nash flow (w.l.o.g., we can take $\gamma = 1$, so that $\beta = \epsilon$). We show that there exist $(0,\beta)$-path deviations $\delta_P$ such that $f$ is inducible with respect to these path deviations. Let $P_1,\dots,P_k$ be the set of flow-carrying paths under $f$, and assume without loss of generality that $l_{P_1}(f) \leq l_{P_2}(f) \leq \dots \leq l_{P_k}(f)$. We define $\delta_{P_i}(f) = l_{P_k}(f) - l_{P_i}(f)$ for $i = 1,\dots,k$. Using the Nash condition for the path $P_k$, we find $$ \delta_{P_i}(f) = l_{P_k}(f) - l_{P_i}(f) \leq (1+ \beta)l_{P_i}(f) - l_{P_i}(f) = \beta l_{P_i}(f) $$ which shows that these path deviations are feasible. Moreover, we take $\delta_{Q}(f) = \beta l_Q(f)$ for all the paths $Q \in \mathcal{P} \setminus \{P_1,\dots,P_k\}$ which are not flow-carrying under $f$. Now, let $i \in \{1,\dots,k\}$ be fixed. Then for any $j \in \{1,\dots,k\}$, we have $$ l_{P_i}(f) + \delta_{P_i}(f) = l_{P_k}(f) = l_{P_j}(f) + \delta_{P_j}(f) $$ and for any $Q \in \mathcal{P} \setminus \{P_1,\dots,P_k\}$, we have $$ l_{P_i}(f) + \delta_{P_i}(f) = l_{P_k}(f) \leq (1+\beta)l_Q(f) = l_Q(f) + \delta_Q(f), $$ using the Nash condition for the path $P_k$ and the definition of $\delta_Q(f)$. This shows that $f$ is indeed a $\beta$-deviated Nash flow. \qed \end{proof}
\subsubsection{Generalized Braess graph.} The \emph{$m$-th (generalized) Braess graph} $G^m = (V^m, A^m)$ is defined by $V^m = \{s,v_1,\dots,v_{m-1},w_1,\dots,w_{m-1},t\}$ and $A^m$ as the union of three sets: $E^m_1 = \{(s,v_j),(v_j,w_j),(w_j,t): 1 \leq j \leq m-1\}$, $E^m_2 = \{(v_j,w_{j-1}) : 2 \leq j \leq m-1\}$ and $E^m_3 = \{(v_1,t),(s,w_{m-1})\}$ (see Figure \ref{fig:braess_5} for an example).
\begin{rtheorem}{Theorem}{\ref{thm:braess_tight}} Let $n = 2m$ be fixed and let $\mathcal{B}^m$ be the set of all instances on the generalized Braess graph with $n$ nodes. Then $$ \sup_{\mathcal{I} \in \mathcal{B}^m} \text{$\epsilon$-SR}(\mathcal{I})
= \begin{cases} \frac{1 + \epsilon}{1 - \epsilon \cdot (\lfloor n/2 \rfloor - 1)} & \text{if $\epsilon < \frac{1}{\lfloor n/2 \rfloor - 1}$,} \\ \infty & \text{otherwise.} \end{cases} $$
\end{rtheorem}
The proof of the upper bound for $\epsilon < 1/(\lfloor n/2 \rfloor - 1)$ follows from the discussion in the main text of Section \ref{sec:approx}. That is, we either have a path consisting solely of $Z$-edges (in which case we get a bound of $1 + \epsilon$ which follows from similar arguments as the proof of Theorem \ref{thm:hetero}) or we have an alternating path with $q_i = 1$ for all sections of consecutive $X$-edges in the alternating path (in Figure \ref{fig:braess_5}, these are the edges with latency $T$). That is, every such section consists of one edge in $X$.
\begin{proof}[Lower bound for $\epsilon \geq 1/(\lfloor n/2 \rfloor - 1)$] Let $\tau \geq 0$ be a fixed constant. Let $y_m : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$ be a non-decreasing, continuous function with $y_m(1/m) = 0$ and $y_m(1/(m-1)) = \tau$. We define $$ l^{m}_a(g) = \left\{ \begin{array}{ll} (m - j)\cdot y_m(g) & \text{ for } a \in \{(s,v_j) : 1 \leq j \leq m-1\}\\ j \cdot y_m(g) & \text{ for } a \in \{(w_j,t) : 1 \leq j \leq m-1\}\\ 1 & \text{ for } a \in \{(v_i,w_{i-1}): 2 \leq i \leq m-1\} \cup E_3\\ (1+\epsilon) + ((m-1)\epsilon - 1)\tau & \text{ for } a \in \{(v_i,w_i): 1 \leq i \leq m-1\} \end{array}\right. $$ Note that $l_a^m$ is non-negative for all $a \in A$. This is clear for the first three cases, and the last case follows from the assumption $\epsilon \geq 1/(m-1)$.
\begin{figure}
\caption{The fifth Braess graph with $l_a^5$ on the arcs as defined in the proof of Theorem \ref{thm:braess_tight}, with $T = (1+\epsilon) + ((m-1)\epsilon - 1)\tau$. The bold arcs indicate the alternating path $\pi_1$.}
\label{fig:braess_5}
\end{figure}
A Nash flow $z = f^0$ is given by routing $1/m$ units of flow over the paths $(s,w_{m-1},t),(s,v_1,t)$ and the paths in $\{(s,v_j,w_{j-1},t) : 2 \leq j \leq m-1\}$. Note that all these paths have latency one, and the path $(s,v_j,w_j,t)$, for $1 \leq j \leq m-1$, has latency $(1+\epsilon) + ((m-1)\epsilon - 1)\tau \geq 1$ for all $\tau \geq 0$ and $ \epsilon \geq 1/(m-1)$. We conclude that $C(z) = 1$.
An $\epsilon$-approximate Nash flow $x = f^\epsilon$ is given by routing $1/(m-1)$ units of flow over the paths in $\{(s,v_j,w_j,t) : 1 \leq j \leq m - 1\}$. Each such path $P$ then has a latency of $$ l_P(x) = (1+\epsilon) + ((m-1)\epsilon - 1)\tau + (m-j+j)\tau = (1+\epsilon)(1 + (m-1)\tau). $$ Furthermore, we have $l_P(x) = (1+\epsilon)l_{P'}(x)$ for all $P' = (s,v_j,w_{j-1},t)$, where $2 \leq j \leq m-1$. The same argument holds for the paths $(s,w_{m-1},t)$ and $(s,v_1,t)$, so $x$ is indeed an $\epsilon$-approximate Nash flow. We have $C(x) = (1+\epsilon)(1 + (m-1)\tau)$. Sending $\tau \rightarrow \infty$ then gives the desired result. \qed \end{proof}
\begin{proof}[Lower bound for $\epsilon < 1/(\lfloor n/2 \rfloor - 1)$] Let $y_m : \mathbb{R}_{\geq 0} \rightarrow \mathbb{R}_{\geq 0}$ be a non-decreasing, continuous function with $y_m(1/m) = 0$ and $y_m(1/(m-1)) = \epsilon/(1 - \epsilon\cdot(m-1))$. We define $$ l^{m}_a(g) = \left\{ \begin{array}{ll} (m - j)\cdot y_m(g) & \text{ for } a \in \{(s,v_j) : 1 \leq j \leq m-1\}\\ j \cdot y_m(g) & \text{ for } a \in \{(w_j,t) : 1 \leq j \leq m-1\}\\ 1 & \text{ for } a \in \{(v_i,w_{i-1}): 2 \leq i \leq m-1\} \cup E_3\\ 1 & \text{ for } a \in \{(v_i,w_i): 1 \leq i \leq m-1\} \end{array}\right. $$ Note that $l_a^m$ is non-negative for all $a \in A$.
A Nash flow $z = f^0$ is given by routing $1/m$ units of flow over the paths $(s,w_{m-1},t),(s,v_1,t)$ and the paths in $\{(s,v_j,w_{j-1},t) : 2 \leq j \leq m-1\}$. Note that all these paths have latency one, and the path $(s,v_j,w_j,t)$, for $1 \leq j \leq m-1$, has latency one as well. We conclude that $C(z) = 1$.
An $\epsilon$-approximate Nash flow $x = f^\epsilon$ is given by routing $1/(m-1)$ units of flow over the paths in $\{(s,v_j,w_j,t) : 1 \leq j \leq m - 1\}$. Each such path $P$ then has a latency of $$ l_P(x) = 1 + (m - j + j) \cdot \frac{\epsilon}{1 - \epsilon\cdot (m-1)} = \frac{1+\epsilon}{1 - \epsilon\cdot (m-1)}. $$ Furthermore, for all $P' = (s,v_j,w_{j-1},t)$, where $2 \leq j \leq m-1$, we have $$ (1+\epsilon)l_{P'}(x) = (1+\epsilon)\left(1 + (m - 1) \cdot \frac{\epsilon}{1 - \epsilon\cdot (m-1)}\right) = l_P(x) $$ The same argument holds for the paths $(s,w_{m-1},t)$ and $(s,v_1,t)$, so $x$ is indeed an $\epsilon$-approximate Nash flow. The result now follows since $C(x)/C(z) = (1+\epsilon)/(1 - \epsilon\cdot (m-1))$ as desired. \qed \end{proof}
\subsection{Unboundedness of the $\epsilon$-stability ratio for matroid congestion games.} We first show that the ratio between an $\epsilon$-approximate Nash flow and an original Nash flow can, in general, be unbounded.
\begin{theorem}\label{thm:matroid_unbounded} Let $\epsilon \geq 0$ fixed and let $\mathcal{J} = (E,(l_e)_{e \in E},\mathcal{S})$ be a single-commodity matroid congestion game in which $\mathcal{S}$ contains all subsets of $E$ of precisely size $k$. Then $$ \sup_{\mathcal{J}} \text{$\epsilon$-SR}(\mathcal{J}) \ \geq \ \left\{ \begin{array}{ll} \frac{1 + \epsilon}{1 - \epsilon (k-1)} & \text{ \ \ \ if } \epsilon < 1/(k - 1) \\ \infty & \text{ \ \ \ if } \epsilon \geq 1/(k - 1), \end{array}\right. $$ For $k = 2$, the resulting lower bound $(1+\epsilon)/(1 - \epsilon)$ for $\epsilon < 1$ is tight (i.e., it is also an upper bound). This settles the case of $k=2$ completely. \end{theorem} \begin{proof}[Unboundedness] Fix some $k \in \mathbb{N}$ and consider an instance with demand $r = 1$ and $E = \{e_0,\dots,e_k\}$, and, as stated above, let $\mathcal{S}$ be the family of all subsets of size $k$. Let $l_0(y) = 1$ be the constant latency function of resource $e_0$ and for $e \in \{e_1,\dots,e_k\}$, let $l_e(y)$ be a non-negative, continuous, increasing function satisfying $$ l_e((k-1)/k) = 1 \ \ \text{ and } \ \ l_e(1) = M, $$ where $M$ is a constant to be determined later.
A classical Nash flow $z = f^0$ is given by assigning $1/k$ units of flow to the strategies $E \setminus \{e_j\}$ for $j = 1,\dots,k$, so that the load on $e_0$ is $z_0 = 1$, and the load on resources $e_j$ is $z_{j} = (k-1)/k$. Every strategy has latency $k$ (since $l_{e_j}((k-1)/k) = 1$ for all resources $e_j$ with $j \in \{1,\dots,k\}$), and therefore also $C(z) = k$.
We construct an $\epsilon$-approximate Nash flow $x = f^\epsilon$ as follows. The flow $x$ is defined by assigning all flow to the strategy $s = \{e_1,\dots,e_k\}$. For suitable choices of $M$, we show that $x$ is then indeed an $\epsilon$-approximate Nash flow. The set of other strategies is given by $s^j = E \setminus \{e_j\}$ for $j \in \{1,\dots,k\}$. The Nash conditions for $x$ are then equivalent to \begin{equation}\label{eq:nash_matroid} l_s(x) = k\cdot M \leq (1 + \epsilon)[1 + (k-1)M] = l_{s^j}(x) \end{equation} for all $j \in \{1,\dots,k\}$. If $\epsilon \geq 1/(k-1)$, then $1 \leq \epsilon (k-1)$, and thus $$ k \cdot M \leq 1 + \epsilon + (k-1)M + M \leq 1 + \epsilon + (k-1)M + \epsilon (k-1)M = (1+\epsilon)(1 + (k-1)M) $$ for any non-negative $M$, i.e., the Nash conditions are always satisfied. Note that $C(x) = kM$ so that $C(x)/C(z) = M \rightarrow \infty$ as $M \rightarrow \infty$. Therefore the $\epsilon$-ratio is unbounded for $\epsilon \geq 1/(k-1)$.
If $\epsilon < 1/(k-1)$, then the Nash condition in (\ref{eq:nash_matroid}) is equivalent to $$ M \leq \frac{1 + \epsilon}{1 - \epsilon(k-1)} $$ which is strictly positive since $\epsilon < 1/(k-1)$. In particular, by choosing $M = (1 + \epsilon)/(1 - \epsilon(k-1))$, we get $C(x)/C(z) = (k\cdot M)/k = (1 + \epsilon)/(1 - \epsilon(k-1))$ for $\epsilon < 1/(k-1)$. \qed
\end{proof}
For the case $k = 2$, the construction in the previous proof yields a lower bound of $(1+\epsilon)/(1 - \epsilon)$ if $\epsilon< 1$, and $\infty$ otherwise. For $\epsilon < 1$, we show that the bound $(1+\epsilon)/(1-\epsilon)$ is also an upper bound.
\begin{proof}[Upper bound for $k = 2$] Let $z = f^0$ be a classical Nash flow and let $x = f^\epsilon$ be a worst-case $\epsilon$-approximate Nash flow (both with normalized demand $r = 1$). We partition $E = X \cup Z$, where $Z = \{a \in E : z_a \geq x_a \text{ and } z_a > 0\}$ and $X = \{a \in E : z_a < x_a \text{ or } z_a = x_a = 0\}$.
Let us first elaborate on the \emph{structure} of the flow $z$. One of the following cases holds. \begin{enumerate}[i)] \item There are two resources $e$ and $e'$ which are used by every player, that is, all flow is assigned to the strategy $\{e,e'\}$. By definition, both these resources will then be part of $Z$ (since $x_a \leq 1 = z_a$ for $a = e,e'$). \item There is precisely one resource $e_0$ used by every player. Again, this resource will then be part of $Z$ by definition. Moreover, we then also have $$ l_{e_0}(1) = l_{e_0}(z_{e_0}) \leq l_{e'}(z_{e'}) $$ for every other resource $e'$. Also, all other resources $e'$ with $z_{e'} > 0$ have equal latency (which is true because of base exchange arguments), which in turn is greater than or equal to that of $e_0$. \item There are no resources which are used by every player. Then all flow-carrying resources in $z$ have equal latency (again because of base exchange arguments). \end{enumerate}
\noindent \textbf{Claim 1:} Without loss of generality, there is at most one resource in $Z$. \begin{proof} If there are at least two resources in $Z$, then there exists a pair of resources in $Z$, forming a strategy with a strictly positive amount of flow assigned to it in $z$ (by what was said above regarding the structure of $z$).\footnote{We emphasize that this is not necessarily true for all combinations of resources in $Z$.} Using the fact that matroids are immune against the Braess paradox, as shown by Fujishige et al. \cite{Fujishige2015}, we can then carry out similar arguments as in the proof of Theorem \ref{thm:hetero} for the $\beta$-deviation ratio, but then for the more simple homogeneous case.\footnote{Phrased differently, the proof of Theorem \ref{thm:hetero} for the $\beta$-deviation ratio essentially relies on the fact that for the flow $z$ we can without loss of generality assume that all latency functions of resources in $Z$ are constant (by using immunity against the Braess paradox), and that there exists a strategy, with positive amount of flow assigned to it in $z$, only using resources in $Z$.} This would result in a bound of $1 + \epsilon \leq (1+\epsilon)/(1 - \epsilon)$. \qed \end{proof}
Let $P_{\max}$ be a strategy maximizing $l_P(x)$ over all flow-carrying strategies $P$ in $x$.
\noindent \textbf{Claim 2:} Without loss of generality, $P_{\max}$ uses only resources in $X$ (and not the resource $e_0$ in $Z$).
\begin{proof} Suppose that every strategy with maximum latency is of the form $\{f,e_0\}$ for some resource $f \in E \setminus \{e_0\}$. Let $\{f_1,\dots,f_q\}$ be the set of all such resources $f$. It follows that, for any fixed $j = 1,\dots,q$, the strategy $\{e,f_j\}$, for some $e \in E \setminus \{e_0\}$, cannot be flow-carrying, otherwise, \begin{eqnarray} l_{e_0}(x_{e_0}) &\leq& l_{e_0}(z_{e_0}) \ \ \ \ \text{(definition of $Z$)} \nonumber \\ &\leq& l_{e}(z_{e}) \ \ \ \ \ \ \text{(structure of Nash flow $z$ discussed above)} \nonumber \\ &\leq& l_e(x_e) \ \ \ \ \ \ \text{(definition of $X$)} \nonumber \end{eqnarray} which implies that $l_{e_0}(x_{e_0}) + l_{f_j}(x_{f_j}) \leq l_e(x_e) + l_{f_j}(x_{f_j})$. Since $\{e_0,f_j\}$ is a strategy of maximum latency, then also $\{e,f_j\}$ is a strategy of maximum latency, which contradicts our assumption.
Moreover, not all players in $x$ can use resource $e_0$, otherwise $1 = x_{e_0} \leq z_{e_0}$, meaning that also all players use resource $e_0$ in $z$. But since $x_e > z_e$ for all other resources, which are in $X$ since we have only one resource in $Z$, this leads to a contradiction, since the total flow on all edges in $x$ is then higher than the total flow on all edges in $z$.
Now, let $\{a,b\}$ be a flow-carrying strategy for some $a,b \in E\setminus \{e_0\}$, which exists by what was said above. For any flow-carrying strategy of the form $\{a,e\}$ for some $e \in E$, we have $$ l_a(x_a) + l_e(x_e) \leq l_{P_{\max}}(x) \leq (1+\epsilon)l_P(x) $$ for any other strategy $P$. We can then raise the value of $l_a(x_a)$ until the latency of some strategy of the form $\{a,e\}$ becomes tight with respect to $l_{P_{\max}}(x)$. Note that, since $a \in X$, we have $x_a > z_a$ and therefore this does not contradict the fact that $l_a$ is non-decreasing. More precisely, we replace $l_a$ by some function $\hat{l}_a(y)$ which is non-negative, continuous and non-decreasing, and that satisfies $\hat{l}_a(z_a) = l_a(z_a)$ and $\hat{l}_a(x_a) = l_a(x_a) + \alpha$, where $\alpha$ is the smallest value such that $l_a(x_a) + \alpha + l_e(x_e) = l_{P_{\max}}(x)$ for some flow-carrying strategy of the form $\{e,a\}$. If $e \neq e_0$, we have found the desired result, and otherwise, we can use a similar argument as in the beginning of the claim to show that $\{a,b\}$ is also a flow-carrying strategy with maximum latency. Note that the social cost $C(x)$ of $x$ can only get worse if we replace the function $l_a$ by $\hat{l}_a$.\qed \end{proof}
We now use Claims 1 and 2 to establish the bound $(1+\epsilon)/(1-\epsilon)$ for $\epsilon < 1$. Let $P_{\max} = \{a,b\}$ for $a,b \in E \setminus \{e_0\}$ (which is justified because of Claim 2). The Nash conditions then give (by comparing $\{a,b\}$ with $\{a,e_0\}$ and $\{b,e_0\}$) $$ l_a(x_a) + l_b(x_b) \leq (1+\epsilon)[l_a(x_a) + l_{e_0}(x_{e_0})] $$ $$ l_a(x_a) + l_b(x_b) \leq (1+\epsilon)[l_b(x_b) + l_{e_0}(x_{e_0})] $$ Adding up these inequalities, and rewriting, gives \begin{equation}\label{eq:nash_matroid2} (1 -\epsilon)[l_a(x_a) + l_b(x_b)] \leq (1+\epsilon)[l_{e_0}(x_{e_0})+ l_{e_0}(x_{e_0})] \end{equation} By definition of $Z$, we have $l_{e_0}(x_{e_0}) \leq l_{e_0}(z_{e_0})$, and, moreover, by the structure of the flow $z$ (as discussed in the beginning of the proof), we have $l_{e_0}(z_{e_0}) \leq l_{e}(z_{e})$ for any other resource with $z_e > 0$. On top of that, it also holds that $l_{e_0}(z_{e_0}) + l_{e}(z_{e}) = C(z)$, for any $e$ with $z_e > 0$, because of the fact that the demand is normalized to $r = 1$. Combining all this implies that $l_{e_0}(x_{e_0})+ l_{e_0}(x_{e_0}) \leq C(z)$. Using this observation, combined with (\ref{eq:nash_matroid2}), and the fact that $\epsilon < 1$, it follows that $$ C(x) \leq l_a(x_a) + l_b(x_b) \leq \frac{1 + \epsilon}{1 - \epsilon} \cdot [l_{e_0}(x_{e_0})+ l_{e_0}(x_{e_0})] \leq \frac{1 + \epsilon}{1 - \epsilon} \cdot C(z) $$ which is the desired result. \qed \end{proof}
\subsection{Proof of Theorem \ref{thm:matroid}} \begin{rtheorem}{Theorem}{\ref{thm:matroid}} Let $\mathcal{J} = (E,(l_e)_{e \in E},(\mathcal{S}_i)_{i \in [k]},(r_i)_{i \in [k]})$ be a matroid congestion game with homogeneous players. Let $\beta \ge 0$ be fixed and consider $\beta$-bounded basis deviations as defined above. Then the $\beta$-deviation\xspace ratio is upper bounded by $\text{$\beta$-DR}(\mathcal{J}) \le 1 + \beta$. Further, this bound is tight already for $1$-uniform matroid congestion games.
\end{rtheorem} \begin{proof} The proof consists of establishing the following two claims. \begin{enumerate}[i)] \item We have $l_e(x_e) \leq (1+\beta)l_e(z_e)$ for all $e \in E$ with $x_e > z_e$. \item It holds that $$ \sum_{e : x_e > z_e} (x_e - z_e)l_e(x_e) \leq (1 + \beta) \sum_{e : z_e \geq x_e} (z_e - x_e)l_e(x_e). $$ \end{enumerate} From these two claims, the result can be derived as follows. Rewriting the inequality in $ii)$, we find $$ \sum_{e : x_e > z_e} x_el_e(x_e) \leq \sum_{e : x_e > z_e} z_el_e(x_e) + (1 + \beta) \sum_{e : z_e \geq x_e} (z_e - x_e)l_e(x_e). $$ Adding $\sum_{e : z_e \geq x_e} x_el_e(x_e)$ to both sides of the inequality, and using the definition of $C(x)$, implies $$ C(x) \leq \sum_{e : x_e > z_e} z_el_e(x_e) + (1 + \beta)\sum_{e : z_e \geq x_e} z_e l_e(x_e) - \beta \sum_{e : z_e \geq x_e} x_el_e(x_e) $$ In the first term, we now use that the fact that $l_e(x_e) \leq (1+\beta) l_e(z_e)$ for every $e$ in this summation, because of $i)$; in the second term, we use the fact that $l_e(x_e) \leq l_e(z_e)$ for all resources $e$ in the summation (which follows from $z_e \geq x_e$ and the fact that the latency functions are non-decreasing); the third term is left out using the fact that $\beta \sum_{e : z_e \geq x_e} x_el_e(x_e) \geq 0$ because of the non-negativity of the latency functions and $\beta$. This then implies that $$ C(x) \leq (1+\beta) \sum_{e : x_e > z_e} z_el_e(z_e) + (1 + \beta)\sum_{e : z_e \geq x_e} z_e l_e(z_e) - 0 = (1+\beta)C(z) $$ which gives the desired result.
It now remains to show that $i)$ and $ii)$ hold. We first prove $i)$, of which the proof is of a similar nature as a proof of Fujishige et al. \cite{Fujishige2015}. In particular, using similar arguments as Lemma 3.2 \cite{Fujishige2015}, it can be shown that for every $e \in E$ with $x_e > z_e$, there exists an $f \in E \setminus \{e\}$ with $z_f > x_f$ such that $$ l_e(x_e) + \delta_e(x_e) \leq l_f(x_f) + \delta_f(x_f) \text{ \ \ \ and \ \ \ } l_f(z_f) \leq l_e(z_e).\footnote{We refer the reader to the proof of Lemma 3.2 \cite{Fujishige2015} for details. We omit it here for notational reasons.} $$ The proof of this argument uses the fact that $x$ is a Nash flow w.r.t. the costs $l_e + \delta_e$, and $z$ is a Nash flow w.r.t. the latencies $l_e$. It follows that $$ l_e(x_e) \leq l_e(x_e) + \delta_e(x_e) \leq l_f(x_f) + \delta_f(x_f) \leq (1+\beta)l_f(x_f) \leq (1 + \beta)l_f(z_f) \leq (1+\beta)l_e(z_e) $$ using the fact that $0 \leq \delta_e(x_e) \leq \beta l_e(x_e)$ and the properties of the resources $e$ and $f$.
We now prove the second claim. We use a `variational inequality' argument similar to, e.g., the proof of Theorem 4 \cite{Lianeas2016}. Because of the fact that $x$ is $\beta$-deviated Nash flow, it follows that $$ \sum_{e \in E} x_e(l_e(x_e)+ \delta_e(x_e)) \leq \sum_{e \in E} z_e(l_e(x_e) + \delta_e(x_e)). $$ Rewriting gives $$ \sum_{e: x_e > z_e} (x_e - z_e)(l_e(x_e)+ \delta_e(x_e)) \leq \sum_{e: z_e \geq x_e} (z_e - x_e)(l_e(x_e)+ \delta_e(x_e)). $$ Using the fact that $0 \leq \delta_e(x_e) \leq \beta l_e(x_e)$ it follows that $$ \sum_{e: x_e > z_e} (x_e - z_e)l_e(x_e) \leq \sum_{e: z_e \geq x_e} (z_e - x_e)(l_e(x_e)+ \delta_e(x_e)) \leq (1+\beta)\sum_{e: z_e \geq x_e} (z_e - x_e)l_e(x_e) $$ which completes the proof.
Tightness follows, e.g., from a similar construction as in the last tightness construction of the proof of Theorem \ref{thm:hetero} (if applied to a homogeneous population). \qed \end{proof}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{About the spectra of a real nonnegative matrix and its signings}
\author[B]{Kawtar Attas} \ead{[email protected]}
\author[B]{Abderrahim Boussa\"{\i}ri\corref{cor1}} \ead{[email protected]}
\author[B]{Mohamed Zaidi} \ead{[email protected]}
\cortext[cor1]{Corresponding author}
\address[B]{Facult\'e des Sciences A\"{\i}n Chock, D\'epartement de Math\'ematiques et Informatique,
Km 8 route d'El Jadida, BP 5366 Maarif, Casablanca, Maroc}
\begin{abstract} For a real matrix $M$, we denote by $sp(M)$ the spectrum of $M$ and by $\left \vert M\right \vert $ its absolute value, that is the matrix obtained from $M$ by replacing each entry of $M$ by its absolute value. Let $A$ be a nonnegative real matrix; we call a \emph{signing} of $A$ every real matrix $B$ such that $\left \vert B\right \vert =A$. In this paper, we study the set of all signings $B$ of $A$ such that $sp(B)=\alpha sp(A)$, where $\alpha$ is a complex unit number. Our work generalizes some results obtained in \cite{anradha2013, cuihou13, shader}. \end{abstract}
\begin{keyword}
Spectra; digraph; nonnegative matrices; irreducible matrices.
\MSC 05C20, 05C50 \end{keyword}
\end{frontmatter}
\section{Introduction}
Throughout this paper, all matrices are real or complex. The identity matrix of order $n$ is denoted by $I_{n}$ and the transpose of a matrix $A$ by $A^{T}$. Let $\Sigma$ be a set of nonzero real or complex numbers such that if $x\in \Sigma$ then $x^{-1}\in \Sigma$.\ Two square matrices $A$ and $B$ are $\Sigma$-\emph{diagonally similar} if $B=\Lambda^{-1}A\Lambda$ for some nonsingular diagonal matrix $\Lambda$ with diagonal entries in $\Sigma$. A square matrix $A$ is \emph{reducible }if there exists a permutation matrix $P$, so that $A$ can be reduced to the form $PAP^{T}=\left( \begin{array} [c]{cc} X & Y\\ 0 & Z \end{array} \right) $ where $X$ and $Z$ are square matrices. A matrix which is not reducible is said to be \emph{irreducible}. A real matrix $A$ is nonnegative, $A\geq0$, if all its entries are nonnegative.
Let $A$ be an $n\times n$ real or complex matrix. The multiset $\left \{ \lambda_{1},\lambda_{2},\ldots,\lambda_{n}\right \} $ of eigenvalues of $A$ is called the \emph{spectrum} of $A$ and is denoted by $sp(A)$. We usually assume that $\left \vert \lambda_{1}\right \vert \geq \left \vert \lambda_{2}\right \vert \geq \ldots \geq \left \vert \lambda_{n}\right \vert $. The \emph{spectral radius} of $A$ is $\left \vert \lambda_{1}\right \vert $; it is denoted by $\rho(A)$. Clearly, two real matrices that are $\left \{ -1,1\right \} $-diagonally similar have the same absolute value and the same spectrum.
In this paper, we address the following problem.
\begin{prob} Let $A$ be a nonnegative real matrix and let $\alpha$ be a complex unit number. Characterize the set of all signings of $A$ such that $sp(B)=\alpha sp(A)$. \end{prob}
Our motivation comes from the works of Shader and So \cite{shader} which correspond to the adjacency matrix of a graph. Let $G$ be a finite simple graph with vertex set $V(G)=\left \{ v_{1},\ldots,v_{n}\right \} $ and edge set $E(G)$. The \emph{adjacency matrix} of $G$ is the symmetric matrix $A(G)=(a_{ij})_{1\leq i,j\leq n}$ where $a_{ij}=a_{ji}=1$\ if $\left \{ v_{i},v_{j}\right \} $ is an edge of $G$, otherwise $a_{ij}=a_{ji}=0$. Since the matrix $A(G)$ is symmetric, its eigenvalues are real. The \emph{adjacency spectrum} $Sp\left( G\right) $ of $G$ is defined as the spectrum of $A(G)$. Let $G^{\sigma}$ be an orientation of $G$, which assigns to each edge a direction so that the resultant graph $G^{\sigma}$ becomes an oriented graph. The \emph{skew-adjacency} matrix of $G^{\sigma}$ is the real skew symmetric matrix $S(G^{\sigma})=(a_{ij}^{\sigma})_{1\leq i,j\leq n}$\ where $a_{ij}^{\sigma }=-a_{ji}^{\sigma}=1$ if $(i,j)$ is an arc of $G^{\sigma}$ and $a_{ij} ^{\sigma}=0$, otherwise. The \emph{skew-spectrum} $Sp\left( G^{\sigma }\right) $ of $G^{\sigma}$ is defined as the spectrum of $S(G^{\sigma})$. Note that $Sp(G^{\sigma})$ consists of only purely imaginary eigenvalues because $S(G^{\sigma})$ is a real skew symmetric matrix.
There are several recent works about the relationship between $Sp(G^{\sigma})$ and $Sp(G)$ (see for example \cite{anradha2013, anuradha2014, cavers, cuihou13, shader}). In the last paper, Shader and So have obtained a result which is closely related to our work. To state this result, let $G$ be a bipartite graph with bipartition $[ I,J]$. The orientation $\varepsilon_{I,J}$ that assigns to each edge of $G$ a direction from $I$ to $J$ is called the \emph{canonical orientation}. Shader and So showed \cite{shader} that $Sp\left( G^{\varepsilon_{I,J}}\right) =iSp\left( G\right) $. Moreover, they proved that a graph $G$ is bipartite if and only if $Sp\left( G^{\sigma}\right) =iSp\left( G\right) $ for some orientation $G^{\sigma}$ of $G$. For connected graphs, this result can be viewed as a particular case of Proposition \ref{shader generalis} because a graph $G$ is bipartite if and only if there exists a permutation matrix $P$ such that $PA(G)P^{T}=\left( \begin{array} [c]{cc} 0 & X\\ X^{T} & 0 \end{array} \right) $ where the zero diagonal blocks are square. Consider now an orientation $G^{\sigma}$ of a graph $G$ and let $W$ be a subset of $V(G)$. The orientation $G^{\tau}$ of $G$ obtained from $G^{\sigma}$ by reversing the direction of all arcs between $W$ and $V(G)\backslash W$ is said to be obtained from $G^{\sigma}$ by \emph{switching} with respect to $W$. Two orientations are \emph{switching-equivalent} if one can be obtained from the other by switching. Anuradha, Balakrishnan, Chen, Li, Lian and So \cite{anradha2013} proved that $Sp\left( G^{\sigma}\right) =iSp\left( G\right) $ if and only if $G^{\sigma}$ is switching-equivalent to the canonical orientation. For a bipartite connected graph, this is a direct consequence of Proposition \ref{solution shader} because two orientations of a graph are switching-equivalent if and only if their skew-adjacency matrices are $\{-1,1\}$-diagonally similar.
In this work, we consider only the case of irreducible matrices (not necessarily symmetric). To state our main result, we need some terminology. A \emph{digraph} $D$ is a pair consisting of a finite set $V(D)$ of \emph{vertices} and a subset $E(D)$ of ordered pairs of vertices called \emph{arcs}. Let $v,v^{\prime}$ be two vertices of $D$; a \emph{path} $P$ from $v$ to $v^{\prime}$ is a finite sequence $v_{0}=v,\ldots,v_{k}=v^{\prime}$ such that $(v_{0},v_{1}),\ldots,(v_{k-1},v_{k})$ are arcs of $D$. The \emph{length} of $P$ is the number $k$ of its arcs. If $v_{0}=v_{k}$, we say that $P$ is a \emph{closed path}. A digraph is said to be \emph{strongly connected} if for any two vertices $v$ and $v^{\prime}$, there is a path joining $v$ to $v^{\prime}$. It is easy to see that a strongly connected digraph has a closed path. The \emph{period} of a digraph is the greatest common divisor of the lengths of its closed paths. A digraph is \emph{aperiodic} if its period is one.
With each $n\times n$ matrix $A=(a_{ij})_{1\leq i,j\leq n}$, we associate a digraph $D_{A}$ on the vertex set $\left[ n\right] =\left \{ 1,\ldots ,n\right \} $ and with arc set $E(D_{A})=\left \{ (i,j):a_{ij}\neq0\right \} $. It is easy to show that $A$ is irreducible if and only if $D_{A}$ is strongly connected. The \emph{period} of a matrix is the period of its associate digraph.
\begin{nt} Let $A$ be an irreducible nonnegative real matrix with period $p$ and let $\alpha$ be a complex unit number, we denote by $\mathcal{M}(\alpha,A)$ the set of all signings of $A$ such that $sp(B)=\alpha sp(A)$. \end{nt}
In Corollary \ref{forme de alpha}, we give a necessary condition for $\mathcal{M}(\alpha,A)$ to be nonempty. More precisely, we will prove that if $\mathcal{M}(\alpha,A)$ is nonempty, then $\alpha=e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $. If $p>1$, we will construct (see Corollary \ref{nonempty period p}) for each $k\in \left \{ 0,\ldots,2p-1\right \} $ a matrix $\widetilde{A}$ such that $sp(\widetilde{A})=e^{\frac{i\pi k}{p}}sp(A)$. Finally, by using Corollary \ref{forme de alpha} and Proposition \ref{structurede malpha}, we prove our main theorem.
\begin{thm} \label{main theorem}Let $A$ be an irreducible nonnegative matrix with period $p$ and let $\alpha$ be a unit complex number. Then the following statements hold:
\begin{description} \item[i)] $\mathcal{M}(\alpha,A)$ is nonempty iff $\alpha=e^{\frac{i\pi k}{p} }$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $;
\item[ii)] For $k\in \left \{ 0,\ldots,2p-1\right \} $, we have
\item[a)] if $k$ is even then $\mathcal{M}(e^{\frac{i\pi k}{p}},A)$ is the set of all matrices $\left \{ -1,1\right \} -$diagonally similar to $A$;
\item[b)] if $k$ is odd then $\mathcal{M}(e^{\frac{i\pi k}{p}},A)$ is the set of all matrices $\left \{ -1,1\right \} -$diagonally similar to $B_{0}$ where $B_{0}$ is an arbitrary signing of $A$ such that $sp(B_{0})=e^{\frac{i\pi}{p}}sp(A)$. \end{description} \end{thm}
\begin{req} Let $A$ be an aperiodic matrix. It follows from the above Theorem that $\mathcal{M}(\alpha,A)$ is empty whenever $\alpha \neq \pm1$. Moreover, $\mathcal{M}(1,A)$ (resp. $\mathcal{M}(-1,A)$) is the set of all matrices $\left \{ -1,1\right \} -$diagonally similar to $A$ (resp. to $-A$). \end{req}
\section{Some properties of $\mathcal{M}(\alpha,A)$}
We will use the following theorem due to Frobenius (see \cite{godsil}).
\begin{thm}
\label{frobenius} Let $A$ be an irreducible nonnegative $n\times n$ matrix and let $B$ be a complex $n\times n$ matrix such that $|B|\leq A$. Then $\left \vert \lambda \right \vert \leq \rho(A)$ for each eigenvalue $\lambda$ of $B$ and $\lambda=\rho(A)e^{i\theta}$ iff $B=e^{i\theta}LAL^{-1}$, where $L$ is a complex diagonal matrix such that $\left \vert L\right \vert =I_{n}$. \end{thm}
In the next Proposition, we describe the structure of $\mathcal{M}(\alpha,A)$.
\begin{prop} \label{structurede malpha} Let $A$ be an irreducible nonnegative matrix with period $p$ and let $\alpha$ be a unit complex number. Then $\mathcal{M} (\alpha,A)$ is empty or $\mathcal{M}(\alpha,A)$ is the set of all matrices $\left \{ -1,1\right \} -$diagonally similar to $B_{0}$ where $B_{0}$ is an arbitrary matrix in $\mathcal{M}(\alpha,A)$. \end{prop}
\begin{pf} Assume that $\mathcal{M}(\alpha,A)$ is nonempty and let $B_{0}\in \mathcal{M}(\alpha,A)$. It is easy to see that $\Lambda^{-1}B_{0}\Lambda\in \mathcal{M} (\alpha,A)$ for every $\left \{ -1,1\right \} $-diagonal matrix $\Lambda$. Conversely, let $B\in \mathcal{M}(\alpha,A)$. Then $sp(B)=sp(B_{0})=\alpha sp(A)$. Let $\lambda=\rho(A)e^{i\theta}$ be a common eigenvalue of $B$ and $B_{0}$. By Theorem \ref{frobenius}, we have $B=e^{i\theta}LAL^{-1}$ and $B_{0}=e^{i\theta}L^{\prime}AL^{\prime-1}$ where $L$, $L^{\prime}$ are complex diagonal matrices such that $\left \vert L\right \vert =\left \vert L^{\prime }\right \vert =I_{n}$. Since diagonal matrices commute, it follows that $B=(L^{\prime}L^{-1})^{-1}B_{0}L^{\prime}L^{-1}$. To conclude, it suffices to apply Lemma \ref{realsimilarit} below to $\Gamma=L^{\prime}L^{-1}$. \qed\end{pf}
\begin{lem} \label{realsimilarit} Let $A$ be an $n\times n$ irreducible nonnegative matrix and let $B$, $B^{\prime}$ be two signings of $A$. If there exists a complex diagonal matrix $\Gamma$ such that $B^{\prime}=\Gamma B\Gamma^{-1}$ and $\left \vert \Gamma \right \vert =I_{n}$ then $B$ and $B^{\prime}$ are $\left \{ -1,1\right \} -$diagonally similar. \end{lem}
\begin{pf} Let $A:=(a_{ij})_{1\leq i,j\leq n}$, $B:=(b_{ij})_{1\leq i,j\leq n}$ and $B^{\prime}:=(b_{ij}^{\prime})_{1\leq i,j\leq n}$. We denote by $\gamma_{1},\ldots,\gamma_{n}$ the diagonal entries of $\Gamma$. Let $\Delta$ be the diagonal matrix with diagonal entries $\delta_{j}=\gamma_{j}\gamma_{1}^{-1}$ for $j=1,\ldots, n$. As $A$ is irreducible, the digraph $D_{A}$ is strongly connected and then there is a path $j=i_{1},\ldots,i_{r}=1$ of $D_{A}$ from $j$ to $1$. By definition of $D_{A}$, we have $a_{i_{1}i_{2}}\neq0,\ldots,a_{i_{r-1}i_{r}}\neq0$. It follows that $b_{i_{1}i_{2}}\neq0,\ldots,b_{i_{r-1}i_{r}}\neq0$ and $b_{i_{1}i_{2}}^{\prime}\neq0,\ldots,b_{i_{r-1}i_{r}}^{\prime}\neq0$ because $\left \vert B\right \vert =\left \vert B^{\prime}\right \vert =A$. Moreover, from the equality $B^{\prime}=\Gamma B\Gamma^{-1}$ we have $b_{i_{1}i_{2}}^{\prime}=\gamma_{i_{1}}b_{i_{1}i_{2}}\gamma_{i_{2}}^{-1}$, $b_{i_{2}i_{3}}^{\prime}=\gamma_{i_{2}}b_{i_{2}i_{3}}\gamma_{i_{3}}^{-1}$, \ldots, $b_{i_{r-1}i_{r}}^{\prime}=\gamma_{i_{r-1}}b_{i_{r-1}i_{r}}\gamma_{i_{r}}^{-1}$. Then $b_{i_{1}i_{2}}^{\prime}\ldots b_{i_{r-1}i_{r}}^{\prime}=\gamma_{i_{1}}\gamma_{i_{r}}^{-1}b_{i_{1}i_{2}}\ldots b_{i_{r-1}i_{r}}$. But by hypothesis, $B,B^{\prime}$ are real matrices and $\left \vert B\right \vert =\left \vert B^{\prime}\right \vert $, so $b_{i_{1}i_{2}}^{\prime}\ldots b_{i_{r-1}i_{r}}^{\prime}=\pm b_{i_{1}i_{2}}\ldots b_{i_{r-1}i_{r}}$ and hence $\delta_{j}=\gamma_{j}\gamma_{1}^{-1}=\gamma_{i_{1}}\gamma_{i_{r}}^{-1}\in \left \{ -1,1\right \} $. To conclude, it suffices to see that $\Delta B\Delta^{-1}=\Gamma B\Gamma^{-1}=B^{\prime}$. \qed\end{pf}
By using Theorem \ref{frobenius}, we obtain the following.
\begin{prop} \label{eigenvalue of B} Let $A$ be an irreducible nonnegative real matrix with period $p$ and let $B$ be a signing of $A$ such that $\rho(B)=\rho(A)$. If $\lambda$ is an eigenvalue of $B$ such that $\left \vert \lambda \right \vert =\rho(A)$, then $\lambda =\rho(A)e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $. \end{prop}
\begin{pf}
Let $A:=(a_{ij})_{1\leq i,j\leq n}$, $B:=(b_{ij})_{1\leq i,j\leq n}$ and
$\lambda=\rho(A)e^{i\theta}$. By Theorem \ref{frobenius}, we have $B=e^{i\theta}LAL^{-1}$ where $L$ is a complex diagonal matrix such that $\left \vert L\right \vert =I_{n}$. It follows that $b_{ij}=e^{i\theta}l_{i}a_{ij}l_{j} ^{-1}$ for $i,j\in \left \{ 1,\ldots,n\right \} $, where $l_{1},\ldots,l_{n}$ are the diagonal entries of $L$. Consider now a closed path $C=(i_{1} ,i_{2},\ldots,i_{r},i_{1})$ of $D_{A}$. By the previous equality, we have \begin{align*} \text{ }\frac{b_{i_{1}i_{2}}\ldots b_{i_{r-1}i_{r}}b_{i_{r}i_{1}}} {a_{i_{1}i_{2}}\ldots a_{i_{r-1}i_{r}}a_{i_{r}i_{1}}} & =(e^{i\theta }l_{i_{1}}l_{i_{2}}^{-1})\ldots(e^{i\theta}l_{i_{r-1}}l_{i_{r}}^{-1} )(e^{i\theta}l_{i_{r}}l_{i_{1}}^{-1})\\ & =(e^{i\theta})^{r} \end{align*}
Then $(e^{i\theta})^{r}\in \left \{ 1,-1\right \} $ because $\left \vert B\right \vert =A$.
Since $(e^{i\theta})^{r}\in \left \{ 1,-1\right \} $, i.e.\ $e^{2ir\theta}=1$, for the length $r$ of every closed path of $D_{A}$, and since $p$ is the greatest common divisor of these lengths, B\'ezout's identity gives $e^{2ip\theta}=1$, that is $(e^{i\theta})^{p}\in \left \{ 1,-1\right \} $, and then $\lambda=\rho(A)e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots ,2p-1\right \} $. \qed\end{pf}
\begin{req} \label{eigenvalues of A} Let $A=(a_{ij})_{1\leq i,j\leq n}$ be an irreducible nonnegative real matrix with period $p$ and let $\lambda$ be an eigenvalue of $A$ such that $\left \vert \lambda \right \vert =\rho(A)$. By applying Proposition \ref{eigenvalue of B} to $B=A$, we have $\lambda=\rho (A)e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $. \end{req}
The following result gives a necessary condition under which $\mathcal{M} (\alpha,A)$ is nonempty.
\begin{cor} \label{forme de alpha} Let $A$ be an irreducible nonnegative real matrix of period $p$ and let $\alpha$ be a unit complex number. If $\mathcal{M} (\alpha,A)$ is nonempty then $\alpha=e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $, in particular $\alpha^{p}=\pm1$. \end{cor}
\begin{pf} Let $\lambda$ be an eigenvalue of $A$ such that $\left \vert \lambda \right \vert =\rho(A)$. By applying Remark \ref{eigenvalues of A}, we have $\lambda =\rho(A)e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $. Let $B\in \mathcal{M}(\alpha,A)$. We have $\alpha \rho(A)e^{\frac{i\pi k}{p}}\in sp(B)$ because $sp(B)=\alpha sp(A)$. It follows from Proposition \ref{eigenvalue of B} that $\alpha \rho(A)e^{\frac{i\pi k}{p}}=\rho (A)e^{\frac{i\pi h}{p}}$ for some $h\in \left \{ 0,\ldots,2p-1\right \} $ and hence $\alpha=e^{\frac{i\pi(h-k)}{p}}$. \qed\end{pf}
\section{Proof of the Main Theorem}
Let $n$ be a positive integer and let $(r_{1},\ldots,r_{p})$ be a partition of $n$, that is $r_{1},\ldots,r_{p}$ are positive integers and $r_{1} +\cdots+r_{p}=n$. For $i=1,\ldots,p-1$, let $A_{i}$ be a $r_{i}\times r_{i+1} $ matrix and let $A_{p}$ be a $r_{p}\times r_{1}$ matrix. The matrix $\left( \begin{array} [c]{ccccc} 0 & A_{1} & 0 & \cdots & 0\\ 0 & 0 & A_{2} & \cdots & 0\\ \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & 0 & \ddots & A_{p-1}\\ A_{p} & 0 & \cdots & 0 & 0 \end{array} \right) $ is denoted by $Cyc(A_{1},A_{2},\ldots,A_{p})$.
Each matrix of this form is called $p$-\emph{cyclic. }
Recall the well-known result of Frobenius about irreducible matrices with period $p>1$.
\begin{prop} \label{cyclic form} Let $A$ be an irreducible nonnegative real matrix with period $p>1$, then there exists a permutation matrix $P$ such that $PAP^{T}$ is $p$-cyclic. \end{prop}
\begin{req} \label{coverce aperiodic} For aperiodic matrices, the converse of Corollary \ref{forme de alpha} is true because $A\in \mathcal{M}(1,A)$ and $-A\in \mathcal{M}(-1,A)$. \end{req}
For $p>1$, we have the following result.
\begin{prop} \label{converse for periodic} Let $A=Cyc(A_{1},A_{2},\ldots,A_{p})$ be a nonnegative $p$-cyclic matrix where $A_{i}$ is a $r_{i}\times r_{i+1}$ matrix for $i=1,\ldots,p-1$ and $A_{p}$ is a $r_{p}\times r_{1}$ matrix. Let $\widetilde{A}$ be the matrix obtained from $A$ by replacing the block $A_{p}$ by $-A_{p}$. Given $\alpha=e^{\frac{i\pi k}{p}}$ where $k\in \left \{ 0,\ldots,2p-1\right \} $, then
\begin{description} \item[i)] if $k$ is even, $e^{\frac{i\pi k}{p}}A$ is diagonally similar to $A$, in particular $sp(A)=e^{\frac{i\pi k}{p}}sp(A)$;
\item[ii)] if $k$ is odd, $e^{\frac{i\pi k}{p}}A$ is diagonally similar to $\widetilde{A}$, in particular $sp(\widetilde{A})=e^{\frac{i\pi k}{p}}sp(A)$. \end{description} \end{prop}
\begin{pf} Let $L:=\left( \begin{array} [c]{ccccc} I_{r_{1}} & 0 & 0 & \cdots & 0\\ 0 & e^{\frac{i\pi k}{p}}I_{r_{2}} & 0 & \cdots & 0\\ \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & 0 & \ddots & 0\\ 0 & 0 & \cdots & 0 & e^{\frac{i\pi k(p-1)}{p}}I_{r_{p}} \end{array} \right) $. Conjugation by $L$ multiplies each of the blocks $A_{1},\ldots,A_{p-1}$ by $e^{-\frac{i\pi k}{p}}$ and the block $A_{p}$ by $e^{\frac{i\pi k(p-1)}{p}}=(-1)^{k}e^{-\frac{i\pi k}{p}}$. It is then easy to check that if $k$ is even, $e^{\frac{i\pi k}{p}}LAL^{-1}=A$ and if $k$ is odd, $e^{\frac{i\pi k}{p}}LAL^{-1}=\widetilde{A}$. \qed\end{pf}
The next Corollary is a direct consequence of Propositions \ref{converse for periodic} and \ref{cyclic form}.
\begin{cor} \label{nonempty period p} Let $A$ be an irreducible nonnegative matrix with period $p>1$. Then $\mathcal{M}(e^{\frac{i\pi k}{p}},A)$ is nonempty for $k\in \left \{ 0,\ldots,2p-1\right \} $. \end{cor}
\begin{pf} By Proposition \ref{cyclic form}, there exists a permutation matrix $P$ such that $PAP^{T}$ is $p-$cyclic. Let $A^{\prime}:=PAP^{T}:=Cyc(A_{1},A_{2},\ldots,A_{p})$ and let $\widetilde{A^{\prime}}$ be the matrix obtained from $A^{\prime}$ by replacing the block $A_{p}$ by $-A_{p}$. It follows from Proposition \ref{converse for periodic} that $sp(A^{\prime})=e^{\frac{i\pi k}{p}}sp(A^{\prime})$ if $k$ is even and $sp(\widetilde{A^{\prime}})=e^{\frac{i\pi k}{p}}sp(A^{\prime})$ if $k$ is odd. If $k$ is even then $sp(A)=e^{\frac{i\pi k}{p}}sp(A)$ because $P^{T}A^{\prime}P=A$ and hence $A\in \mathcal{M}(e^{\frac{i\pi k}{p}},A)$. If $k$ is odd then $sp(P^{T}\widetilde{A^{\prime}}P)=e^{\frac{i\pi k}{p}}sp(P^{T}A^{\prime}P)=e^{\frac{i\pi k}{p}}sp(A)$. Moreover, since $\left \vert \widetilde{A^{\prime}}\right \vert =A^{\prime}$, we have $\left \vert P^{T}\widetilde{A^{\prime}}P\right \vert =P^{T}A^{\prime}P=A$ and then $P^{T}\widetilde{A^{\prime}}P\in \mathcal{M}(e^{\frac{i\pi k}{p}},A)$. \qed\end{pf}
\begin{req} \label{egalite des malpha} It follows from Corollary \ref{forme de alpha}, Remark \ref{coverce aperiodic} and Corollary \ref{nonempty period p} that $\mathcal{M}(\alpha,A)$ is nonempty iff $\alpha=e^{\frac{i\pi k}{p}}$ for some $k\in \left \{ 0,\ldots,2p-1\right \} $. Moreover, as $e^{\frac{i\pi k}{p} }sp(A)=sp(A)$ if $k$ is even, we have $e^{\frac{i\pi k}{p}}sp(A)=e^{\frac {i\pi}{p}}sp(A)$ if $k$ is odd and then \begin{align*} \mathcal{M}(1,A) & =\mathcal{M}(e^{\frac{2i\pi}{p}},A)=\cdots=\mathcal{M} (e^{\frac{2(p-1)i\pi}{p}},A)\\ \mathcal{M}(e^{\frac{i\pi}{p}},A) & =\mathcal{M}(e^{\frac{3i\pi}{p} },A)=\cdots=\mathcal{M}(e^{\frac{(2p-1)i\pi}{p}},A) \end{align*}
\end{req}
\begin{pot}
i) (Necessity) This follows from Corollary \ref{forme de alpha}.
(Sufficiency) If $p=1$ then from Corollary \ref{forme de alpha} we have $\alpha=\pm1$ and hence by Remark \ref{coverce aperiodic}, $\mathcal{M} (\alpha,A)$ is nonempty. If $p>1$, it suffices to apply Corollary \ref{nonempty period p}.
ii) By Remark \ref{egalite des malpha}, $\mathcal{M}(e^{\frac{i\pi k}{p} },A)=\mathcal{M}(1,A)$ if $k$ is even and $\mathcal{M}(e^{\frac{i\pi k}{p} },A)=\mathcal{M}(e^{\frac{i\pi}{p}},A)$ if $k$ is odd. To prove a) and b) it suffices to apply Proposition \ref{structurede malpha} respectively to $\alpha=1$ and to $\alpha=e^{\frac{i\pi}{p}}$. \qed\end{pot}
\section{The special case of symmetric matrices}
In this section we give some consequences of Theorem \ref{main theorem} when the matrix $A$ is symmetric.
\begin{prop} \label{shader generalis} Let $A$ be an irreducible nonnegative symmetric real matrix. Then the following statements are equivalent :
\begin{description} \item[i)] there is a real skew-symmetric matrix $B$ such that $\left \vert B\right \vert =A$ and $sp(B)=isp(A)$;
\item[ii)] there exists a permutation matrix $P$ such that $PAP^{T}=\left( \begin{array} [c]{cc} 0 & X\\ X^{T} & 0 \end{array} \right) $ where the zero diagonal blocks are square. \end{description} \end{prop}
\begin{pf} Note that the period of $A$ is at most $2$ because its associated digraph contains a closed path of length $2$.
First, we prove the implication i)$\implies$ii). It follows from i) that $\mathcal{M}(i,A)$ is nonempty. Then, by Corollary \ref{forme de alpha}, the period of $A$ is necessarily $2$. It follows from Proposition \ref{cyclic form} that there exists a permutation matrix $P$, so that $PAP^{T}=\left( \begin{array} [c]{cc} 0 & X\\ Y & 0 \end{array} \right) $ where the zero diagonal blocks are square. But, the matrix $PAP^{T}$ is symmetric, then $Y=X^{T}$.
To prove ii) $\implies$ i), it suffices to apply Proposition \ref{converse for periodic} for $p=2$ and $k=1$. \qed\end{pf}
\begin{prop} \label{solution shader} Let $A=\left( \begin{array} [c]{cc} 0 & X\\ X^{T} & 0 \end{array} \right) $ be an irreducible nonnegative symmetric matrix and let $B$ be a skew symmetric matrix such that $\left \vert B\right \vert =A$. Then the following statements are equivalent :
\begin{description} \item[i)] $sp(B)=isp(A)$;
\item[ii)] $B$ is $\{-1,1\}$-diagonally similar to $\widetilde{A}=\left( \begin{array} [c]{cc} 0 & X\\ -X^{T} & 0 \end{array} \right) $. \end{description} \end{prop}
\begin{pf} It follows from assertion ii) of Proposition \ref{converse for periodic} that $sp(\widetilde{A})=isp(A)$ and then $\widetilde{A}\in \mathcal{M}(i,A)$. The equivalence i) $\Longleftrightarrow$ ii) results from assertion b) of Theorem \ref{main theorem}. \qed\end{pf}
\end{document} |
\begin{document}
\title[Moduli spaces of weighted pointed stable rational curves]{Moduli spaces of weighted pointed stable rational curves via GIT} \date{March 2010}
\author{Young-Hoon Kiem} \address{Department of Mathematics and Research Institute of Mathematics, Seoul National University, Seoul 151-747, Korea} \email{[email protected]}
\author{Han-Bom Moon} \address{Department of Mathematics, Seoul National University, Seoul 151-747, Korea} \email{[email protected]}
\thanks{Partially supported by an NRF grant}
\begin{abstract} We construct the Mumford-Knudsen space $\overline{M}_{0,n} $ of $n$-pointed stable rational curves by a sequence of explicit blow-ups from the GIT quotient $(\mathbb{P}^1)^n/\!/ SL(2)$ with respect to the symmetric linearization $\mathcal{O} (1,\cdots,1)$. The intermediate blown-up spaces turn out to be the moduli spaces of weighted pointed stable curves $\overline{M}_{0,n\cdot \epsilon} $ for suitable ranges of $\epsilon$. As an application, we provide a new unconditional proof of M. Simpson's Theorem about the log canonical models of $\overline{M}_{0,n} $. We also give a basis of the Picard group of $\overline{M}_{0,n\cdot \epsilon} $. \end{abstract}
\maketitle
\section{Introduction}\label{sec1} Recently there has been a tremendous amount of interest in the birational geometry of moduli spaces of stable curves. See for instance \cite{AlexSwin, FedoSmyt,Hassett,Kapranov,Keel,Li, Mustata, Simpson} for the genus 0 case only. Most prominently, it has been proved in \cite{AlexSwin, FedoSmyt,Simpson} that the log canonical models for $(\overline{M}_{0,n} , K_{\overline{M}_{0,n} }+\alpha D)$, where $D$ is the boundary divisor and $\alpha$ is a rational number, give us Hassett's moduli spaces $\overline{M}_{0,n\cdot \epsilon} $ of weighted pointed stable curves with \emph{symmetric} weights $n\cdot \epsilon =(\epsilon,\cdots, \epsilon)$. See \S\ref{sec2.1} for the definition of $\overline{M}_{0,n\cdot \epsilon} $ and Theorem \ref{thm1.2} below for a precise statement. The purpose of this paper is to prove that actually all the moduli spaces $\overline{M}_{0,n\cdot \epsilon} $ can be constructed by explicit blow-ups from the GIT quotient $(\mathbb{P}^1)^n/\!/ SL(2)$ with respect to the symmetric linearization $\mathcal{O} (1,\cdots,1)$ where $SL(2)$ acts on $(\mathbb{P}^1)^n$ diagonally. More precisely, we prove the following. \begin{theorem}\label{thm1.1} There is a sequence of blow-ups \begin{equation}\label{eq1} \overline{M}_{0,n} =\overline{M}_{0,n\cdot \epsilon_{m-2}} \to \overline{M}_{0,n\cdot \epsilon_{m-3}} \to \cdots \to \overline{M}_{0,n\cdot \epsilon_2} \to \overline{M}_{0,n\cdot \epsilon_1} \to (\mathbb{P}^1)^n/\!/ SL(2) \end{equation} where $m=\lfloor\frac{n}{2}\rfloor$ and $\frac1{m+1-k}<\epsilon_k\le \frac1{m-k}$. Except for the last arrow when $n$ is even, the center for each blow-up is a union of transversal smooth subvarieties of same dimension. When $n$ is even, the last arrow is the blow-up along the singular locus which consists of $\frac12\binom{n}{m}$ points in $(\mathbb{P}^1)^n/\!/ SL(2)$, i.e. 
$\overline{M}_{0,n\cdot \epsilon_1} $ is Kirwan's partial desingularization (see \cite{Kirwan}) of the GIT quotient $(\mathbb{P}^1)^{2m}/\!/ SL(2)$.\end{theorem} If the center of a blow-up is the transversal union of smooth subvarieties in a nonsingular variety, the result of the blow-up is isomorphic to that of the sequence of smooth blow-ups along the irreducible components of the center in any order (see \S\ref{sec2.3}). So each of the above arrows can be decomposed into the composition of smooth blow-ups along the irreducible components.
As an application of Theorem \ref{thm1.1}, we give a new proof of the following theorem of M. Simpson (\cite{Simpson}) without relying on Fulton's conjecture. \begin{theorem}\label{thm1.2} Let $\alpha$ be a rational number satisfying $\frac{2}{n-1} < \alpha \le 1$ and let $D=\overline{M}_{0,n} -M_{0,n}$ denote the boundary divisor. Then the log canonical model $$\overline{M}_{0,n} (\alpha) = \mathrm{Proj}\;\left(\bigoplus_{l \ge 0} H^0(\overline{M}_{0,n} , \mathcal{O} (\lfloor l (K_{\overline{M}_{0,n} } +\alpha D) \rfloor))\right) $$ satisfies the following: \begin{enumerate} \item If $\frac{2}{m-k+2} < \alpha \le \frac{2}{m-k+1}$ for $1\le k\le m-2$, then $\overline{M}_{0,n} (\alpha) \cong \overline{M}_{0,n\cdot \epsilon_k} $.\item If $\frac{2}{n-1} < \alpha \le \frac{2}{m+1}$, then $\overline{M}_{0,n} (\alpha) \cong (\mathbb{P}^1)^n /\!/ G$ where the quotient is taken with respect to the symmetric linearization $\mathcal{O} (1,\cdots,1)$. \end{enumerate} \end{theorem} There are already two different \emph{unconditional} proofs of Theorem \ref{thm1.2} by Alexeev-Swinarski \cite{AlexSwin} and by Fedorchuk-Smyth \cite{FedoSmyt}. See Remark \ref{rem-compproofs} for a brief outline of the two proofs. In this paper we obtain the ampleness of some crucial divisors directly from Theorem \ref{thm1.1}. As another application, we give an explicit basis of the Picard group of $\overline{M}_{0,n\cdot \epsilon_k} $ for each $k$.
It is often the case in moduli theory that adding an extra structure makes a problem easier. Let $0\le k< n$. A pointed nodal curve $(C,p_1,\cdots, p_n)$ of genus $0$ together with a morphism $f:C\to \mathbb{P}^1$ of degree $1$ is called \emph{$k$-stable} if \begin{enumerate} \item[i.] all marked points $p_i$ are smooth points of $C$; \item[ii.] no more than $n-k$ of the marked points $p_i$ can coincide; \item[iii.] any ending irreducible component $C'$ of $C$ which is contracted by $f$ contains more than $n-k$ marked points; \item[iv.] the group of automorphisms of $C$ preserving $f$ and $p_i$ is finite. \end{enumerate} A. Mustata and M. Mustata prove the following in \cite{Mustata}. \begin{theorem} \cite[\S1]{Mustata} There is a fine moduli space $F_k$ of $k$-stable pointed parameterized curves $(C,p_1,\cdots,p_n,f)$. Furthermore, the moduli spaces $F_k$ fit into a sequence of blow-ups \begin{equation}\label{2010eq1} \xymatrix{ \mathbb{P}^1[n]\ar@{=}[r] & F_{n-2}\ar[r]^{\psi_{n-2}} & F_{n-3}\ar[r]^{\psi_{n-3}} & \cdots \ar[r]^{\psi_2} & F_1\ar[r]^{\psi_1} & F_0\ar@{=}[r] & (\mathbb{P}^1)^n } \end{equation} whose centers are transversal unions of smooth subvarieties. \end{theorem} The first term $\mathbb{P}^1[n]$ is the Fulton-MacPherson compactification of the configuration space of $n$ points in $\mathbb{P}^1$ constructed in \cite{FM}. The blow-up centers are transversal unions of smooth subvarieties and hence we can further decompose each arrow into the composition of smooth blow-ups along the irreducible components in any order. This blow-up sequence is actually a special case of L. Li's inductive construction of a \emph{wonderful compactification} of the configuration space and transversality of various subvarieties is a corollary of Li's result \cite[Proposition 2.8]{Li}. (See \S\ref{sec2.3}.) The images of the blow-up centers are invariant under the diagonal action of $SL(2)$ on $(\mathbb{P}^1)^n$ and so this action lifts to $F_k$ for all $k$. 
The aim of this paper is to show that the GIT quotient of the sequence \eqref{2010eq1} by $SL(2)$ gives us \eqref{eq1}.
To make sense of GIT quotients, we need to specify a linearization of the action of $G=SL(2)$ on $F_k$. For $F_0=(\mathbb{P}^1)^n$, we choose the symmetric linearization $L_0=\mathcal{O} (1,\cdots, 1)$. Inductively, we choose $L_k=\psi^*_kL_{k-1}\otimes \mathcal{O} (-\delta_kE_{k})$ where $E_k$ is the exceptional divisor of $\psi_k$ and $0<\delta_k<\!<\delta_{k-1}<\!< \cdots <\!<\delta_1<\!<1$. Let $F_k^{ss}$ (resp. $F_k^s$) be the semistable (resp. stable) part of $F_k$ with respect to $L_k$. Then by \cite[\S3]{Kirwan} or \cite[Theorem 3.11]{Hu}, we have \begin{equation}\label{2010eq2} \psi_k^{-1}(F^s_{k-1})\subset F_k^s\subset F_k^{ss}\subset \psi_k^{-1}(F_{k-1}^{ss}). \end{equation} In particular, we obtain a sequence of morphisms $$\bar\psi_k:F_k/\!/ G\to F_{k-1}/\!/ G.$$ It is well known that a point $(x_1,\cdots,x_n)$ in $F_0=(\mathbb{P}^1)^n$ is stable (resp. semistable) if $\ge \lfloor \frac{n}2\rfloor$ points (resp. $> \lfloor \frac{n}2\rfloor$ points) do not coincide (\cite{MFK,K1}).
Let us first consider the case where $n$ is odd. In this case, $F_0^s=F_0^{ss}$ because $\frac{n}2$ is not an integer. Hence $F_k^s=F_k^{ss}$ for any $k$ by \eqref{2010eq2}. Since the blow-up centers of $\psi_k$ for $k\le m+1$ lie in the unstable part, we have $F_k^s=F_0^s$ for $k\le m+1$. Furthermore, the stabilizer group of every point in $F_k^s$ is $\{\pm 1\}$, i.e. $\bar G=PGL(2)$ acts freely on $F_k^s$ for $0\le k\le n-2$ and thus $F_k/\!/ G=F_k^s/G$ is nonsingular. By the stability conditions, forgetting the degree 1 morphism $f:C\to \mathbb{P}^1$ gives us an invariant morphism $F_{n-m+k}^s\to \overline{M}_{0,n\cdot \epsilon_k} $ which induces a morphism $$\phi_k:F_{n-m+k}/\!/ G\to \overline{M}_{0,n\cdot \epsilon_k} \quad \text{for } k=0,\cdots, m-2.$$ Since both varieties are nonsingular, we can conclude that $\phi_k$ is an isomorphism by showing that the Picard numbers are identical. Since $\bar G$ acts freely on $F_{n-m+k}^s$, the quotient of the blow-up center of $\psi_{n-m+k+1}$ is again a transversal union of $\binom{n}{m-k}$ smooth varieties $\Sigma^S_{n-m+k}/\!/ G$ for a subset $S$ of $\{1,\cdots, n\}$
with $|S|=m-k$, which are isomorphic to the moduli space $\overline{M}_{0,(1,\epsilon_k,\cdots,\epsilon_k)}$ of weighted pointed stable curves with $n-m+k+1$ marked points (Remark \ref{2010rem1}). Finally we conclude that $$\varphi_k:\overline{M}_{0,n\cdot \epsilon_k} \cong F_{n-m+k}/\!/ G\mapright{\bar\psi_{n-m+k}} F_{n-m+k-1}/\!/ G\cong \overline{M}_{0,n\cdot \epsilon_{k-1}} $$ is a blow-up by using a lemma in \cite{Kirwan} which tells us that quotient and blow-up commute. (See \S\ref{sec2.2}.) It is straightforward to check that this morphism $\varphi_k$ is identical to Hassett's natural morphisms (\S\ref{sec2.1}). Note that the isomorphism $$\phi_{m-2}:\mathbb{P}^1[n]/\!/ G\mapright{\cong} \overline{M}_{0,n} $$ was obtained by Hu and Keel (\cite{HuKeel}) when $n$ is odd because $L_0$ is a \emph{typical} linearization in the sense that $F_0^{ss}=F_0^s$. The above proof of the fact that $\phi_k$ is an isomorphism in the odd $n$ case is essentially the same as Hu-Keel's. However their method does not apply to the even degree case.
\defF_{n-m+k} {F_{n-m+k} } \def\tilde{F}_{n-m+k} {\tilde{F}_{n-m+k} } \defY_{n-m+k} {Y_{n-m+k} }
The case where $n$ is even is more complicated because $F_k^{ss}\ne F_k^s$ for all $k$. Indeed, $F_m/\!/ G=\cdots =F_0/\!/ G= (\mathbb{P}^1)^n/\!/ G$ is singular with exactly $\frac12\binom{n}{m}$ singular points. But for $k\ge 1$, the GIT quotient of $F_{n-m+k}$ by $G$ is nonsingular and we can use Kirwan's partial desingularization of the GIT quotient $F_{n-m+k}/\!/ G$ (\cite{Kirwan}). For $k\ge 1$, the locus $Y_{n-m+k}$ of closed orbits in $F_{n-m+k}^{ss}-F_{n-m+k}^s$ is the disjoint union of the transversal intersections of smooth divisors $\Sigma^S_{n-m+k}$ and $\Sigma^{S^c}_{n-m+k}$ where
$S\sqcup S^c=\{1,\cdots,n\}$ is a partition with $|S|=m$. In particular, $Y_{n-m+k}$ is of codimension $2$ and the stabilizers of points in $Y_{n-m+k}$ are all conjugates of $\mathbb{C}^*$. The weights of the action of the stabilizer $\mathbb{C}^*$ on the normal space to $Y_{n-m+k}$ are $2,-2$. By Luna's slice theorem (\cite[Appendix 1.D]{MFK}), it follows that $F_{n-m+k}/\!/ G$ is smooth along the divisor $Y_{n-m+k}/\!/ G$. If we let $\tilde{F}_{n-m+k} \to F_{n-m+k} ^{ss}$ be the blow-up of $F_{n-m+k} ^{ss}$ along $Y_{n-m+k} $, $\tilde{F}_{n-m+k} ^{ss}=\tilde{F}_{n-m+k} ^s$ and $\tilde{F}_{n-m+k} /\!/ G=\tilde{F}_{n-m+k} ^s/G$ is nonsingular. Since blow-up and quotient commute (\S\ref{sec2.2}), the induced map $$\tilde{F}_{n-m+k} /\!/ G\to F_{n-m+k} /\!/ G$$ is a blow-up along $Y_{n-m+k} /\!/ G$ which has to be an isomorphism because the blow-up center is already a smooth divisor. So we can use $\tilde{F}_{n-m+k} ^s$ instead of $F_{n-m+k} ^{ss}$ and apply the same line of arguments as in the odd degree case. In this way, we can establish Theorem \ref{thm1.1}.
To deduce Theorem \ref{thm1.2} from Theorem \ref{thm1.1}, we note that by \cite[Corollary 3.5]{Simpson}, it suffices to prove that $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is ample for $\frac{2}{m-k+2}<\alpha\le \frac{2}{m-k+1}$ where $D_k=\overline{M}_{0,n\cdot \epsilon_k} -M_{0,n}$ is the boundary divisor of $\overline{M}_{0,n\cdot \epsilon_k} $ (Proposition \ref{prop-amplerange}). By the intersection number calculations of Alexeev and Swinarski (\cite[\S3]{AlexSwin}), we obtain the nefness of $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ for $\alpha= \frac{2}{m-k+1}+s$ for some (sufficiently small) positive number $s$. Because any positive linear combination of an ample divisor and a nef divisor is ample, it suffices to show that $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is ample for $\alpha=\frac{2}{m-k+2}+t$ for \emph{any} sufficiently small $t>0$. We use induction on $k$. By calculating the canonical divisor explicitly, it is easy to show when $k=0$. Because $\varphi_k$ is a blow-up with exceptional divisor $D^{m-k+1}_k$, $\varphi_k^*(K_{\overline{M}_{0,n\cdot \epsilon_{k-1}} }+\alpha D_{k-1})-\delta D^{m-k+1}_k$ is ample for small $\delta>0$ if $K_{\overline{M}_{0,n\cdot \epsilon_{k-1}} }+\alpha D_{k-1}$ is ample. By a direct calculation, we find that these ample divisors give us $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ with $\alpha=\frac{2}{m-k+2}+t$ for any sufficiently small $t>0$. So we obtain a proof of Theorem \ref{thm1.2}.
For the moduli spaces of \emph{unordered} weighted pointed stable curves \[\widetilde{M}_{0,n\cdot\epsilon_k}=\overline{M}_{0,n\cdot \epsilon_k} /S_n\] we can simply take the $S_n$ quotient of our sequence \eqref{eq1} and thus $\widetilde{M}_{0,n\cdot\epsilon_k}$ can be constructed by a sequence of \emph{weighted blow-ups} from $\mathbb{P}^n/\!/ G=\left((\mathbb{P}^1)^n/\!/ G\right)/S_n$. In particular, $\widetilde{M}_{0,n\cdot\epsilon_1}$ is a weighted blow-up of $\mathbb{P}^n/\!/ G$ at its singular point when $n$ is even.
Here is an outline of this paper. In \S2, we recall necessary materials about the moduli spaces $\overline{M}_{0,n\cdot \epsilon_k} $ of weighted pointed stable curves, partial desingularization and blow-up along transversal center. In \S3, we recall the blow-up construction of the moduli space $F_k$ of weighted pointed parameterized stable curves. In \S4, we prove Theorem \ref{thm1.1}. In \S5, we prove Theorem \ref{thm1.2}. In \S6, we give a basis of the Picard group of $\overline{M}_{0,n\cdot \epsilon_k} $ as an application of Theorem \ref{thm1.1}.
\textbf{Acknowledgement.} This paper grew out of our effort to prove a conjecture of Brendan Hassett (passed to us by David Donghoon Hyeon): When $n$ is even, $\widetilde{M}_{0,n\cdot\epsilon_1}$ is the (weighted) blow-up of $\mathbb{P}^n/\!/ G$ at the singular point. It is our pleasure to thank Donghoon Hyeon for useful discussions. We are also grateful to David Smyth who kindly pointed out an error in a previous draft.
\section{Preliminaries}\label{sec2}
\subsection{Moduli of weighted pointed stable curves}\label{sec2.1} We recall the definitions and basic facts on Hassett's moduli spaces of weighted pointed stable curves from \cite{Hassett}.
A family of nodal curves of genus $g$ with $n$ marked points over base scheme $B$ consists of \begin{enumerate} \item a flat proper morphism $\pi : C \to B$ whose geometric fibers are nodal connected curves of arithmetic genus $g$ and \item sections $s_1, s_2, \cdots, s_n$ of $\pi$. \end{enumerate} An $n$-tuple $\mathcal{A} =(a_1, a_2, \cdots, a_n) \in \mathbb{Q}^n$ with $0 < a_i \le 1$ assigns a weight $a_i$ to the $i$-th marked point. Suppose that $2g-2+a_1+a_2+\cdots+a_n > 0$.
\begin{definition}\cite[\S 2]{Hassett} A family of nodal curves of genus $g$ with $n$ marked points $(C, s_1, \cdots, s_n) \stackrel{\pi}{\to} B$ is stable of type $(g, \mathcal{A})$ if \begin{enumerate} \item the sections $s_1, \cdots, s_n$ lie in the smooth locus of $\pi$; \item for any subset $\{s_{i_1}, \cdots, s_{i_r}\}$ of nonempty intersection, $a_{i_1} + \cdots + a_{i_r} \le 1$; \item $K_{\pi} + a_1s_1 + a_2s_2+\cdots+a_ns_n$ is $\pi$-relatively ample. \end{enumerate} \end{definition}
\begin{theorem}\cite[Theorem 2.1]{Hassett} There exists a connected Deligne-Mumford stack $\overline{\mathcal{M}}_{g, \mathcal{A}}$, smooth and proper over $\mathbb{Z}$, representing the moduli functor of weighted pointed stable curves of type $(g, \mathcal{A})$. The corresponding coarse moduli scheme $\overline{M}_{g, \mathcal{A}}$ is projective over $\mathbb{Z}$. \end{theorem}
When $g = 0$, there is no nontrivial automorphism for any weighted pointed stable curve and hence $\overline{M}_{0, \mathcal{A}}$ is a projective \emph{smooth variety} for any $\mathcal{A} $.
There are natural morphisms between moduli spaces with different weight data. Let $\mathcal{A}=\{a_1, \cdots, a_n\}$, $\mathcal{B}=\{b_1, \cdots, b_n\}$ be two weight data and suppose $a_i \ge b_i$ for all $1 \le i \le n$. Then there exists a birational \emph{reduction} morphism \[
\varphi_{\mathcal{A}, \mathcal{B}}
: \overline{\mathcal{M}}_{g, \mathcal{A}} \to
\overline{\mathcal{M}}_{g, \mathcal{B}}. \] For $(C, s_1, \cdots, s_n) \in \overline{\mathcal{M}}_{g, \mathcal{A}}$, $\varphi_{\mathcal{A}, \mathcal{B}}(C, s_1, \cdots, s_n)$ is obtained by collapsing components of $C$ on which $\omega_C + b_1s_1+ \cdots + b_ns_n$ fails to be ample. These morphisms between moduli stacks induce corresponding morphisms between coarse moduli schemes.
The exceptional locus of the reduction morphism $\varphi_{\mathcal{A}, \mathcal{B}}$ consists of boundary divisors $D_{I, I^c}$ where $I = \{i_1, \cdots, i_r\}$ and $I^c=\{j_1, \cdots, j_{n-r}\}$ form a partition of $\{1, \cdots, n\}$ satisfying $r > 2$, $$a_{i_1} + \cdots + a_{i_r} > 1 \quad \text{and}\quad b_{i_1} + \cdots + b_{i_r} \le 1.$$ Here $D_{I, I^c}$ denotes the closure of the locus of $(C, s_1, \cdots, s_n)$ where $C$ has two irreducible components $C_1, C_2$ with $p_a(C_1) = 0$, $p_a(C_2) = g$, $r$ sections $s_{i_1}, \cdots s_{i_r}$ lying on $C_1$, and the other $n-r$ sections lying on $C_2$.
\begin{proposition}\cite[Proposition 4.5]{Hassett}\label{reduction} The boundary divisor $D_{I,I^c}$ is isomorphic to $ \overline{M}_{0, \mathcal{A}'_I} \times \overline{M}_{g, \mathcal{A}'_{I^c}},$ with $ \mathcal{A}'_I = (a_{i_1}, \cdots, a_{i_r}, 1) $ and $\mathcal{A}'_{I^c}= (a_{j_1}, \cdots, a_{j_{n-r}}, 1).$ Furthermore, $
\varphi_{\mathcal{A}, \mathcal{B}}(D_{I, I^c})
\cong \overline{M}_{g, \mathcal{B}'_{I^c}}$ with $ \mathcal{B}'_{I^c} = (b_{j_1}, \cdots, b_{j_{n-r}}, \sum_{k=1}^r b_{i_k}).$ \end{proposition}
From now on, we focus on the $g=0$ case. Let $$m = \lfloor \frac{n}{2}\rfloor,\quad \frac{1}{m-k+1} < \epsilon_k \le \frac{1}{m-k}\quad \text{and}\quad n\cdot \epsilon_k = (\epsilon_k, \cdots, \epsilon_k).$$ Consider the reduction morphism \[
\varphi_{n\cdot \epsilon_{k}, n \cdot \epsilon_{k-1}}
: \overline{M}_{0, n \cdot \epsilon_{k}} \to
\overline{M}_{0, n \cdot \epsilon_{k-1}}. \]
Then $D_{I, I^c}$ is contracted by $\varphi_{n\cdot \epsilon_{k}, n \cdot \epsilon_{k-1}}$ if and only if $|I| = m - k + 1$. Certainly, there are $\binom{n}{m-k+1}$ such partitions $I\sqcup I^c$ of $\{1,\cdots,n\}$.
For two subsets $I, J \subset \{1, \cdots, n\}$ such that $|I| =
|J| = m - k + 1$, $D_{I, I^c} \cap D_{J, J^c}$ has codimension at least two in $\overline{M}_{0,n\cdot \epsilon_k} $. So if we denote the complement of the intersections of the divisors by \[
\overline{M}_{0,n\cdot \epsilon_k} ' = \overline{M}_{0,n\cdot \epsilon_k} - \bigcup_{|I| = |J| = m-k+1, I \ne J} D_{I, I^c} \cap D_{J,
J^c}, \] we have $\mathrm{Pic}(\overline{M}_{0,n\cdot \epsilon_k} ') = \mathrm{Pic}(\overline{M}_{0,n\cdot \epsilon_k} )$. The restriction of $\varphi_{n \cdot \epsilon_{k}, n \cdot \epsilon_{k-1}}$ to $\overline{M}_{0,n\cdot \epsilon_k} '$ is a contraction of $\binom{n}{m-k+1}$ \emph{disjoint} divisors and its image is an open subset whose complement has codimension at least two. Therefore we obtain the following equality of Picard numbers: \begin{equation}\label{eq-eqPicNumber}
\rho(\overline{M}_{0, n \cdot \epsilon_{k}}) =
\rho(\overline{M}_{0, n \cdot \epsilon_{k-1}}) + \binom{n}{m-k+1}. \end{equation}
It is well known that the Picard number of $\overline{M}_{0,n} $ is \begin{equation}\label{eq-eqPic2} \rho(\overline{M}_{0,n} )=\rho(\overline{M}_{0, n \cdot \epsilon_{m-2}})=2^{n-1}-\binom{n}{2}-1. \end{equation} Hence we obtain the following lemma from \eqref{eq-eqPicNumber} and \eqref{eq-eqPic2}. \begin{lemma}\label{lem-PicNumMze}\begin{enumerate}\item If $n$ is odd, $\rho(\overline{M}_{0,n\cdot \epsilon_k} )= n+\sum_{i=1}^k\binom{n}{m-i+1}$.\item If $n$ is even, $\rho(\overline{M}_{0,n\cdot \epsilon_k} )=n+\frac12\binom{n}{m}+\sum_{i=2}^k\binom{n}{m-i+1}$.\end{enumerate} \end{lemma}
\subsection{Partial desingularization}\label{sec2.2} We recall a few results from \cite{Kirwan, Hu} on change of stability in a blow-up.
Let $G$ be a complex reductive group acting on a projective nonsingular variety $X$. Let $L$ be a $G$-linearized ample line bundle on $X$. Let $Y$ be a $G$-invariant closed subvariety of $X$, and let $\pi : \widetilde{X} \to X$ be the blow-up of $X$ along $Y$, with exceptional divisor $E$. Then for sufficiently large $d$, $L_d = \pi^*L^d \otimes \mathcal{O} (-E)$ becomes very ample, and there is a natural lifting of the $G$-action to $L_d$ ( \cite[\S3]{Kirwan}).
Let $X^{ss}$ (resp.\ $X^s$) denote the semistable (resp.\ stable) part of $X$. With respect to the polarizations $L$ and $L_d$, the following hold (\cite[\S3]{Kirwan} or \cite[Theorem 3.11]{Hu}):\begin{equation}\label{eq-StabBlowup} \widetilde{X}^{ss} \subset \pi^{-1}(X^{ss}), \qquad \widetilde{X}^{s} \supset \pi^{-1}(X^{s}).\end{equation} In particular, if $X^{ss} = X^s$, then $\widetilde{X}^{ss} = \widetilde{X}^s = \pi^{-1}(X^s)$.
For the next lemma, let us suppose $Y^{ss}=Y\cap X^{ss}$ is nonsingular. We can compare the GIT quotient of $\widetilde{X}$ by $G$ with respect to $L_d$ with the quotient of $X$ by $G$ with respect to $L$. \begin{lemma}\cite[Lemma 3.11]{Kirwan} \label{blowupGIT} For sufficiently large $d$, $\widetilde{X}/\!/ G$ is the blow-up of $X/\!/ G$ along the image $Y /\!/ G$ of $Y^{ss}$. \end{lemma}
Let $\mathcal{I}$ be the ideal sheaf of $Y$. In the statement of Lemma \ref{blowupGIT}, the blow-up is defined by the ideal sheaf $(\mathcal{I}^m)_G$ which is the $G$-invariant part of $\mathcal{I}^m$, for some $m$. (See the proof of \cite[Lemma 3.11]{Kirwan}.) In the cases considered in this paper, the blow-ups always take place along \emph{reduced} ideals, i.e. $\widetilde{X}/\!/ G$ is the blow-up of $X/\!/ G$ along the subvariety $Y/\!/ G$ because of the following. \begin{lemma}\label{lem-specialcasebl} Let $G=SL(2)$ and $\mathbb{C}^*$ be the maximal torus of $G$. Suppose $Y^{ss}$ is smooth. The blow-up $\widetilde{X}/\!/ G\to X/\!/ G$ is the blow-up of the reduced ideal of $Y/\!/ G$ if any of the following holds: \begin{enumerate} \item The stabilizers of points in $X^{ss}$ are all equal to the center $\{\pm 1\}$, i.e. $\bar G=SL(2)/\{\pm 1\}$ acts on $X^{ss}$ freely. \item If we denote the $\mathbb{C}^*$-fixed locus in $X^{ss}$ by $Z^{ss}_{\mathbb{C}^*}$, $Y^{ss}=Y\cap X^{ss}=GZ^{ss}_{\mathbb{C}^*}$ and the stabilizers of points in $X^{ss}-Y^{ss}$ are all $\{\pm 1\}$. Furthermore suppose that the weights of the action of $\mathbb{C}^*$ on the normal space of $Y^{ss}$ at any $y\in Z^{ss}_{\mathbb{C}^*}$ are $\pm l$ for some $l\ge 1$. \item There exists a smooth divisor $W$ of $X^{ss}$ which intersects transversely with $Y^{ss}$ such that the stabilizers of points in $X^{ss}-W$ are all $\mathbb{Z}_2=\{\pm 1\}$ and the stabilizers of points in $W$ are all isomorphic to $\mathbb{Z}_4$. \end{enumerate} In the cases (1) and (3), $Y/\!/ G=Y^s/G$ and $X/\!/ G=X^s/G$ are nonsingular and the morphism $\widetilde{X}/\!/ G\to X/\!/ G$ is the smooth blow-up along the smooth subvariety $Y/\!/ G$. \end{lemma} \begin{proof} Let us consider the first case. Let $\bar G=PGL(2)$. 
By Luna's \'etale slice theorem \cite[Appendix 1.D]{MFK}, \'etale locally near a point in $Y^{ss}$, $X^{ss}$ is $\bar{G}\times S$ and $Y^{ss}$ is $\bar{G}\times S^Y$ for some nonsingular locally closed subvariety $S$ and $S^Y=S\cap Y$. Then \'etale locally $\widetilde{X}^{ss}$ is $\bar{G}\times \mathrm{bl}_{S^Y}S$ where $\mathrm{bl}_{S^Y}S$ denotes the blow-up of $S$ along the nonsingular variety $S^Y$. Thus the quotients $X/\!/ G$, $Y/\!/ G$ and $\widetilde{X}/\!/ G$ are \'etale locally $S$, $S^Y$ and $\mathrm{bl}_{S^Y}S$ respectively. This implies that the blow-up $\widetilde{X}/\!/ G\to X/\!/ G$ is the smooth blow-up along the reduced ideal of $Y/\!/ G$.
For the second case, note that the orbits in $Y^{ss}$ are closed in $X^{ss}$ because the stabilizers are maximal. So we can again use Luna's slice theorem to see that \'etale locally near a point $y$ in $Y^{ss}$, the varieties $X^{ss}$, $Y^{ss}$ and $\widetilde{X}$ are respectively $G\times_{\mathbb{C}^*}S$, $G\times_{\mathbb{C}^*}S^0$ and $G\times_{\mathbb{C}^*}\mathrm{bl}_{S^0}S$ for some nonsingular locally closed $\mathbb{C}^*$-equivariant subvariety $S$ and its $\mathbb{C}^*$-fixed locus $S^0$. Therefore the quotients $X/\!/ G$, $Y/\!/ G$ and $\widetilde{X}/\!/ G$ are \'etale locally $S/\!/ \mathbb{C}^*$, $S^0$ and $(\mathrm{bl}_{S^0}S)/\!/ \mathbb{C}^*$. Thus it suffices to show $$(\mathrm{bl}_{S^0}S)/\!/ \mathbb{C}^*\cong \mathrm{bl}_{S^0}(S/\!/ \mathbb{C}^*).$$ Since $X$ is smooth, \'etale locally we can choose our $S$ to be the normal space to the orbit of $y$ and $S$ is decomposed into the weight spaces $S^0\oplus S^+\oplus S^-$. As the action of $\mathbb{C}^*$ extends to $SL(2)$, the nonzero weights are $\pm l$ by assumption. If we choose coordinates $x_1,\cdots, x_r$ for $S^+$ and $y_1,\cdots, y_s$ for $S^-$, the invariants are polynomials of $x_iy_j$ and thus $(I^{2m})_{\mathbb{C}^*}=(I_{\mathbb{C}^*})^m$ for $m\ge 1$ where $I=\langle x_1,\cdots,x_r,y_1,\cdots,y_s \rangle$ is the ideal of $S^0$. By \cite[II Exe. 7.11]{Hartshorne}, we have $$\mathrm{bl}_{S^0}S=\mathrm{Proj}_S(\oplus_m I^m)\cong \mathrm{Proj}_S(\oplus_m I^{2m})$$ and thus $$(\mathrm{bl}_{S^0}S)/\!/ \mathbb{C}^* =\mathrm{Proj}_{S/\!/ \mathbb{C}^*}(\oplus_m I^{2m})_{\mathbb{C}^*} =\mathrm{Proj}_{S/\!/ \mathbb{C}^*}\left(\oplus_m (I_{\mathbb{C}^*})^{m}\right)=\mathrm{bl}_{I_{\mathbb{C}^*}}(S/\!/ \mathbb{C}^*).$$ Since $S$ is factorial and $I$ is reduced, $I_{\mathbb{C}^*}$ is reduced. (If $f^m\in I_{\mathbb{C}^*}$, then $f\in I$ and $(g\cdot f)^m=f^m$ for $g\in \mathbb{C}^*$. By factoriality, $g\cdot f$ may differ from $f$ only by a constant multiple, which must be an $m$-th root of unity. 
Because $\mathbb{C}^*$ is connected, the constant must be $1$ and hence $f\in I_{\mathbb{C}^*}$.) Therefore $I_{\mathbb{C}^*}$ is the reduced ideal of $S^0$ on $S/\!/ \mathbb{C}^*$ and hence $(\mathrm{bl}_{S^0}S)/\!/ \mathbb{C}^*\cong \mathrm{bl}_{S^0}(S/\!/ \mathbb{C}^*)$ as desired.
The last case is similar to the first case. Near a point in $W$, $X^{ss}$ is \'etale locally $\bar G\times_{\mathbb{Z}_2}S$ where $S=S_W\times\mathbb{C}$ for some smooth variety $S_W$. $\mathbb{Z}_2$ acts trivially on $S_W$ and by $\pm 1$ on $\mathbb{C}$. Etale locally $Y^{ss}$ is $\bar G\times_{\mathbb{Z}_2}S_Y$ where $S_Y=(S_W\cap Y)\times \mathbb{C}$. The quotients $X/\!/ G$, $Y/\!/ G$ and $\widetilde{X}/\!/ G$ are \'etale locally $S_W\times \mathbb{C}$, $(S_W\cap Y)\times \mathbb{C}$ and $\mathrm{bl}_{S_W\cap Y}S_W\times \mathbb{C}$. This proves our lemma. \end{proof} \begin{corollary}\label{cor-blcomquot} Suppose that (1) of Lemma \ref{lem-specialcasebl} holds. If $Y^{ss}=Y_1^{ss}\cup\cdots\cup Y_r^{ss}$ is a transversal union of smooth subvarieties of $X^{ss}$ and if $\widetilde{X}$ is the blow-up of $X^{ss}$ along $Y^{ss}$, then $\widetilde{X}/\!/ G$ is the blow-up of $X/\!/ G$ along the reduced ideal of $Y/\!/ G$ which is again a transversal union of smooth varieties $Y_i/\!/ G$. The same holds under the condition (3) of Lemma \ref{lem-specialcasebl} if furthermore $Y_i$ are transversal to $W$. \end{corollary} \begin{proof} Because of the assumption (1), $X^{ss}=X^s.$ If $Y^{ss}=Y_1^{ss}\cup\cdots\cup Y_r^{ss}$ is a transversal union of smooth subvarieties of $X^{ss}$ and if $\pi:\widetilde{X}\to X^{ss}$ is the blow-up along $Y^{ss}$, then $\widetilde{X}^s=\widetilde{X}^{ss}=\pi^{-1}(X^s)$ is the composition of smooth blow-ups along (the proper transforms of) the irreducible components $Y_i^{ss}$ by Proposition \ref{prop-bltrcen} below. For each of the smooth blow-ups, the quotient of the blown-up space is the blow-up of the quotient along the reduced ideal of the quotient of the center by Lemma \ref{lem-specialcasebl}. Hence $\widetilde{X}/\!/ G\to X/\!/ G$ is the composition of smooth blow-ups along irreducible smooth subvarieties which are proper transforms of $Y_i/\!/ G$. 
Hence $\widetilde{X}/\!/ G$ is the blow-up along the union $Y/\!/ G$ of $Y_i/\!/ G$ by Proposition \ref{prop-bltrcen} again.
The case (3) of Lemma \ref{lem-specialcasebl} is similar and we omit the detail. \end{proof}
Finally we recall Kirwan's partial desingularization construction of GIT quotients. Suppose $X^{ss} \ne X^s$ and $X^s$ is nonempty. Kirwan in \cite{Kirwan} introduced a systematic way of blowing up $X^{ss}$ along a sequence of nonsingular subvarieties to obtain a variety $\widetilde{X}$ with linearized $G$ action such that $\widetilde{X}^{ss} = \widetilde{X}^s$ and $\widetilde{X}/\!/ G$ has at worst finite quotient singularities only, as follows:\begin{enumerate} \item Find a maximal dimensional connected reductive subgroup $R$ such that the $R$-fixed locus $Z_R^{ss}$ in $X^{ss}$ is nonempty. Then \[ GZ_R^{ss}\cong G\times_{N^R}Z_R^{ss} \] is a nonsingular closed subvariety of $X^{ss}$ where $N^R$ denotes the normalizer of $R$ in $G$. \item Blow up $X^{ss}$ along $GZ_R^{ss}$ and find the semistable part $X_1^{ss}$. Go back to step 1 and repeat this process until there are no more strictly semistable points. \end{enumerate} Kirwan proves that this process stops in finitely many steps and $\widetilde{X}/\!/ G$ is called the \emph{partial desingularization} of $X /\!/ G$. We will drop ``partial'' if it is nonsingular.
\subsection{Blow-up along transversal center}\label{sec2.3} We show that the blow-up along a center whose irreducible components are transversal smooth varieties is isomorphic to the result of smooth blow-ups along the irreducible components in any order. This fact can be directly proved but instead we will see that it is an easy special case of beautiful results of L. Li in \cite{Li}.
\begin{definition} \cite[\S1]{Li} (1) For a nonsingular algebraic variety $X$, an \emph{arrangement} of subvarieties $S$ is a finite collection of nonsingular subvarieties such that all nonempty scheme-theoretic intersections of subvarieties in $S$ are again in $S$.
(2) For an arrangement $S$, a subset $B\subset S$ is called a \emph{building set} of $S$ if for any $s \in S- B$, the minimal elements in $\{b \in B : b \supset s\}$ intersect transversally and the intersection is $s$.
(3) A set of subvarieties $B$ is called a \emph{building set} if all the possible intersections of subvarieties in $B$ form an arrangement $S$ (called the induced arrangement of $B$) and $B$ is a building set of $S$. \end{definition}
The \emph{wonderful compactification} $X_B$ of $X^0=X-\cup_{b\in B} b$ is defined as the closure of $X^0$ in $\prod_{b\in B}\mathrm{bl}_bX$. Li then proves the following. \begin{theorem}\cite[Theorem 1.3]{Li} \label{thm-Li1} Let $X$ be a nonsingular variety and $B = \{b_1, \cdots,b_n\}$ be a nonempty building set of subvarieties of $X$. Let $I_i$ be the ideal sheaf of $b_i \in B$. \begin{enumerate}\item The wonderful compactification $X_B$ is isomorphic to the blow-up of $X$ along the ideal sheaf $I_1I_2\cdots I_n$. \item If we arrange $B = \{b_1, \cdots,b_n\}$ in such an order that the first $i$ terms $b_1,\cdots,b_i$ form a building set for any $1\le i\le n$, then $X_B = \mathrm{bl}_{\tilde{b}_n} \cdots \mathrm{bl}_{\tilde{b}_2} \mathrm{bl}_{b_1} X$, where each blow-up is along a nonsingular subvariety $\tilde{b}_i$.\end{enumerate} \end{theorem} Here $\tilde{b}_i$ is the \emph{dominant transform} of $b_i$ which is obtained by taking the proper transform when it doesn't lie in the blow-up center or the inverse image if it lies in the center, in each blow-up. (See \cite[Definition 2.7]{Li}.)
Let $X$ be a smooth variety and let $Y_1, \cdots, Y_n$ be transversally intersecting smooth closed subvarieties. Here, \emph{transversal intersection} means that for any nonempty $S \subset \{1, \cdots, n\}$ the intersection $Y_S:=\cap_{i \in S}Y_i$ is smooth and the normal bundle $N_{Y_S/X}$ in $X$ of $Y_S$ is the direct sum of the restrictions of the normal bundles $N_{Y_i/X}$ in $X$ of $Y_i$, i.e.
$$N_{Y_S/X} = \bigoplus_{i\in S} N_{Y_i/X}|_{Y_S}.$$ If we denote the ideal of $Y_i$ by $I_i$, the ideal of the union $\cup_{i=1}^n Y_i$ is the product $I_1I_2\cdots I_n$. Moreover for any permutation $\tau\in S_n$ and $1\le i\le n$, $B=\{Y_{\tau(1)},\cdots,Y_{\tau(i)}\}$ is clearly a building set. By Theorem \ref{thm-Li1} we obtain the following. \begin{proposition}\label{prop-bltrcen} Let $Y=Y_1\cup\cdots \cup Y_n$ be a union of transversally intersecting smooth subvarieties of a smooth variety $X$. Then the blow-up of $X$ along $Y$ is isomorphic to \[ \mathrm{bl}_{\tilde Y_{\tau(n)}}\cdots \mathrm{bl}_{\tilde Y_{\tau(2)}}\mathrm{bl}_{Y_{\tau(1)}} X \] for any permutation $\tau\in S_n$ where $\tilde{Y}_i$ denotes the proper transform of $Y_i$. \end{proposition}
\subsection{Log canonical model} Let $X$ be a normal projective variety and $D = \sum a_i D_i$ be a rational linear combination of prime divisors of $X$ with $0 < a_i \le 1$. A \emph{log resolution} of $(X, D)$ is a birational morphism $\pi : Y \to X$ from a smooth projective variety $Y$ to $X$ such that $\pi^{-1}(D_i)$ and the exceptional divisors $E_i$ of $\pi$ are simple normal crossing divisors on $Y$. Then the discrepancy formula \[
K_Y + \pi^{-1}_* (D) \equiv
\pi^*(K_X + D) + \sum_{E_i : \mbox{exceptional}} a(E_i, X, D)E_i, \] defines the \emph{discrepancy} of $(X, D)$ by \[
\mathrm{discrep}(X,D) := \inf \{ a(E, X, D) : E : \mbox{exceptional}\}. \]
Let $(X, D)$ be a pair where $X$ is a normal projective variety and $D = \sum a_i D_i$ be a rational linear combination of prime divisors with $0 < a_i \le 1$. Suppose that $K_X + D$ is $\mathbb{Q}$-Cartier. A pair $(X, D)$ is \emph{log canonical (abbrev. lc)} if $\mathrm{discrep}(X,D) \ge -1$ and \emph{Kawamata log terminal (abbrev. klt)} if $\mathrm{discrep}(X,D) > -1$ and $\lfloor D \rfloor \le 0$.
When $X$ is smooth and $D$ is a normal crossing effective divisor, $(X, D)$ is always lc and is klt if all $a_i < 1$.
\begin{definition} For lc pair $(X, D)$, the \emph{canonical ring} is \[
R(X, K_X + D) := \oplus_{l \ge 0} H^0(X, \mathcal{O} _X(\lfloor l (K_X + D) \rfloor)) \] and the \emph{log canonical model} is \[
\mathrm{Proj}\; R(X, K_X + D). \] \end{definition} In \cite{BCHM}, Birkar, Cascini, Hacon and McKernan proved that for any klt pair $(X, D)$, the canonical ring is finitely generated, so the log canonical model always exists.
\section{Moduli of weighted parameterized stable curves}\label{sec3}
Let $X$ be a smooth projective variety. In this section, we decompose the map $$X[n]\to X^n$$ defined by Fulton and MacPherson (\cite{FM}) into a \emph{symmetric} sequence of blow-ups along transversal centers. A. Mustata and M. Mustata already considered this problem in their search for intermediate moduli spaces for the stable map spaces in \cite[\S1]{Mustata}. Let us recall their construction.
\noindent \textbf{Stage 0}: Let $F_0=X^n$ and $\Gamma_0=X^n\times X$. For a subset $S$ of $\{1,2,\cdots,n\}$, we let \[
\Sigma^S_0=\{(x_1,\cdots,x_n)\in X^n\,|\, x_i=x_j \text{ if }
i,j\in S\}, \quad \Sigma^k_0=\cup_{|S|=k}\Sigma_0^S \] and let $\sigma^i_0\subset \Gamma_0$ be the graph of the $i$-th projection $X^n\to X$. Then $\Sigma_0^n\cong X$ is a smooth subvariety of $F_0$. For each $S$, fix any $i_S\in S$.
\noindent \textbf{Stage $1$}: Let $F_1$ be the blow-up of $F_0$ along $\Sigma_0^n$. Let $\Sigma_1^n$ be the exceptional divisor and $\Sigma_1^S$ be the proper transform of $\Sigma_0^S$ for
$|S|\ne n$. Let us define $\Gamma_1$ as the blow-up of $F_1\times_{F_0}\Gamma_0$ along $\Sigma^n_1\times_{F_0}\sigma^1_0$ so that we have a flat family \[ \Gamma_1\to F_1\times_{F_0}\Gamma_0 \to F_1 \] of varieties over $F_1$. Let $\sigma_1^i$ be the proper transform of $\sigma_0^i$ in $\Gamma_1$. Note that $\Sigma^S_{1}$ for
$|S|=n-1$ are all disjoint smooth varieties of the same dimension.
\noindent \textbf{Stage $2$}: Let $F_2$ be the blow-up of $F_1$
along $\Sigma_1^{n-1}=\cup_{|S|=n-1}\Sigma_1^S$. Let $\Sigma_2^S$
be the exceptional divisor lying over $\Sigma_1^S$ if $|S|=n-1$ and $\Sigma_2^S$ be the proper transform of $\Sigma_1^S$ for
$|S|\ne n-1$. Let us define $\Gamma_2$ as the blow-up of $F_2\times_{F_1}\Gamma_1$ along the disjoint union of
$\Sigma^S_2\times_{F_1}\sigma^{i_S}_1$ for all $S$ with $|S|=n-1$ so that we have a flat family \[ \Gamma_2\to F_2\times_{F_1}\Gamma_1 \to F_2 \] of varieties over $F_2$. Let $\sigma_2^i$ be the proper transform of $\sigma_1^i$ in $\Gamma_2$. Note that $\Sigma^S_{2}$ for
$|S|=n-2$ in $F_2$ are all transversal smooth varieties of the same dimension. Hence the blow-up of $F_2$ along their union is smooth by \S\ref{sec2.3}.
We can continue this way until we reach the last stage.
\noindent \textbf{Stage $n-1$}: Let $F_{n-1}$ be the blow-up of
$F_{n-2}$ along $\Sigma_{n-2}^2=\cup_{|S|=2}\Sigma_{n-2}^S$. Let $\Sigma_{n-1}^S$ be the exceptional divisor lying over
$\Sigma_{n-2}^S$ if $|S|=2$ and $\Sigma_{n-1}^S$ be the proper transform of $\Sigma_{n-2}^S$ for $|S|\ne 2$. Let us define $\Gamma_{n-1}$ as the blow-up of $F_{n-1}\times_{F_{n-2}}\Gamma_{n-2}$ along the disjoint union of $\Sigma^S_{n-1}\times_{F_{n-2}}\sigma^{i_S}_{n-2}$ for all $S$
with $|S|=2$ so that we have a flat family \[ \Gamma_{n-1}\to F_{n-1}\times_{F_{n-2}}\Gamma_{n-2} \to F_{n-1} \] of varieties over $F_{n-1}$. Let $\sigma_{n-1}^i$ be the proper transform of $\sigma_{n-2}^i$ in $\Gamma_{n-1}$.
Nonsingularity of the blown-up spaces $F_k$ is guaranteed by the following.
\begin{lemma}\label{lem3-1}
$\Sigma^S_{k}$ for $|S|\ge n-k$ are transversal in $F_{k}$, i.e.\ the normal bundle in $F_{k}$ of the intersection
$\cap_i\Sigma^{S_i}_k$ for distinct $S_i$ with $|S_i|\ge n-k$ is the direct sum of the restriction of the normal bundles in $F_k$ of $\Sigma^{S_i}_k$. \end{lemma} \begin{proof} This is a special case of the inductive construction of the wonderful compactification in \cite{Li}. (See \S \ref{sec2.3}.) In our situation, the building set is the set of all diagonals $B_0 =
\{\Sigma_0^S | S \subset \{1, 2, \cdots, n\}\}$. By \cite[Proposition 2.8]{Li}, $B_k=\{\Sigma_k^S\}$ is a building set of an arrangement in $F_k$ and hence the desired transversality follows. \end{proof}
By construction, $F_k$ are all smooth and $\Gamma_k\to F_k$ are equipped with $n$ sections $\sigma_k^i$. When $\dim X=1$, $\Sigma^2_{n-2}$ is a divisor and thus $F_{n-1}=F_{n-2}$. In \cite[Proposition 1.8]{Mustata}, Mustata and Mustata prove that the varieties $F_k$ are fine moduli spaces for some moduli functors as follows.
\begin{definition}\label{def3.1} \cite[Definition 1.7]{Mustata} A family of \emph{$k$-stable parameterized rational curves} over $S$ consists of a flat family of curves $\pi:C\to S$, a morphism $\phi:C\to S\times \mathbb{P}^1$ of degree 1 over each geometric fiber $C_s$ of $\pi$ and $n$ marked sections $\sigma^1,\cdots, \sigma^n$ of $\pi$ such that for all $s\in S$, \begin{enumerate} \item no more than $n-k$ of the marked points $\sigma^i(s)$ in $C_s$ coincide; \item any ending irreducible curve in $C_s$, except the parameterized one, contains more than $n-k$ marked points; \item all the marked points are smooth points of the curve $C_s$; \item $C_s$ has finitely many automorphisms preserving the marked points and the map to $\mathbb{P}^1$. \end{enumerate} \end{definition}
\begin{proposition}\label{prop3.2} \cite[Proposition 1.8]{Mustata} Let $X=\mathbb{P}^1$. The smooth variety $F_k$ finely represents the functor of isomorphism classes of families of $k$-stable parameterized rational curves. In particular, $F_{n-2}=F_{n-1}$ is the Fulton-MacPherson space $\mathbb{P}^1[n]$. \end{proposition}
\section{Blow-up construction of moduli of pointed stable curves}\label{sec4}
In the previous section, we decomposed the natural map $\mathbb{P}^1[n]\to (\mathbb{P}^1)^n$ of the Fulton-MacPherson space into a sequence \begin{equation}\label{eq4-1}\xymatrix{ \mathbb{P}^1[n]\ar@{=}[r] & F_{n-2}\ar[r]^{\psi_{n-2}} & F_{n-3}\ar[r]^{\psi_{n-3}} & \cdots \ar[r]^{\psi_2} & F_1\ar[r]^{\psi_1} & F_0\ar@{=}[r] & (\mathbb{P}^1)^n }\end{equation} of blow-ups along transversal centers. By construction the morphisms above are all equivariant with respect to the action of $G=SL(2)$. For GIT stability, we use the \emph{symmetric} linearization $L_0=\mathcal{O} (1,\cdots,1)$ for $F_0$. For $F_k$ we use the linearization $L_k$ inductively defined by $L_k=\psi_k^*L_{k-1}\otimes \mathcal{O} (-\delta_kE_k)$ where $E_k$ is the exceptional divisor of $\psi_k$ and $\{\delta_k\}$ is a decreasing sequence of sufficiently small positive numbers. Let $m=\lfloor\frac{n}{2}\rfloor$. In this section, we prove the following.
\begin{theorem}\label{thm4-1} (i) The GIT quotient $F_{n-m+k}/\!/ G$ for $1\le k\le m-2$ is isomorphic to Hassett's moduli space of weighted pointed stable rational curves $\overline{M}_{0,n\cdot \epsilon_k} $ with weights $n\cdot \epsilon_k=(\epsilon_k,\cdots,\epsilon_k)$ where $\frac1{m+1-k}<\epsilon_k\le \frac1{m-k}$. The induced maps on quotients \[ \overline{M}_{0,n\cdot \epsilon_k} = F_{n-m+k}/\!/ G \to F_{n-m+k-1}/\!/ G =\overline{M}_{0,n\cdot \epsilon_{k-1}} \] are blow-ups along transversal centers for $k=2,\cdots, m-2$.
(ii) If $n$ is odd, $$F_{m+1}/\!/ G=\cdots =F_0/\!/ G=(\mathbb{P}^1)^n/\!/ G=\overline{M}_{0,n\cdot \epsilon_0} $$ and we have a sequence of blow-ups \[ \overline{M}_{0,n} =\overline{M}_{0,n\cdot \epsilon_{m-2}} \to \overline{M}_{0,n\cdot \epsilon_{m-3}} \to \cdots \to \overline{M}_{0,n\cdot \epsilon_1} \to \overline{M}_{0,n\cdot \epsilon_0} = (\mathbb{P}^1)^n/\!/ G \] whose centers are transversal unions of equidimensional smooth varieties.
(iii) If $n$ is even, $\overline{M}_{0,n\cdot \epsilon_1} $ is a desingularization of $$(\mathbb{P}^1)^n/\!/ G=F_0/\!/ G=\cdots =F_m/\!/ G,$$ obtained by blowing up $\frac12\binom{n}{m}$ singular points so that we have a sequence of blow-ups \[ \overline{M}_{0,n} =\overline{M}_{0,n\cdot \epsilon_{m-2}} \to \overline{M}_{0,n\cdot \epsilon_{m-3}} \to \cdots \to \overline{M}_{0,n\cdot \epsilon_1} \to (\mathbb{P}^1)^n/\!/ G. \] \end{theorem} \begin{remark} (1) When $n$ is even, $\overline{M}_{0,n\cdot \epsilon_0} $ is not defined because the sum of weights does not exceed $2$.
(2) When $n$ is even, $\overline{M}_{0,n\cdot \epsilon_1} $ is Kirwan's (partial) desingularization of the GIT quotient $(\mathbb{P}^1)^n/\!/ G$ with respect to the symmetric linearization $L_0=\mathcal{O} (1,\cdots,1)$. \end{remark}
Let $F_k^{ss}$ (resp.\ $F_k^s$) denote the semistable (resp.\ stable) part of $F_k$. By \eqref{eq-StabBlowup}, we have \begin{equation}\label{eq4-2} \psi_k(F_k^{ss})\subset F_{k-1}^{ss},\qquad \psi_k^{-1}(F_{k-1}^s)\subset F_k^s. \end{equation} Also recall from \cite{K1} that $x=(x_1,\cdots,x_n)\in (\mathbb{P}^1)^n$ is semistable (resp.\ stable) if at most $\frac{n}{2}$ (resp.\ fewer than $\frac{n}{2}$) of the $x_i$'s coincide. In particular, when $n$ is odd, $\psi_k^{-1}(F_{k-1}^s)=F_k^s=F_k^{ss}$ for all $k$ and \begin{equation}\label{eq4-3} F_{m+1}^s=F_{m}^s=\cdots =F_0^s, \end{equation} because the blow-up centers lie in the unstable part. Therefore we have \begin{equation}\label{eq4-4} F_{m+1}/\!/ G = \cdots =F_0/\!/ G= (\mathbb{P}^1)^n/\!/ G. \end{equation} When $n$ is even, $\psi_k$ induces a morphism $F_k^{ss}\to F_{k-1}^{ss}$ and we have \begin{equation}\label{eq4-5} F_m^{ss}=F_{m-1}^{ss}=\cdots =F_0^{ss} \quad \text{and}\quad F_m/\!/ G=\cdots =F_0/\!/ G=(\mathbb{P}^1)^n/\!/ G. \end{equation}
Let us consider the case where $n$ is odd first. By forgetting the parameterization of the parameterized component of each member of family $(\Gamma_{m+k+1}\to F_{m+k+1},\sigma_{m+k+1}^i)$, we get a rational map $F_{m+k+1}\dashrightarrow \overline{M}_{0,n\cdot \epsilon_k} $ for $k=0,1,\cdots, m-2$. By the definition of the stability in \S\ref{sec2.1}, a fiber over $\xi\in F_{m+k+1}$ is not stable with respect to $n\cdot \epsilon_k=(\epsilon_k,\cdots,\epsilon_k)$ if and only if, in each irreducible component of the curve, the number $a$ of nodes and the number $b$ of marked points satisfy $b\epsilon_k+a\le 2$. Obviously this cannot happen on the (GIT) stable part $F_{m+k+1}^s$. Therefore we obtain a morphism $F_{m+k+1}^s\to \overline{M}_{0,n\cdot \epsilon_k} $. By construction this morphism is $G$-invariant and thus induces a morphism $$\phi_k:F_{m+k+1}/\!/ G\to \overline{M}_{0,n\cdot \epsilon_k} .$$ Since the stabilizer groups in $G$ of points in $F_0^s$ are all $\{ \pm 1\}$, the quotient $$\bar{\psi}_{m+k+1}:F_{m+k+1}/\!/ G\to F_{m+k}/\!/ G$$ of $\psi_{m+k+1}$ is also a blow-up along a center which consists of transversal smooth varieties by Corollary \ref{cor-blcomquot}.
Since the blow-up center has codimension $\ge 2$, the Picard number increases by $\binom{n}{m-k+1}$ for $k=1, \cdots, m-2$. Since the character group of $SL(2)$ has no free part, by the descent result in \cite{DN}, the Picard number of $F_{m+1}/\!/ G=F_0^s/G$ is the same as the Picard number of $F_0^s$ which equals the Picard number of $F_0$. Therefore $\rho(F_{m+1}/\!/ G)=n$ and the Picard number of $F_{m+k+1}/\!/ G$ is \[ n+\sum_{i=1}^{k}\binom{n}{m-i+1} \] which equals the Picard number of $\overline{M}_{0,n\cdot \epsilon_k} $ by Lemma \ref{lem-PicNumMze}. Since $\overline{M}_{0,n\cdot \epsilon_k} $ and $F_{m+k+1}/\!/ G$ are smooth and their Picard numbers coincide, we conclude that $\phi_k$ is an isomorphism as we desired. So we proved Theorem \ref{thm4-1} for odd $n$.
Now let us suppose $n$ is even. For ease of understanding, we divide our proof into several steps.
\noindent \underline{Step 1:} For $k\ge 1$, $F_{m+k}/\!/ G$ are nonsingular and isomorphic to the partial desingularizations $\tilde{F}_{m+k}/\!/ G$.
The GIT quotients $F_{m+k}/\!/ G$ may be singular because there are $\mathbb{C}^*$-fixed points in the semistable part $F_{m+k}^{ss}$. So we use Kirwan's partial desingularization of the GIT quotients $F_{m+k}/\!/ G$ (\S\ref{sec2.2}). The following lemma says that the partial desingularization process has no effect on the quotient $F_{m+k}/\!/ G$ for $k\ge 1$.
\begin{lemma}\label{lem4-3} Let $F$ be a smooth projective variety with linearized $G=SL(2)$ action and let $F^{ss}$ be the semistable part. Fix a maximal torus $\mathbb{C}^*$ in $G$. Let $Z$ be the set of $\mathbb{C}^*$-fixed points in $F^{ss}$. Suppose the stabilizers of all points in the stable part $F^{s}$ are $\{\pm 1\}$ and $Y=GZ$ is the union of all closed orbits in $F^{ss}-F^s$. Suppose that the stabilizers of points in $Z$ are precisely $\mathbb{C}^*$. Suppose further that $Y=GZ$ is of codimension $2$. Let $\tilde{F}\to F^{ss}$ be the blow-up of $F^{ss}$ along $Y$ and let $\tilde{F}^s$ be the stable part in $\tilde{F}$ with respect to a linearization as in \S\ref{sec2.2}. Finally suppose that for each $y\in Z$, the weights of the $\mathbb{C}^*$ action on the normal space to $Y$ is $\pm l$ for some $l>0$. Then $\tilde{F}/\!/ G=\tilde{F}^s/G\cong F/\!/ G$ and $F/\!/ G$ is nonsingular. \end{lemma} \begin{proof} Since $\bar G=G/\{\pm 1\}$ acts freely on $F^s$, $F^s/G$ is smooth. By assumption, $Y$ is the union of all closed orbits in $F^{ss}-F^s$ and hence $F/\!/ G-F^s/G=Y/G$. By Lemma \ref{lem-specialcasebl} (2), $\tilde{F}^s/G$ is the blow-up of $F/\!/ G$ along the reduced ideal of $Y/G$. By our assumption, $Z$ is of codimension $4$ and $$Y/G=GZ/G\cong G\times _{N^{\mathbb{C}^*}}Z/G\cong Z/\mathbb{Z}_2$$ where $N^{\mathbb{C}^*}$ is the normalizer of $\mathbb{C}^*$ in $G$. Since the dimension of $F/\!/ G$ is $\dim F-3$, the blow-up center $Y/G$ is nonsingular of codimension $1$. By Luna's slice theorem (\cite[Appendix 1.D]{MFK}), the singularity of $F/\!/ G$ at any point $[Gy]\in Y/G$ is $\mathbb{C}^2/\!/ \mathbb{C}^*$ where the weights are $\pm l$. Obviously this is smooth and hence $F/\!/ G$ is smooth along $Y/G$. Since the blow-up center is a smooth divisor, the blow-up map $\tilde{F}^s/G\to F/\!/ G$ has to be an isomorphism. \end{proof}
Let $Z_{m+k}$ be the $\mathbb{C}^*$-fixed locus in $F_{m+k}^{ss}$ and let $Y_{m+k}=GZ_{m+k}$. Then $Y_{m+k}$ is the disjoint union of
$$\Sigma_{m+k}^{S,S^c}:=\Sigma_{m+k}^S\cap \Sigma_{m+k}^{S^c}\cap F_{m+k}^{ss} \quad \text{for }|S|=m, S^c=\{1,\cdots,n\}-S $$ which are nonsingular of codimension $2$ for $k\ge 1$ by Lemma \ref{lem3-1}. For a point $$(C,p_1,\cdots,p_n,f:C\to\mathbb{P}^1)\in \Sigma^{S,S^c}_{m+k},$$ the parameterized component of $C$ (i.e. the unique component which is not contracted by $f$) has two nodes and no marked points. The normal space $\mathbb{C}^2$ to $\Sigma^{S,S^c}_{m+k}$ is given by the smoothing deformations of the two nodes and hence the stabilizer $\mathbb{C}^*$ acts with weights $2$ and $-2$.
The blow-up $\tilde{F}_{m+k}$ of $F_{m+k}^{ss}$ along $Y_{m+k}$ has no strictly semistable points by \cite[\S6]{Kirwan}. In fact, the unstable locus in $\tilde{F}_{m+k}$ is the proper transform of $\Sigma^S_{m+k}\cup \Sigma^{S^c}_{m+k}$ and the stabilizers of points in $\tilde{F}^s_{m+k}$ are either $\mathbb{Z}_2=\{\pm 1\}$ (for points not in the exceptional divisor of $\tilde{F}^s_{m+k}\to F^{ss}_{m+k}$) or $\mathbb{Z}_4=\{\pm 1,\pm i\}$ (for points in the exceptional divisor). Therefore, by Lemma \ref{lem4-3} and Lemma \ref{lem-specialcasebl} (3), we have isomorphisms \begin{equation}\label{eq4-10} \tilde{F}_{m+k}^s/G\cong F_{m+k}/\!/ G \end{equation} and $F_{m+k}/\!/ G$ are nonsingular for $k\ge 1$.
\noindent \underline{Step 2:} The partial desingularization $\tilde{F}_m/\!/ G$ is a nonsingular variety obtained by blowing up the $\frac12\binom{n}{m}$ singular points of $F_m/\!/ G=(\mathbb{P}^1)^n/\!/ G$.
Note that $Y_m$ in $F_m^{ss}$ is the disjoint union of
$\frac12\binom{n}{m}$ orbits $\Sigma_m^{S,S^c}$ for $|S|=m$. By Lemma \ref{lem-specialcasebl} (2), the morphism $\tilde{F}_m^s/G\to F_m/\!/ G$ is the blow-up at the $\frac12\binom{n}{m}$ points given by the orbits of the blow-up center. A point in $\Sigma^{S,S^c}_{m}$ is represented by $(\mathbb{P}^1,p_1,\cdots,p_n,\mathrm{id})$ with $p_i=p_j$ if $i,j\in S$ or $i,j\in S^c$. Without loss of generality, we may let $S=\{1,\cdots, m\}$. The normal space to an orbit $\Sigma^{S,S^c}_{m}$ is given by \[ (T_{p_1}\mathbb{P}^1)^{m-1}\times (T_{p_{m+1}}\mathbb{P}^1)^{m-1}=\mathbb{C}^{m-1}\times\mathbb{C}^{m-1} \] and $\mathbb{C}^*$ acts with weights $2$ and $-2$ respectively on the two factors. By Luna's slice theorem, \'etale locally near $\Sigma^{S,S^c}_{m}$, $F_m^{ss}$ is $G\times_{\mathbb{C}^*}(\mathbb{C}^{m-1}\times\mathbb{C}^{m-1})$ and $\tilde{F}_m$ is $G\times_{\mathbb{C}^*}\mathrm{bl}_{0}(\mathbb{C}^{m-1}\times\mathbb{C}^{m-1})$ while $\tilde{F}_m^s$ is $G\times_{\mathbb{C}^*}\left[\mathrm{bl}_{0}(\mathbb{C}^{m-1}\times\mathbb{C}^{m-1}) -\mathrm{bl}_{0}\mathbb{C}^{m-1}\sqcup \mathrm{bl}_{0}\mathbb{C}^{m-1}\right]$. By an explicit local calculation, the stabilizers of points on the exceptional divisor of $\widetilde{F}_m$ are $\mathbb{Z}_4=\{\pm 1,\pm i\}$ and the stabilizers of points over $F_m^s$ are $\mathbb{Z}_2=\{\pm 1\}$. Since the locus of nontrivial stabilizers for the action of $\bar G$ on $\tilde{F}^s_m$ is a smooth divisor with stabilizer $\mathbb{Z}_2$, $\tilde{F}_m/\!/ G=\tilde{F}^s_m/G$ is smooth and hence $\tilde{F}_m^s/G$ is the desingularization of $F_m/\!/ G$ obtained by blowing up its $\frac12\binom{n}{m}$ singular points.
\noindent \underline{Step 3:} The morphism $\bar\psi_{m+k+1}:F_{m+k+1}/\!/ G\to F_{m+k}/\!/ G$ is the blow-up along the union of transversal smooth subvarieties for $k\ge 1$. For $k=0$, we have $\tilde{F}^s_{m+1}=\tilde{F}^s_m$ and thus $$F_{m+1}/\!/ G\cong \tilde{F}^s_{m+1}/G=\tilde{F}^s_m/G=\tilde{F}_m/\!/ G$$ is the blow-up along its $\frac12\binom{n}{m}$ singular points.
From Lemma \ref{lem3-1}, we know $\Sigma^S_{m+k}$ for $|S|\ge m-k$ are transversal in $F_{m+k}$. In particular,
$$\bigcup_{|S|=m}\Sigma_{m+k}^S\cap \Sigma_{m+k}^{S^c}$$ intersects transversely with the blow-up center $$\bigcup_{|S'|=m-k} \Sigma^{S'}_{m+k}$$ for $\psi_{m+k+1}:F_{m+k+1}\to F_{m+k}$. Hence, by Proposition \ref{prop-bltrcen} we have a commutative diagram \begin{equation}\label{eq4-12} \xymatrix{ \tilde{F}_{m+k+1}\ar[r]\ar[d] & \tilde{F}_{m+k}\ar[d]\\ F_{m+k+1}^{ss}\ar[r] & F_{m+k}^{ss} } \end{equation} for $k\ge 1$ where the top horizontal arrow is the blow-up along the proper transforms $\tilde{\Sigma}^{S'}_{m+k}$ of
$\Sigma^{S'}_{m+k}$, $|S'|=m-k$. By Corollary \ref{cor-blcomquot}, we deduce that for $k\ge 1$, $\bar\psi_{m+k+1}$ is the blow-up along the transversal union of smooth subvarieties $\tilde{\Sigma}^{S'}_{m+k}/\!/ G\cong \Sigma^{S'}_{m+k}/\!/ G$.
For $k=0$, the morphism $\tilde{F}_{m+1}\to \tilde{F}_m$ is the blow-up along the proper transforms of $\Sigma_m^S$ and
$\Sigma_m^{S^c}$ for $|S|=m$. But these are unstable in $\tilde{F}_m$ and hence the morphism $\tilde{F}_{m+1}^s\to \tilde{F}_m^s$ on the stable part is the identity map. So we obtain $\tilde{F}_{m+1}^s= \tilde{F}_m^s$ and $\tilde{F}_{m+1}^s/G\cong \tilde{F}_m^s/G$.
\noindent \underline{Step 4:} Calculation of Picard numbers.
The Picard number of $F_m^{ss}=F_0^{ss}\subset F_0=(\mathbb{P}^1)^n$ is $n$ and so the Picard number of $\tilde{F}_m$ is $n+\frac12\binom{n}{m}$. By the descent lemma of \cite{DN} as in the odd degree case, the Picard number of $$F_{m+1}/\!/ G\cong \tilde{F}_{m+1}^s/G=\tilde{F}_m^s/G$$ equals the Picard number $n+\frac12\binom{n}{m}$ of $\tilde{F}_m^s$. Since the blow-up center of $\tilde{F}_{m+k}/\!/ G\to \tilde{F}_{m+k-1}/\!/ G$ has $\binom{n}{m-k+1}$ irreducible components, the Picard number of $\tilde{F}_{m+k}/\!/ G\cong F_{m+k}/\!/ G$ is \begin{equation}\label{eq4-13} n+\frac12\binom{n}{m}+\sum_{i=2}^{k}\binom{n}{m-i+1} \end{equation} for $k\ge 2$.
\noindent \underline{Step 5:} Completion of the proof.
As in the odd degree case, for $k\ge 1$ the universal family $\pi_k:\Gamma_{m+k}\to F_{m+k}$ gives rise to a family of pointed curves by considering the linear system $K_{\pi_k}+\epsilon_k\sum_i\sigma^i_{m+k}$. Over the semistable part $F_{m+k}^{ss}$ it is straightforward to check that this gives us a family of $n\cdot \epsilon_k$-stable pointed curves. Therefore we obtain an invariant morphism $$F_{m+k}^{ss}\to \overline{M}_{0,n\cdot \epsilon_k} $$ which induces a morphism $$F_{m+k}/\!/ G\to \overline{M}_{0,n\cdot \epsilon_k} .$$ By Lemma \ref{lem-PicNumMze}, the Picard number of $\overline{M}_{0,n\cdot \epsilon_k} $ coincides with that of $F_{m+k}/\!/ G$ given in \eqref{eq4-13}. Hence the morphism $F_{m+k}/\!/ G\to \overline{M}_{0,n\cdot \epsilon_k} $ is an isomorphism as desired. This completes our proof of Theorem \ref{thm4-1}.
\begin{remark}\label{2010rem1}
Let $S \subset \{1, 2, \cdots, n\}$ with $|S| = m-k$. On $\overline{M}_{0,n\cdot \epsilon_{k}}$, the blow-up center for $\overline{M}_{0,n\cdot \epsilon_{k+1}}\to\overline{M}_{0,n\cdot \epsilon_k} $ is the union of $\binom{n}{m-k}$ smooth subvarieties $\Sigma_{n-m+k}^S/\!/ G$. Each $\Sigma_{n-m+k}^S /\!/ G$ parameterizes weighted pointed stable curves with $m-k$ colliding marked points $s_{i_1}, s_{i_2}, \cdots, s_{i_{m-k}}$ for $i_j \in S$. On the other hand, for any member of $\overline{M}_{0,n\cdot \epsilon_k} $, no $m-k+1$ marked points can collide. So we can replace $m-k$ marked points $s_{i_j}$ with $i_j \in S$ by a single marked point which cannot collide with any other marked points. Therefore, an irreducible component $\Sigma_{n-m+k}^S /\!/ G$ of the blow-up center is isomorphic to the moduli space of weighted pointed rational curves $\overline{M}_{0, (1, \epsilon_k, \cdots, \epsilon_k)}$ with $n-m+k+1$ marked points as discovered by Hassett. (See Proposition \ref{reduction}.) \end{remark}
\begin{remark} For the moduli space of \emph{unordered} weighted pointed stable curves $\overline{M}_{0,n\cdot \epsilon_k} /S_n$, we can simply take quotients by the $S_n$ action of the blow-up process in Theorem \ref{thm4-1}. In particular, $\overline{M}_{0,n} /S_n$ is obtained by a sequence of weighted blow-ups from $\left((\mathbb{P}^1)^n/\!/ G\right)/ S_n=\mathbb{P}^n/\!/ G.$ \end{remark}
\section{Log canonical models of $\overline{M}_{0,n} $}\label{sec6}
In this section, we give a relatively elementary and straightforward proof of the following theorem of M. Simpson by using Theorem \ref{thm4-1}. Let $M_{0,n}$ be the moduli space of $n$ \emph{distinct} points in $\mathbb{P}^1$ up to $\mathrm{Aut}(\mathbb{P}^1)$.
\begin{theorem} \label{thm6.1}\emph{(M. Simpson \cite{Simpson})} Let $\alpha$ be a rational number satisfying $\frac{2}{n-1} < \alpha \le 1$ and let $D=\overline{M}_{0,n} -M_{0,n}$ denote the boundary divisor. Then the log canonical model \[
\overline{M}_{0,n} (\alpha) =
\mathrm{Proj}\;\left(\bigoplus_{l \ge 0} H^0(\overline{M}_{0,n} , \mathcal{O} (\lfloor l (K_{\overline{M}_{0,n} }
+\alpha D) \rfloor))\right) \] satisfies the following: \begin{enumerate} \item If $\frac{2}{m-k+2} < \alpha \le \frac{2}{m-k+1}$ for $1\le k\le m-2$, then $\overline{M}_{0,n} (\alpha) \cong \overline{M}_{0,n\cdot \epsilon_k} $.\item If $\frac{2}{n-1} < \alpha \le \frac{2}{m+1}$, then $\overline{M}_{0,n} (\alpha) \cong (\mathbb{P}^1)^n /\!/ G$ where the quotient is taken with respect to the symmetric linearization $\mathcal{O} (1,\cdots,1)$. \end{enumerate} \end{theorem} \begin{remark} Keel and McKernan prove (\cite[Lemma 3.6]{KeelMcKer}) that $K_{\overline{M}_{0,n} } + D$ is ample. Because $$\overline{M}_{0,n\cdot \epsilon_{m-2}} \cong\overline{M}_{0,n\cdot \epsilon_{m-1}} = \overline{M}_{0,n} $$ by definition, we find that (1) above holds for $k=m-1$ as well.\end{remark}
For notational convenience, we denote $(\mathbb{P}^1)^n/\!/ G$ by $\overline{M}_{0,n\cdot \epsilon_0} $ for even $n$ as well. Let $\Sigma_k^S$ denote the subvarieties of $F_k$ defined in \S\ref{sec3} for $S\subset \{1,\cdots,n\}$,
$|S|\le m$. Let $$D_k^S=\Sigma_{n-m+k}^S/\!/ G\subset F_{n-m+k}/\!/ G\cong \overline{M}_{0,n\cdot \epsilon_k} .$$
Then $D_k^S$ is a divisor of $\overline{M}_{0,n\cdot \epsilon_k} $ for $|S|=2$ or $m-k<|S|\le m$. Let $D_k^j=(\cup_{|S|=j}\Sigma_{n-m+k}^S)/\!/ G$ and $D_k=D_k^2+\sum_{j>m-k}D^j_k$. Then $D_k$ is the boundary divisor of $\overline{M}_{0,n\cdot \epsilon_k} $, i.e. $\overline{M}_{0,n\cdot \epsilon_k} -M_{0,n}=D_k$. When $k = m-2$ so $\overline{M}_{0,n\cdot \epsilon_k} \cong \overline{M}_{0,n} $, sometimes we will drop the subscript $k$. Note that if $n$ is even and
$|S|=m$, $D_k^S=D_k^{S^c}=\Sigma^{S,S^c}_{n-m+k}/\!/ G$.
By Theorem \ref{thm4-1}, there is a sequence of blow-ups \begin{equation}\label{eq6.1} \overline{M}_{0,n} \cong \overline{M}_{0,n\cdot \epsilon_{m-2}} \mapright{\varphi_{m-2}} \overline{M}_{0,n\cdot \epsilon_{m-3}} \mapright{\varphi_{m-3}}\cdots \mapright{\varphi_{2}}\overline{M}_{0,n\cdot \epsilon_1} \mapright{\varphi_{1}} \overline{M}_{0,n\cdot \epsilon_0} \end{equation} whose centers are transversal unions of smooth subvarieties, except for $\varphi_1$ when $n$ is even. Note that the irreducible components of the blow-up center of $\varphi_k$ furthermore intersect transversely with $D^j_{k-1}$ for $j>m-k+1$ by Lemma \ref{lem3-1} and by taking quotients. \begin{lemma}\label{computepushandpull} Let $1\le k\le m-2$. \begin{enumerate} \item $\varphi_k^* (D^j_{k-1}) = D^j_k$ for $j> m-k+1$. \item $\varphi_k^* (D^2_{k-1}) = D^2_k + {m-k+1 \choose 2} D^{m-k+1}_k$. \item $\varphi_{k *} (D^j_k) = D^j_{k-1}$ for $j > m-k+1$ or $j=2$. \item $\varphi_{k *} (D^j_k) =0$ for $j=m-k+1$.\end{enumerate} \end{lemma}
\begin{proof} The push-forward formulas (3) and (4) are obvious. Recall from \S\ref{sec4} that $\varphi_k=\bar\psi_{n-m+k}$ is the quotient of $\psi_{n-m+k}:F_{n-m+k}^{ss}\to F_{n-m+k-1}^{ss}$. Suppose $n$ is not even or $k$ is not $1$. Since $D^S_k$ for
$|S|>2$ does not contain any component of the blow-up center,
$\varphi_k^*(D^S_{k-1}) = D^S_k$. If $|S|=2$, $D^S_{k-1}$ contains a component $D^{S'}_{k-1}$ of the blow-up center if and only if $S' \supset S$. Therefore we have \[
\varphi_k^*(D^S_{k-1}) = D^S_k +
\sum_{S' \supset S, |S'| = m-k+1} D^{S'}_k. \]
By adding them up for all $S$ such that $|S|=2$, we obtain (2).
When $n$ is even and $k=1$, we calculate the pull-back before quotient. Let $\pi:\tilde{F}_{m}^s\to F_m^{ss}$ be the map obtained by blowing up $\cup_{|S|=m}\Sigma^{S,S^c}_m$ and removing unstable points. Recall that $\tilde{F}_m^s/G\cong F_{m+1}/\!/ G\cong \overline{M}_{0,n\cdot \epsilon_1} $ and the quotient of $\pi$ is $\varphi_1$. Then a direct calculation similar to the above gives us $\pi^*\Sigma^2_m=\tilde{\Sigma}_m^2+2\binom{m}{2}\tilde{\Sigma}^m_m$
where $\Sigma^2_m=\cup_{|S|=2}\Sigma^S_m$ and $\tilde{\Sigma}_m^2$ is the proper transform of $\Sigma^2_m$ while $\tilde{\Sigma}_m^m$ denotes the exceptional divisor. Note that by the descent lemma (\cite{DN}), the divisor $\Sigma_m^2$ and $\tilde{\Sigma}^2_m$ descend to $D^2_0$ and $D_{1}^2$. However $\tilde{\Sigma}_m^m$ does not descend because the stabilizer group $\mathbb{Z}_2$ in $\bar G=PGL(2)$ of points in $\tilde{\Sigma}_m^m$ acts nontrivially on the normal spaces. But by the descent lemma again, $2\tilde{\Sigma}^m_m$ descends to $D^m_1$. Thus we obtain (2). \end{proof}
Next we calculate the canonical divisors of $\overline{M}_{0,n\cdot \epsilon_k} $. \begin{proposition}\label{canonicaldiv1} \cite[Proposition 1]{Pand} The canonical divisor of $\overline{M}_{0,n} $ is \[
K_{\overline{M}_{0,n} } \cong -\frac{2}{n-1} D^2 + \sum_{j=3}^m
\left(-\frac{2}{n-1}{j \choose 2} +(j-2)\right) D^j. \] \end{proposition}
\begin{lemma}\label{canonicaldiv2} (1) The canonical divisor of $(\mathbb{P}^1)^n /\!/ G$ is \[
K_{(\mathbb{P}^1)^n /\!/ G} \cong -\frac{2}{n-1}D_0^2. \] (2) For $1\le k\le m-2$, the canonical divisor of $\overline{M}_{0,n\cdot \epsilon_k} $ is \[
K_{\overline{M}_{0,n\cdot \epsilon_k} } \cong -\frac{2}{n-1}D_k^2 + \sum_{j=m-k+1}^m
\left(-\frac{2}{n-1}{j \choose 2} + (j-2) \right)D_k^j. \] \end{lemma}
\begin{proof} It is well known by the descent lemma (\cite{DN}) that $\mathrm{Pic}((\mathbb{P}^1)^n /\!/ G)$ is a free abelian group of rank $n$ (see \S\ref{sec5}). The symmetric group $S_n$ acts on $(\mathbb{P}^1)^n /\!/ G$ in the obvious manner, and there is an induced action on its Picard group. Certainly the canonical bundle $K_{(\mathbb{P}^1)^n /\!/ G}$ and $D^2_0$ are $S_n$-invariant. On the other hand, the $S_n$-invariant part of the rational Picard group is a one-dimensional vector space generated by the quotient $D_0^2$ of $\mathcal{O} _{(\mathbb{P}^1)^n}(n-1,\cdots,n-1)$ and hence we have $K_{(\mathbb{P}^1)^n /\!/ G} \cong c D_0^2$ for some $c\in \mathbb{Q}$.
Suppose $n$ is odd. The contraction morphisms $\varphi_k$ are all compositions of smooth blow-ups for $k \ge 1$. From the blow-up formula of canonical divisors (\cite[II Exe. 8.5]{Hartshorne}) and Lemma \ref{computepushandpull}, we deduce that \[
K_{\overline{M}_{0,n\cdot \epsilon_k} } = cD^2_k + \sum_{j=m-k+1}^m
\left(c{j \choose 2} + (j-2)\right)D^j_k. \] Since $\overline{M}_{0,n} \cong \overline{M}_{0,n\cdot \epsilon_{m-2}} $, we get $c = -\frac{2}{n-1}$ from Proposition \ref{canonicaldiv1}.
When $n$ is even, $\varphi_1^*(K_{(\mathbb{P}^1)^n /\!/ G}) = cD_1^2 + c{m \choose 2} D^m_1$ by Lemma \ref{computepushandpull}. We write $K_{\overline{M}_{0,n\cdot \epsilon_1} } = cD_1^2 + (c{m \choose 2} + a)D_1^m$. By the blow-up formula of canonical divisors (\cite[II Exe. 8.5]{Hartshorne}) again, we deduce that \[
K_{\overline{M}_{0,n\cdot \epsilon_k} } = cD_k^2 + \sum_{j=m-k+1}^{m-1}
\left( c{j \choose 2} + (j-2)\right)D_k^j + (c{m \choose 2} + a)D_k^m. \] From Proposition \ref{canonicaldiv1} again, we get $c = -\frac{2}{n-1}$ and $a = m-2$. \end{proof}
We are now ready to prove Theorem \ref{thm6.1}. By \cite[Corollary 3.5]{Simpson}, the theorem is a direct consequence of the following proposition. \begin{proposition}\label{prop-amplerange} (1) $K_{\overline{M}_{0,n\cdot \epsilon_0} }+\alpha D_0$ is ample if $\frac{2}{n-1} < \alpha \le \frac{2}{m+1}$.\\ (2) For $1\le k\le m-2$, $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is ample if $\frac{2}{m-k+2} < \alpha \le \frac{2}{m-k+1}$.
\end{proposition}
Since any positive linear combination of an ample divisor and a nef divisor is ample \cite[Corollary 1.4.10]{Larz1}, it suffices to show the following: \begin{enumerate}\item[(a)] Nefness of $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ for $\alpha =\frac{2}{m-k+1}+s$ where $s$ is some (small) positive number; \item[(b)] Ampleness of $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ for $\alpha=\frac{2}{m-k+2}+t$ where $t$ is \emph{any} sufficiently small positive number. \end{enumerate} We will use Alexeev and Swinarski's intersection number calculation in \cite{AlexSwin} to achieve (a) (See Lemma \ref{lem-otherextreme}.) and then (b) will immediately follow from our Theorem \ref{thm4-1}.
\begin{definition} (\cite{Simpson}) Let $\varphi=\varphi_{n\cdot\epsilon_{m-2}, n\cdot\epsilon_k} : \overline{M}_{0,n} \to \overline{M}_{0,n\cdot \epsilon_k} $ be the natural contraction map (\S\ref{sec2.1}). For $k = 0,1,\cdots,m-2$ and $\alpha > 0$, define $A(k, \alpha)$ by \begin{align*}
A(k, \alpha) &:= \varphi^*(K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k)\\
&= \sum_{j = 2}^{m-k} {j \choose 2}\left(\alpha - \frac{2}{n-1}\right)D^j
+ \sum_{j=m-k+1}^m \left(\alpha - \frac{2}{n-1}{j \choose 2}
+ j-2\right)D^j. \end{align*} \end{definition} Notice that the last equality is an easy consequence of Lemma \ref{computepushandpull}.
By \cite{Kapranov}, there is a birational morphism $\pi_{\vec{x}} : \overline{M}_{0,n} \to (\mathbb{P}^1)^n /\!/ _{\vec{x}} G$ for any linearization $\vec{x} = (x_1, \cdots, x_n) \in \mathbb{Q}_+^n$. Since the canonical ample line bundle $\mathcal{O} _{(\mathbb{P}^1)^n }(x_1, \cdots, x_n)/\!/ G$ over $(\mathbb{P}^1)^n /\!/ _{\vec{x}} G$ is ample, its pull-back $L_{\vec{x}}$ by $\pi_{\vec{x}}$ is certainly nef.
\begin{definition}\cite[Definition 2.3]{AlexSwin}\label{def-symnefdiv} Let $x$ be a rational number such that $\frac{1}{n-1} \le x \le \frac{2}{n}$. Set $\vec{x} = \mathcal{O} (x, \cdots, x, 2-(n-1)x)$. Define \[
V(x, n) := \frac{1}{(n-1)!}\bigotimes_{\tau \in S_n} L_{\tau
\vec{x}}. \] Obviously the symmetric group $S_n$ acts on $\vec{x}$ by permuting the components of $\vec{x}$. \end{definition} Notice that $V(x,n)$ is nef because it is a positive linear combination of nef line bundles.
\begin{definition}\cite[Definition 3.5]{AlexSwin} Let $C_{a,b,c,d}$ be \emph{any} vital curve class corresponding to a partition $S_a \sqcup S_b \sqcup S_c \sqcup S_d$ of $\{1, 2, \cdots, n\}$
such that $|S_a| = a, \cdots, |S_d|=d$.\\ (1) Suppose $n=2m+1$ is odd. Let $C_i = C_{1, 1, m-i, m+i-1}$, for $i = 1, 2, \cdots, m-1$.\\ (2) Suppose $n=2m$ is even. Let $C_i = C_{1,1,m-i, m+i-2}$ for $i = 1, 2, \cdots, m-1$. \end{definition}
By \cite[Corollary 4.4]{KeelMcKer}, the following computation is straightforward. \begin{lemma}\label{lem-intAkalpha} The intersection numbers $C_i \cdot A(k, \alpha)$ are \[
C_i \cdot A(k, \alpha) = \left\{\begin{array}{ll}
\alpha&\mbox{if } i < k\\
\left(2-{m-k \choose 2}\right)\alpha + m-k-2&\mbox{if } i = k\\
\left({m-k+1 \choose 2}-1\right)\alpha-m+k+1
&\mbox{if } i = k+1\\
0&\mbox{if } i > k+1.
\end{array}\right. \] \end{lemma}
This lemma is in fact a slight generalization of \cite[Lemma 3.7]{AlexSwin}, where the intersection numbers are calculated only for $\alpha=\frac{2}{m-k+1}$.
The $S_n$-invariant subspace of Neron-Severi vector space of $\overline{M}_{0,n} $ is generated by $D^j$ for $j=2,3, \cdots, m$ (\cite[Theorem 1.3]{KeelMcKer}). Therefore, in order to determine the linear dependency of $S_n$-invariant divisors, we find $m-1$ linearly independent curve classes, and calculate the intersection numbers of divisors with these curves classes. Let $U$ be an $(m-1) \times (m-1)$ matrix with entries $U_{ij} = (C_i \cdot V(\frac{1}{m+j}, n))$ for $1 \le i,j \le m-1$. Since $V(\frac{1}{m+j}, n)$'s are all nef, all entries of $U$ are nonnegative. \begin{lemma}\cite[\S3.2, \S3.3]{AlexSwin}\label{lem-nefbdl} (1) The intersection matrix $U$ is upper triangular and if $i \le j$, then $U_{ij} > 0$. In particular, $U$ is invertible.\\ (2) Let $\vec{a} = ((C_1 \cdot A(k, \frac{2}{m-k+1})), \cdots, (C_{m-1} \cdot A(k,\frac{2}{m-k+1})))^t$ be the column vector of intersection numbers. Let $\vec{c} = (c_1, c_2, \cdots, c_{m-1})^t$ be the unique solution of the system of linear equations $U \vec{c} = \vec{a}$. Then $c_i > 0$ for $i \le k+1$ and $c_i = 0$ for $i \ge k+2$. \end{lemma}
This lemma implies that $A(k,\frac{2}{m-k+1})$ is a positive linear combination of $V(\frac{1}{m+j}, n)$ for $j = 1, 2, \cdots, k+1$. Note that $A(k,\frac{2}{m-k+2}) = A(k-1,\frac{2}{m-(k-1)+1})$ and that for $\frac{2}{m-k+2} \le \alpha \le \frac{2}{m-k+1}$, $A(k,\alpha)$ is a nonnegative linear combination of $A(k,\frac{2}{m-k+2})$ and $A(k,\frac{2}{m-k+1})$. Hence by the numerical result in Lemma \ref{lem-nefbdl} and the convexity of the nef cone, $A(k,\alpha)$ is nef for $\frac{2}{m-k+2} \le \alpha \le \frac{2}{m-k+1}$. Actually we can slightly improve this result by using continuity.
\begin{lemma}\label{lem-otherextreme} For each $k = 0,1,\cdots,m-2$, there exists $s > 0$ such that $A(k,\alpha)$ is nef for $\frac{2}{m-k+2} \le \alpha \le \frac{2}{m-k+1}+s$. Therefore, $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is nef for $\frac{2}{m-k+2} \le \alpha \le \frac{2}{m-k+1}+s$. \end{lemma} \begin{proof} Let $\vec{a}^\alpha = ((C_1 \cdot A(k,\alpha)), \cdots, (C_{m-1} \cdot A(k,\alpha)))^t$ and let $\vec{c}^\alpha = (c^\alpha_1, \cdots, c^\alpha_{m-1})^t$ be the unique solution of equation $U \vec{c}^\alpha = \vec{a}^\alpha$. Then by continuity, the components $c^\alpha_1, c^\alpha_2, \cdots, c^\alpha_{k+1}$ remain positive when $\alpha$ is slightly increased. By Lemma \ref{lem-intAkalpha} and the upper triangularity of $U$, $c^\alpha_i$ for $i > k+1$ are all zero. Hence $A(k, \alpha)$ is still nef for $\alpha = \frac{2}{m-k+1}+s$ with sufficiently small $s > 0$. \end{proof}
With this nefness result, the proof of Proposition \ref{prop-amplerange} is obtained as a quick application of Theorem \ref{thm4-1}.
\begin{proof}[Proof of Proposition \ref{prop-amplerange}] We prove that in fact $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is ample for $\frac{2}{m-k+2} < \alpha < \frac{2}{m-k+1}+s$ where $s$ is the small positive rational number in Lemma \ref{lem-otherextreme}. Since a positive linear combination of an ample divisor and a nef divisor is ample by \cite[Corollary 1.4.10]{Larz1}, it suffices to show that $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is ample when $\alpha= \frac{2}{m-k+2}+t$ for any sufficiently small $t>0$ by Lemma \ref{lem-otherextreme}.
We use induction on $k$. It is certainly true when $k=0$ by Lemma \ref{canonicaldiv2} because $D^2_0$ is ample as the quotient of $\mathcal{O} (n-1,\cdots,n-1)$. Suppose $K_{\overline{M}_{0,n\cdot \epsilon_{k-1}} }+\alpha D_{k-1}$ is ample for $\frac{2}{m-k+3} < \alpha < \frac{2}{m-k+2}+s'$ where $s'$ is the small positive number in Lemma \ref{lem-otherextreme} for $k-1$. Since $\varphi_k$ is a blow-up with exceptional divisor $D_k^{m-k+1}$, \[
\varphi_k^*(K_{\overline{M}_{0,n\cdot \epsilon_{k-1}} }+\alpha D_{k-1})-\delta D_k^{m-k+1} \] is ample for any sufficiently small $\delta>0$ by \cite[II 7.10]{Hartshorne}. A direct computation with Lemmas \ref{computepushandpull} and \ref{canonicaldiv2} provides us with \[ \varphi_k^*(K_{\overline{M}_{0,n\cdot \epsilon_{k-1}} }+\alpha D_{k-1})-\delta D_k^{m-k+1}\] \[ =K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_{k}+\left( \binom{m-k+1}{2}\alpha -\alpha-(m-k-1)-\delta \right)D_k^{m-k+1}. \] If $\alpha=\frac{2}{m-k+2}$, $\binom{m-k+1}{2}\alpha -\alpha-(m-k-1)=0$ and thus we can find $\alpha>\frac{2}{m-k+2}$ satisfying $\binom{m-k+1}{2}\alpha -\alpha-(m-k-1)-\delta =0$. If $\delta$ decreases to $0$, the solution $\alpha$ decreases to $\frac{2}{m-k+2}$. Hence $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is ample when $\alpha= \frac{2}{m-k+2}+t$ for any sufficiently small $t>0$ as desired. \end{proof}
\begin{remark}\label{rem-compproofs} There are already two different proofs of M. Simpson's theorem (Theorem \ref{thm6.1}) given by Fedorchuk--Smyth \cite{FedoSmyt}, and by Alexeev--Swinarski \cite{AlexSwin} without relying on Fulton's conjecture. Here we give a brief outline of the two proofs.
In \cite[Corollary 3.5]{Simpson}, Simpson proves that Theorem \ref{thm6.1} is an immediate consequence of the ampleness of $K_{\overline{M}_{0,n\cdot \epsilon_k} } + \alpha D_k$ for $\frac{2}{m-k+2} < \alpha \le \frac{2}{m-k+1}$ (Proposition \ref{prop-amplerange}). The differences in the proofs of Theorem \ref{thm6.1} reside solely in different ways of proving Proposition \ref{prop-amplerange}.
The ampleness of $K_{\overline{M}_{0,n\cdot \epsilon_k} } + \alpha D_k$ follows if the divisor $A(k, \alpha) = \varphi^*(K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k)$ is nef and its linear system contracts only $\varphi$-exceptional curves. Here, $\varphi : \overline{M}_{0,n} \to \overline{M}_{0,n\cdot \epsilon_k} $ is the natural contraction map (\S\ref{sec2.1}). Alexeev and Swinarski prove Proposition \ref{prop-amplerange} in two stages: First the nefness of $A(k, \alpha)$ for suitable ranges is proved and next they show that the divisors are the pull-backs of ample line bundles on $\overline{M}_{0,n\cdot \epsilon_k} $. Lemma \ref{lem-otherextreme} above is only a negligible improvement of the nefness result in \cite[\S3]{AlexSwin}. In \cite[Theorem 4.1]{AlexSwin}, they give a criterion for a line bundle to be the pull-back of an ample line bundle on $\overline{M}_{0,n\cdot \epsilon_k} $. After some rather sophisticated combinatorial computations, they prove in \cite[Proposition 4.2]{AlexSwin} that $A(k, \alpha)$ satisfies the desired properties.
On the other hand, Fedorchuk and Smyth show that $K_{\overline{M}_{0,n\cdot \epsilon_k} } + \alpha D_k$ is ample as follows. Firstly, by applying the Grothendieck-Riemann-Roch theorem, they represent $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ as a linear combination of boundary divisors and tautological $\psi$-classes. Secondly, for such a linear combination of divisor classes and for a complete curve in $\overline{M}_{0,n\cdot \epsilon_k} $ parameterizing a family of curves with smooth general member, they perform brilliant computations and get several inequalities satisfied by their intersection numbers (\cite[Proposition 3.2]{FedoSmyt}). Combining these inequalities, they prove in particular that $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ has positive intersection with any complete curve on $\overline{M}_{0,n\cdot \epsilon_k} $ with smooth general member (\cite[Theorem 4.3]{FedoSmyt}). Thirdly, they prove that if the divisor class intersects positively with any curve with smooth general member, then it intersects positively with all curves by an induction argument on the dimension. Thus they establish the fact that $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ has positive intersection with all curves. Lastly, they prove that the same property holds even if $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ is perturbed by any small linear combination of boundary divisors. Since the boundary divisors generate the Neron-Severi vector space, $K_{\overline{M}_{0,n\cdot \epsilon_k} }+\alpha D_k$ lies in the interior of the nef cone and the desired ampleness follows. \end{remark}
\section{The Picard groups of $\overline{M}_{0,n\cdot \epsilon_k} $}\label{sec5}
As a byproduct of our GIT construction of the moduli spaces of weighted pointed curves, we give a basis of the \emph{integral} Picard group of $\overline{M}_{0,n\cdot \epsilon_k} $ for $0 \le k \le m-2$.
Let $e_i$ be the $i$-th standard basis vector of $\mathbb{Z}^n$. For notational convenience, set $e_{n+1} = e_1$. For $S\subset
\{1,2,\cdots,n\}$, let $D_k^S=\Sigma_{n-m+k}^S/\!/ G\subset F_{n-m+k}/\!/ G\cong \overline{M}_{0,n\cdot \epsilon_k} $. Note that if $m-k<|S|\le m$ or
$|S|=2$, $D_k^S$ is a divisor of $\overline{M}_{0,n\cdot \epsilon_k} $.
\begin{theorem}\label{picardgroup} (1) If $n$ is odd, then the Picard group of $\overline{M}_{0,n\cdot \epsilon_k} $ is \[
\mathrm{Pic} (\overline{M}_{0,n\cdot \epsilon_k} ) \cong \bigoplus_{m-k < |S| \le m} \mathbb{Z} D^S_k \oplus
\bigoplus_{i=1}^n \mathbb{Z} D_k^{\{i, i+1\}} \] for $0 \le k \le m-2$.\\ (2) If $n$ is even, then the Picard group of $\overline{M}_{0,n\cdot \epsilon_k} $ is \[
\mathrm{Pic} (\overline{M}_{0,n\cdot \epsilon_k} ) \cong \bigoplus_{m-k < |S| < m} \mathbb{Z} D^S_k \oplus
\bigoplus_{1 \in S, |S| = m} \mathbb{Z} D^S_k \oplus
\bigoplus_{i=1}^{n-1} \mathbb{Z} D_k^{\{i, i+1\}}
\oplus \mathbb{Z} D_k^{\{1, n-1\}}. \] for $1 \le k \le m-2$. \end{theorem}
\begin{proof} Since the codimensions of unstable strata in $(\mathbb{P}^1)^n$ are greater than 1, \[\mathrm{Pic} (((\mathbb{P}^1)^n)^{ss}) = \mathrm{Pic} ((\mathbb{P}^1)^n) \cong \bigoplus_{1 \le i \le n} \mathbb{Z} \mathcal{O} (e_i).\] For all $x \in ((\mathbb{P}^1)^n)^s$, $G_x \cong \{\pm 1\}$. If $n$ is even and $x$ is a strictly semistable point with closed orbit, then $G_x \cong \mathbb{C}^*$. Since $G$ is connected, $G$ acts on the discrete set $\mathrm{Pic}((\mathbb{P}^1)^n)$ trivially. By Kempf's descent lemma (\cite[Theorem 2.3]{DN}) and by checking the actions of the stabilizers on the fibers of line bundles, we deduce that $\mathcal{O} (a_1, a_2, \cdots, a_n)$ descends to $((\mathbb{P}^1)^n)^{ss} /\!/ G$ if and only if $2$ divides $\sum a_i$.
Consider the case when $n$ is odd first. It is elementary to check that the subgroup $\{(a_1, \cdots, a_n) \in \mathbb{Z}^n | \sum a_i \in 2\mathbb{Z} \}$ is free abelian of rank $n$ and $\{e_i + e_{i+1}\}$ for $1 \le i \le n$ form a basis of this group. Furthermore, for $S=\{i,j\}$ with $i\ne j$, the big diagonal $(\Sigma_{m+1}^S)^s = (\Sigma_0^S)^s$ satisfies $\mathcal{O} ((\Sigma_0^S)^s) \cong \mathcal{O} _{F_0^s}(e_i+e_j)$. Hence in $F_{m+1}/\!/ G = F_0 /\!/ G$, $\mathcal{O} (\Sigma_0^S /\!/ G) \cong \mathcal{O} (e_i + e_j)$. Therefore we have \[
\mathrm{Pic} (\overline{M}_{0,n\cdot \epsilon_0} ) = \mathrm{Pic} (F_{m+1} /\!/ G)
= \bigoplus_{i = 1}^n \mathbb{Z} D_0^{\{i, i+1\}}. \]
By Theorem \ref{thm4-1}, the contraction morphism $\varphi_k : \overline{M}_{0,n\cdot \epsilon_k} \to \overline{M}_{0,n\cdot \epsilon_{k-1}} $ is the blow-up along the union of transversally intersecting smooth subvarieties. By \S2.3, this is a composition of smooth blow-ups. In $\overline{M}_{0,n\cdot \epsilon_k} $, the exceptional divisors are
$D^S_k$ for $|S| = {m-k+1}$. So the Picard group of $\overline{M}_{0,n\cdot \epsilon_k} $ is \[
\mathrm{Pic} (\overline{M}_{0,n\cdot \epsilon_k} ) \cong \varphi_k^* \mathrm{Pic} (\overline{M}_{0,n\cdot \epsilon_{k-1}} ) \oplus
\bigoplus_{|S| = {m-k+1}}\mathbb{Z} D^S_k. \]
by \cite[II Exe. 8.5]{Hartshorne}. For any $S$ with $|S| = 2$, $D_{k-1}^S$ contains the blow-up center $D_{k-1}^{S'}$ if $S \subset {S'}$. So $\varphi_k^* (D_{k-1}^S)$ is the sum of $D_k^S$
and a linear combination of $D_k^{S'}$ for $S' \supset S, |S'| =
m-k+1$. If $|S| > 2$, then $\varphi_k^* D^S_{k-1} = D^S_k$ since it does not contain any blow-up centers. After obvious basis change, we get the desired result by induction.
Now suppose that $n$ is even. Still the group $\{ (a_1, \cdots, a_n) \in \mathbb{Z}^n | \sum a_i \in 2\mathbb{Z}\}$ is free abelian of rank $n$ and $\{e_i + e_{i+1}\}_{1 \le i \le n-1} \cup \{e_1 + e_{n-1}\}$ form a basis. In $F_m/\!/ G = F_0 /\!/ G$, $\mathcal{O} (\Sigma_m^S/\!/ G) \cong \mathcal{O} (e_i + e_j)$ when $S = \{i, j\}$ with $i\ne j$. Hence \[
\mathrm{Pic} (F_m /\!/ G) = \bigoplus_{i=1}^{n-1}
\mathbb{Z} D_0^{\{i, i+1\}} \oplus \mathbb{Z} D_0^{\{1, n-1\}}. \]
In $\tilde{F}_m$, the unstable loci have codimension two. Therefore we have \[
\mathrm{Pic} (\tilde{F}_m^s) = \mathrm{Pic}(\tilde{F}_m)
= \pi_m^* \mathrm{Pic} (F_m^{ss}) \oplus \bigoplus_{1 \in S, |S| = m}
\mathbb{Z} \tilde{\Sigma}_m^S, \] where $\pi_m : \tilde{F}_m \to F_m^{ss}$ is the blow-up morphism, and $\tilde{\Sigma}_m^S = \pi_m^{-1}(\Sigma_m^S \cap \Sigma_m^{S^c})$
for $|S|=m$.
By Kempf's descent lemma, $\mathrm{Pic}(F_{m+k}/\!/ G)$ is a subgroup of $\mathrm{Pic}(F_{m+k}^{ss})$ and $\mathrm{Pic}(\tilde{F}_{m+k}^s)$ for $0 \le k \le m-2$. From our blow-up description, all arrows except possibly $\bar{\psi}_{m+1}^*$ in the following commutative diagram \[
\xymatrix{\mathrm{Pic}(\tilde{F}_{m+1}^s)&
\mathrm{Pic}(\tilde{F}_m^s)\ar@{=}[l]_{\tilde{\psi}_{m+1}^*}\\
\mathrm{Pic}(F_{m+1}^{ss})\ar[u]_{\pi_{m+1}^*}&
\mathrm{Pic}(F_m^{ss})\ar[l]_{\psi_{m+1}^*}\ar[u]_{\pi_m^*}\\
\mathrm{Pic}(F_{m+1}/\!/ G)\ar[u]&
\mathrm{Pic}(F_m /\!/ G)\ar[l]_{\bar{\psi}_{m+1}^*}\ar[u]} \] are injective, and thus the bottom arrow $\bar{\psi}_{m+1}^*$ is also injective. Hence $\mathrm{Pic}(F_{m+1}/\!/ G)$ contains the pull-back of $\mathrm{Pic}(F_m /\!/ G)$ as a subgroup. Also, for the quotient map $p : \tilde{F}_{m+1}^s \to F_{m+1}/\!/ G$, $p^*
D_1^S = \tilde{\Sigma}_{m+1}^S$ for $|S| = m$. Let $H$ be the subgroup of $\mathrm{Pic}(\tilde{F}_{m+1}^s)$ generated by the images of $\bar{\psi}_{m+1}^* \mathrm{Pic}(F_m /\!/ G)$ and the divisors $D_1^S$ with $|S| = m$. By definition, the image of $\mathrm{Pic}(F_{m+1}/\!/ G)$ contains $H$. Now by checking the action of stabilizers on the fibers of line bundles, it is easy to see that no line bundles in $\mathrm{Pic}(\tilde{F}_{m+1}^s)- H$ descend to $F_{m+1}/\!/ G$. Hence we have \begin{equation}\label{mzeo1}
\mathrm{Pic}(\overline{M}_{0,n\cdot \epsilon_1} ) = \mathrm{Pic}(F_{m+1} /\!/ G)
= \bar{\psi}_{m+1}^* \mathrm{Pic}(F_m /\!/ G) \oplus
\bigoplus_{1 \in S, |S| = m}\mathbb{Z} D_1^S. \end{equation}
For an $S$ with $|S|=2$, $\Sigma_m^S/\!/ G$ contains the blow-up center $\Sigma_m^{S'} \cap \Sigma_m^{{S'}^c}/\!/ G$ if $S \subset S'$ or $S \subset {S'}^c$. So $\bar{\psi}_{m+1}^* (D_0^S)$ is the sum of $D_{1}^S$ and a linear combination of divisors $D_1^{S'}$
for $S' \supset S$ or ${S'}^c\supset S$ with $|S'| = m$. From this and (\ref{mzeo1}), we get the following by an obvious basis change: \[
\mathrm{Pic}(\overline{M}_{0,n\cdot \epsilon_1} ) = \bigoplus_{1 \in S, |S| = m}\mathbb{Z} D_1^S
\oplus \bigoplus_{i=1}^{n-1} \mathbb{Z} D_1^{\{i,i+1\}}
\oplus \mathbb{Z} D_1^{\{1,n-1\}}. \] The rest of the proof is identical to the odd $n$ case and so we omit it. \end{proof}
\end{document} |
\begin{document}
\title{Cohesive dynamics and brittle fracture}
\begin{abstract} We formulate a nonlocal cohesive model for calculating the deformation state inside a cracking body. In this model a more complete set of physical properties including elastic and softening behavior are assigned to each point in the medium. We work within the small deformation setting and use the peridynamic formulation. Here strains are calculated as difference quotients. The constitutive relation is given by a nonlocal cohesive law relating force to strain. At each instant of the evolution we identify a process zone where strains lie above a threshold value. Perturbation analysis shows that jump discontinuities within the process zone can become unstable and grow. We derive an explicit inequality that shows that the size of the process zone is controlled by the ratio given by the length scale of nonlocal interaction divided by the characteristic dimension of the sample. The process zone is shown to concentrate on a set of zero volume in the limit where the length scale of nonlocal interaction vanishes with respect to the size of the domain. In this limit the dynamic evolution is seen to have bounded linear elastic energy and Griffith surface energy. The limit dynamics corresponds to the simultaneous evolution of linear elastic displacement and the fracture set across which the displacement is discontinuous. We conclude illustrating how the approach developed here can be applied to limits of dynamics associated with other energies that $\Gamma$-converge to the Griffith fracture energy. \end{abstract}
\begin{flushleft} {\bf Keywords:} \,\,peridynamics, dynamic brittle fracture, fracture toughness, process zone, $\Gamma$-convergence \end{flushleft}
\begin{flushleft} {\bf Mathematics Subject Classification}: 34A34, 74H55, 74R10 \end{flushleft}
\pagestyle{myheadings}
\markboth{R. LIPTON}{Cohesive Dynamics and Brittle Fracture}
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{Introduction} \label{introduction}
Dynamic brittle fracture is a multiscale phenomenon operating across a wide range of length and time scales. Contemporary approaches to brittle fracture modeling can be broadly characterized as bottom-up and top-down. Bottom-up approaches take into account the discreteness of fracture at the smallest length scales and are expressed through lattice models. This approach has provided insight into the dynamics of the fracture process \cite{6,16,17,21}. Complementary to the bottom-up approaches are top-down computational approaches using cohesive surface elements \cite{Cox}, \cite{14}, \cite{22}, \cite{Remmers}. In this formulation the details of the process zone are collapsed onto an interfacial element with a force traction law given by the cohesive zone model \cite{Barenblatt}, \cite{Dougdale}. Cohesive surfaces have been applied within the extended finite element method \cite{5}, \cite{Duarte}, \cite{18} to minimize the effects of mesh dependence on free crack paths. Higher order multi-scale cohesive surface models involving excess properties and differential momentum balance are developed in \cite{WaltonSendova}. Comparisons between different cohesive surface models are given in \cite{Falk}. More recently variational approaches to brittle fracture based on quasi-static evolutions of global minimizers of Griffith's fracture energy have been developed \cite{FrancfortMarigo}, \cite{BourdinFrancfortMarigo}, and \cite{FrancfortLarsen}. Phase field approaches have also been developed to model brittle fracture evolution from a continuum perspective \cite{BourdinFrancfortMarigo}, \cite{BourdinLarsenRichardson}, \cite{Miehe}, \cite{Hughes}, \cite{Ortiz}, \cite{Wheeler}. In the phase field approach a second field is introduced to interpolate between cracked and undamaged elastic material. The evolution of the phase field is used to capture the trajectory of the crack.
A concurrent development is the emergence of the peridynamic formulation introduced in \cite{Silling1} and \cite{States}. Peridynamics is a nonlocal formulation of continuum mechanics expressed in terms of displacement differences as opposed to spatial derivatives of the displacement field. These features provide the ability to simultaneously simulate kinematics involving both smooth displacements and defect evolution. Numerical simulations based on peridynamic modeling exhibit the formation and evolution of sharp interfaces associated with defects and fracture \cite{Bobaru1}, \cite{BhattacharyaDyal}, \cite{Foster}, \cite{Bobaru2}, \cite{SillingBobaru}, \cite{SillingAscari2}, \cite{WecknerAbeyaratne}. In an independent development nonlocal formulations have been introduced for modeling the passage from discrete to continuum limits of energies for quasistatic fracture models \cite{Alicandro}, \cite{Buttazzo}, \cite{Braides}, \cite{BraidesGelli}, for smeared crack models \cite{Negri} and for image processing \cite{Gobbino1} and \cite{Gobbino3}. A complete review of contemporary methods is beyond the scope of this paper however the reader is referred to \cite{Agwai}, \cite{Bazant}, \cite{BelitchReview}, \cite{Bouchbinder}, \cite{BourdinFrancfortMarigo}, \cite{Braides1}, \cite{Braides2} for a more complete guide to the literature.
In this paper we formulate a nonlocal, multi-scale, cohesive continuum model for assessing the deformation state inside a cracking body. This model is expressed using the peridynamic formulation introduced in \cite{Silling1}, \cite{States}. Here strains are calculated as difference quotients of displacements between two points $x$ and $y$. In this approach the force between two points $x$ and $y$ is related to the strain through a nonlinear cohesive law that depends upon the magnitude and direction of the strain. The forces are initially elastic for small strains and soften beyond a critical strain. We introduce the dimensionless length scale $\epsilon$ given by the ratio of the length scale of nonlocal interaction to the characteristic length of the material sample $D$. Working in the new rescaled coordinates the nonlocal interactions between $x$ and its neighbors $y$ occur within a horizon of radius $\epsilon$ about $x$ and the characteristic length of $D$ is taken to be unity. This neighborhood of $x$ is denoted by $\mathcal{H}_\epsilon(x)$.
To define the potential energy we first assume the deformation $z$ is given by $z(x)=u(x)+x$ where $u$ is the displacement field. The strain between two points $x$ and $y$ inside $D$ is given by \begin{eqnarray}
\mathcal{S}=\frac{|z(y)-z(x)|-|y-x|}{|y-x|}. \label{bigdefstrain} \end{eqnarray}
In this treatment we assume small deformation kinematics and the displacements are small (infinitesimal) relative to the size of the body $D$. Under this hypothesis \eqref{bigdefstrain} is linearized and the strain is given by $$\mathcal{S}=\frac{u(y)-u(x)}{|y-x|}\cdot e,$$ where $e=\frac{y-x}{|y-x|}$. Both two and three dimensional problems will be considered and the dimension is denoted by $d=2,3$. The cohesive model is characterized through a nonlocal potential $$W^\epsilon(\mathcal{S},y-x),$$ associated with points $x$ and $y$. The associated energy density is obtained on integrating over $y$ for $x$ fixed and is given by \begin{eqnarray} {\bf{W}}^\epsilon(\mathcal{S},x)=\frac{1}{V_d}\int_{\mathcal{H}_\epsilon(x)}\,W^\epsilon(\mathcal{S},y-x)\,dy \label{densityy} \end{eqnarray} where $V_d=\epsilon^d\omega_d$ and $\omega_d$ is the (area) volume of the unit ball in dimensions $d=(2)3$. The potential energy of the body is given by \begin{eqnarray} PD^\epsilon(u)=\int_{D}\,{\bf{W}}^\epsilon(\mathcal{S},x)\,dx \label{the peridynamicenergy} \end{eqnarray}
\begin{figure}
\caption{\bf Cohesive potential as a function of $\mathcal{S}$ for $x$ and $y$ fixed.}
\label{ConvexConcave}
\end{figure}
\begin{figure}
\caption{{\bf Cohesive relation between force and strain for $x$ and $y$ fixed.}}
\label{SofteningBond}
\end{figure}
We introduce the class of potentials associated with a cohesive force that is initially elastic and then softens after a critical strain. These potentials are of the generic form given by \begin{eqnarray}
W^\epsilon(\mathcal{S},y-x)=|y-x|\mathcal{W}^\epsilon(\mathcal{S},y-x), \label{potentialdensity1a} \end{eqnarray} where $\mathcal{W}^\epsilon(\mathcal{S},y-x)$ is the peridynamic potential per unit length associated with $x$ and $y$ given by \begin{eqnarray}
\mathcal{W}^\epsilon(\mathcal{S},y-x)=\frac{1}{\epsilon}J^\epsilon\left(|y-x|\right)\left(\frac{1}{|y-x|}f\left(|y-x|\mathcal{S}^2\right)\right). \label{potentialdensity2a} \end{eqnarray} These potentials are of a general form and are associated with potential functions $f:[0,\infty)\rightarrow\mathbb{R}$ that are positive, smooth and concave with the properties \begin{eqnarray} \lim_{r\rightarrow 0^+}\frac{f(r)}{r}=f'(0)>0,&&\lim_{r\rightarrow\infty}f(r)=f_\infty <\infty. \label{properties} \end{eqnarray}
The composition of $f$ with $|y-x|\mathcal{S}^2$ given by \eqref{potentialdensity2a} delivers the convex-concave dependence of $\mathcal{W}^\epsilon(\mathcal{S},y-x)$ on $\mathcal{S}$ for fixed values of $x$ and $y$, see Figure \ref{ConvexConcave}. Here $J^\epsilon(|y-x|)$ is used to prescribe the influence of separation length $|y-x |$ on the force between $x$ and $y$ with $0\leq J^\epsilon(|y-x|)<M $ for $0\leq |y-x|< \epsilon$ and $J^\epsilon(|y-x|)=0$ for $\epsilon\leq |y-x |$. For fixed $x$ and $y$ the inflection point for the potential energy \eqref{potentialdensity2a} with respect to the strain $\mathcal{S}$ is given by $\overline{r}/\sqrt{|y-x|}$, where $\overline{r}$ is the inflection point for the function $r:\rightarrow f(r^2)$, see Figure \ref{ConvexConcave}. This choice of potential delivers an initially elastic and then softening constitutive law for the force per unit length along the direction $e$ given by \begin{eqnarray}
\hbox{\rm force per unit length}=\partial_\mathcal{S} \mathcal{W}^\epsilon(\mathcal{S},y-x)=\frac{2}{\epsilon}\left(J^\epsilon(|y-x|)f'\left(|y-x|\mathcal{S}^2\right)\mathcal{S}\right). \label{forcestate} \end{eqnarray} The force between points $y$ and $x$ begins to drop when the strain $\mathcal{S}$ exceeds the critical strain \begin{eqnarray}
|\mathcal{S}|>\frac{\overline{r}}{\sqrt{|y-x|}}=\mathcal{S}_c, \label{sqrtsingularity} \end{eqnarray} see Figure \ref{SofteningBond}. This is the same singularity strength associated with a strain concentration in the vicinity of a crack tip as in the classic theory of brittle fracture \cite{Freund}. A future goal will be to inform the cohesive model introduced here by resolving atomistic or molecular dynamics across smaller length scales.
We apply the principle of least action to recover the cohesive equation of motion describing the state of displacement inside the body $D\subset\mathbb{R}^d$ given by \begin{eqnarray}
\rho\partial_{tt}^2 u(t,x)=2\frac{1}{V_d}\int_{\mathcal{H}_\epsilon(x)}\,\partial_\mathcal{S} \mathcal{W}^\epsilon(\mathcal{S},y-x)\frac{y-x}{|y-x|}\,dy+b(t,x) \label{eqofmotion} \end{eqnarray}
where $\rho$ is the density and $b(t,x)$ is the body force. This is a well posed formulation in that existence and uniqueness (within a suitable class of evolutions) can be shown, see Section \ref{sec2} and Theorem \ref{existenceuniqueness} of Section \ref{EE}. In this model a more complete set of physical properties including elastic and softening behavior are assigned to each point in the medium. Here each point in the domain is connected to its neighbors by a cohesive law, see Figure \ref{SofteningBond}. We define the {\em process zone} to be the collection of points $x$ inside the body $D$ associated with peridynamic neighborhoods $\mathcal{H}_\epsilon(x)$ for which the strain $\mathcal{S}$ between $x$ and $y$ exceeds a threshold value for a sufficiently large proportion of points $y$ inside $\mathcal{H}_\epsilon(x)$. Here the force vs. strain law departs from linear behavior when the strain exceeds the threshold value. The mathematically precise definition of the process zone is given in Section \ref{sec4}, see Definition \ref{processZone}. In this model the {\em fracture set} is associated with peridynamic neighborhoods $\mathcal{H}_\epsilon(x)$ with strains $|\mathcal{S}|>\mathcal{S}_c$ for which the force vs. strain law begins to soften and is defined in Section \ref{sec4}, see Definition \ref{Fractureset}. The nonlinear elastic--softening behavior put forth in this paper is similar to the ones used in cohesive zone models \cite{Dougdale}, \cite{Barenblatt}. However for this model the dynamics selects whether a material point lies inside or outside the process zone. The principal feature of the cohesive dynamics model introduced here is that the evolution of the process zone together with the fracture set is governed by one equation consistent with Newton's second law given by \eqref{eqofmotion}. This is a characteristic feature of peridynamic models \cite{Silling1}, \cite{States} and lattice formulations for fracture evolution \cite{6,16,17,21}.
In this paper the goal is to characterize the size of the process zone for cohesive dynamics as a function of domain size and the length scale of the nonlocal forces. The second focus is to identify properties of the distinguished limit evolution for these models in the limit of vanishing non-locality as characterized by the $\epsilon\rightarrow 0$ limit. For the model introduced here the parameter that controls the size of the process zone is given by the radius of the horizon $\epsilon$. We derive an explicit inequality that shows that the size of the process zone is controlled by the horizon radius. Perturbation analysis shows that jump discontinuities within the process zone can become unstable and grow. This analysis shows that {\em the horizon size $\epsilon$ for cohesive dynamics is a modeling parameter} that can be calibrated according to the size of the process zone obtained from experimental measurements.
Further calculation shows that the volume of the process zone goes to zero with $\epsilon$ in the limit of vanishing non-locality, $\epsilon\rightarrow 0$. Distinguished $\epsilon\rightarrow 0$ limits of cohesive evolutions are identified and are found to have both bounded linear elastic energy and Griffith surface energy. Here the limit dynamics corresponds to the simultaneous evolution of linear elastic displacement and a fracture set across which the displacement is discontinuous. Under suitable hypotheses it is found that for points in space-time away from the fracture set that the displacement field evolves according to the linear elastic wave equation. Here the linear wave equation provides a dynamic coupling between elastic waves and the evolving fracture path inside the media. The elastic moduli, wave speed and energy release rate for the evolution are explicitly recovered from moments of the peridynamic potential energy. These features are consistent with the asymptotic behavior seen in the convergence of solutions of the Barenblatt model to the Griffith model when cohesive forces confined to a surface act over a sufficiently short range \cite{MarigoTruskinovsky}, \cite{Willis}.
Earlier work has shown that linear peridynamic formulations recover the classic linear elastic wave equation in the limit of vanishing non-locality see \cite{EmmrichWeckner}, \cite{SillingLehoucq}. The convergence of linear peridynamics to Navier's equation in the sense of solution operators is demonstrated in \cite{MengeshaDu}. Recent work shows that analogous results can be found for dynamic problems and fully nonlinear peridynamics \cite{LiptonJElast2014} in the context of antiplane shear. There distinguished $\epsilon\rightarrow 0$ limits of cohesive evolutions are identified and are found to have both bounded linear elastic energy and Griffith surface energy. It is shown that the limiting displacement evolves according to the linear elastic wave equation away from the crack set, see \cite{LiptonJElast2014}. For large deformations, the connection between hyperelastic energies and the small horizon limits of nonlinear peridynamic energies is recently established in \cite{Bellido}. In the current paper both two and three dimensional problems involving multi-mode fracture are addressed. For these problems new methods are required to identify the existence of a limit dynamics as the length scale of nonlocal interaction $\epsilon$ goes to zero. A crucial step is to establish a suitable notion of compactness for sequences of cohesive evolutions. The approach taken here employs nonlocal Korn inequalities introduced in \cite{DuGunzbergerlehoucqmengesha}. This method is presented in Section \ref{CC}. We conclude noting that the cohesive dynamics model introduced here does not have an irreversibility constraint and that the constitutive law \eqref{forcestate} applies at all times in the fracture evolution. However with this caveat in mind, the nonlocal cohesive model offers new computational and analytical opportunities for understanding the effect of the process zone on fracture patterns.
In the next section we write down the Lagrangian formulation for the cohesive dynamics and apply the principle of least action to recover the equation of motion. In that section it is shown that the nonlinear-nonlocal cohesive evolution is a well posed initial boundary value problem. It is also shown that energy balance is satisfied by the cohesive dynamics. A formal stability analysis is carried out in Section \ref{sec3} showing that jump discontinuities within the process zone can become unstable and grow, see Proposition \ref{Nuccriteria}. In Section \ref{sec4} we provide a mathematically rigorous inequality explicitly showing how the volume of the process zone for the cohesive evolutions is controlled by the length scale of nonlocal interaction $\epsilon$, see Theorem \ref{epsiloncontropprocesszone}. It is shown that the process zone concentrates on a set of zero volume in the limit, $\epsilon\rightarrow 0$, see Theorem \ref{bondunstable}. In Sections \ref{sec5} and \ref{sec6} we introduce suitable technical hypotheses and identify the distinguished limit of the cohesive evolutions as $\epsilon\rightarrow 0$, see Theorem \ref{LimitflowThm}. It is shown that the dynamics can be expressed in terms of displacements that satisfy the linear elastic wave equation away from the crack set, see Theorem \ref{waveequation}. These displacements are shown to have bounded bulk elastic and surface energy in the sense of Linear Elastic Fracture Mechanics (LEFM), see Theorem \ref{LEFMMThm}. In Section \ref{EightIntro} we provide the mathematical underpinnings and proofs of the theorems. In Section \ref{Ninth} we apply the approach developed here to examine limits of dynamics associated with other energies that $\Gamma$-converge to the Griffith fracture energy. As an illustrative example we examine the Ambrosio-Tortorelli \cite{AT} approximation as applied to the dynamic problem in \cite{BourdinLarsenRichardson} and \cite{LarsenOrtnerSuli}.
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{Cohesive dynamics} \label{sec2}
We formulate the initial boundary value problem for the cohesive evolution. Since the problem is nonlocal the domain $D$ is split into a boundary layer called the constraint set $D_c$, and the interior $D_s$. To fix ideas the thickness of the boundary layer is denoted by $\alpha$ and $2\epsilon<\alpha$ where $2\epsilon$ is the diameter of nonlocal interaction, see Figure \ref{Domains}. The boundary condition for the displacement $u$ is given by $u(t,x)=0$ for $x$ in $D_c$. To incorporate nonlocal boundary conditions we introduce the space $L^2_0(D;\mathbb{R}^d)$, of displacements that are square integrable over $D$ and zero on $D_c$. The initial conditions for the cohesive dynamics belong to $L^2_0(D;\mathbb{R}^d)$ and are given by \begin{eqnarray} u(x,0)=u_0(x),\hbox{ and }u_t(x,0)=v_0(x). \label{initialconditions} \end{eqnarray}
We will investigate the evolution of the deforming domain for general initial conditions. These can include an initially un-cracked body or one with a preexisting system of cracks. For two dimensional problems the cracks are given by a system of curves of finite total length, while for three dimensional problems the crack set is given by a system of surfaces of finite total surface area. Depending on the dimension of the problem the displacement suffers a finite jump discontinuity across each curve or surface. The initial condition is specified by a crack set $K$ and displacement $u_0$. The strain $\mathcal{E}u_0=(\nabla u_0+\nabla u_0^T)/2$ is defined off the crack set and the displacement $u_0$ can suffer jumps across $K$. Griffith's theory of fracture asserts that the energy necessary to produce a crack $K$ is proportional to the crack length (or surface area). For Linear Elastic Fracture Mechanics (LEFM) the total energy associated with bulk elastic and surface energy is given by \begin{eqnarray}
LEFM(u_0)=\int_{D}\left(2\mu|\mathcal{E}u_0|^2+\lambda|{\rm div}\,u_0|^2\right)\,dx+\mathcal{G}_c |K|, \label{Gcrackenergy} \end{eqnarray}
where $\mu$, $\lambda$ are the shear and Lam\'e moduli and $\mathcal{G}_c$ is the critical energy release rate for the material. Here $|K|$ denotes the length or surface area of the crack. In what follows we will assume that the bulk elastic energy and surface energy of the initial displacement are bounded as well as the initial velocity and displacement. For future reference we describe initial data $u_0$ and $v_0$ that satisfy these conditions as {\em LEFM initial data} and we have the inequality between the peridynamic energy and the energy of Linear Elastic Fracture Mechanics given by \begin{eqnarray} PD^\epsilon(u_0)\leq LEFM(u_0), \label{basicinequality} \end{eqnarray} when $\mu$, $\lambda$, and $\mathcal{G}_c$ are related to the nonlocal potentials according to \eqref{calibrate1} and \eqref{calibrate2}. This inequality is established in Section \ref{CC}, see \eqref{upperboundperi}.
In what follows we write $u(t,x)$ as $u(t)$ to expedite the presentation. The cohesive dynamics is described by the Lagrangian \begin{eqnarray} L^\epsilon(u(t),\partial_t u(t),t)=K(\partial_t u(t))-PD^\epsilon(u(t))+U(u(t)), \label{Lagrangian} \end{eqnarray} with \begin{eqnarray}
K(\partial_t u(t))&=&\frac{1}{2}\int_{D}\rho|\partial_t u(t,x)|^2\,dx, \hbox{ and }\nonumber\\ U(u(t))&=&\int_{D}b(t,x) u(t,x)\,dx, \label{Components} \end{eqnarray} where $\rho$ is the mass density of the material and $b(t,x)$ is the body force density. The initial conditions $u^\epsilon(0,x)=u_0(x)$ and $u^\epsilon_t(0,x)=v_0(x)$ are prescribed and the action integral for the peridynamic evolution is \begin{eqnarray} I^\epsilon(u)=\int_0^TL^\epsilon(u(t),\partial_t u(t),t)\,dt. \label{Action} \end{eqnarray} The Euler--Lagrange equation for this system delivers the cohesive dynamics described by \begin{eqnarray} \rho u^\epsilon_{tt}&=&-\nabla PD^\epsilon(u^\epsilon)+b \label{stationary} \end{eqnarray} where \begin{eqnarray}
\nabla PD^\epsilon(u^\epsilon)=-\frac{2}{V_d}\int_{\mathcal{H}_\epsilon(x)}\partial_\mathcal{S} \mathcal{W}^\epsilon(\mathcal{S},y-x)\frac{y-x}{|y-x|}\,dy, \label{GradPD} \end{eqnarray} and \begin{eqnarray}
\mathcal{S}=\frac{u^\epsilon(y)-u^\epsilon(x)}{|y-x|}\cdot e. \label{sepsilon} \end{eqnarray} The displacement $u^\epsilon(t,x)$ is twice differentiable in time taking values in $L^2_0(D;\mathbb{R}^d)$. The space of such functions is denoted by $C^2([0,T];L^2_0(D;\mathbb{R}^d))$. The initial value problem for the peridynamic evolution \eqref{stationary} is seen to have a unique solution in this space, see Theorem \ref{existenceuniqueness} of Section \ref{EE}. The cohesive evolution $u^\epsilon(x,t)$ is uniformly bounded in the mean square norm over bounded time intervals $0<t<T$, i.e., \begin{eqnarray} \max_{0<t<T}\left\{\Vert u^\epsilon(x,t)\Vert_{L^2(D;\mathbb{R}^d)}^2\right\}<C. \label{bounds} \end{eqnarray}
Here $\Vert u^\epsilon(x,t)\Vert_{L^2(D;\mathbb{R}^d)}=\left(\int_D|u^\epsilon(x,t)|^2\,dx\right)^{1/2}$ and the upper bound $C$ is independent of $\epsilon$ and depends only on the initial conditions and body force applied up to time $T$, see Section \ref{GKP}.
\begin{figure}
\caption{{\bf Domain $D=D_c\cup D_s$.}}
\label{Domains}
\end{figure}
The cohesive evolution has the following properties that are established in Section \ref{GKP}. The evolution has uniformly bounded kinetic and elastic potential energy \begin{theorem} \label{Gronwall} {\rm \bf Bounds on kinetic and potential energy for cohesive dynamics}\\ There exists a positive constant $C$ depending only on $T$ and independent of $\epsilon$ for which \begin{eqnarray} \sup_{0\leq t\leq T}\left\{PD^{\epsilon}(u^{\epsilon}(t))+\frac{\rho}{2}\Vert u_t^{\epsilon}(t)\Vert^2_{L^2(D;\mathbb{R}^d)}\right\}\leq C. \label{boundenergy} \end{eqnarray} \end{theorem}
The evolution is uniformly continuous in time as measured by the mean square norm. \begin{theorem}{\rm \bf Continuous cohesive evolution in mean square norm}\\ There is a positive constant $K$ independent of $t_2 < t_1$ in $[0,T]$ and index $\epsilon$ for which \begin{eqnarray}
\Vert u^{\epsilon}(t_1)-u^{\epsilon}(t_2)\Vert_{L^2(D;\mathbb{R}^d)}\leq K |t_1-t_2|. \label{holderest} \end{eqnarray} \label{holdercont} \end{theorem}
The evolution satisfies energy balance. The total energy of the cohesive evolution at time $t$ is given by \begin{eqnarray} \mathcal{EPD}^\epsilon(t,u^\epsilon(t))=\frac{\rho}{2}\Vert u^\epsilon_t(t)\Vert^2_{L^2(D;\mathbb{R}^d)}+PD^\epsilon(u^\epsilon(t))-\int_{D}b(t)\cdot u^\epsilon(t)\,dx \label{energyt} \end{eqnarray} and the total energy of the system at time $t=0$ is \begin{eqnarray} \mathcal{EPD}^\epsilon(0,u^\epsilon(0))=\frac{\rho}{2}\Vert v_0\Vert^2_{L^2(D;\mathbb{R}^d)}+PD^\epsilon(u_0)-\int_{D}b(0)\cdot u_0\,dx. \label{energy0} \end{eqnarray} The cohesive dynamics is seen to satisfy energy balance at every instant of the evolution. \begin{theorem}{\rm \bf Energy balance for cohesive dynamics}\\ \label{Ebalance} \begin{eqnarray} \mathcal{EPD}^\epsilon(t,u^\epsilon(t))=\mathcal{EPD}^\epsilon(0,u^\epsilon(0))-\int_0^t\int_{D} b_t(\tau)\cdot u^\epsilon(\tau)\,dx\,d\tau.\label{BalanceEnergy} \end{eqnarray} \end{theorem}
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{Dynamic instability and fracture nucleation} \label{sec3} In this section we present a fracture nucleation condition that arises from the unstable force law \eqref{forcestate}. This condition is manifested as a dynamic instability. In the following companion section we investigate the localization of dynamic instability as $\epsilon\rightarrow 0$ and define the notion of process zone for the cohesive evolution. Fracture nucleation conditions can be viewed as instabilities and have been identified for peridynamic evolutions in \cite{SillingWecknerBobaru}. Fracture nucleation criteria formulated as instabilities for one dimensional peridynamic bars are developed in \cite{WecknerAbeyaratne}. In this treatment we define a source for crack nucleation as a jump discontinuity in the displacement field that can become unstable and grow in time. Here we establish a direct link between the growth of jump discontinuities and the appearance of strain concentrations inside the deforming body.
We proceed with a formal perturbation analysis and consider a time independent body force density $b$ and a smooth equilibrium solution $u$ of \eqref{stationary}. Now perturb $u$ in the neighborhood of a point $x$ by adding a piecewise smooth discontinuity denoted by the vector field $\delta$. The perturbation takes the value zero on one side of a plane with normal vector $\nu$ passing through $x$ and on the other side of the plane takes the value $\delta=\overline{u}s(t)$. Here $s(t)$ is a scalar function of time and $\overline{u}$ is a constant vector. Consider the neighborhood $\mathcal{H}_\epsilon(x)$, then $\delta(y)=0$ for $(y-x)\cdot\nu<0$ and $\delta(y)=\overline{u}s(t)$ for $(y-x)\cdot\nu\geq 0$, see Figure \ref{plane}. The half space on the side of the plane for which $(y-x)\cdot\nu<0$ is denoted by $E^-_{\nu}$.
Write $u^p=u+\delta$ and assume \begin{eqnarray} \rho u^p_{tt}&=&-\nabla PD^\epsilon(u^p)+b.\label{stationarydiff1} \end{eqnarray} We regard $s(t)$ as a small perturbation and expand the integrand of $\nabla PD^\epsilon(u^p)$ in a Taylor series to recover the linearized evolution equation for the jump $s=s(t)$. The evolution equation is given by \begin{eqnarray} \rho s_{tt}\overline{u}=\mathcal{A}_{\nu}(x)\overline{u}s, \label{pertevolution} \end{eqnarray} where the stability matrix $\mathcal{A}_{\nu}(x)$ is a $d\times d$ symmetric matrix with real eigenvalues and is defined by \begin{eqnarray}
\mathcal{A}_{\nu}(x)&=&-\frac{2}{\epsilon V_d}\left\{\int_{\mathcal{H}_\epsilon(x)\cap E^-_{\nu}}\frac{1}{|y-x|}\partial^2_{\mathcal{S}}\mathcal{W}^\epsilon(\mathcal{S},y-x)\frac{y-x}{|y-x|}\otimes\frac{y-x}{|y-x|}dy\right\}, \label{instabilitymatrix} \end{eqnarray}
and $$\mathcal{S}=\mathcal{S}(y,x)=\left(\frac{u(y)-u(x)}{|y-x|}\right)\cdot\frac{y-x}{|y-x|}.$$ Calculation shows that \begin{eqnarray}
\partial^2_{\mathcal{S}}\mathcal{W}^\epsilon(\mathcal{S},y-x)=\frac{2}{\epsilon}J^\epsilon(|y-x|)\left(f'\left(|y-x|\mathcal{S}^2\right)+2f''\left(|y-x|\mathcal{S}^2\right)|y-x|\mathcal{S}^2\right),\label{expand} \end{eqnarray}
where $f'(|y-x|\mathcal{S}^2)>0$ and $f''(|y-x|\mathcal{S}^2)<0$. On writing \begin{eqnarray}
\mathcal{S}_c=\frac{\overline{r}}{\sqrt{|y-x|}} \label{critstrain} \end{eqnarray} we have that \begin{eqnarray}
\partial_\mathcal{S}^2\mathcal{W}^\epsilon(\mathcal{S},y-x)>0\hbox{ for }|\mathcal{S}(y,x)|<\mathcal{S}_c, \label{loss1} \end{eqnarray} and \begin{eqnarray}
\partial_\mathcal{S}^2\mathcal{W}^\epsilon(\mathcal{S},y-x)<0\hbox{ for }|\mathcal{S}(y,x)|>\mathcal{S}_c. \label{loss2} \end{eqnarray} Here $\overline{r}$ is the inflection point for the function $r:\rightarrow f(r^2)$ and is the root of the equation \begin{eqnarray} f'(r)+2{r}f''({r})=0. \label{rootroverline} \end{eqnarray} Note that the critical strain $\mathcal{S}_c$ for which the cohesive force between a pair of points $y$ and $x$ begins to soften is akin to the square root singularity seen at the crack tip in classical brittle fracture mechanics.
For eigenvectors $\overline{u}$ in the eigenspace associated with positive eigenvalues $\lambda$ of $\mathcal{A}_\nu(x)$ one has \begin{eqnarray} \rho\partial^2_{tt}s(t)=\lambda s(t) \label{stabeq} \end{eqnarray} and the perturbation $s(t)$ can grow exponentially. Observe from \eqref{loss2} that the quadratic form \begin{eqnarray}
\mathcal{A}_{\nu}(x)\overline{w}\cdot\overline{w}=-\frac{2}{\epsilon V_d}\left\{\int_{\mathcal{H}_\epsilon(x)\cap E^-_{\nu}}\frac{1}{|y-x|}\partial^2_{\mathcal{S}}\mathcal{W}^\epsilon(\mathcal{S},y-x)(\frac{y-x}{|y-x|}\cdot\overline{w})^2dy\right\} \label{quadformunstable} \end{eqnarray} will have at least one positive eigenvalue provided a sufficiently large proportion of bonds $y-x$ inside the horizon have strains satisfying \begin{eqnarray}
|\mathcal{S}(x,y)|>\mathcal{S}_c \label{exceed} \end{eqnarray} for which the cohesive force is in the unstable phase. For this case we see that the jump can grow exponentially. The key feature here is that dynamic instability is explicitly linked to strain concentrations in this cohesive model as is seen from \eqref{loss2} together with \eqref{quadformunstable}. Collecting results we have the following proposition.
\begin{proposition}{\em \bf Fracture nucleation condition for cohesive dynamics}\\ \label{Nuccriteria}
A condition for crack nucleation at a point $x$ is that there is at least one direction $\nu$ for which $\mathcal{A}_{\nu}(x)$ has at least one positive eigenvalue. This occurs if there is a square root strain concentration $|\mathcal{S}(y,x)|>\mathcal{S}_c$ over a sufficiently large proportion of cohesive bonds inside the peridynamic horizon. \end{proposition} \noindent Proposition \ref{Nuccriteria} together with (\ref{loss2}) provide the explicit link between dynamic instability and the critical strain where the cohesive law begins to soften.
\begin{figure}
\caption{{\bf Jump discontinuity.}}
\label{plane}
\end{figure}
More generally we may postulate a condition for the direction along which the opposite faces of a nucleating fissure are oriented and the direction of the displacement jump across it. Recall that two symmetric matrices $A$ and $B$ satisfy $A\geq B$ in the sense of quadratic forms if $A\overline{w}\cdot\overline{w}\geq B\overline{w}\cdot\overline{w}$ for all $\overline{w}$ in $\mathbb{R}^d$. We say that a matrix $A$ is the maximum of a collection of symmetric matrices if $A\geq B$ for all matrices $B$ in the collection.
We postulate that the faces of the nucleating fissure are perpendicular to the direction $\nu^*$ associated with the matrix $\mathcal{A}_{\nu^*}(x)$ for which \begin{eqnarray} \mathcal{A}_{\nu^*}(x)=\max\left\{\mathcal{A}_{\nu}(x);\, \hbox{over all directions } \nu\hbox{ such that } \mathcal{A}_{\nu}(x) \hbox{ has a positive eigenvalue}\right\}, \label{bestdirctionforgrowth} \end{eqnarray} and that the orientation of the jump in displacement across opposite sides of the fissure lies in the eigenspace associated with the largest positive eigenvalue of $\mathcal{A}_{\nu^*}$, {\em i.e., the fissure is oriented along the most unstable orientation and the displacement jump across the nucleating fissure is along the most unstable direction.}
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{The process zone for cohesive dynamics and its localization in the small horizon limit } \label{sec4}
In this section it is shown that the collection of centroids of peridynamic neighborhoods with strain exceeding a prescribed threshold concentrate on sets with zero volume in the limit of vanishing non-locality. In what follows we probe the dynamics to obtain mathematically rigorous and explicit estimates on the size of the process zone in terms of the radius of the peridynamic horizon $0<\epsilon<1$.
The continuity of the displacement inside the neighborhood $\mathcal{H}_\epsilon(x)$ is measured quantitatively by \begin{eqnarray}
|u(y)-u(x)|\leq\underline{k}\,|y-x|^\alpha, \hbox{ for $y\in \mathcal{H}_\epsilon(x)$}, \label{moduluscont} \end{eqnarray} with $0<\underline{k}$ and $0\leq\alpha\leq 1$. In what follows we focus on the reduction of continuity measured quantitatively by \begin{eqnarray}
|u(y)-u(x)|>\underline{k}\,|y-x|^\alpha \hbox{ for $y\in \mathcal{H}_\epsilon(x)$}. \label{modulusdiscont} \end{eqnarray}
Observe when \eqref{modulusdiscont} holds and $\alpha=1/2$ and $\underline{k}=\overline{r}$ then $|\mathcal{S}(y,x)|>\mathcal{S}_c$ and there is softening in the cohesive force-strain behavior given by \eqref{forcestate} .
We now consider solutions $u^\epsilon$ of \eqref{stationary} and define a mathematical notion of process zone based on the strain exceeding threshold values associated with \eqref{modulusdiscont}.
The process zone is best described in terms of the basic unit of peridynamic interaction: the peridynamic neighborhoods $\mathcal{H}_\epsilon(x)$ of radius $\epsilon>0$ with centers $x\in D$. We fix a choice of $\underline{k}$ and $\alpha$ belonging to the intervals $0< \underline{k}\leq \overline{r}$ and $1/2\leq \alpha<1$. The strain between $x$ and a point $y$ inside the neighborhood is denoted by $\mathcal{S}^\epsilon(y,x)$. The collection of points $y$ inside $\mathcal{H}_\epsilon(x)$ for which the strain $|\mathcal{S}^\epsilon(y,x)|$ exceeds the threshold function $\underline{k}\,|y-x|^{\alpha-1}$ is denoted by
$\{y\in\mathcal{H}_\epsilon(x):\, |\mathcal{S}^\epsilon(y,x)|>\underline{k}\,|y-x|^{\alpha-1}\}$. Note for $0<\underline{k}<\overline{r}$ and $1/2<\alpha<1$ that \begin{eqnarray}
&&\left\{y\hbox{ $\in$ }\mathcal{H}_\epsilon(x):\, |\mathcal{S}^\epsilon(y,x)|>\mathcal{S}_c\right\}\subset\{y\hbox{ $\in$ }\mathcal{H}_\epsilon(x):\, |\mathcal{S}^\epsilon(y,x)|>\frac{\underline{k}}{|y-x|^{1-\alpha}}\}.\label{nonlipshitz} \end{eqnarray}
The fraction of points inside the neighborhood $\mathcal{H}_\epsilon(x)$ with strains exceeding the threshold is written \begin{eqnarray}
P\left(\{y\hbox{ in }\mathcal{H}_\epsilon(x):\, |\mathcal{S}^\epsilon(y,x)|>\underline{k}\,|y-x|^{\alpha-1}\}\right),\label{weight} \end{eqnarray} where the weighted volume fraction for any subset $B$ of $\mathcal{H}_\epsilon(x)$ is defined as \begin{eqnarray}
P(B)=\frac{1}{\epsilon^d m}\int_{B}\,(|y-x|/\epsilon)J(\vert y-x\vert/\epsilon)\,dy, \label{weightdefined} \end{eqnarray} with normalization constant \begin{eqnarray}
m=\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)\,d\xi \label{normalize} \end{eqnarray} chosen so that $P(\mathcal{H}_\epsilon(x))=1$.
\begin{definition}{\bf Process Zone.} \label{processZone}
Fix a volume fraction $0<\overline{\theta}\leq 1$, $0<\underline{k}\leq\overline{r}$, and $1/2\leq\alpha<1$ and at each time $t$ in the interval $0\leq t\leq T$, define the process zone $PZ^\epsilon(\underline{k},\alpha,\overline{\theta},t)$ to be the collection of centers of peridynamic neighborhoods for which the portion of points $y$ with strain $\mathcal{S}^\epsilon(t,y,x)$ exceeding the threshold $\underline{k}\,|y-x|^{\alpha-1}$ is greater than $\overline{\theta}$, i.e., $P\left(\{y\hbox{ in }\mathcal{H}_\epsilon(x):\, |\mathcal{S}^\epsilon(t,y,x)|>\underline{k}\,|y-x|^{\alpha-1}\}\right)>\overline{\theta}$. \end{definition}
The fracture set is defined to be the process zone for which strains exceed the threshold $\mathcal{S}_c$ and the force vs. strain curve begins to soften. \begin{definition}{\bf Fracture Set.} \label{Fractureset}
The fracture set is defined to be the process zone associated with the values $\overline{\theta}=1/2$, $\underline{k}=\overline{r}$, and $\alpha=1/2$: at each time $t$ in the interval $0\leq t\leq T$ it is given by $PZ^\epsilon(\overline{r},1/2,1/2,t)$, the collection of centers of peridynamic neighborhoods for which the portion of points $y$ with strain $\mathcal{S}^\epsilon(t,y,x)$ exceeding the threshold $\mathcal{S}_c$ is greater than $1/2$, i.e., $P\left(\{y\hbox{ in }\mathcal{H}_\epsilon(x):\, |\mathcal{S}^\epsilon(t,y,x)|>\mathcal{S}_c\}\right)>1/2$. \end{definition} \noindent It is clear from the definition that the fracture set defined for this model contains the set of jump discontinuities for the displacement. The definition of fracture set given here is different from the usual one, which collapses material damage onto a surface across which the displacement jumps.
It follows from Proposition \ref{Nuccriteria} that the process zone contains peridynamic neighborhoods associated with softening cohesive forces. Within this zone pre-existing jump discontinuities in the displacement field can grow. \begin{remark} Here we have described a range of process zones depending upon the choice of $\alpha$, $\underline{k}$ and $\overline{\theta}$. In what follows we show that for any choice of $\alpha$ in $1/2\leq \alpha <1$ and $\underline{k}$ in $0<\underline{k}\leq\overline{r}$ and $0<\overline{\theta}\leq 1$ the volume of the process zone is explicitly controlled by the radius of the peridynamic horizon $\epsilon$. \end{remark}
We consider problem formulations in two and three dimensions and the volume or area of a set is given by the $d$ dimensional Lebesgue measure denoted by $\mathcal{L}^d$, for $d=2,3$. We let \begin{eqnarray} \label{upbdconst} C(t)= \left( (2LEFM(u_0)+{\rho}\Vert v_0\Vert_{L^2(D;\mathbb{R}^d)}+1)^{1/2}+\sqrt{\rho^{-1}}\int_0^t\Vert b(\tau)\Vert_{L^2(D;\mathbb{R}^d)}\,d\tau\right)^2-1, \end{eqnarray} and note that $C(t)\leq C(T)$ for $t<T$.
We now give the following bound on the size of the process zone. \begin{theorem} {\bf Dependence of the process zone on the radius of the peridynamic horizon} \label{epsiloncontropprocesszone} \begin{eqnarray} \mathcal{L}^d\left(PZ^\epsilon(\underline{k},\alpha,\overline{\theta},t) \right)\leq \frac{\epsilon^{1-\beta}}{\overline{\theta}\underline{k}^2(f'(0)+o(\epsilon^\beta))}\times \frac{C(t)}{2m}, \label{controlbyepsilon} \end{eqnarray} where $0\leq\beta<1$ and $\beta=2\alpha-1$ and $0\leq t\leq T$. \end{theorem} \noindent Theorem \ref{epsiloncontropprocesszone} explicitly shows that the size of the process zone is controlled by the radius $\epsilon$ of the peridynamic horizon, uniformly in time. This theorem is proved in Section \ref{proofbondunstable}.
\begin{remark} \label{ModelParameter} This analysis shows that {\em the horizon size $\epsilon$ for cohesive dynamics is a modeling parameter} that may be calibrated according to the size of the process zone obtained from experimental measurements. \end{remark}
Next we show how the process zone localizes and concentrates on sets with zero volume in the small horizon limit. To proceed choose $\delta>0$ and consider the sequence of solutions $u^{\epsilon_k}(t,x)$ to the cohesive dynamics for a family of radii $\epsilon_k=\frac{1}{2^k}$, $k=1,\ldots$. The set of centers $x$ of neighborhoods $\mathcal{H}_{\epsilon_k}(x)$ that belong to at least one of the process zones $PZ^{\epsilon_k}(\underline{k},\alpha,\overline{\theta},t)$ for some $\epsilon_k<\delta$ at time $t$ is denoted by $CPZ^\delta(\underline{k},\alpha,\overline{\theta},t)$. Let $CPZ^0(\underline{k},\alpha,\overline{\theta},t)=\cap_{0<\delta}CPZ^\delta(\underline{k},\alpha,\overline{\theta},t)$ be the collection of centers of neighborhoods such that for every $\delta>0$ they belong to a process zone $PZ^{\epsilon_k}(\underline{k},\alpha,\overline{\theta},t)$ for some $\epsilon_k<\delta$. The localization and concentration of the process zone is formulated in the following theorem. \begin{theorem}{\rm\bf Localization of process zone in the small horizon limit.}\\ \label{bondunstable} The collection of process zones $CPZ^\delta(\underline{k},\alpha,\overline{\theta},t)$ is decreasing with $\delta\rightarrow 0$ and there is a positive constant $K$ independent of $t$ and $\delta$ for which \begin{eqnarray} && \mathcal{L}^d\left(CPZ^\delta(\underline{k},\alpha,\overline{\theta},t)\right)\leq K{\delta^{1-\beta}}, \hbox{ for, } 0\leq t\leq T,\,\,0<\beta=2\alpha-1\leq 1, \hbox{ with }\nonumber\\ && \mathcal{L}^d\left(CPZ^0(\underline{k},\alpha,\overline{\theta},t)\right)=\lim_{\delta\rightarrow 0}\mathcal{L}^d\left(CPZ^\delta(\underline{k},\alpha,\overline{\theta},t)\right)=0. \label{limdelta} \end{eqnarray} For any choice of $0<\overline{\theta}\leq1$ the collection of centers of neighborhoods for which there exists a positive $\delta$ such that \begin{eqnarray}
P\left(\{y\hbox{ in }H_{\epsilon_k}(x):\, |\mathcal{S}^{\epsilon_k}(t,y,x)|\leq\underline{k}\,|y-x|^{\alpha-1}\}\right)\geq1-\overline{\theta}, \label{contolledstrain} \end{eqnarray} for all $\epsilon_k<\delta$ is a set of full measure for every choice of $0<\underline{k}\leq\overline{r}$ and $1/2\leq\alpha<1$, i.e., $\mathcal{L}^d(D)=\mathcal{L}^d(D\setminus CPZ^0(\underline{k},\alpha,\overline{\theta},t))$. \end{theorem}
\begin{remark} \label{nearlipschitz} The theorem shows that the process zone concentrates on a set of zero volume in the limit of vanishing peridynamic horizon. Note \eqref{contolledstrain} holds for any $0<\overline{\theta}\leq 1$. On choosing $\overline{\theta}\approx 0$, and $\alpha\approx 1$ it is evident that the modulus of continuity for the displacement field is close to Lipschitz outside of the process zone in the limit of vanishing nonlocality, $\epsilon_k\rightarrow 0$. The concentration of the process zone is inevitable for the cohesive model and is directly linked to the constraint on the energy budget associated with the cohesive dynamics as described by Theorem \ref{Gronwall}. This bound forces the localization of the process zone as shown in Section \ref{proofbondunstable}. \end{remark} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{The small horizon limit of cohesive dynamics}
\label{sec5}
In this section we identify the distinguished small horizon $\epsilon\rightarrow 0$ limit for cohesive dynamics. It is shown here that the limit dynamics has bounded bulk linear elastic energy and Griffith surface energy characterized by the shear modulus $\mu$, Lam\'e modulus $\lambda$, and energy release rate $\mathcal{G}_c$ respectively. In order to make the connection between the limit dynamics and cohesive dynamics we will identify the relationship between the potentials $W^\epsilon(\mathcal{S},y-x)$ and the triple $\mu$, $\lambda$, $\mathcal{G}_c$.
To reveal this connection consider a family of cohesive evolutions $u^{\epsilon_k}$, each associated with a fixed potential $W^{\epsilon_k}$ and horizon length $\epsilon_k$, with $k=1,2,\ldots$ and $\epsilon_k\rightarrow 0$. Each $u^{\epsilon_k}(x,t)$ can be thought of as being the result of a perfectly accurate numerical simulation of a cohesive evolution associated with the potential $W^{\epsilon_k}$. It is shown in this section that the cohesive dynamics $u^{\epsilon_k}(x,t)$ converges to a limit evolution $u^0(x,t)$ in the limit, $\epsilon_k\rightarrow 0$. The limit evolution describes the dynamics of the cracked body when the scale of nonlocality is infinitesimally small with respect to the material specimen. Here the limiting free crack evolution is mediated through the triple $\mu$, $\lambda$, and $\mathcal{G}_c$ that are described by explicit formulas associated with the sequence of potentials $W^{\epsilon_k}$, see \eqref{calibrate1}, \eqref{calibrate2} and Theorem \ref{LEFMMThm} below.
\noindent {\em It is of fundamental importance to emphasize that we do not impose a-priori relations between $W^{\epsilon_k}$ and the triple $\mu$, $\lambda$, and $\mathcal{G}_c$; we show instead that the cohesive dynamics $u^{\epsilon_k}(x,t)$ approaches the limit dynamics $u^0(x,t)$ characterized by $\mu$, $\lambda$, and $\mathcal{G}_c$ given by the formulas \eqref{calibrate1} and \eqref{calibrate2} in the limit when $\epsilon_k\rightarrow 0$}.
\noindent In what follows the sequence of cohesive dynamics described by $u^{\epsilon_k}$ is seen to converge to the limiting free crack evolution $u^0(x,t)$ in mean square, uniformly in time, see Theorem \ref{LimitflowThm}. The limit evolution is shown to have the following properties: \begin{itemize} \item It has uniformly bounded energy in the sense of linear elastic fracture mechanics for $0\leq t \leq T$. \item It satisfies an energy inequality involving the kinetic energy of the motion together with the bulk elastic and surface energy associated with linear elastic fracture mechanics for $0\leq t\leq T$. \end{itemize}
\noindent We provide explicit conditions under which these properties are realized for the limit dynamics. \begin{hypothesis} \label{remarkone} We suppose that the magnitude of the displacements $u^{\epsilon_k}$ for cohesive dynamics are bounded for $0\leq t\leq T$ uniformly in $\epsilon_k$, i.e., $\sup_{\epsilon_k}\sup_{0\leq t\leq T}\Vert u^{\epsilon_k}(t)\Vert_{L^\infty(D;\mathbb{R}^d)}<\infty$. \end{hypothesis}
The convergence of cohesive dynamics is given by the following theorem, \begin{theorem} \label{LimitflowThm} {\rm\bf Convergence of cohesive dynamics}\\ For each $\epsilon_k$ we prescribe identical LEFM initial data $u_0(x)$ and $v_0(x)$ and the solution to the cohesive dynamics initial value problem is denoted by $u^{\epsilon_k}$. Now consider a sequence of solutions $u^{\epsilon_k}$ associated with a vanishing peridynamic horizon $\epsilon_k\rightarrow 0$ and suppose Hypothesis \ref{remarkone} holds true. Then, on passing to a subsequence if necessary, the cohesive evolutions $u^{\epsilon_k}$ converge in mean square uniformly in time to a limit evolution $u^0$ with the same LEFM initial data, i.e., \begin{eqnarray} \lim_{\epsilon_k\rightarrow 0}\max_{0\leq t\leq T}\left\{\Vert u^{\epsilon_k}(t)-u^0(t)\Vert_{L^2(D;\mathbb{R}^d)}\right\}=0 \label{unifconvg} \end{eqnarray} and $u^0(x,0)=u_0(x)$ and $\partial_t u^0(x,0)=v_0(x)$. \end{theorem}
To appropriately characterize the LEFM energy for the limit dynamics with freely propagating cracks one needs a generalization of the strain tensor. The appropriate notion of displacement and strain useful for problems involving discontinuities is provided by functions of bounded deformation BD introduced in \cite{Matthies}, \cite{Suquet}. The subspace of BD given by the special functions of bounded deformation $SBD$ introduced in \cite{Bellettini} is appropriate for describing discontinuities associated with linear elastic fracture. Functions $u$ in SBD belong to $L^1(D;\mathbb{R}^d)$ and are approximately continuous, i.e., have Lebesgue limits for almost every $x\in D$ given by \begin{eqnarray}
\lim_{r\searrow 0}\frac{1}{\pi r^d}\int_{B(x,r)}\,|u(y)-u(x)|\,dy=0, \hbox{ $d=2,3$} \label{approx} \end{eqnarray} where $B(x,r)$ is the ball of radius $r$ centered at $x$. The jump set $J_{u}$ for elements of $SBD$ is defined to be the set of points of discontinuity which have two different one sided Lebesgue limits. One sided Lebesgue limits of $u$ with respect to a direction $\nu_u(x)$ are denoted by $u^-(x)$, $u^+(x)$ and are given by \begin{eqnarray}
\lim_{r\searrow 0}\frac{1}{\pi r^d}\int_{B^-(x,r)}\,|u(y)-u^-(x)|\,dy=0,\,\,\, \lim_{r\searrow 0}\frac{1}{\pi r^d}\int_{B^+(x,r)}\,|u(y)-u^+(x)|\,dy=0, \hbox{$d=2,3$}, \label{approxjump} \end{eqnarray} where $B^-(x,r)$ and $B^+(x,r)$ are given by the intersection of $B(x,r)$ with the half spaces $(y-x)\cdot \nu_u(x)<0$ and $(y-x)\cdot \nu_u(x)>0$ respectively. SBD functions have jump sets $J_u$, described by a countable number of components $K_1,K_2,\ldots$, contained within smooth manifolds, with the exception of a set $K_0$ that has zero $d-1$ dimensional Hausdorff measure \cite{AmbrosioCosicaDalmaso}. Here the notion of arc length or (surface area) is the $d-1$ dimensional Hausdorff measure of $J_{u}$ and $\mathcal{H}^{d-1}(J_{u})=\sum_i\mathcal{H}^{d-1}(K_i)$. The strain \cite{AmbrosioCosicaDalmaso} of a displacement $u$ belonging to SBD, written as $\mathcal{E}u$, is a generalization of the classic strain tensor and satisfies the property \begin{eqnarray}
\lim_{r\searrow 0}\frac{1}{\pi r^d}\int_{B(x,r)}\,\frac{|(u(t,y)-u(t,x)-\mathcal{E}u(t,x)(y-x))\cdot(y-x)|}{|y-x|^2}\,dy=0, \hbox{ $d=2,3$} \label{appgrad} \end{eqnarray} for almost every $x\in D$, with respect to $d$-dimensional Lebesgue measure $\mathcal{L}^d$. The symmetric part of the distributional derivative of $u$, $E u=1/2(\nabla u+\nabla u^T)$ for $SBD$ functions is a $d\times d$ matrix valued Radon measure with absolutely continuous part described by the density $\mathcal{E}u$ and singular part described by the jump set \cite{AmbrosioCosicaDalmaso}, \cite{Bellettini} and \begin{eqnarray} \langle E u,\Phi\rangle=\int_D\,\sum_{i,j=1}^d\mathcal{E}u_{ij}\Phi_{ij}\,dx+\int_{J_{u}}\,\sum_{i,j=1}^d(u^+_i - u^-_i)\nu_j\Phi_{ij}\,d\mathcal{H}^{d-1}, \label{distderiv} \end{eqnarray} for every continuous, symmetric matrix valued test function $\Phi$. A description of $BD$ functions including their fine properties and structure, together with the characterization of $SBD$ functions on slices is developed in \cite{AmbrosioCosicaDalmaso} and \cite{Bellettini}.
The energy of linear elastic fracture mechanics extended to the class of $SBD$ functions is given by: \begin{eqnarray}
LEFM(u,D)=\int_{D}\left(2\mu |\mathcal{E} u|^2+\lambda |{\rm div}\,u|^2\right)\,dx+\mathcal{G}_c\mathcal{H}^{d-1}(J_{u}), \hbox{ $d=2,3$,} \label{LEFMSBVDefinition} \end{eqnarray} for $u$ belonging to $SBD$. We now describe the elastic energy for the limit dynamics. \begin{theorem} {\rm\bf The limit dynamics has bounded LEFM energy}\\ The limit evolution $u^0$ belongs to SBD for every $t\in[0,T]$. Furthermore there exists a constant $C$ depending only on $T$ bounding the LEFM energy, i.e., \begin{eqnarray}
\int_{D}\,2\mu |\mathcal{E} u^0(t)|^2+\lambda |{\rm div}\,u^0(t)|^2\,dx+\mathcal{G}_c\mathcal{H}^{d-1}(J_{u^0(t)})\leq C, \hbox{ $d=2,3$,} \label{LEFMbound} \end{eqnarray} for $0\leq t\leq T$. Here $\mu$, $\lambda$, and $\mathcal{G}_c$ are given by the explicit formulas \begin{eqnarray} \mu=\lambda=\frac{1}{4} f'(0)\int_{0}^1r^2J(r)dr, \hbox{ $d=2$}&\hbox{ and }& \mu=\lambda=\frac{1}{5} f'(0)\int_{0}^1r^3J(r)dr, \hbox{ $d=3$}\label{calibrate1} \end{eqnarray} and \begin{eqnarray} \mathcal{G}_c=\frac{2\omega_{d-1}}{\omega_d}\, f_\infty \int_{0}^1r^dJ(r)dr, \hbox{ for $d=2,3$} \label{calibrate2} \end{eqnarray} where $f_\infty$ is defined by \eqref{properties} and $\omega_{n}$ is the volume of the $n$ dimensional unit ball, $\omega_1=2,\omega_2=\pi,\omega_3=4\pi/3$. The potential $f$ and influence function $J$ can always be chosen to satisfy \eqref{calibrate1} and \eqref{calibrate2} for any $\mu=\lambda>0$ corresponding to the Poisson ratio $\nu=1/3$, for $d=2$ and $\nu=1/4$, for $d=3$, and $\mathcal{G}_c>0$. \label{LEFMMThm} \end{theorem}
\begin{remark} \label{boundedhausdorffmeasure} The absolutely continuous part of the strain $\mathcal{E}u^0$ is defined for points away from the jump set $J_{u^0}$ and in this sense the process zone for the limit evolution can be viewed as being confined to the jump set $J_{u^0}$ . Theorem \ref{LEFMMThm} shows that the jump set $J_{u^0}$ for the limit evolution $u^0(t,x)$ is confined to a set of finite $d-1$ dimensional Hausdorff measure. \end{remark}
We now present an energy inequality for the limit evolution. The sum of energy and work for the displacement $u^0$ at time $t$ is written \begin{eqnarray} \mathcal{GF}(u^0(t),D)=\frac{\rho}{2}\Vert u_t^0(t)\Vert^2_{L^2(D;\mathbb{R}^d)}+LEFM(u^0(t),D)-\int_{D}b(t)\cdot u^0(t)\,dx. \label{sumtt} \end{eqnarray} The sum of energy and work for the initial data $u_0,v_0$ is written \begin{eqnarray} \mathcal{GF}(u_0,D)=\frac{\rho}{2}\Vert v_0\Vert^2_{L^2(D;\mathbb{R}^d)}+LEFM(u_0,D)-\int_{D}b(0)\cdot u_0\,dx. \label{sumt0} \end{eqnarray} The energy inequality for the limit evolution $u^0$ is given by, \begin{theorem} {\rm \bf Energy Inequality}\\ \label{energyinequality} For almost every $t$ in $[0, T]$, \begin{eqnarray} \mathcal{GF}(u^0(t),D)\leq\mathcal{GF}(u_0,D)-\int_0^t\int_{D} b_t(\tau) \cdot u^0(\tau)\,dx\,d\tau. \label{enegineq} \end{eqnarray} \end{theorem}
\begin{remark} \label{remarkfinal} The equality $\lambda=\mu$ appearing in Theorem \ref {LEFMMThm} is a consequence of the central force nature of the local cohesive interaction mediated by \eqref{forcestate}. More general non-central interactions are proposed in Section 15 of \cite{Silling1} and in the state based peridynamic formulation \cite{States}. The non-central formulations deliver a larger class of energy-volume-shape change relations for homogeneous deformations. Future work will address state based formulations that deliver general anisotropic elastic response for the bulk energy associated with the limiting dynamics. \end{remark}
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{Free crack propagation in the small horizon limit} \label{sec6}
We recall that the process zone concentrates on a set of zero volume (Lebesgue measure) in the small horizon limit and identify conditions for which the limit dynamics $u^0$ solves the wave equation away from the evolving crack set. To begin we make a technical hypothesis on the regularity of the jump set of the limit dynamics $u^0(x,t)$. \begin{hypothesis} \label{remarkthree} We suppose that the crack set given by $J_{u^0(t)}$ is a closed set for $0\leq t\leq T$. \end{hypothesis} \noindent The next hypothesis applies to the concentration of the process zones as $\epsilon\rightarrow 0$ and their relation to the crack set for the limit dynamics. \begin{hypothesis} \label{remark555} \noindent Theorem \ref{bondunstable} shows that the fracture sets defined as process zones with strains above $\mathcal{S}_c$, see Definition \ref{Fractureset}, concentrate on the set $CPZ^0(\overline{r},1/2,1/2,t)$. Here we assume that $J_{u^0(t)}=CPZ^0(\overline{r},1/2,1/2,t)$ for $0\leq t\leq T$. \end{hypothesis}
\noindent The next hypothesis applies to neighborhoods $\mathcal{H}_{\epsilon_k}(x)$ for which the strain is subcritical, i.e., $|\mathcal{S}^\epsilon|<\overline{r}/\sqrt{|y-x|}$, for $y$ in $\mathcal{H}_{\epsilon_k}(x)$. These neighborhoods will be referred to as neutrally stable. \begin{hypothesis} \label{remark556} We suppose that $\epsilon_k=\frac{1}{2^k}<\delta$ and $0\leq t\leq T$. Consider the collection of centers of peridynamic neighborhoods in $CPZ^\delta(\overline{r},1/2,1/2,t)$. We fatten out $CPZ^\delta(\overline{r},1/2,1/2,t)$ and consider $C\widetilde{PZ}^\delta(\overline{r},1/2,1/2,t)=\{x\in D:\, dist(x,CPZ^\delta(\overline{r},1/2,1/2,t)<\delta\}$. We suppose that all neighborhoods $H_{\epsilon_k}(x)$ that do not intersect the set $C\widetilde{PZ}^\delta(\overline{r},1/2,1/2,t)$ are neutrally stable. \end{hypothesis}
\noindent With these conditions satisfied the limit evolution $u^0$ is identified as a solution of the linear elastic wave equation.
\begin{theorem} \label{waveequation} Suppose Hypotheses \ref{remarkthree}, \ref{remark555} and \ref{remark556} hold true then the limit evolution $u^0(t,x)$ is a solution of the following wave equation (the first law of motion of Cauchy) in the sense of distributions on the domain $[0,T]\times D$ given by \begin{eqnarray} \rho u^0_{tt}= {\rm div}\sigma+b, \hbox{on $[0,T]\times D$}, \label{waveequationn} \end{eqnarray} where the stress tensor $\sigma$ is given by, \begin{eqnarray} \sigma =\lambda I_d Tr(\mathcal{E}\,u^0)+2\mu \mathcal{E}u^0, \label{stress} \end{eqnarray} where $I_d$ is the identity on $\mathbb{R}^d$ and $Tr(\mathcal{E}\,u^0)$ is the trace of the strain. Here the second derivative $u_{tt}^0$ is the time derivative in the sense of distributions of $u^0_t$ and ${\rm div}\sigma$ is the divergence of the stress tensor $\sigma$ in the distributional sense. \end{theorem}
\begin{remark} \label{Disjointsets} For completeness we recall that the strain $\mathcal{E} u^0(x,t)$ and jump set $J_{u^0(t)}$ are defined over disjoint sets in $[0,T]\times D$. \end{remark}
\begin{remark} \label{displacementcrack} The limit of the cohesive dynamics model is given by the displacement - crack set pair $u^0(t,x)$, $J_{u^0(t)}$. The wave equation provides the dynamic coupling between elastic waves and the evolving fracture path inside the media. \end{remark}
\begin{remark} \label{remarknearlyfinal} Hypotheses \ref{remarkthree}, \ref{remark555}, and \ref{remark556} are applied exclusively to establish Lemma \ref{twolimitsB} which identifies the absolutely continuous part of the limit strain
$\mathcal{E}_{ij}u^0e_ie_j$, $e=\xi/|\xi|$, with the weak $L^2 (D\times\mathcal{H}_1 (0))$ limit of the strain $\mathcal{S}^\epsilon$ restricted to pairs $(x,\xi)\in D\times\mathcal{H}_1 (0)$ for which $|\mathcal{S}^\epsilon|\leq\mathcal{S}_c$. \end{remark}
\begin{remark} \label{remarkfinal1} We point out that the cohesive model addressed in this work does not have an irreversibility constraint and the constitutive law \eqref{forcestate} applies at all times in the peridynamic evolution. Because of this the crack set at each time is given by $J_{u^0(t)}$. For rapid monotonic loading we anticipate that crack growth is increasing for this model, i.e., $J_{u^0(t')}\subset J_{u^0(t)}$ for $t'<t$. For cyclic loading this is clearly not the case and the effects of irreversibility (damage) must be incorporated into in the cohesive model. \end{remark}
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0}\setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{Mathematical underpinnings and analysis} \label{EightIntro}
In this section we provide the proofs of theorems stated in Sections \ref{sec2}, \ref{sec4}, \ref{sec5} and \ref{sec6}. The first subsection asserts the Lipschitz continuity of $\nabla PD^{\epsilon_k}(u)$ for $u$ in $L^2_0(D;\mathbb{R}^d)$ and applies the theory of ODE to deduce existence of the cohesive dynamics, see Section \ref{EE}. A Gronwall inequality is used to bound the cohesive potential energy and kinetic energy uniformly in time, see Section \ref{GKP}. Uniformly bounded sequences $\{u^{\epsilon_k}\}_{k=1}^\infty$ of cohesive dynamics are shown to be compact in $C([0,T]; L^2_0(D;\mathbb{R}^d))$, see Section \ref{CC}. Any limit point $u^0$ for the sequence $u^{\epsilon_k}$ is shown to belong to SBD for every $0\leq t\leq T$, see Section \ref{CC}. The limit evolutions $u^0$ are shown to have uniformly bounded elastic energy in the sense of linear elastic fracture mechanics for $0\leq t\leq T$, see Section \ref{CC}. In Section \ref{EI} we pass to the limit in the energy balance equation for cohesive dynamics \eqref{BalanceEnergy} to recover an energy inequality for the limit flow. The wave equation satisfied by the limit flow is obtained on identifying the weak $L^2$ limit of the sequence $\{\nabla PD^{\epsilon_k}(u^{\epsilon_k})\}_{k=1}^\infty$ and passing to the limit in the weak formulation of \eqref{stationary}, see Section \ref{SC}. We conclude with the proof of Theorems \ref{epsiloncontropprocesszone} and \ref{bondunstable}.
\subsection{Existence of a cohesive evolution} \label{EE}
The peridynamic equation \eqref{eqofmotion} for cohesive dynamics is written as an equivalent first order system. We set $y^{\epsilon_k}=(y^{\epsilon_k}_1,y^{\epsilon_k}_2)^T$ where $y^{\epsilon_k}_1=u^{\epsilon_k}$ and $y_2^{\epsilon_k}=u_t^{\epsilon_k}$. Set $F^{\epsilon_k}(y^{\epsilon_k},t)=(F^{\epsilon_k}_1(y^{\epsilon_k},t),F^{\epsilon_k}_2(y^{\epsilon_k},t))^T$ where \begin{eqnarray} F^{\epsilon_k}_1(y^{\epsilon_k},t)&=&y_2^{\epsilon_k}\nonumber\\ F^{\epsilon_k}_2(y^{\epsilon_k},t)&=&\nabla PD^{\epsilon_k}(y_1^{\epsilon_k})+b(t).\nonumber \end{eqnarray} The initial value problem for $y^{\epsilon_k}$ given by the first order system is \begin{eqnarray} \frac{d}{dt} y^{\epsilon_k}=F^{\epsilon_k}(y^{\epsilon_k},t)\label{firstordersystem} \end{eqnarray} with initial conditions $y^{\epsilon_k}(0)=(u_0,v_0)^T$ satisfying LEFM initial conditions. In what follows we consider the more general class of initial data $(u_0,v_0)$ belonging to $L^2_0(D;\mathbb{R}^d)\times L^2_0(D;\mathbb{R}^d)$. \begin{theorem} For $0\leq t\leq T$ there exists a unique solution in $C^1([0,T];L^2_0(D;\mathbb{R}^d))$ for the mesoscopic dynamics described by \eqref{firstordersystem} with initial data in $L^2_0(D;\mathbb{R}^d)\times L^2_0(D;\mathbb{R}^d)$ and body force $b(t,x)$ in $C^1([0,T];L^2_0(D;\mathbb{R}^d))$. \label{existenceuniqueness} \end{theorem} \noindent It now follows that for LEFM initial data one has a unique solution $u^{\epsilon_k}$ of \eqref{stationary} in Section \ref{sec2} belonging to $C^2([0,T];L^2_0(D;\mathbb{R}^d))$.
{\bf Proof of Theorem \ref{existenceuniqueness}.} A straightforward calculation shows that, for a generic positive constant $C$ independent of $\mathcal{S}$, $y-x$, and $\epsilon_k$, \begin{eqnarray}
\sup_{\mathcal{S}}|\partial_{\mathcal{S}}^2 W^{\epsilon_k}(\mathcal{S},y-x)|\leq \frac{C}{\epsilon_k|y-x|} \times J(|y-x|/\epsilon_k). \label{secondderv} \end{eqnarray} From this it easily follows from H\"older and Minkowski inequalities that $\nabla PD^{\epsilon_k}$ is a Lipschitz continuous map from $L^2_0(D;\mathbb{R}^d)$ into $L^2_0(D;\mathbb{R}^d)$ and there is a positive constant $C$ independent of $0\leq t\leq T$, such that for any pair of vectors $y=(y_1,y_2)^T$, $z=(z_1,z_2)^T$ in $L^2_0(D;\mathbb{R}^d)\times L^2_0(D;\mathbb{R}^d)$ \begin{eqnarray} \Vert F^{\epsilon_k}(y,t)-F^{\epsilon_k}(z,t)\Vert_{L^2(D;\mathbb{R}^d)^2}\leq \frac{C}{\epsilon_k}\Vert y-z\Vert_{L^2(D;\mathbb{R}^d)^2} \hbox{ for $0\leq t\leq T$}. \label{lipschitz} \end{eqnarray} Here for any element $w=(w_1,w_2)$ of $L^2_0(D;\mathbb{R}^d)\times L^2_0(D;\mathbb{R}^d)$, $\Vert w \Vert^2_{L^2(D;\mathbb{R}^d)^2}=\Vert w_1\Vert_{L^2(D;\mathbb{R}^d)}^2+\Vert w_2\Vert_{L^2(D;\mathbb{R}^d)}^2$. Since \eqref{lipschitz} holds the theory of ODE in Banach space \cite{Driver} shows that there exists a unique solution to the initial value problem \eqref{firstordersystem} with $y^{\epsilon_k}$ and $\partial_t y^{\epsilon_k}$ belonging to $C([0,T]; L^2_0(D;\mathbb{R}^d))$ and Theorem \ref{existenceuniqueness} is proved. In this context we point out the recent work of \cite{EmmrichPhulst} where an existence theory for peridynamic evolutions for general pairwise force functions that are Lipschitz continuous with respect to the peridynamic deformation state is presented.
\subsection{Bounds on kinetic and potential energy for solutions of PD} \label{GKP} In this section we apply Gronwall's inequality to obtain the bounds on the kinetic and elastic energy for peridynamic flows given in Theorem \ref{Gronwall}. The bounds are used to show that the solutions of the PD initial value problem are Lipschitz continuous in time.
We now prove Theorem \ref{Gronwall}. Multiplying both sides of \eqref{stationary} by $u_t^{\epsilon_k}(t)$ and integration together with a straight forward calculation gives \begin{eqnarray} &&\frac{1}{2}\frac{d}{dt}\left\{2PD^{\epsilon_k}(u^{\epsilon_k}(t))+{\rho}\Vert u_t^{\epsilon_k}(t)\Vert^2_{L^2(D;\mathbb{R}^d)}\right\}\nonumber\\ &&=\int_{D}(\nabla PD^{\epsilon_k}(u^{\epsilon_k}(t))+\rho u_{tt}^{\epsilon_k}(t))\cdot u_t^{\epsilon_k}(t)\,dx\nonumber\\ &&=\int_{D} u_t^{\epsilon_k}(t)\cdot b(t)\,dx\,\leq \, \Vert u_t^{\epsilon_k}\Vert_{L^2(D;\mathbb{R}^d)}\Vert b(t)\Vert_{L^2(D;\mathbb{R}^d)}.\label{esttime1} \end{eqnarray} Set \begin{eqnarray} && W(t)=2PD^{\epsilon_k}(u^{\epsilon_k}(t))+{\rho}\Vert u_t^{\epsilon_k}(t)\Vert^2_{L^2(D;\mathbb{R}^d)}+1, \label{wt} \end{eqnarray} and applying \eqref{esttime1} gives \begin{eqnarray} &&\frac{1}{2}W'(t) \leq \, \Vert u_t^{\epsilon_k}\Vert_{L^2(D;\mathbb{R}^d)}\Vert b(t)\Vert_{L^2(D;\mathbb{R}^d)}\leq\frac{1}{\sqrt{\rho}}\sqrt{W(t)}\Vert b(t)\Vert_{L^2(D;\mathbb{R}^d)}\label{esttime2} \end{eqnarray} and \begin{eqnarray} \frac{1}{2}\int_0^t\frac{W'(\tau)}{\sqrt{W(\tau)}}\,d\tau\leq\frac{1}{\sqrt{\rho}}\int_0^t\Vert b(\tau)\Vert_{L^2(D;\mathbb{R}^d)}\,d\tau. \label{estime3} \end{eqnarray} Hence \begin{eqnarray} \sqrt{W(t)}-\sqrt{W(0)}\leq\frac{1}{\sqrt{\rho}}\int_0^t\Vert b(\tau)\Vert_{L^2(D;\mathbb{R}^d)}\,d\tau \label{estime4} \end{eqnarray} and \begin{eqnarray} && 2PD^{\epsilon_k}(u^{\epsilon_k}(t))+{\rho}\Vert u_t^{\epsilon_k}(t)\Vert^2_{L^2(D;\mathbb{R}^d)} \leq \left(\frac{1}{\sqrt{\rho}}\int_0^t\Vert b(\tau)\Vert_{L^2(D;\mathbb{R}^d)}\,d\tau +\sqrt{W(0)}\right )^2-1. \label{gineq} \end{eqnarray}
For now we postpone the proof of \eqref{basicinequality} to Section \ref{CC} (see the discussion preceding \eqref{upperboundperi}) and apply \eqref{basicinequality} to get the upper bound \begin{eqnarray} PD^{\epsilon_k}(u_0)\leq LEFM(u_0,D)\hbox{ for every $\epsilon_k$, \,\,$k=1,2,\ldots$}, \label{upperbound} \end{eqnarray} where $LEFM(u_0,D)$ is the elastic potential energy for linear elastic fracture mechanics given by \eqref{Gcrackenergy} or equivalently \eqref{LEFMSBVDefinition}. Theorem \ref{Gronwall} now follows from \eqref{gineq} and \eqref{upperbound}.
Theorem \ref{Gronwall} implies that PD solutions are Lipschitz continuous in time; this is stated explicitly in Theorem \ref{holdercont} of Section \ref{sec2}. To prove Theorem \ref{holdercont} we write \begin{eqnarray}
&&\Vert u^{\epsilon_k}(t_1)-u^{\epsilon_k}(t_2)\Vert_{L^2(D;\mathbb{R}^d)}=\left (\int_{D}|\int_{t_2}^{t_1} u^{\epsilon_k}_\tau(\tau)\,d\tau |^2\,dx\right )^{\frac{1}{2}}\nonumber\\
&&=\left (\int_{D}|t_1-t_2|^{2}\left|\frac{1}{|t_1-t_2|}\int_{t_2}^{t_1} u^{\epsilon_k}_\tau(\tau)\,d\tau \right|^2\,dx\right )^{\frac{1}{2}}\nonumber\\
&&\leq\left (\int_{D}|t_1-t_2|\int_{t_2}^{t_1} |u^{\epsilon_k}_\tau(\tau)|^2\,d\tau\,dx\right )^{\frac{1}{2}}\nonumber\\
&&\leq\left(|t_1-t_2|\int_{t_2}^{t_1}\Vert u_\tau^{\epsilon_k}(\tau)\Vert_{L^2(D;\mathbb{R}^d)}^2\,d\tau\right)^{1/2}\nonumber\\
&&\leq K|t_1-t_2|, \label{lip} \end{eqnarray} where the third to last line follows from Jensen's inequality, the second to last line from Fubini's theorem and the last inequality follows from the upper bound for $\Vert u_t^{\epsilon_k}(t)\Vert^2_{L^2(D;\mathbb{R}^d)}$ given by Theorem \ref{Gronwall}.
\subsection{Compactness and convergence} \label{CC} In this section we prove Theorems \ref{LimitflowThm} and \ref{LEFMMThm}. We start by establishing the inequality \eqref{basicinequality} between the elastic energies $PD^{\epsilon_k}(u)$ and $LEFM(u,D)$. This is illustrated for any $u$ in $L^2_0(D;\mathbb{R}^d)\cap L^\infty(D;\mathbb{R}^d)$ and $LEFM(u,D)$ given by \eqref{LEFMSBVDefinition}. Here \eqref{LEFMSBVDefinition} reduces to \eqref{Gcrackenergy} when $u$ is piecewise smooth and the crack $K$ consists of a finite number of smooth components. To obtain the upper bound we can directly apply the slicing technique of \cite{Gobbino3} to reduce to the one dimensional case to obtain an upper bound on one dimensional sections and then apply integral-geometric arguments to conclude. Here the slicing theorem and integral-geometric measure appropriate for this approach in the context of SBD are given by Theorems 4.5 and 4.10 of \cite{AmbrosioCosicaDalmaso}. These arguments deliver the following inequality \begin{eqnarray} &&PD^{\epsilon_k}(u)\leq LEFM(u,D), \hbox{ for every $u$ in $L^2_0(D;\mathbb{R}^d)$, and $\epsilon_k>0$}. \label{upperboundperi} \end{eqnarray}
To proceed with the proof of Theorem \ref{LimitflowThm} we require the compactness theorem. \begin{theorem} {\rm \bf Compactness.}\\ Given a sequence of functions $u^{\epsilon_k}\in L^2_0(D;\mathbb{R}^d)$, $\epsilon_k=1/k,k=1,2,\ldots$ such that \begin{eqnarray} \sup_{\epsilon_k}\left(PD^{\epsilon_k}(u^{\epsilon_k})+\Vert u^{\epsilon_k}\Vert_{L^\infty(D;\mathbb{R}^d)}\right)<\infty, \label{unifbound} \end{eqnarray} then there exists a subsequence $u^{{\epsilon_{k}}'}$ and limit point $u$ in $L_0^2(D;\mathbb{R}^d)$ for which \begin{eqnarray} u^{{\epsilon_k}'}\rightarrow u \hbox{ in $L^2(D;\mathbb{R}^d)$ as } {\epsilon_k}'\rightarrow 0. \label{compactness} \end{eqnarray} \label{L2compact} \end{theorem}
In what follows it is convenient to change variables $y=x+\delta\xi$ for $|\xi|<1$ and $0<\delta<\alpha/2<1$; here the peridynamic neighborhood $\mathcal{H}_\delta(x)$ transforms to $\mathcal{H}_1(0)=\{\xi\in\mathbb{R}^d;\,|\xi|<1\}$. The unit vector $\xi/|\xi|$ is denoted by $e$. To prove Theorem \ref{L2compact} we need the upper bound given by the following theorem. \begin{theorem}{\rm \bf Upper bound}\\ \label{coercivityth} For any $0<\delta<\alpha/2$ there exist positive constants $\tilde{K}_1$ and $\tilde{K}_2$ independent of $u\in L^2_0(D;\mathbb{R}^d)\cap L^\infty(D;\mathbb{R}^d)$ such that \begin{eqnarray}
\int_{\mathcal{H}_1(0)}\int_D|u(x+\delta\xi)-u(x)|^2dx\,J(|\xi|)d\xi\leq \delta(\tilde{K}_1+\tilde{K}_2\Vert u\Vert_{L^\infty(D;\mathbb{R}^d)}^2) PD^\delta(u). \label{coercivity} \end{eqnarray} \end{theorem} We establish the upper bound in two steps. \begin{lemma}{\rm \bf Coercivity}\\ \label{coercivitya} There exists a positive constant $C$ independent of $u\in L^2_0(D;\mathbb{R}^d)$ for which \begin{eqnarray}
\int_{\mathcal{H}_1(0)}\int_D|u(x+\delta\xi)-u(x)|^2dx\,J(|\xi|)d\xi\leq C \int_{\mathcal{H}_1(0)}\int_D|(u(x+\delta\xi)-u(x))\cdot e|^2dx\,J(|\xi|)d\xi \label{coercivea} \end{eqnarray} \end{lemma}
{\bf Proof of Lemma \ref{coercivitya}} We prove by contradiction. Suppose for every positive integer $N>0$ there is an element $u^N\in L_0^2(D;\mathbb{R}^d)$ for which \begin{eqnarray}
&&N\int_{\mathcal{H}_1(0)}\int_D|(u^N(x+\delta\xi)-u^N(x))\cdot e |^2dx\,J(|\xi|)d\xi\nonumber\\
&&\leq\int_{\mathcal{H}_1(0)}\int_D|(u^N(x+\delta\xi)-u^N(x)) |^2dx\,J(|\xi|)d\xi. \label{contra} \end{eqnarray} The Cauchy--Schwarz inequality together with the triangle inequality deliver a constant $\overline{K}>0$ for which \begin{eqnarray}
\int_{\mathcal{H}_1(0)}\int_D|(u(x+\delta\xi)-u(x))|^2dx\,J(|\xi|)d\xi\leq\overline{K}\Vert u\Vert^2_{L^2(D;\mathbb{R}^d)}. \label{upineq} \end{eqnarray} An application of the nonlocal Korn inequality, Lemma 5.5 of \cite{DuGunzbergerlehoucqmengesha} gives the existence of a constant $\underline{K}>0$ independent of $u$ in $L_0^2(D;\mathbb{R}^d)$ for which \begin{eqnarray}
\underline{K} \Vert u\Vert^2_{L^2(D;\mathbb{R}^d)}\leq\int_{\mathcal{H}_1(0)}\int_D|(u(x+\delta\xi)-u(x))\cdot e|^2dx\,J(|\xi|)d\xi. \label{korn} \end{eqnarray} Applying the inequalities \eqref{contra}, \eqref{upineq}, and \eqref{korn} we discover that $\overline{K}/N\geq\underline{K}$ for all integers $N>0$ to conclude $\underline{K}=0$ which is a contradiction and Lemma \ref{coercivitya} is proved. Theorem \ref{coercivityth} now follows from Lemma \ref{coercivitya} and the upper bound given by \begin{lemma} {\rm Upper bound}\\ \label{coercivityb} \begin{eqnarray}
\int_{\mathcal{H}_1(0)}\int_D|(u(x+\delta\xi)-u(x))\cdot e|^2dx\,J(|\xi|)d\xi\leq \delta(\tilde{K}_1+\tilde{K}_2\Vert u\Vert_{L^\infty(D;\mathbb{R}^d)}^2) PD^\delta(u). \label{coerciveb} \end{eqnarray} \end{lemma} {\rm \bf Proof of Lemma \ref{coercivityb}}. Consider the concave potential function $f$ described in the introduction, recall $f(0)=0$ and given $M>0$ set $H_M=f(M)/M$. For $0<r<M$ one has $r<H_M^{-1}f(r)$ and set \begin{eqnarray}
A_{\delta\xi}=\{x\in D; |(u(x+\delta\xi)-u(x))\cdot e|^2>\delta|\xi|M\}, \label{Aset} \end{eqnarray} so \begin{eqnarray}
&&\int_{D\setminus A_{\delta\xi}}\,|(u(x+\delta\xi)-u(x))\cdot e|^2\,dx=
\delta|\xi|\int_{D\setminus A_{\delta\xi}}\delta|\xi||\mathcal{S}|^2\,dx\nonumber\\
&&\leq\frac{\delta|\xi|}{H_M}\int_{D\setminus A_{\delta\xi}}\frac{1}{\delta|\xi|}f\left(\delta|\xi||\mathcal{S}|^2\right)\,dx. \label{starter} \end{eqnarray} Now $f(r)>f(M)$ for $r>M$ gives \begin{eqnarray}
\frac{1}{\delta|\xi|}f(M)\mathcal{L}^d(A_{\delta\xi})\leq
\int_{A_{\delta\xi}}\frac{1}{\delta|\xi|}f\left(\delta|\xi||\mathcal{S}|^2\right)\,dx \label{vol1} \end{eqnarray} and \begin{eqnarray} \mathcal{L}^d(A_{\delta\xi})\label{ineqonA}\leq
\frac{\delta|\xi|}{f(M)}\int_{D}\frac{1}{\delta|\xi|}f\left(\delta|\xi||\mathcal{S}|^2\right)\,dx. \label{volumebound} \end{eqnarray} Noting that \begin{eqnarray}
\int_{A_{\delta\xi}}\,|(u(x+\delta\xi)-u(x))\cdot e|^2\,dx\leq 2\Vert u\Vert_{L^\infty(D;\mathbb{R}^d)}^2\mathcal{L}^d(A_{\delta\xi}) \label{basiccc} \end{eqnarray} and collecting results one has \begin{eqnarray} \label{almostestimate}
\int_{D}\,|(u(x+\delta\xi)-u(x))\cdot e|^2\,dx\leq\delta|\xi|\left(\frac{1}{H_M}+\frac{2\Vert u\Vert_{L^\infty(D;\mathbb{R}^d)}^2}{f(M)}\right)
\int_{D}\frac{1}{\delta|\xi|}f\left(\delta|\xi||\mathcal{S}|^2\right)\,dx. \end{eqnarray}
Lemma \ref{coercivityb} follows on multiplying both sides of \eqref{almostestimate} by $J(|\xi|)$ and integration over $\mathcal{H}_1(0)$. Theorem \ref{coercivityth} follows from Lemmas \ref{coercivitya} and \ref{coercivityb}.
Arguing as in \cite{Gobbino3} we have the monotonicity given by \begin{lemma}{\rm\bf Monotonicity}\\ For any integer $M$, $\eta>0$ and $u\in L^\infty(D;\mathbb{R}^d)$ one has \begin{eqnarray} PD^{M\eta}(u)\leq PD^\eta(u). \label{monotone} \end{eqnarray} \label{monotonicity} \end{lemma} Now choose the subsequence $\epsilon_k=1/2^k,\,k=1,2,\ldots$ and from Theorem \ref{coercivityth} and Lemma \ref{monotonicity} we have for any $0<K<k$ with $\delta=2^{-K}$, $\epsilon_k=2^{-k}$, \begin{eqnarray}
\int_{\mathcal{H}_1(0)}\int_D|u^{\epsilon_k}(x+\delta\xi)-u^{\epsilon_k}(x)|^2dx\,J(|\xi|)d\xi\leq \delta(\tilde{K}_1+\tilde{K}_2\Vert u^{\epsilon_k}\Vert_{L^\infty(D;\mathbb{R}^d)}^2) PD^{\epsilon_k}(u^{\epsilon_k}).\label{precompactness} \end{eqnarray} Applying the hypothesis \eqref{unifbound} to inequality \eqref{precompactness} gives a finite constant $B$ independent of $\epsilon_k$ and $\delta$ for which \begin{eqnarray}
\int_{\mathcal{H}_1(0)}\int_D|u^{\epsilon_k}(x+\delta\xi)-u^{\epsilon_k}(x)|^2dx\,J(|\xi|)d\xi\leq \delta B,\label{precompactnesses} \end{eqnarray} for all $\epsilon_k<\delta$. One can then apply \eqref{precompactnesses} as in \cite{Gobbino3} (or alternatively apply \eqref{precompactnesses} and arguments similar to the proof of the Kolmogorov--Riesz compactness theorem \cite{OlsenHolden}) to show that the sequence $\{u^{\epsilon_k}\}_{k=1}^\infty$ is a totally bounded subset of $L^2_0(D;\mathbb{R}^d)$ and Theorem \ref{L2compact} is proved.
Now it is shown that the family of mesoscopic dynamics $\{u^{\epsilon_k}\}_{k=1}^\infty$ is relatively compact in \\ $C([0,T];L_0^2(D;\mathbb{R}^d))$. For each $t$ in $[0,T]$ we apply Theorem \ref{Gronwall} and Hypothesis \ref{remarkone} to obtain the bound \begin{eqnarray} PD^{\epsilon_k}(u^{\epsilon_k}(t))+\Vert u^{\epsilon_k}(t)\Vert_{L^\infty(D)}<C \label{cpactbnd} \end{eqnarray} where $C<\infty$ and is independent of $\epsilon_k$, $k=1,2,\ldots$, and $0\leq t\leq T$. With this bound we apply Theorem \ref{L2compact} to assert that for each $t$ the sequence $\{u^{\epsilon_k}(t)\}_{k=1}^\infty$ is relatively compact in $L^2(D;\mathbb{R}^d)$. From Theorem \ref{holdercont} the sequence $\{u^{\epsilon_k}\}_{k=1}^\infty$, is seen to be uniformly equi-continuous in $t$ with respect to the $L^2(D;\mathbb{R}^d)$ norm and we immediately conclude from the Ascoli theorem that $\{u^{\epsilon_k}\}_{k=1}^\infty$ is relatively compact in $C([0,T];L^2(D;\mathbb{R}^d))$. Therefore we can pass to a subsequence also denoted by $\{u^{\epsilon_{k}}(t)\}_{k=1}^\infty$ to assert the existence of a limit evolution $u^0(t)$ in $C([0,T];L^2(D;\mathbb{R}^d))$ for which \begin{eqnarray} \lim_{k\rightarrow\infty}\left\{\sup_{t\in[0,T]}\Vert u^{\epsilon_{k}}(t)-u^0(t)\Vert_{\scriptscriptstyle{{L^2(D;\mathbb{R}^d)}}}\right\}=0 \label{unfconvergence} \end{eqnarray} and Theorem \ref{LimitflowThm} is proved.
We now prove Theorem \ref{LEFMMThm}. One has that limit points of sequences satisfying \eqref{unifbound} enjoy higher regularity.
\begin{theorem}{\rm\bf Higher regularity}\\ \label{higherreg} Every limit point of a sequence $\{u^{\epsilon_k}\}_{k=1}^\infty$ in $L_0^2(D;\mathbb{R}^d)$ satisfying \eqref{unifbound} belongs to $SBD$. \end{theorem}
{\bf Proof.} To recover higher regularity one can directly apply the slicing technique of \cite{Gobbino3} to reduce to the one dimensional case and construct sequences of functions converging in SBV to the limit point along one dimensional sections. One then applies Theorem 4.7 of \cite{AmbrosioCosicaDalmaso} to conclude that the limit point belongs to $SBD$.
It now follows from Theorem \ref{higherreg} that the limit evolution $u^0(t)$ belongs to $SBD$ for $0\leq t\leq T$. Next we recall the properties of $\Gamma$-convergence and apply them to finish the proof of Theorem \ref{LEFMMThm}. Consider a sequence of functions $\{F_j\}$ defined on a metric space $\mathbb{M}$ with values in $\overline{\mathbb{R}}$ together with a function $F$ also defined on $\mathbb{M}$ with values in $\overline{\mathbb{R}}$.
\begin{definition} \label{Gammaconvergence} We say that $F$ is the $\Gamma$-limit of the sequence $\{F_j\}$ in $\mathbb{M}$ if the following two properties hold: \begin{enumerate} \item for every $x$ in $\mathbb{M}$ and every sequence $\{x_j\}$ converging to $x$, we have that \begin{eqnarray} F(x)\leq \liminf_{j\rightarrow\infty} F_j(x_j),\label{lowerbound} \end{eqnarray} \item for every $x$ in $\mathbb{M}$ there exists a recovery sequence $\{x_j\}$ converging to $x$, for which \begin{eqnarray} F(x)=\lim_{j\rightarrow\infty} F_j(x_j).\label{recovery} \end{eqnarray} \end{enumerate} \end{definition}
For $u$ in $L^2_0(D;\mathbb{R}^d)$ define $PD^0:\,L^2_0(D;\mathbb{R}^d)\rightarrow [0,+\infty]$ by \begin{equation} PD^0(u)=\left\{ \begin{array}{ll} LEFM(u,D)&\hbox{if $u$ belongs to $SBD$}\\ +\infty&\hbox{otherwise} \end{array} \right. \label{Gammalimit} \end{equation}
A straightforward argument following Theorem 4.3 $(ii)$ and $(iii)$ of \cite{Gobbino3} and invoking Theorems 4.5, 4.7, and 4.10 of \cite{AmbrosioCosicaDalmaso} as appropriate delivers
\begin{theorem}{\bf $\Gamma$- convergence and point wise convergence of peridynamic energies for cohesive dynamics.} \begin{eqnarray} &&PD^0 \hbox{ is the $\Gamma$-limit of $\{PD^{\epsilon_k}\}$ in $L^2_0(D;\mathbb{R}^d)$}, \hbox{ and } \label{gammaconvpd}\\ &&\lim_{k\rightarrow\infty}PD^{\epsilon_k}(u)=PD^0(u), \hbox{ for every $u$ in $L^2_0(D;\mathbb{R}^d)$}.\label{pointwise} \end{eqnarray} \label{Gammaandpointwise} \end{theorem}
Observe that since the sequence of peridynamic energies $\{PD^{\epsilon_k}\}$ $\Gamma$-converge to $PD^0$ in $L^2(D;\mathbb{R}^d)$ we can apply the lower bound property \eqref{lowerbound} of $\Gamma$-convergence to conclude that the limit has bounded elastic energy in the sense of fracture mechanics, i.e., \begin{eqnarray} LEFM(u^0(t),D)=PD^0(u^0(t))\leq\liminf_{k\rightarrow\infty}PD^{\epsilon_{k}}(u^{\epsilon_{k}}(t))<C. \label{GSBV} \end{eqnarray} This concludes the proof of Theorem \ref{LEFMMThm}.
\subsection{Energy inequality for the limit flow} \label{EI} In this section we prove Theorem \ref{energyinequality}. We begin by showing that the limit evolution $u^0(t,x)$ has a weak derivative $u_t^0(t,x)$ belonging to $L^2([0,T]\times D;\mathbb{R}^d)$. This is summarized in the following theorem. \begin{theorem} \label{weaktimederiviative} On passage to subsequences if necessary the sequence $u_t^{\epsilon_k}$ weakly converges in $L^2([0,T]\times D;\mathbb{R}^d)$ to $u^0_t$ where \begin{eqnarray} -\int_0^T\int_D\partial_t\psi \cdot u^0\, dxdt=\int_0^T\int_D\psi \cdot u^0_t\, dxdt, \label{weakl2time} \end{eqnarray} for all compactly supported smooth test functions $\psi$ on $[0,T]\times D$. \end{theorem}
{\bf Proof.} The bound on the kinetic energy given in Theorem \ref{Gronwall} implies \begin{eqnarray} \sup_{\epsilon_k>0}\left(\sup_{0\leq t\leq T}\Vert u^{\epsilon_k}_t\Vert_{L^2(D;\mathbb{R}^d)}\right)< \infty. \label{bddd} \end{eqnarray} Therefore the sequence $u^{\epsilon_k}_t$ is bounded in $L^2([0,T]\times D;\mathbb{R}^d)$ and passing to a subsequence if necessary we conclude that there is a limit function $\tilde{u}^0$ for which $u_t^{\epsilon_k}\rightharpoonup\tilde{u}^0$ weakly in $L^2([0,T]\times D;\mathbb{R}^d)$. Observe also that the uniform convergence \eqref{unfconvergence} implies that $u^{\epsilon_k}\rightarrow u^0$ in $L^2([0,T]\times D;\mathbb{R}^d)$. On writing the identity \begin{eqnarray} -\int_0^T\int_D\partial_t\psi\cdot u^{\epsilon_k}\, dxdt=\int_0^T\int_D\psi \cdot u^{\epsilon_k}_t\, dxdt. \label{weakidentity} \end{eqnarray} applying our observations and passing to the limit it is seen that $\tilde{u}^0=u_t^0$ and the theorem follows.
To establish Theorem \ref{energyinequality} we require the following inequality. \begin{lemma} For almost every $t$ in $[0,T]$ we have \label{weakinequality} \begin{eqnarray} \Vert u^0_t(t)\Vert_{L^2(D;\mathbb{R}^d)}\leq \liminf_{\epsilon_k\rightarrow 0}\Vert u^{\epsilon_k}_t(t)\Vert_{L^2(D;\mathbb{R}^d)}. \label{limitweakineq} \end{eqnarray} \end{lemma} {\bf Proof.} We start with the identity \begin{eqnarray} \Vert u^{\epsilon_k}_t\Vert_{L^2(D;\mathbb{R}^d)}^2-2\int_D u^{\epsilon_k}_t\cdot u^0_t \,dx+\Vert u^0_t\Vert_{L^2(D;\mathbb{R}^d)}^2 = \Vert u^{\epsilon_k}_t-u^0_t\Vert_{L^2(D;\mathbb{R}^d)}^2 \geq 0, \label{locpositive} \end{eqnarray} and for every non-negative bounded measurable function of time $\psi(t)$ defined on $[0,T]$ we have \begin{eqnarray} \int_0^T\psi \Vert u^{\epsilon_k}_t-u^0_t\Vert_{L^2(D;\mathbb{R}^d)}^2\,dt\geq 0. \label{positive} \end{eqnarray} Together with the weak convergence given in Theorem \ref{weaktimederiviative} one easily sees that \begin{eqnarray} \liminf_{\epsilon_k\rightarrow 0}\int_0^T\psi\Vert u^{\epsilon_k}_t\Vert_{L^2(D;\mathbb{R}^d)}^2\,dt-\int_0^T\psi\Vert u^0_t\Vert_{L^2(D;\mathbb{R}^d)}^2\,dt\geq 0. \label{diff} \end{eqnarray}
\noindent Applying \eqref{bddd} and invoking the Lebesgue dominated convergence theorem we conclude \begin{eqnarray} \liminf_{\epsilon_k\rightarrow 0}\int_0^T\psi\Vert u^{\epsilon_k}_t\Vert_{L^2(D;\mathbb{R}^d)}^2\,dt=\int_0^T\psi\liminf_{\epsilon_k\rightarrow 0}\Vert u^{\epsilon_k}_t\Vert_{L^2(D;\mathbb{R}^d)}^2\,dt \label{equalitybalance} \end{eqnarray} to recover the inequality given by \begin{eqnarray} \int_0^T\psi\left(\liminf_{\epsilon_k\rightarrow 0}\Vert u^{\epsilon_k}_t\Vert_{L^2(D;\mathbb{R}^d)}^2-\Vert u^0_t\Vert_{L^2(D;\mathbb{R}^d)}^2\right)\,dt\geq 0. \label{diffinal} \end{eqnarray} The lemma follows noting that \eqref{diffinal} holds for every non-negative test function $\psi$.
Theorem \ref{energyinequality} now follows immediately on taking the $\epsilon_k\rightarrow 0$ limit in the peridynamic energy balance equation \eqref{BalanceEnergy} of Theorem \ref{Ebalance} and applying \eqref{pointwise}, \eqref{GSBV}, and \eqref{limitweakineq} of Lemma \ref{weakinequality}.
\subsection{Stationarity conditions for the limit flow} \label{SC}
In this section we prove Theorem \ref{waveequation}. The first subsection establishes Theorem \ref{waveequation} using Theorem \ref{convgofelastic}. Theorem \ref{convgofelastic} is proved in the second subsection.
\subsubsection{Proof of Theorem \ref{waveequation}}
To proceed we make the change of variables $y=x+\epsilon\xi$ where $\xi$ belongs to the unit disk $\mathcal{H}_1(0)$ centered at the origin and the local strain $\mathcal{S}$ is of the form \begin{eqnarray}
\mathcal{S}=\left(\frac{u(x+\epsilon\xi)-u(x)}{\epsilon|\xi|}\right)\cdot e. \label{strainrescaled} \end{eqnarray}
It is convenient for calculation to express the strain through the directional difference operator $D_{e}^{\epsilon|\xi|}u$ defined by \begin{eqnarray}
D_{e}^{\epsilon|\xi|}u(x)=\frac{u(x+\epsilon\xi)-u(x)}{\epsilon|\xi|}\hbox{ and }\mathcal{S}=D_{e}^{\epsilon|\xi|} u\cdot e, \label{strainrescaledderiv} \end{eqnarray}
with $e=\xi/|\xi|$. One also has \begin{eqnarray}
D_{-e}^{\epsilon|\xi|}u(x)=\frac{u(x-\epsilon\xi)-u(x)}{\epsilon|\xi|}, \label{strainrescaledderivopposite} \end{eqnarray} and the integration by parts formula for functions $u$ in $L_0^2(D;\mathbb{R}^d)$, densities $\phi$ in $L_0^2(D;\mathbb{R})$ and $\psi$ continuous on $\mathcal{H}_1(0)$ given by \begin{eqnarray}
\int_D\int_{\mathcal{H}_1(0)}(D_e^{\epsilon|\xi|}u\cdot e)\phi(x)\psi(\xi)\,d\xi\,dx=\int_D\int_{\mathcal{H}_1(0)}(u\cdot e) (D_{-e}^{\epsilon|\xi|}\phi)\psi(\xi)\,d\xi\,dx. \label{intbypts} \end{eqnarray} Note further for $v$ in $C^\infty_0(D;\mathbb{R}^d)$ and $\phi$ in $C^\infty_0(D;\mathbb{R})$ one has \begin{eqnarray}
\lim_{\epsilon_k\rightarrow 0}D_e^{{\epsilon_k}|\xi|} v\cdot e=\mathcal{E} v \,e\cdot e &\hbox{ and }& \lim_{\epsilon_k\rightarrow 0}D_e^{{\epsilon_k}|\xi|} \phi= e\cdot \nabla\phi\label{grad} \end{eqnarray} where the convergence is uniform in $D$.
Taking the first variation of the action integral \eqref{Action} gives the Euler equation in weak form \begin{eqnarray} &&\rho\int_0^T\int_{D} u_t^{\epsilon_k}\cdot\delta_t\,dx \,dt\nonumber\\
&&-\frac{1}{\omega_d}\int_0^T\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)f'\left(\epsilon_k|\xi||D_e^{\epsilon_k|\xi|}u^{\epsilon_k}\cdot e|^2\right)2(D_e^{\epsilon_k|\xi|}u^{\epsilon_k}\cdot e) (D_{e}^{\epsilon_k|\xi|}\delta\cdot e)\,d\xi\,dx\,dt\nonumber\\ &&+\int_0^T\int_{D} b\cdot\delta\,dx\,dt=0,\label{stationtxweaker} \end{eqnarray}
where the test function $\delta=\delta(x,t)=\psi(t)\phi(x)$ is smooth and has compact support in $[0,T]\times D$. Next we make the change of function and write $F_s (\mathcal{S})=\frac{1}{s}f(s\mathcal{S}^2)$, $F'_s(\mathcal{S})=2\mathcal{S}f'(s\mathcal{S}^2)$, and $s={\epsilon_k}|\xi|$ we transform \eqref{stationtxweaker} into \begin{eqnarray} &&\rho\int_0^T\int_{D} u_t^{\epsilon_k}\cdot\delta_t\,dx \,dt\nonumber\\
&&-\frac{1}{\omega_d}\int_0^T\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)D_{e}^{\epsilon_k}\delta\cdot e\,d\xi\,dx\,dt\nonumber\\ &&+\int_0^T\int_{D} b\cdot\delta\,dx\,dt=0,\label{stationtxweakerlimitform} \end{eqnarray} where \begin{eqnarray}
F_{\epsilon_k|\xi|}'(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)=f'\left(\epsilon_k |\xi||D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e|^2\right)2D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e. \label{deriv} \end{eqnarray}
For future reference observe that $F_s(r)$ is convex-concave in $r$ with inflection point $\overline{r}_s=\overline{r}/\sqrt{s}$ where $\overline{r}$ is the inflection point of $f(r^2)=F_1(r)$. One also has the estimates \begin{eqnarray} &&F_s(r)\geq\frac{1}{s}F_1(\overline{r})\hbox{ for $r\geq\overline{r}_s$, and }\label{lowerestforF}\\
&&\sup_{0\leq r<\infty}|F'_s(r)|\leq\frac{2f'(\overline{r}^2){\overline{r}}}{\sqrt{s}}.\label{boundderiv} \label{estforFprime} \end{eqnarray} We send $\epsilon_k\rightarrow 0$ in \eqref{stationtxweakerlimitform} applying the weak convergence Theorem \ref{weaktimederiviative} to the first term to obtain \begin{eqnarray}
&&\rho\int_0^T\int_{D} u_t^{0}\cdot\delta_t\,dx \,dt-\lim_{\epsilon_k\rightarrow 0}\frac{1}{\omega_d}\left(\int_0^T\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)D_{e}^{\epsilon_k}\delta\cdot e\,d\xi\,dx\,dt\right)\nonumber\\ &&+\int_0^T\int_{D} b\cdot\delta\,dx\,dt=0.\label{stationtxweakerlimit} \end{eqnarray} Theorem \ref{waveequation} follows once we identify the limit of the second term in \eqref{stationtxweakerlimit} for smooth test functions $\phi(x)$ with support contained in $D$. We state the following convergence theorem. \begin{theorem} \label{convgofelastic} Given any infinitely differentiable test function $\phi$ with compact support in $D$ then \begin{eqnarray}
\lim_{\epsilon_k\rightarrow 0}\frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)D_{e}^{\epsilon_k}\phi\cdot e\,d\xi\,dx=\int_{D}\mathbb{C}\mathcal{E} u^0:\mathcal{E}\phi\,dx, \label{limitdelta} \end{eqnarray} where $\mathbb{C}\mathcal{E} u^0:\mathcal{E}\phi=\sum_{ijkl=1}^d\mathbb{C}_{ijkl}\mathcal{E}u^0_{ij}\mathcal{E}\phi_{kl}$, $\mathbb{C}\mathcal{E}u^0=\lambda I_d \mathrm{Tr}(\mathcal{E}u^0)+2\mu \mathcal{E}u^0$, and $\lambda$ and $\mu$ are given by \eqref{calibrate1}. \end{theorem} \noindent Theorem \ref{convgofelastic} is proved in Section \ref{prtheorem44}. The sequence of integrals on the left hand side of \eqref{limitdelta} is uniformly bounded in time, i.e., \begin{eqnarray}
\sup_{\epsilon_k>0}\left\{\sup_{0\leq t\leq T}\left\vert\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)D_{e}^{\epsilon_k}\phi\cdot e\,d\xi\,dx\right\vert\right\}<\infty, \label{uniboundt} \end{eqnarray} this is demonstrated in \eqref{Fprimesecond} of Lemma \ref{estimates} in Section \ref{prtheorem44}. Applying the Lebesgue bounded convergence theorem together with Theorem \ref{convgofelastic} with $\delta(t,x)=\psi(t)\phi(x)$ delivers the desired result \begin{eqnarray}
&&\lim_{\epsilon_k\rightarrow 0}\frac{1}{\omega_d}\left(\int_0^T\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)\psi D_{e}^{\epsilon_k}\phi\cdot e\,d\xi\,dx\,dt\right)\nonumber\\ &&=\int_0^T\int_{D}\mathbb{C}\mathcal{E} u^0:\mathcal{E}\phi\,dx\,dt, \label{limitidentity} \end{eqnarray} and we recover the identity \begin{eqnarray} &&\rho\int_0^T\int_{D} u_t^{0}(t,x)\cdot\psi_t(t)\phi(x)\,dx \,dt-\int_0^T\int_{D}\psi(t)\mathbb{C}\mathcal{E} u^0(t,x):\mathcal{E}\phi(x)\,dx\,dt \nonumber\\ &&+\int_0^T\int_{D} b(t,x)\cdot\psi(t)\phi(x)\,dx\,dt=0 \label{finalweakidentity} \end{eqnarray} from which Theorem \ref{waveequation} follows.
\subsubsection{ Proof of Theorem \ref{convgofelastic}} \label{prtheorem44}
We decompose the difference $D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e$ as \begin{eqnarray}
D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e=(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^- +(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^+ \label{decompose} \end{eqnarray} where \begin{equation}
(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}(x)\cdot e)^-=\left\{\begin{array}{ll}(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}(x)\cdot e),&\hbox{if $|D_e^{\epsilon_k |\xi|}u^{\epsilon_k}(x)\cdot e|<\frac{\overline{r}}{\sqrt{\epsilon_k|\xi|}}$}\\ 0,& \hbox{otherwise} \end{array}\right. \label{decomposedetails} \end{equation}
where $\overline{r}$ is the inflection point for the function $F_1(r)=f(r^2)$. Here $(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^+$ is defined so that \eqref{decompose} holds. We prove Theorem \ref{convgofelastic} by using the following two identities described in the Lemmas below. \begin{lemma} \label{twolimitsA} For any $\phi$ in $C^\infty_0(D;\mathbb{R}^d)$ \begin{eqnarray}
&&\lim_{\epsilon_k\rightarrow 0} \frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)D_{e}^{\epsilon_k|\xi|}\phi\cdot e\,d\xi\,dx\nonumber\\
&&-2\lim_{\epsilon_k\rightarrow 0}\frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)f'(0)(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-D_e^{\epsilon_k |\xi|}\phi\cdot e\,d\xi\,dx=0. \label{functiotolimit} \end{eqnarray} \end{lemma} \begin{lemma} \label{twolimitsB}
Assume that Hypotheses \ref{remarkthree}, \ref{remark555} and \ref{remark556} hold true and define the weighted Lebesgue measure $\nu$ by $d\nu=|\xi|J(|\xi|)d\xi\,dx$ for any Lebesgue measurable set $S\subset D\times\mathcal{H}_1(0)$. Passing to subsequences if necessary $\{(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-\}_{k=1}^\infty$ converges weakly in $L^2(D\times\mathcal{H}_1(0);\nu)$ to $\mathcal{E} u^0 e\cdot e$, i.e., \begin{eqnarray}
&&\lim_{\epsilon_k\rightarrow 0}\frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^- \psi\,d\nu\nonumber\\ &&=\frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}(\mathcal{E} u^0 e \cdot e)\psi\,d\nu, \label{functiotofunction} \end{eqnarray} for any test function $\psi(x,\xi)$ in $L^2(D\times\mathcal{H}_1(0);\nu)$. \end{lemma}
We now apply the Lemmas. Observing that $D_e^{\epsilon_k |\xi|}\phi\cdot e $ converges strongly in $L^2(D\times\mathcal{H}_1(0);\nu)$ to $\mathcal{E}\phi\, e\cdot e$ for test functions $\phi$ in $C^\infty_0(D;\mathbb{R}^d)$ and from the weak $L^2(D\times\mathcal{H}_1(0);\nu)$ convergence of $(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-$ we deduce that \begin{eqnarray}
&&\lim_{\epsilon_k\rightarrow 0}\frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)f'(0)(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-(D_e^{\epsilon_k |\xi|}\phi\cdot e)\,d\nu\nonumber\\
&&=\frac{1}{\omega_d}\int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)f'(0)\,(\mathcal{E} u^0 e\cdot e)(\mathcal{E}\phi e\cdot e)\,d\nu\nonumber\\
&&=\frac{f'(0)}{\omega_d}\sum_{ijkl=1}^d\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)\,e_i e_j e_k e_l\,d\xi\int_{D}\mathcal{E}u^0_{ij}\mathcal{E}\phi_{kl}\,dx.\label{limitproduct} \end{eqnarray} Now we show that \begin{eqnarray}
\frac{f'(0)}{\omega_d}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)\,e_i e_j e_k e_l\,d\xi=\mathbb{C}_{ijkl}=2\mu\left(\frac{\delta_{ik}\delta_{jl}+\delta_{il}\delta_{jk}}{2}\right)+\lambda \delta_{ij}\delta_{kl}\label{shear} \end{eqnarray} where $\mu$ and $\lambda$ are given by \eqref{calibrate1}. To see this we write \begin{eqnarray} \Gamma_{ijkl}(e)=e_ie_je_ke_l, \label{tensorId} \end{eqnarray} to observe that $\Gamma(e)$ is a totally symmetric tensor valued function defined for $e\in S^{d-1}$ with the property \begin{eqnarray} \Gamma_{ijkl}(Qe)=Q_{im}e_mQ_{jn}e_nQ_{ko}e_oQ_{lp}e_p=Q_{im}Q_{jn}Q_{ko}Q_{lp}\Gamma_{mnop}(e) \label{rot} \end{eqnarray} for every rotation $Q$ in $SO^d$. Here repeated indices indicate summation. We write \begin{eqnarray}
\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)\,e_i e_j e_k e_l\,d\xi=\int_0^1|\xi|^{d}J(|\xi|)\,d|\xi|\int_{S^{d-1}}\Gamma_{ijkl}(e)\,de \label{groupavgpre} \end{eqnarray} to see that for every $Q$ in $SO^d$ \begin{eqnarray} Q_{im}Q_{jn}Q_{ko}Q_{lp}\int_{S^{d-1}}\Gamma_{ijkl}(e)\,de=\int_{S^{d-1}}\Gamma_{mnop}(Qe)\,de=\int_{S^{d-1}}\Gamma_{mnop}(e)\,de. \label{groupavg} \end{eqnarray} Therefore we conclude that $\int_{S^{d-1}}\Gamma_{ijkl}(e)\,de$ is an isotropic symmetric $4^{th}$ order tensor and of the form \begin{eqnarray} \int_{S^{d-1}}\Gamma_{ijkl}(e)\,de=a\left(\frac{\delta_{ik}\delta_{jl}+\delta_{il}\delta_{jk}}{2}\right)+b \delta_{ij}\delta_{kl}. \label{groupavgaft} \end{eqnarray} Here we evaluate $a$ by contracting both sides of \eqref{groupavgaft} with a trace free matrix and $b$ by contracting both sides with the $d\times d$ identity and calculation delivers \eqref{shear}. Theorem \ref{convgofelastic} now follows immediately from \eqref{limitproduct} and \eqref{functiotolimit}.
To establish Lemmas \ref{twolimitsA} and \ref{twolimitsB} we develop the following estimates for the sequences\\ $(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-$ and $(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^+$. We define the set $K^{+,\epsilon_k}$ by \begin{eqnarray}
K^{+,\epsilon_k}=\{(x,\xi)\in D\times\mathcal{H}_1(0)\,: (D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^+\not=0\}. \label{suppset} \end{eqnarray}
We have the following string of estimates. \begin{lemma} We introduce the generic positive constant $0<C<\infty$ independent of $0<\epsilon_k<1$ and $0\leq t\leq T$ and state the following inequalities that hold for all $0<\epsilon_k<1$ and $0\leq t\leq T$ and for $C^\infty(D)$ test functions $\phi$ with compact support on $D$. \label{estimates} \begin{eqnarray}
&&\int_{K^{+,\epsilon_k}} |\xi|J(|\xi|)\,d\xi\,dx<C\epsilon_k,\label{suppsetupper}\\
&&\left\vert\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)F_{\epsilon_k|\xi|}'((D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^+)(D_{e}^{\epsilon_k}\phi\cdot e)\,d\xi\,dx\right\vert<C\sqrt{\epsilon_k}\Vert\mathcal{E}\phi\Vert_{\scriptscriptstyle{{L^\infty(D;\mathbb{R}^{d\times d})}}},\label{Fprimefirst}\\
&&\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)|(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^-|^2\,d\xi\,dx<C,\label{L2bound}\\
&&\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)|D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e|\,d\xi\,dx<C,\hbox{ and}\label{L1bound}\\
&&\left\vert\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e) (D_{e}^{\epsilon_k}\phi\cdot e)\,d\xi\,dx\right\vert<C\Vert\mathcal{E} \phi\Vert_{\scriptscriptstyle{L^\infty(D;\mathbb{R}^{d\times d})}}.\label{Fprimesecond} \end{eqnarray} \end{lemma} {\bf Proof.} For $(x,\xi)\in K^{+,\epsilon_k}$ we apply \eqref{lowerestforF} to get \begin{eqnarray}
J(|\xi|)\frac{1}{\epsilon_k}F_{1}(\overline{r})=|\xi|J(|\xi|)\frac{1}{\epsilon_k|\xi|}F_{1}(\overline{r})
\leq|\xi|J(|\xi|)F_{\epsilon_k|\xi|}(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)\label{first} \end{eqnarray}
and in addition since $|\xi|\leq 1$ we have \begin{eqnarray}
&&\frac{1}{\epsilon_k}F_{1}(\overline{r}) \int_{K^{+,\epsilon_k}} |\xi|J(|\xi|)\,d\xi\,dx\leq\frac{1}{\epsilon_k}F_{1}(\overline{r}) \int_{K^{+,\epsilon_k}} J(|\xi|)\,d\xi\,dx\nonumber\\
&&\leq\int_{K^{+,\epsilon_k}} |\xi|J(|\xi|)F_{\epsilon_k|\xi|}(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)\,d\xi\,dx\leq\sup_{t\in [0,T]}\sup_{\epsilon_k}PD^{\epsilon_k}(u^{\epsilon_k}) \label{firstb} \end{eqnarray}
where Theorem \ref{Gronwall} implies that the rightmost element of the sequence of inequalities is bounded and \eqref{suppsetupper} follows from \eqref{firstb}. More generally since $|\xi|\leq 1$ we may argue as above to conclude that \begin{eqnarray}
\int_{K^{+,\epsilon_k}} |\xi|^pJ(|\xi|)\,d\xi\,dx<C\epsilon_k. \label{power} \end{eqnarray} for $0\leq p$. We apply \eqref{estforFprime} and \eqref{power} to find \begin{eqnarray}
&&\left\vert\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)F_{\epsilon_k|\xi|}'((D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^+)(D_{e}^{\epsilon_k}\phi\cdot e)\,d\xi\,dx\right\vert\nonumber\\
&&\leq C\frac{2f'(\overline{r}^2)\overline{r}}{\sqrt{\epsilon_k}}\int_{K^{+,\epsilon_k}}\sqrt{|\xi|}J(|\xi|)\,d\xi\,dx \Vert\mathcal{E}\phi\Vert_{\scriptscriptstyle{L^\infty(D;\mathbb{R}^{d\times d})}} \leq\sqrt{\epsilon_k}C\Vert\mathcal{E}\phi\Vert_{\scriptscriptstyle{L^\infty(D;\mathbb{R}^{d\times d})}}, \label{second} \end{eqnarray} and \eqref{Fprimefirst} follows.
A basic calculation shows there exists a positive constant independent of $r$ and $s$ for which \begin{eqnarray} r^2\leq C F_s(r), \hbox{ for $r<\frac{\overline{r}}{\sqrt{s}}$}, \label{rsquaredd} \end{eqnarray} so \begin{eqnarray}
|D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e|^2\leq C F_{\epsilon_k|\xi|}(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e), \hbox{ for $|D_{e}^{\epsilon_k|\xi|}u^{\epsilon_k}\cdot e|<\frac{\overline{r}}{\sqrt{\epsilon_k |\xi|}}$}, \label{rsquared} \end{eqnarray} and \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)|(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^-|^2\,d\xi\,dx
=\int_{D\times\mathcal{H}_1(0)\setminus K^{+,\epsilon_k}}|\xi| J(|\xi|)|D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e|^2\,d\xi\,dx\nonumber\\
&&\leq C\int_{D\times\mathcal{H}_1(0)\setminus K^{+,\epsilon_k}}|\xi| J(|\xi|)F_{\epsilon_k |\xi|}(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)\,d\xi\,dx\leq C\sup_{t\in [0,T]}\sup_{\epsilon_k}PD^{\epsilon_k}(u^{\epsilon_k}) \label{third} \end{eqnarray} where Theorem \ref{Gronwall} implies that the right most element of the sequence of inequalities is bounded and \eqref{L2bound} follows.
To establish \eqref{L1bound} we apply H\"older's inequality to find that \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)|D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e|\,d\xi\,dx\nonumber\\
&&=\int_{K^{+,\epsilon_k}}|\xi| J(|\xi|)|D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e|\,d\xi\,dx+\int_{D\times\mathcal{H}_1(0)\setminus K^{+,\epsilon_k}}|\xi| J(|\xi|)|D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e|\,d\xi\,dx\nonumber\\
&&\leq \frac{2\Vert u^{\epsilon_k}\Vert_{L^\infty(D;\mathbb{R}^d)}}{\epsilon_k}\int_{K^{+,\epsilon_k}}|\xi|J(|\xi|)\,d\xi\,dx+\nonumber\\
&&+\nu(D\times\mathcal{H}_1(0))^{\frac{1}{2}}\left (\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)|(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^-|^2\,d\xi\,dx\right)^{\frac{1}{2}}, \label{twoterms} \end{eqnarray} and \eqref{L1bound} follows from \eqref{suppsetupper} and \eqref{L2bound}.
We establish \eqref{Fprimesecond}. This bound follows from the basic features of the potential function $f$. We will recall for subsequent use that $f$ is smooth, positive, concave and $f'$ is a decreasing function with respect to its argument. So for $A$ fixed and $0\leq h\leq A^2\overline{r}^2$ we have \begin{eqnarray}
|f'(h)-f'(0)|\leq |f'(A^2\overline{r}^2)- f'(0)|<2|f'(0)|. \label{ffact} \end{eqnarray} The bound \eqref{Fprimesecond} is now shown to be a consequence of the following upper bound valid for the parameter $0<A<1$ given by \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)}|\xi|J(|\xi|)|f'(\epsilon_k |\xi||(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-|^2)-f'(0)|^2\, d\xi\,dx\nonumber\\
&&\leq \nu(D\times\mathcal{H}_1(0))\times|f'(A^2\overline{r}^2)-f'(0)|^2+C\epsilon_k\frac{4|f'(0)|^2}{A^2\overline{r}^2}. \label{usefulbound} \end{eqnarray}
We postpone the proof of \eqref{usefulbound} until after it is used to establish \eqref{Fprimesecond}. Set $h_{\epsilon_k}=(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-$ to note \begin{eqnarray}
F_{\epsilon_k |\xi|}'(h_{\epsilon_k})-2f'(0)h_{\epsilon_k}=(f'(\epsilon_k |\xi| h^2_{\epsilon_k})-f'(0))2h_{\epsilon_k}. \label{diffeq} \end{eqnarray} Applying H\"olders inequality, \eqref{Fprimefirst}, \eqref{L2bound}, \eqref{usefulbound}, and \eqref{diffeq} gives \begin{eqnarray}
&&\left\vert\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e) (D_{e}^{\epsilon_k}\phi\cdot e)\,d\xi\,dx \right\vert\nonumber\\
&&\leq\left\vert\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)F_{\epsilon_k|\xi|}'((D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^+) (D_{e}^{\epsilon_k}\phi\cdot e)\,d\xi\,dx \right\vert\nonumber\\
&&+\left\vert\int_{D\times\mathcal{H}_1(0)}|\xi| J(|\xi|)F_{\epsilon_k|\xi|}'((D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^-) (D_{e}^{\epsilon_k}\phi\cdot e)\,d\xi\,dx \right\vert\nonumber\\
&&\leq C\sqrt{\epsilon_k}\Vert\mathcal{E}\phi\Vert_{\scriptscriptstyle{L^\infty(D;\mathbb{R}^{d\times d})}}+2\int_{D\times \mathcal{H}_1(0)}|\xi|J(|\xi|)f'(0)(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-(D_e^{\epsilon_k |\xi|}\phi\cdot e)\,d\xi\,dx\nonumber\\
&&+\int_{D\times\mathcal{H}_1(0)}|\xi|J(|\xi|)\left (F_{\epsilon_k|\xi|}'((D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^-) -2f'(0)(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-\right )(D_e^{\epsilon_k |\xi|}\phi\cdot e)\,d\xi\,dx\nonumber\\
&&\leq C\left(f'(0)+\sqrt{\epsilon_k}+\left(\nu(D\times\mathcal{H}_1(0))\times|f'(A^2\overline{r}^2)-f'(0)|^2+\epsilon_k\frac{4|f'(0)|^2}{A^2}\right)^{1/2}\right )\Vert\mathcal{E} \phi\Vert_{\scriptscriptstyle{L^\infty(D;\mathbb{R}^{d\times d})}}.\nonumber\\ \label{five} \end{eqnarray} and \eqref{Fprimesecond} follows.
We establish the inequality \eqref{usefulbound}. Set $h_{\epsilon_k}=(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-$ and for $0<A<1$ introduce the set \begin{eqnarray}
K^{+,\epsilon_k}_A=\{(x,\xi)\in D\times\mathcal{H}_1(0)\,: A^2\overline{r}^2\leq \epsilon_k|\xi||h_{\epsilon_k}|^2\}. \label{suppAset} \end{eqnarray}
To summarize $(x,\xi)\in K^{+,\epsilon_k}_A$ implies $A^2\overline{r}^2\leq\epsilon_k|\xi||h_{\epsilon_k}|^2\leq\overline{r}^2$ and $(x,\xi)\not\in K^{+,\epsilon_k}_A$ implies $\epsilon_k|\xi||h_{\epsilon_k}|^2<A^2\overline{r}^2$ and $|f'(\epsilon_k|\xi||h_{\epsilon_k}|^2)-f'(0)|\leq|f'(A^2\overline{r}^2)-f'(0)|$. Inequality \eqref{L2bound} implies \begin{eqnarray}
&&C>\int_{K^{+,\epsilon_k}_A} |\xi|J(|\xi|) h_{\epsilon_k}^2\,d\xi\,dx\geq\frac{A^2\overline{r}^2}{\epsilon_k}\int_{K^{+,\epsilon_k}_A} J(|\xi|) \,d\xi\,dx\nonumber\\
&&\geq\frac{A^2\overline{r}^2}{\epsilon_k}\int_{K^{+,\epsilon_k}_A} |\xi|J(|\xi|) \,d\xi\,dx, \label{chebyA} \end{eqnarray}
the last inequality follows since $1\geq|\xi|>0$. Hence \begin{eqnarray}
\int_{K^{+,\epsilon_k}_A} |\xi|J(|\xi|) \,d\xi\,dx\leq C\frac{\epsilon_k}{A^2\overline{r}^2}, \label{chebyAUpper} \end{eqnarray} and it follows that \begin{eqnarray}
&&\int_{K^{+,\epsilon_k}_A} |\xi|J(|\xi|)|f'(\epsilon_k|\xi||h_{\epsilon_k}|^2)-f'(0)|^2 \,d\xi\,dx\nonumber\\
&&\leq 4|f'(0)|^2\int_{K^{+,\epsilon_k}_A} |\xi|J(|\xi|) \,d\xi\,dx\leq C\epsilon_k\frac{4|f'(0)|^2}{A^2\overline{r}^2}. \label{kepsplus} \end{eqnarray} Collecting observations gives \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)\setminus K^{+,\epsilon_k}_A}|\xi|J(|\xi|)|f'(\epsilon_k |\xi||(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-|^2)-f'(0)|^2\, d\xi\,dx\nonumber\\
&&\leq \nu(D\times\mathcal{H}_1(0))\times |f'(A^2\overline{r}^2)-f'(0)|^2, \label{2ndbd} \end{eqnarray} and \eqref{usefulbound} follows.
We now prove Lemma \ref{twolimitsA}. Write \begin{eqnarray}
F_{\epsilon_k|\xi|}'(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)=F_{\epsilon_k|\xi|}'((D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^+)
+F_{\epsilon_k|\xi|}'((D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-), \label{plusminus} \end{eqnarray} and from \eqref{Fprimefirst} it follows that \begin{eqnarray}
&&\lim_{\epsilon_k\rightarrow 0} \int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)D_{e}^{\epsilon_k|\xi|}\phi\cdot e\,d\xi\,dx\nonumber\\
&&=\lim_{\epsilon_k\rightarrow 0} \int_{D}\int_{\mathcal{H}_1(0)}|\xi|J(|\xi|)F_{\epsilon_k|\xi|}'((D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)^-)(D_{e}^{\epsilon_k|\xi|}\phi\cdot e)\,d\xi\,dx. \label{functiotolimitminus} \end{eqnarray}
To finish the proof we identify the limit of the right hand side of \eqref{functiotolimitminus}. Set $h_{\epsilon_k}=(D_{e}^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-$ and apply H\"older's inequality to find \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)}|\xi|J(|\xi|)\left(F_{\epsilon_k|\xi|}'(h_{\epsilon_k}) -2f'(0)h_{\epsilon_k}\right)(D_e^{\epsilon_k|\xi|}\phi\cdot e)\,d\xi\,dx\nonumber\\
&&\leq C\int_{D\times\mathcal{H}_1(0)}|\xi|J(|\xi|)\left|F_{\epsilon_k|\xi|}'(h_{\epsilon_k}) -2f'(0)h_{\epsilon_k}\right|\,d\xi\,dx\Vert\mathcal{E}\phi\Vert_{\scriptscriptstyle{L^\infty(D;\mathbb{R}^{d\times d})}} \label{firstestimate} \end{eqnarray} We estimate the first factor in \eqref{firstestimate} and apply \eqref{diffeq}, H\"older's inequality, \eqref{L2bound}, and \eqref{usefulbound} to obtain \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)}|\xi|J(|\xi|)\left |F_{\epsilon_k|\xi|}'(h_{\epsilon_k}) -2f'(0)h_{\epsilon_k}\right |\,d\xi\,dx\nonumber\\
&&\leq\int_{D\times\mathcal{H}_1(0)}|\xi|J(|\xi|)\left |f'(\epsilon_k |\xi||h_{\epsilon_k}|^2) -f'(0)\right |\,2\left | h_{\epsilon_k}\right |\,d\xi\,dx \nonumber\\
&&\leq C\left(\nu(D\times\mathcal{H}_1(0))\times |f'(A^2\overline{r}^2)-f'(0)|^2+\epsilon_k\frac{4|f'(0)|^2}{A^2\overline{r}^2}\right)^{1/2}. \label{usefulLemmaA} \end{eqnarray} Lemma \ref{twolimitsA} follows on applying the bound \eqref{usefulLemmaA} to \eqref {firstestimate} and passing to the $\epsilon_k$ zero limit and noting that the choice of $0<A<1$ is arbitrary.
We now prove Lemma \ref{twolimitsB}. For $\tau>0$ sufficiently small define $K^\tau\subset D$ by $K^{\tau}=\{x\in D:\,dist(x,J_{u^0(t)})<\tau\}$. From Hypothesis \ref{remark555} the collection of centroids in $CPZ^\delta(\overline{r},1/2,1/2,t)$ lies inside $K^\tau$ for $\delta$ sufficiently small. (Otherwise the components of the collection $CPZ^\delta(\overline{r},1/2,1/2,t)$ would concentrate about a component of $CPZ^0(\overline{r},1/2,1/2,t)$ outside $K^\tau$; contradicting the hypothesis that $J_{u^0(t)}=CPZ^0(\overline{r},1/2,1/2,t)$). The collection of all points belonging to unstable neighborhoods associated with centroids in $CPZ^\delta(\overline{r},1/2,1/2,t)$ is easily seen to be contained in the slightly larger set $K^{\tau,\delta}=\{x\in D:\,dist(x,K^\tau)<\delta\}$. From Hypothesis \ref{remark556} we may choose test functions $\phi\in C_0^1(D\setminus K^{\tau,2\delta})$ such that for $\epsilon_k$ sufficiently small \begin{eqnarray}
(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-\phi=(D_e^{\epsilon_k|\xi|}u^{\epsilon_k}\cdot e)\phi. \label{identical} \end{eqnarray}
We form the test functions $\phi(x)\psi(\xi)$, with $\phi\in C_0^1(D\setminus K^{\tau,2\delta})$ and
$\psi\in C(\mathcal{H}_1(0))$. From \eqref{L2bound} we may pass to a subsequence to find that $(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}\cdot e)^-$ weakly converges to the limit $g(x,\xi)$ in $L^2(D\times\mathcal{H}_1(0);\nu)$. With this in mind we write \begin{eqnarray}
&&\int_{D\times\mathcal{H}_1(0)} g(x,\xi)\phi(x)\psi(\xi)\,d\nu=\int_{D\times\mathcal{H}_1(0)} g(x,\xi)\phi(x)\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx\nonumber\\
&&=\lim_{\epsilon_k\rightarrow 0}\int_{D\times\mathcal{H}_1(0)}(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}(x)\cdot e)^- \phi(x)\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx\nonumber\\
&&=\lim_{\epsilon_k\rightarrow 0}\int_{D\times\mathcal{H}_1(0)}(D_e^{\epsilon_k |\xi|}u^{\epsilon_k}(x)\cdot e) \phi(x)\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx\nonumber\\
&&=\lim_{\epsilon_k\rightarrow 0}\int_{D\times\mathcal{H}_1(0)}(u^{\epsilon_k}(x)\cdot e)(D_{-e}^{\epsilon_k |\xi|}\phi(x))\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx,\label{middlepart} \end{eqnarray}
where we have integrated by parts using \eqref{intbypts} in the last line of \eqref{middlepart}. Noting that $D_{-e}^{\epsilon_k |\xi|}\phi(x)$ converges uniformly to $-e\cdot\nabla\phi(x)$ and from the strong convergence of $u^{\epsilon_k}$ to $u^0$ in $L^2$ we obtain \begin{eqnarray}
&&=\lim_{\epsilon_k\rightarrow 0}\int_{D\times\mathcal{H}_1(0)}(u^{\epsilon_k}(x)\cdot e)(D_{-e}^{\epsilon_k |\xi|}\phi(x))\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx\nonumber\\
&&=-\int_{D\times\mathcal{H}_1(0)}(u^{0}(x)\cdot e) (e\cdot\nabla\phi(x))\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx\nonumber\\
&&=-\sum_{j,k=1}^d\int_{D}u^0_j(x)\,\partial_{x_k}\left(\phi(x) \int_{\mathcal{H}_1(0)} e_j e_k\psi(\xi)|\xi|J(|\xi|)\,d\xi\right)\,dx\nonumber\\
&&=\int_{D}\mathcal{E} u^0_{jk}(x)\left(\phi(x) \int_{\mathcal{H}_1(0)} e_j e_k\psi(\xi)|\xi|J(|\xi|)\,d\xi\right)\,dx\nonumber\\
&&=\int_{D\times{\mathcal{H}_1(0)} }(\mathcal{E} u^0(x) e\cdot e)\phi(x)\psi(\xi)|\xi|J(|\xi|)\,d\xi\,dx, \label{identifyweakl2} \end{eqnarray} where we have made use of $E u^0\lfloor D\setminus K^{\tau,2\delta}=\mathcal{E} u^0\,dx$ on the third line of \eqref{identifyweakl2}. From the density of the span of the test functions we conclude that $g(x,\xi)=\mathcal{E} u^0(x)e\cdot e$ almost everywhere on $(D\setminus K^{\tau,2\delta})\times\mathcal{H}_1(0)$. Since $K^{\tau,2\delta}$ can be chosen to have arbitrarily small measure with vanishing $\tau$ and $\delta$ we conclude that $g(x,\xi)=\mathcal{E} u^0(x) e\cdot e$ on $D\times\mathcal{H}_1(0)$ a.e. and Lemma \ref{twolimitsB} is proved.
\subsubsection{Proof of Theorems \ref{epsiloncontropprocesszone} and \ref{bondunstable}} \label{proofbondunstable} We begin with the proof of the upper bound on the size of the process zone given by Theorem \ref{epsiloncontropprocesszone}. The set $K^{+,\epsilon_k}_\alpha$ is defined by
\begin{eqnarray}
K^{+,\epsilon_k}_\alpha=\{(x,\xi)\in D\times\mathcal{H}_1(0);\,|D_e^{\epsilon_k|\xi|}u^{\epsilon_k}\cdot e|>\underline{k}|\epsilon_k\xi|^{\alpha-1}\} \label{equivdescralpha} \end{eqnarray} where $0<\underline{k}\leq\overline{r}$ and $1/2\leq \alpha<1$. We set $\beta=2\alpha-1$ to see for $(x,\xi)\in K^{+,\epsilon_k}_\alpha$ that \begin{eqnarray}
\epsilon_k|\xi||D_e^{\epsilon_k|\xi|}u^{\epsilon_k}(x)\cdot e|^2>\underline{k}^2\epsilon_k^\beta|\xi|^\beta, \label{inequalbasic} \end{eqnarray} and recall that the potential function $f(r)=F_1(r)$ is increasing to get \begin{eqnarray}
J(|\xi|)\frac{1}{\epsilon_k}F_{1}(\underline{k}^2\epsilon_k^\beta|\xi|^\beta)=|\xi|J(|\xi|)\frac{1}{\epsilon_k|\xi|}F_{1}(\underline{k}^2\epsilon_k^\beta|\xi|^\beta)
\leq|\xi|J(|\xi|)F_{\epsilon_k|\xi|}(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)\label{firstalpha} \end{eqnarray}
and in addition since $|\xi|^{1-\beta}\leq 1$ we have \begin{eqnarray}
&& \int_{K^{+,\epsilon_k}_\alpha}\frac{1}{\epsilon_k}F_{1}(\underline{k}^2\epsilon_k^\beta|\xi|^\beta)|\xi|^{1-\beta}J(|\xi|)\,d\xi\,dx\leq \int_{K^{+,\epsilon_k}_\alpha}\frac{1}{\epsilon_k}F_{1}(\underline{k}^2\epsilon_k^\beta|\xi|^\beta)J(|\xi|)\,d\xi\,dx\nonumber\\
&&\leq\int_{K^{+,\epsilon_k}_\alpha} |\xi|J(|\xi|)F_{\epsilon_k|\xi|}(D_{e}^{{\epsilon_k}|\xi|}u^{\epsilon_k}\cdot e)\,d\xi\,dx\leq\sup_{t\in [0,T]}\sup_{\epsilon_k}PD^{\epsilon_k}(u^{\epsilon_k}) \label{firstbalpha} \end{eqnarray} Taylor approximation gives \begin{eqnarray}
\frac{1}{\epsilon_k}F_1(\underline{k}^2\epsilon_k^\beta|\xi|^\beta)=\epsilon_k^{\beta-1}\underline{k}^2(f'(0)+o(\epsilon_k^\beta))|\xi|^\beta \label{taylor} \end{eqnarray} Substitution of \eqref{taylor} into the left hand side of \eqref{firstbalpha} gives \begin{eqnarray}
\int_{K^{+,\epsilon_k}_\alpha} |\xi|J(|\xi|)\,d\xi\,dx\leq \frac{\epsilon_k^{1-\beta}}{\underline{k}^2(f'(0)+o(\epsilon_k^\beta))}\left(\sup_{t\in [0,T]}\sup_{\epsilon_k}PD^{\epsilon_k}(u^{\epsilon_k})\right). \label{semifinal} \end{eqnarray} Introduce the characteristic function $\chi_\alpha^{+,\epsilon_k}(x,\xi)$ defined on $D\times\mathcal{H}_1(0)$ taking the value $1$ for $(x,\xi)$ in $K_\alpha^{+,\epsilon_k}$ and zero otherwise. Observe that \begin{eqnarray}
\frac{1}{m}\int_{\mathcal{H}_1(0)}\chi_\alpha^{+,\epsilon_k}(x,\xi)|\xi|J(|\xi|)\,d\xi=P(\{y\in\mathcal{H}_{\epsilon_k}(x);|S^{\epsilon_k}(y,x)|>\underline{k}|y-x|^{\alpha-1}\}), \label{equivlence} \end{eqnarray} so \begin{eqnarray}
&&\int_DP(\{y\in\mathcal{H}_{\epsilon_k}(x);|S^{\epsilon_k}(y,x)|>\underline{k}|y-x|^{\alpha-1}\})dx\nonumber\\ &&\leq \frac{\epsilon_k^{1-\beta}}{m\times \underline{k}^2(f'(0)+o(\epsilon_k^\beta))}\left(\sup_{t\in [0,T]}\sup_{\epsilon_k}PD^{\epsilon_k}(u^{\epsilon_k})\right). \label{semifinaler} \end{eqnarray} For $0<\overline{\theta}\leq 1$, Tchebyshev's inequality delivers \begin{eqnarray}
&& \mathcal{L}^d\left(\{x\in D;\, P\left(\{y\in\mathcal{H}_{\epsilon_k}(x);\,|S^{\epsilon_k}(y,x)|>\underline{k}|y-x|^{\alpha-1}\}\right)>\overline{\theta}\}\right)\nonumber\\ &&\leq \frac{\epsilon_k^{1-\beta}}{m\times \overline{\theta}\underline{k}^2(f'(0)+o(\epsilon_k^\beta))}\left(\sup_{t\in [0,T]}\sup_{\epsilon_k}PD^{\epsilon_k}(u^{\epsilon_k})\right). \label{probnewvariableboundchebyshcv} \end{eqnarray} and Theorem \ref{epsiloncontropprocesszone} follows on applying \eqref{gineq} and \eqref{upperbound}.
Choose $\epsilon_k=\frac{1}{2^k}$ and Theorem \ref{epsiloncontropprocesszone} implies $\mathcal{L}^d(PZ^{\epsilon_k}(\underline{k},\alpha,\overline{\theta},t))<C(\frac{1}{{2}^k})^{1-\beta}$, where $C$ is independent of $t\in [0,T]$. The collection of process zones for $\epsilon_k<\delta$ is written as \begin{eqnarray} CPZ^{\delta}(\underline{k},\alpha,\overline{\theta},t)=\cup_{\epsilon_k<\delta}PZ^{\epsilon_k}(\underline{k},\alpha,\overline{\theta},t) \label{unstablecollection} \end{eqnarray} and from the geometric series we find \begin{eqnarray} \mathcal{L}^d\left(CPZ^{\delta}(\underline{k},\alpha,\overline{\theta},t)\right)<C{\delta}^{1-\beta}. \label{boundcdelta} \end{eqnarray} Theorem \ref{bondunstable} follows noting further that $CPZ^{0}(\underline{k},\alpha,\overline{\theta},t)\subset CPZ^{\delta}(\underline{k},\alpha,\overline{\theta},t)\subset CPZ^{\delta'}(\underline{k},\alpha,\overline{\theta},t)$, for $0<\delta<\delta'$.
\setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0}\setcounter{proposition}{0}\setcounter{remark}{0}\setcounter{remark}{0} \setcounter{definition}{0}\setcounter{hypothesis}{0}
\section{Dynamics and limits of energies that $\Gamma$-converge to Griffith fracture energies} \label{Ninth} In this final section we collect ideas and illustrate how the approach presented in the earlier sections can be used to examine limits of dynamics associated with other energies that $\Gamma$-converge to the Griffith fracture energy. As an example we consider the phase field approach based on the Ambrosio-Tortorelli approximation for dynamic brittle fracture calculations \cite{BourdinLarsenRichardson}. This model is seen to be a well-posed formulation in the sense that existence of solutions can be shown \cite{LarsenOrtnerSuli}. To illustrate the ideas we focus on anti-plane shear and the model is described by an out of plane elastic displacement $u^\epsilon(x,t)$ and phase field $0\leq v^\epsilon(x,t)\leq 1$ defined for points $x$ belonging to the domain $D\subset \mathbb{R}^2$. The potential energy associated with the cracking body is given by the Ambrosio-Tortorelli potential \begin{eqnarray} P^\epsilon(u^\epsilon(t),v^\epsilon(t))=E^\epsilon(u^\epsilon(t),v^\epsilon(t))+H^\epsilon(v^\epsilon(t)), \label{ab} \end{eqnarray} with \begin{eqnarray}
E^\epsilon(u^\epsilon(t),v^\epsilon(t))=\frac{\mu}{2}\int_D a^\epsilon(t)|\nabla u^\epsilon(t)|^2\,dx \label{detailsforab} \end{eqnarray} and \begin{eqnarray}
H^\epsilon(v^\epsilon(t))=\frac{\mathcal{G}_c}{2}\int_D \frac{(1-v^\epsilon(t))^2}{2\epsilon}+2\epsilon|\nabla v^\epsilon(t)|^2\,dx. \label{moredetailsforab} \end{eqnarray} Here $a^\epsilon(t)=a^\epsilon(x,t)=(v^\epsilon(x,t))^2+\eta^\epsilon$, with $0<\eta^\epsilon\ll\epsilon$. In this model the phase field $v^\epsilon$ provides an approximate description of a freely propagating crack taking the value $1$ for points $(x,t)$ away from the crack and zero on the crack. To formulate the problem we introduce the space $H^1_0(D)$ defined to be displacements $u$ in $H^1(D)$ with zero Dirichlet data on $\partial D$ and the set of functions $H_1^1(D)$ defined to be functions $v$ in $H^1(D)$ for which $v=1$ on $\partial D$. The total energy is given by \begin{eqnarray}
\mathcal{F}(t;u^\epsilon,\partial_t u^\epsilon,v^\epsilon)=\frac{1}{2}\int_D|\partial_t u^\epsilon|^2\,dx+P^\epsilon(u^\epsilon,v^\epsilon)-\int_D f(t)u^\epsilon\,dx. \label{eergytotala} \end{eqnarray} The body force $f(x,t)$ is prescribed and the displacement - phase field pair $(u^\epsilon,v^\epsilon)$ is a solution of the initial boundary value problem \cite{LarsenOrtnerSuli} given by: \begin{eqnarray} \partial_{tt}^2 u^\epsilon-{\rm{div}}\left(a^\epsilon(t)\nabla(u^\epsilon-\partial_t u^\epsilon)\right)=f(t),&& \hbox{ in $D$},\nonumber\\ u^\epsilon=0 \hbox{ and } v^\epsilon=1, &&\hbox{ on $\partial D$},\label{ibvp} \end{eqnarray} for $t\in (0,T]$ with initial conditions $u^\epsilon(0)=u^\epsilon_0$, $\partial_t u^\epsilon(0)=u^\epsilon_1\in H^1_0(D)$, satisfying the crack stability condition \begin{eqnarray} P^\epsilon(u^\epsilon(t),v^\epsilon(t))\leq\inf\left\{P^\epsilon(u^\epsilon(t),v):\, v\in H^1_1(D),\,\,v\leq v^\epsilon(t)\right\} \label{stability} \end{eqnarray} and energy balance \begin{eqnarray}
\mathcal{F}(t;u^\epsilon,\partial_t u^\epsilon,v^\epsilon)=\mathcal{F}(0;u^\epsilon_0,u^\epsilon_1,v^\epsilon_0)-\int_0^t\int_D\,a^\epsilon|\nabla\partial_\tau u^\epsilon|^2\,dx\,d\tau-\int_0^t\int_D\,\partial_\tau f u^\epsilon\,dx\,d\tau, \label{eergytotalbalance} \end{eqnarray} for every time $0\leq t\leq T$. Finally the initial condition for the phase field is chosen such that $v^\epsilon(0)=v^\epsilon_0$ where $v^\epsilon_0\in H^1_1(D)$ satisfies the unilateral minimality condition \eqref{stability}. In this formulation the pair $u^\epsilon(t)$, $v^\epsilon(t)$ provides a regularized model for free crack propagation. Here the phase field tracks the evolution of the crack with $v^\epsilon=1$ away from the crack and $v^\epsilon=0$ in the crack set.
For a body force $f(x,t)$ in $C^1([0,T];L^2(D))$ it is shown in \cite{LarsenOrtnerSuli} that there exists at least one trajectory $(u^\epsilon,v^\epsilon)\in H^2((0,T);L^2(D))\cap W^{1,\infty}((0,T); H^1_0(D))\times W^{1,\infty}((0,T); H_1^1(D))$ satisfying \eqref{ibvp} in the weak sense, i.e., \begin{eqnarray} \int_D\partial_{tt}^2 u^\epsilon\varphi\,dx+\int_D a^\epsilon(t)\nabla(u^\epsilon-\partial_t u^\epsilon)\cdot\nabla\varphi\,dx=\int_D f(t)\varphi\,dx, \label{ibvpweak} \end{eqnarray} for all $\varphi$ in $H^1_0(D)$ for almost every $t$ in $(0,T]$, with $u^\epsilon(0)=u^\epsilon_0$, $\partial_t u^\epsilon(0)=u^\epsilon_1$, $v^\epsilon(0)=v^\epsilon_0$, and such that \eqref{stability} and \eqref{eergytotalbalance} are satisfied for all times $0<t\leq T$. We have formulated the problem in a simplified setting to illustrate the ideas and note that this type of evolution is shown to exist for evolutions with more general boundary conditions and for displacements in two and three dimensions, see \cite{LarsenOrtnerSuli}. For future reference we call the pair $(u^\epsilon(t),v^\epsilon(t))$ a phase field fracture evolution.
In what follows we pass to the $\epsilon\rightarrow 0$ limit in the phase field evolutions to show existence of a limiting evolution with bounded linear elastic fracture energy. Here the limit evolution $u^0(t)$ is shown to take values in the space of special functions of bounded variation $SBV(D)$. This space is well known and can be thought of as a specialization of the space SBD introduced earlier in Section \ref{sec5} that is appropriate for the scalar problem treated here. For a treatment of SBV and its relation to fracture the reader is referred to \cite{AmbrosioBrades}.
Applying the techniques developed in previous sections it is possible to state and prove the following theorem on the dynamics associated with the phase field evolutions $(u^\epsilon,v^\epsilon)$ in the limit as $\epsilon\rightarrow 0$. \begin{theorem} {\bf Sharp interface limit of phase field fracture evolutions.}\\ \label{epszerolimitofLOS} Suppose for every $\epsilon>0$ that: (a) the potential energy of the initial data $(u_0^\epsilon,v_0^\epsilon)$ is uniformly bounded, i.e., $\sup_{\epsilon>0}\left\{P^\epsilon(u^\epsilon_0,v_0^\epsilon)\right\}<\infty$, and that (b) $\Vert u^\epsilon(t)\Vert_{L^\infty(D)}<C$ for all $0<\epsilon$ and $0\leq t\leq T$. Then on passing to a subsequence if necessary in the phase field fracture evolutions $(u^\epsilon,v^\epsilon)$ there exists an anti-plane displacement field $u^0(x,t)$ in $SBV(D)$ for all $t\in [0,T]$ such that $u^0\in C([0,T];L^2(D))$ and \begin{eqnarray} \label{limsbv} \lim_{\epsilon\rightarrow 0}\max_{0\leq t\leq T}\{\Vert u^\epsilon(t)-u^0(t)\Vert_{L^2(D)}\}=0 \end{eqnarray} with \begin{eqnarray} \label{lefm}
GF(u^0)=\frac{\mu}{2}\int_D|\nabla u^0(t)|^2\,dx+\mathcal{G}_c\mathcal{H}^1(J_{u^0(t)})<C
For anti-plane shear deformations the energy $GF$ is a special form of the energy $LEFM$ introduced in Section \ref{sec5}.
The strategy we will use for proving Theorem \ref{epszerolimitofLOS} is the same as the one developed in the proofs of Theorems \ref{LimitflowThm} and \ref{LEFMMThm}. This strategy can be made systematic and applied to evolutions associated with potential energies that $\Gamma$- converge to the Griffith fracture energy. It consists of four component parts: \begin{enumerate} \item Constructing upper bounds on the kinetic and potential energy of the evolutions that hold uniformly for $0\leq t\leq T$ and $0<\epsilon$. \item Showing compactness of the evolution $u^\epsilon(t)$ in $L^2(D)$ for each time $0\leq t\leq T$. \item Showing limit points of the sequence $\{u^\epsilon(t)\}$ belong to $SBD(D)$ (or $SBV(D)$ as appropriate) for each time $0\leq t\leq T$. \item $\Gamma$-convergence of the potential energies to the Griffith energy $LEFM$ (or $GF$ as appropriate). \end{enumerate}
Assume first that Parts 1 through 4 hold for the phase field fracture evolution with potential energies $P^\epsilon$ given by \eqref{ab}. These are used as follows to prove Theorem \ref{epszerolimitofLOS}. Part 1 is applied as in \eqref{lip} to show that the sequence of evolutions $u^\epsilon(t)$ is uniformly Lipschitz continuous in time with respect to the $L^2(D)$ norm, i.e., \begin{eqnarray} \label{lipschitzat}
\Vert u^\epsilon(t_1)-u^\epsilon(t_2)\Vert_{L^2(D)}\leq K |t_1-t_2| \end{eqnarray} for $K$ independent of $\epsilon$ and for any $0\leq t_1<t_2\leq T$. Part 2 together with \eqref{lipschitzat} and the Ascoli theorem imply the existence of a subsequence and a limit $u^0(x,t)\in C([0,T];L^2(D))$ such that the convergence \eqref{limsbv} holds. Part 3 shows that $u^0(x,t)$ belongs to $SBV(D)$ for every time in $[0,T]$. Part 4 together with Part 1 and the lower bound property of $\Gamma$-convergence shows that \eqref{lefm} holds and Theorem \ref{epszerolimitofLOS} follows.
We now establish Parts 1 through 4 for the dynamic phase field fracture evolution introduced in \cite{LarsenOrtnerSuli}. To obtain a uniform bound on the kinetic and potential energy differentiate both sides of the energy balance \eqref{eergytotalbalance} with respect to time to get \begin{eqnarray} \label{gronagain}
&&\frac{d}{dt}\left(\frac{1}{2}\int_D|\partial_t u^\epsilon(t)|^2\,dx+P^\epsilon(u^\epsilon(t),v^\epsilon(t))\right)-\frac{d}{dt}\int_D\,f(t) u^\epsilon\,dx\\
&&=-\int_D a^\epsilon|\nabla \partial_t u^\epsilon|^2\,dx-\int_D\partial_t f u^\epsilon\,dx.\nonumber \end{eqnarray} Manipulation and application of the identity $f\partial_t u^\epsilon=\partial_t(f u^\epsilon)-\partial_t f u^\epsilon$ to \eqref{gronagain} delivers the inequality \begin{eqnarray} \label{gronagainagain}
&&\frac{d}{dt}\left(\frac{1}{2}\int_D|\partial_t u^\epsilon(t)|^2\,dx+P^\epsilon(u^\epsilon(t),v^\epsilon(t))\right)\\ &&\leq \int_D f \partial_t u^\epsilon\,dx.\nonumber \end{eqnarray} Now set \begin{eqnarray}
W^\epsilon(t)=\left(\frac{1}{2}\int_D|\partial_t u^\epsilon(t)|^2\,dx+P^\epsilon(u^\epsilon(t),v^\epsilon(t))\right)+1 \label{westat} \end{eqnarray} and proceed as in Section \ref{GKP} to get \begin{eqnarray}
\left(\frac{1}{2}\int_D|\partial_t u^\epsilon(t)|^2\,dx+P^\epsilon(u^\epsilon(t),v^\epsilon(t))\right) \leq \left(\int_0^t\Vert f(\tau)\Vert_{L^2(D)}\,d\tau +\sqrt{W^\epsilon(0)}\right )^2-1. \label{gineqat} \end{eqnarray} Part 1 easily follows from \eqref{gineqat} noting that $\sup_{\epsilon>0}\{W^{\epsilon}(0)\}<\infty$ is a consequence of hypothesis (a) of Theorem \ref{epszerolimitofLOS}. For this example Parts 2 and 3 follow from the uniform bound of Part 1, hypothesis (b) of Theorem \ref{epszerolimitofLOS} and the well known compactness result for the Ambrosio Tortorelli functional, see for example the remark following Theorem 2.3 of \cite{Giacomini}. Part 4 is given by the Ambrosio-Tortorelli result \cite{AT} as expressed in Theorem 2.3 of \cite{Giacomini}.
\section{Conclusions}
The cohesive model for dynamic brittle fracture evolution presented in this paper does not require extra constitutive laws such as a kinetic relation between crack driving force and crack velocity or a crack branching condition. Instead the evolution of the process zone together with the fracture set is governed by one equation consistent with Newton's second law given by \eqref{eqofmotion}. This is a characteristic feature of peridynamic models \cite{Silling1}, \cite{States}. This evolution is intrinsic to the formulation and encoded into the nonlocal cohesive constitutive law. Crack nucleation criteria although not necessary to the formulation follow from the dynamics and are recovered here by viewing nucleation as a dynamic instability; this is similar in spirit to \cite{SillingWecknerBobaru} and the work of \cite{BhattacharyaDyal} for phase transitions. Theorem \ref{epsiloncontropprocesszone} explicitly shows how the size of the process zone is controlled by the radius of the horizon. This analysis shows that {\em the horizon size $\epsilon$ for cohesive dynamics is a modeling parameter} that can be calibrated according to the size of the process zone obtained from experimental measurements. The process zone is seen to concentrate on a set of zero volume as the length scale of non-locality characterized by the radius of the horizon $\epsilon$ goes to zero, see Theorem \ref{bondunstable}. In this limit the dynamics is shown (under suitable hypotheses) to coincide with the simultaneous evolution of a displacement crack set pair. Here displacement satisfies the elastic wave equation for points in space-time away from the crack set. The shear and Lam\'e moduli together with the energy release rate are described in terms of moments of the nonlocal potentials.
\section{Acknowledgements} \label{Acknowlegements} The author would like to thank Stewart Silling, Richard Lehoucq and Florin Bobaru for stimulating and fruitful discussions. This research is supported by NSF grant DMS-1211066, AFOSR grant FA9550-05-0008, and NSF EPSCOR Cooperative Agreement No. EPS-1003897 with additional support from the Louisiana Board of Regents.
\end{document} |
\begin{document}
\title{The reverse Yang-Mills-Higgs flow in a neighbourhood of a critical point}
\author{Graeme Wilkin} \address{Department of Mathematics, National University of Singapore, Singapore 119076} \email{[email protected]}
\date{\today}
\begin{abstract} The main result of this paper is a construction of solutions to the reverse Yang-Mills-Higgs flow converging in the $C^\infty$ topology to a critical point. The construction uses only the complex gauge group action, which leads to an algebraic classification of the isomorphism classes of points in the unstable set of a critical point in terms of a filtration of the underlying Higgs bundle.
Analysing the compatibility of this filtration with the Harder-Narasimhan-Seshadri double filtration gives an algebraic criterion for two critical points to be connected by a flow line. As an application, we can use this to construct Hecke modifications of Higgs bundles via the Yang-Mills-Higgs flow. When the Higgs field is zero (corresponding to the Yang-Mills flow), this criterion has a geometric interpretation in terms of secant varieties of the projectivisation of the underlying bundle inside the unstable manifold of a critical point, which gives a precise description of broken and unbroken flow lines connecting two critical points. For non-zero Higgs field, at generic critical points the analogous interpretation involves the secant varieties of the spectral curve of the Higgs bundle. \end{abstract}
\maketitle
\thispagestyle{empty}
\baselineskip=16pt
\section{Introduction}
There is a well-known relationship between the Yang-Mills heat flow on a Riemann surface and the notion of stability from algebraic geometry. This began with work of Atiyah and Bott \cite{AtiyahBott83} and continued with Donaldson's proof \cite{Donaldson83} of the Narasimhan-Seshadri theorem \cite{NarasimhanSeshadri65} and subsequent work of Daskalopoulos \cite{Daskal92} and Rade \cite{Rade92}, which shows that the Yang-Mills flow converges to a unique critical point which is isomorphic to the graded object of the Harder-Narasimhan-Seshadri double filtration of the initial condition. In the setting of Higgs bundles, a theorem of Hitchin \cite{Hitchin87} and Simpson \cite{Simpson88} shows that a polystable Higgs bundle is gauge equivalent to the minimum of the Yang-Mills-Higgs functional and that this minimum is achieved by the heat flow on the space of metrics. The results of \cite{Wilkin08} show that the theorem of Daskalopoulos and Rade described above extends to the Yang-Mills-Higgs flow on the space of Higgs bundles over a compact Riemann surface. More generally, when the base manifold is compact and K\"ahler, then these results are due to \cite{Donaldson85}, \cite{Donaldson87-2}, \cite{UhlenbeckYau86}, \cite{Simpson88}, \cite{DaskalWentworth04}, \cite{Sibley15}, \cite{Jacob15} and \cite{LiZhang11}.
Continuing on from these results, it is natural to investigate flow lines between critical points. Naito, Kozono and Maeda \cite{NaitoKozonoMaeda90} proved the existence of an unstable manifold of a critical point for the Yang-Mills functional, however their method does not give information about the isomorphism classes in the unstable manifold, and their proof requires a manifold structure on the space of connections (which is not true for the space of Higgs bundles). Recent results of Swoboda \cite{Swoboda12} and Janner-Swoboda \cite{JannerSwoboda15} count flow lines for a perturbed Yang-Mills functional, however these perturbations destroy the algebraic structure of the Yang-Mills flow, and so there does not yet exist an algebro-geometric description of the flow lines in the spirit of the results described in the previous paragraph. Moreover, one would also like to study flow lines for the Yang-Mills-Higgs functional, in which case the perturbations do not necessarily preserve the space of Higgs bundles, which is singular.
The purpose of this paper is to show that in fact there is an algebro-geometric description of the flow lines connecting given critical points of the Yang-Mills-Higgs functional over a compact Riemann surface. As an application, we show that the Hecke correspondence for Higgs bundles studied by Witten in \cite{witten-hecke} has a natural interpretation in terms of gradient flow lines. Moreover, for the Yang-Mills flow, at a generic critical point there is a natural embedding of the projectivisation of the underlying bundle inside the unstable set of the critical point, and the results of this paper show that the isomorphism class of the limit of the downwards flow is determined if the initial condition lies in one of the secant varieties of this embedding, giving us a geometric criterion to distinguish between broken and unbroken flow lines. For the Yang-Mills-Higgs flow the analogous picture involves the secant varieties of the space of Hecke modifications compatible with the Higgs field. At generic critical points of the Yang-Mills-Higgs functional this space of Hecke modifications is the spectral curve of the Higgs bundle.
The basic setup for the paper is as follows. Let $E \rightarrow X$ be a smooth complex vector bundle over a compact Riemann surface with a fixed Hermitian metric and let $\mathcal{B}$ denote the space of Higgs pairs on $E$. The \emph{Yang-Mills-Higgs functional} is \begin{equation*}
\mathop{\rm YMH}\nolimits(\bar{\partial}_A, \phi) := \| F_A + [\phi, \phi^*] \|_{L^2}^2 \end{equation*} and the \emph{Yang-Mills-Higgs flow} is the downwards gradient flow of $\mathop{\rm YMH}\nolimits$ given by the equation \eqref{eqn:YMH-flow}. This flow is generated by the action of the complex gauge group $\mathcal{G}^\mathbb C$. Equivalently, one can fix a Higgs pair and allow the Hermitian metric on the bundle to vary in which case the flow becomes a nonlinear heat equation on the space of Hermitian metrics (cf. \cite{Donaldson85}, \cite{Simpson88}). At a critical point for this flow the Higgs bundle splits into Higgs subbundles and on each subbundle the Higgs structure minimises $\mathop{\rm YMH}\nolimits$. The \emph{unstable set} of a critical point $(\bar{\partial}_A, \phi)$ consists of all Higgs pairs for which a solution to the $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:YMH-flow} exists for all negative time and converges in the smooth topology to $(\bar{\partial}_A, \phi)$ as $t \rightarrow - \infty$. The first theorem of the paper gives an algebraic criterion for a complex gauge orbit to intersect the unstable set for the Yang-Mills-Higgs flow.
\begin{theorem}[Criterion for convergence of reverse heat flow]\label{thm:unstable-set-intro} Let $E$ be a complex vector bundle over a compact Riemann surface $X$, and let $(\bar{\partial}_A, \phi)$ be a Higgs bundle on $E$. Suppose that $E$ admits a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)}) = (E, \phi)$ by Higgs subbundles such that the quotients $(Q_k, \phi_k) := (E^{(k)}, \phi^{(k)}) / (E^{(k-1)}, \phi^{(k-1)})$ are Higgs polystable and $\slope(Q_k) < \slope(Q_j)$ for all $k < j$. Then there exists $g \in \mathcal{G}^\mathbb C$ and a solution to the reverse Yang-Mills-Higgs heat flow equation with initial condition $g \cdot (\bar{\partial}_A, \phi)$ which converges to a critical point isomorphic to $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$.
Conversely, if there exists a solution of the reverse heat flow from the initial condition $(\bar{\partial}_A, \phi)$ converging to a critical point $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$ where each $(Q_j, \phi_j)$ is polystable with $\slope(Q_k) < \slope(Q_j)$ for all $k < j$, then $(E, \phi)$ admits a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)}) = (E, \phi)$ whose graded object is isomorphic to $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$. \end{theorem}
A key difficulty in the construction is the fact that the space of Higgs bundles is singular, and so the existing techniques for constructing unstable sets (see for example \cite{NaitoKozonoMaeda90} for the Yang-Mills flow or \cite[Sec. 6]{Jost05} in finite dimensions) cannot be directly applied since they depend on the manifold structure of the ambient space. One possibility is to study the unstable set of the function $\| F_A + [\phi, \phi^*] \|_{L^2}^2 + \| \bar{\partial}_A \phi \|_{L^2}^2$ on the space of all pairs $(\bar{\partial}_A, \phi)$ without the Higgs bundle condition $\bar{\partial}_A \phi = 0$, however one would then need a criterion to determine when a point in this unstable set is a Higgs bundle and one would also need a method to determine the isomorphism classes of these points.
The construction in the proof of Theorem \ref{thm:unstable-set-intro} is intrinsic to the singular space since it uses the action of the complex gauge group to map the unstable set for the linearised $\mathop{\rm YMH}\nolimits$ flow (for which we can explicitly describe the isomorphism classes) to the unstable set for the Yang-Mills-Higgs flow. The method used here to compare the flow with its linearisation is called the ``scattering construction'' in \cite{Hubbard05} and \cite{Nelson69} since it originates in the study of wave operators in quantum mechanics (see \cite{ReedSimonVol3} for an overview). The method in this paper differs from \cite{Hubbard05} and \cite{Nelson69} in that (a) the construction here is done using the gauge group action in order to preserve the singular space and (b) the distance-decreasing formula for the flow on the space of metrics \cite{Donaldson85} is used here in order to avoid constructing explicit local coordinates as in \cite{Hubbard05} (the construction of \cite{Hubbard05} requires a manifold structure around the critical points).
As a consequence of Theorem \ref{thm:unstable-set-intro}, we have an algebraic criterion for critical points to be connected by flow lines.
\begin{corollary}[Algebraic classification of flow lines]\label{cor:algebraic-flow-line-intro} Let $x_u = (\bar{\partial}_{A_u}, \phi_u)$ and $x_\ell = (\bar{\partial}_{A_\ell}, \phi_\ell)$ be critical points with $\mathop{\rm YMH}\nolimits(x_u) > \mathop{\rm YMH}\nolimits(x_\ell)$. Then $x_u$ and $x_\ell$ are connected by a flow line if and only if there exists a Higgs pair $(E, \phi)$ which has Harder-Narasimhan-Seshadri double filtration whose graded object is isomorphic to $x_\ell$, and which also admits a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)}) = (E, \phi)$ by Higgs subbundles such that the quotients $(Q_k, \phi_k) := (E^{(k)}, \phi^{(k)}) / (E^{(k-1)}, \phi^{(k-1)})$ are polystable and satisfy $\slope(Q_k) < \slope(Q_j)$ for all $k < j$ and the graded object $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$ is isomorphic to $x_u$. \end{corollary}
As an application of the previous theorem, we can construct Hecke modifications of Higgs bundles via Yang-Mills-Higgs flow lines. First consider the case of a Hecke modification at a single point (minuscule Hecke modifications in the terminology of \cite{witten-hecke}).
\begin{theorem}\label{thm:hecke-intro}
\begin{enumerate}
\item Let $0 \rightarrow (E', \phi') \rightarrow (E, \phi) \stackrel{v}{\rightarrow} \mathbb C_p \rightarrow 0$ be a Hecke modification such that $(E, \phi)$ is stable and $(E', \phi')$ is semistable, and let $L_u$ be a line bundle with $\deg L_u + 1 < \slope(E') < \slope(E)$. Then there exist sections $\phi_u, \phi_\ell \in H^0(K)$, a line bundle $L_\ell$ with $\deg L_\ell = \deg L_u + 1$ and a metric on $E \oplus L_u$ such that $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E_{gr}', \phi_{gr}') \oplus (L_\ell, \phi_\ell)$ are critical points connected by a $\mathop{\rm YMH}\nolimits$ flow line, where $(E_{gr}', \phi_{gr}')$ is isomorphic to the graded object of the Seshadri filtration of $(E', \phi')$.
\item Let $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E', \phi') \oplus (L_\ell, \phi_\ell)$ be critical points connected by a $\mathop{\rm YMH}\nolimits$ flow line such that $L_u, L_\ell$ are line bundles with $\deg L_\ell = \deg L_u + 1$, $(E, \phi)$ is stable and $(E', \phi')$ is polystable with $\deg L_u + 1 < \slope(E') < \slope(E)$. Then $(E', \phi')$ is the graded object of the Seshadri filtration of a Hecke modification of $(E, \phi)$. If $(E', \phi')$ is Higgs stable then it is a Hecke modification of $(E, \phi)$.
\end{enumerate}
\end{theorem}
For Hecke modifications defined at multiple points, we can inductively apply the above theorem to obtain a criterion for two critical points to be connected by a broken flow line. For non-negative integers $m, n$, the definition of $(m, n)$ stability is given in Definition \ref{def:m-n-stable}. The space $\mathcal{N}_{\phi, \phi_u}$ denotes the space of Hecke modifications compatible with the Higgs fields $\phi$ and $\phi_u$ (see Definition \ref{def:Hecke-compatible}).
\begin{corollary}\label{cor:broken-hecke-intro} Consider a Hecke modification $0 \rightarrow (E', \phi') \rightarrow (E, \phi) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ defined by $n > 1$ distinct points $\{ v_1, \ldots, v_n \} \subset \mathbb{P} E^*$, where $(E, \phi)$ is $(0,n)$ stable. If there exists $\phi_u \in H^0(K)$ such that $v_1, \ldots, v_n \in \mathcal{N}_{\phi, \phi_u}$, then there is a broken flow line connecting $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E_{gr}', \phi_{gr}') \oplus (L_\ell, \phi_\ell)$, where $(E_{gr}', \phi_{gr}')$ is the graded object of the Seshadri filtration of the semistable Higgs bundle $(E', \phi')$. \end{corollary}
For any gradient flow, given upper and lower critical sets $C_u$ and $C_\ell$, one can define the spaces $\mathcal{F}_{\ell, u}$ (resp. $\mathcal{BF}_{\ell,u}$) of unbroken flow lines (resp. broken or unbroken flow lines) connecting these sets, and the spaces $\mathcal{P}_{\ell, u}$ (resp. $\mathcal{BP}_{\ell, u}$) of pairs of critical points connected by an unbroken flow line (resp. broken or unbroken flow line). These spaces are correspondences with canonical projection maps to the critical sets given by the projection taking a flow line to its upper and lower endpoints. \begin{equation*} \xymatrix{
& \mathcal{F}_{\ell, u} \ar[d] \ar@/_/[ddl] \ar@/^/[ddr] & & & \mathcal{BF}_{\ell, u} \ar[d] \ar@/_/[ddl] \ar@/^/[ddr] \\
& \mathcal{P}_{\ell, u} \ar[dl] \ar[dr] & & & \mathcal{BP}_{\ell, u} \ar[dl] \ar[dr] & \\ C_\ell & & C_u & C_\ell & & C_u } \end{equation*}
In the setting of Theorem \ref{thm:hecke-intro}, let $d = \deg E$ and $r = \rank(E)$ and let $C_u$ and $C_\ell$ be the upper and lower critical sets. There are natural projection maps to the moduli space of semistable Higgs bundles $C_u \rightarrow \mathcal{M}_{ss}^{Higgs}(r, d)$ and $C_\ell \rightarrow \mathcal{M}_{ss}^{Higgs}(r, d-1)$. Suppose that $\gcd(r,d) = 1$ so that $\mathcal{M}_{ss}^{Higgs}(r, d)$ consists solely of stable Higgs pairs and hence any Hecke modification is semistable. Since the flow is $\mathcal{G}$-equivariant, then there is an induced correspondence variety, denoted $\mathcal{M}_{\ell, u}$ in the diagram below. \begin{equation*} \xymatrix{
& \mathcal{F}_{\ell, u} \ar[d] \ar@/_/[ddl] \ar@/^/[ddr] & \\
& \mathcal{P}_{\ell, u} \ar[dl] \ar[dr] \ar[d] & \\ C_\ell \ar[d] & \mathcal{M}_{\ell,u} \ar[dl] \ar[dr] & C_u \ar[d] \\ \mathcal{M}_{ss}^{Higgs}(r, d-1) & & \mathcal{M}_{ss}^{Higgs}(r,d) } \end{equation*}
As a consequence of Theorem \ref{thm:hecke-intro}, we have the following result.
\begin{corollary}\label{cor:hecke-correspondence-intro} $\mathcal{M}_{\ell,u}$ is the Hecke correspondence. \end{corollary}
A natural question from Floer theory is to ask whether a pair of critical points connected by a broken flow line can also be connected by an unbroken flow line, i.e.\ whether $\mathcal{BP}_{\ell, u} = \mathcal{P}_{\ell, u}$. The methods used to prove the previous theorems can be used to investigate this question using the geometry of secant varieties of the space of Hecke modifications inside the unstable set of a critical point. For critical points of the type studied in Theorem \ref{thm:hecke-intro}, generically this space of Hecke modifications is the spectral curve of the Higgs field, and so the problem reduces to studying secant varieties of the spectral curve. This is explained in detail in Section \ref{sec:secant-criterion}. In particular, Corollary \ref{cor:rank-2-classification} gives a complete classification of the unbroken flow lines on the space of rank $2$ Higgs bundles.
The paper is organised as follows. In Section \ref{sec:preliminaries} we set the notation for the paper, prove a slice theorem around the critical points and derive some preliminary estimates for the $\mathop{\rm YMH}\nolimits$ flow near a critical point. Section \ref{sec:local-analysis} contains the main part of the analysis of the $\mathop{\rm YMH}\nolimits$ flow around a critical point, which leads to the proof of Theorem \ref{thm:unstable-set-intro} and Corollary \ref{cor:algebraic-flow-line-intro}. In Section \ref{sec:hecke} we interpret the analytic results on flow lines in terms of the Hecke correspondence, leading to the proof of Theorem \ref{thm:hecke-intro}, Corollary \ref{cor:broken-hecke-intro} and Corollary \ref{cor:hecke-correspondence-intro}. Appendix \ref{sec:uniqueness} contains a proof that a solution to the reverse $\mathop{\rm YMH}\nolimits$ flow with a given initial condition is necessarily unique.
{\bf Acknowledgements.} I would like to thank George Daskalopoulos, M.S. Narasimhan and Richard Wentworth for their interest in the project, as well as George Hitching for useful discussions about \cite{ChoeHitching10} and \cite{Hitching13}.
\section{Preliminaries}\label{sec:preliminaries}
\subsection{The Yang-Mills-Higgs flow on a compact Riemann surface}
Fix a compact Riemann surface $X$ and a smooth complex vector bundle $E \rightarrow X$. Choose a normalisation so that $\mathop{\rm vol}\nolimits(X) = 2\pi$. Fix $\bar{\partial}_{A_0} : \Omega^0(E) \rightarrow \Omega^{0,1}(E)$ such that $\bar{\partial}_{A_0}$ is $\mathbb C$-linear and satisfies the Leibniz rule $\bar{\partial}_{A_0}(fs) = (\bar{\partial} f) s + f (\bar{\partial}_{A_0} s)$ for all $f \in \Omega^0(X)$ and $s \in \Omega^0(E)$. Let $\mathcal{A}^{0,1}$ denote the affine space $\bar{\partial}_{A_0} + \Omega^{0,1}(\mathop{\rm End}\nolimits(E))$. A theorem of Newlander and Nirenberg identifies $\mathcal{A}^{0,1}$ with the space of holomorphic structures on $E$. The \emph{space of Higgs bundles on $E$} is \begin{equation} \mathcal{B} := \{ (\bar{\partial}_A, \phi) \in \mathcal{A}^{0,1} \times \Omega^{1,0}(\mathop{\rm End}\nolimits(E)) \, : \, \bar{\partial}_A \phi = 0 \} \end{equation}
The complex gauge group is denoted $\mathcal{G}^\mathbb C$ and acts on $\mathcal{B}$ by $g \cdot (\bar{\partial}_A, \phi) = (g \bar{\partial}_A g^{-1}, g \phi g^{-1})$. If $X$ is a complex manifold with $\dim_\mathbb C X > 1$ then we impose the extra integrability conditions $(\bar{\partial}_A)^2 = 0$ and $\phi \wedge \phi = 0$. Given a Hermitian metric on $E$, let $\mathcal{A}$ denote the space of connections on $E$ compatible with the metric, and let $\mathcal{G} \subset \mathcal{G}^\mathbb C$ denote the subgroup of unitary gauge transformations. The Chern connection construction defines an injective map $\mathcal{A}^{0,1} \hookrightarrow \mathcal{A}$ which is a diffeomorphism when $\dim_\mathbb C X = 1$. Given $\bar{\partial}_A \in \mathcal{A}^{0,1}$, let $F_A$ denote the curvature of the Chern connection associated to $\bar{\partial}_A$ via the Hermitian metric. The metric induces a pointwise norm $| \cdot | : \Omega^2(\mathop{\rm End}\nolimits(E)) \rightarrow \Omega^0(X, \mathbb R)$ and together with the Riemannian structure on $X$ an $L^2$ norm $\| \cdot \|_{L^2} : \Omega^2(\mathop{\rm End}\nolimits(E)) \rightarrow \mathbb R$. The \emph{Yang-Mills-Higgs functional} $\mathop{\rm YMH}\nolimits : \mathcal{B} \rightarrow \mathbb{R}$ is defined by \begin{equation}\label{eqn:YMH-def}
\mathop{\rm YMH}\nolimits(\bar{\partial}_A, \phi) = \| F_A + [\phi, \phi^*] \|_{L^2}^2 = \int_X | F_A + [ \phi, \phi^*] |^2 \, dvol \end{equation}
When $\dim_\mathbb C X = 1$, the Hodge star defines an isometry $* : \Omega^2(\mathop{\rm End}\nolimits(E)) \rightarrow \Omega^0(\mathop{\rm End}\nolimits(E)) \cong \mathop{\rm Lie}\nolimits \mathcal{G}^\mathbb C$. For any initial condition $(A_0, \phi_0)$, the following equation for $g_t \in \mathcal{G}^\mathbb C$ has a unique solution on the interval $t \in [0, \infty)$ (cf. \cite{Donaldson85}, \cite{Simpson88}) \begin{equation}\label{eqn:gauge-flow} \frac{\partial g}{\partial t} g_t^{-1} = - i * ( F_{g_t \cdot A_0} + [g_t \cdot \phi_0, (g_t \cdot \phi_0)^*]) , \quad g_0 = \id . \end{equation} This defines a unique curve $(A_t, \phi_t) = g_t \cdot (A_0, \phi_0) \in \mathcal{B}$ which is a solution to the downwards Yang-Mills-Higgs gradient flow equations \begin{align}\label{eqn:YMH-flow} \begin{split} \frac{\partial A}{\partial t} & = i \bar{\partial}_A * (F_A + [\phi, \phi^*]) \\ \frac{\partial \phi}{\partial t} & = i \left[ \phi, *(F_A + [\phi, \phi^*]) \right] . \end{split} \end{align} for all $t \in [0, \infty)$. The result of \cite[Thm 3.1]{Wilkin08} shows that the solutions converge to a unique limit $(A_\infty, \phi_\infty)$ which is a critical point of $\mathop{\rm YMH}\nolimits$. Moreover \cite[Thm. 4.1]{Wilkin08} shows that the isomorphism class of this limit is determined by the graded object of the Harder-Narasimhan-Seshadri double filtration of the initial condition $(A_0, \phi_0)$.
\begin{remark}
Since the space $\mathcal{B}$ of Higgs bundles is singular, then we define the gradient of $\mathop{\rm YMH}\nolimits$ as the gradient of the function $\| F_A + [\phi, \phi^*] \|_{L^2}^2$ defined on the ambient smooth space $T^* \mathcal{A}^{0,1}$, which contains the space $\mathcal{B}$ as a singular subset. When the initial condition is a Higgs bundle, then a solution to \eqref{eqn:YMH-flow} is generated by the action of the complex gauge group $\mathcal{G}^\mathbb C$ which preserves $\mathcal{B}$. Therefore the solution to \eqref{eqn:YMH-flow} is contained in $\mathcal{B}$ and so from now on we can consider the flow \eqref{eqn:YMH-flow} as a well-defined gradient flow on the singular space $\mathcal{B}$. Throughout the paper we define a critical point to be a stationary point for the Yang-Mills-Higgs flow. \end{remark}
\begin{definition}\label{def:critical-point} A \emph{critical point} for $\mathop{\rm YMH}\nolimits$ is a pair $(A, \phi) \in \mathcal{B}$ such that \begin{equation}\label{eqn:critical-point} \bar{\partial}_A * (F_A + [\phi, \phi^*]) = 0, \quad \text{and} \quad \left[ \phi, *(F_A + [\phi, \phi^*]) \right] = 0 . \end{equation}
\end{definition}
The critical point equations \eqref{eqn:critical-point} imply that the bundle $E$ splits into holomorphic $\phi$-invariant sub-bundles $E_1 \oplus \cdots \oplus E_n$, such that the induced Higgs structure $(\bar{\partial}_{A_j}, \phi_j)$ on the bundle $E_j$ minimises the Yang-Mills-Higgs functional on the bundle $E_j$ (cf. \cite[Sec. 5]{AtiyahBott83} for holomorphic bundles and \cite[Sec. 4]{Wilkin08} for Higgs bundles). In particular, each Higgs pair $(\bar{\partial}_{A_j}, \phi_j)$ is polystable. The decomposition is not necessarily unique due to the possibility of polystable bundles with the same slope, however it is unique if we impose the condition that $(E_1, \phi_1) \oplus \cdots \oplus (E_n, \phi_n)$ is the graded object of the socle filtration of the Higgs bundle $(E, \phi)$ (see \cite{HuybrechtsLehn97} for holomorphic bundles and \cite[Sec. 4]{BiswasWilkin10} for Higgs bundles). With respect to this decomposition the curvature $*(F_A + [\phi, \phi^*]) \in \Omega^0(\mathop{\rm ad}\nolimits(E)) \cong \mathop{\rm Lie}\nolimits(\mathcal{G})$ has the following block-diagonal form \begin{equation}\label{eqn:critical-curvature} i * (F_A + [\phi, \phi^*]) = \left( \begin{matrix} \lambda_1 \cdot \id_{E_1} & 0 & \cdots & 0 \\ 0 & \lambda_2 \cdot \id_{E_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \lambda_n \cdot \id_{E_n} \end{matrix} \right) \end{equation} where $\lambda_j =\slope(E_j)$ and we order the eigenvalues by $\lambda_j < \lambda_k$ for all $j < k$.
\begin{definition} A \emph{Yang-Mills-Higgs flow line} connecting an upper critical point $x_u = (\bar{\partial}_{A_u}, \phi_u)$ and a lower critical point $x_\ell = (\bar{\partial}_{A_\ell}, \phi_\ell)$ is a continuous map $\gamma : \mathbb{R} \rightarrow \mathcal{B}$ such that \begin{enumerate}
\item $\frac{d\gamma}{dt}$ satisfies the Yang-Mills-Higgs flow equations \eqref{eqn:YMH-flow}, and
\item $\lim_{t \rightarrow - \infty} \gamma(t) = x_u$ and $\lim_{t \rightarrow \infty} \gamma(t) = x_\ell$, where the convergence is in the $C^\infty$ topology on $\mathcal{B}$.
\end{enumerate}
\end{definition}
\begin{definition}\label{def:unstable-set} The \emph{unstable set} $W_{x_u}^-$ of a non-minimal critical point $x_u = (\bar{\partial}_{A_u}, \phi_u)$ is defined as the set of all points $y_0 \in \mathcal{B}$ such that a solution $y_t$ to the Yang-Mills-Higgs flow equations \eqref{eqn:YMH-flow} exists for all $t \in (-\infty, 0]$ and $y_t \rightarrow x_u$ in the $C^\infty$ topology on $\mathcal{B}$ as $t \rightarrow - \infty$. \end{definition}
\subsection{A local slice theorem}\label{subsec:local-slice}
In this section we define local slices around the critical points and describe the isomorphism classes in the negative slice.
\begin{definition}\label{def:slice} Let $x = (\bar{\partial}_A, \phi) \in \mathcal{B}$. The \emph{slice} through $x$ is the set of deformations orthogonal to the $\mathcal{G}^\mathbb C$ orbit at $x$. \begin{equation}\label{eqn:slice-def} S_x = \{ (a, \varphi) \in \Omega^{0,1}(\mathop{\rm End}\nolimits(E)) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)) \mid \bar{\partial}_A^* a -*[*\phi^*, \varphi] = 0, (\bar{\partial}_A + a, \phi + \varphi) \in \mathcal{B} \} . \end{equation} If $x$ is a critical point of $\mathop{\rm YMH}\nolimits$ with $\beta = *(F_A + [\phi, \phi^*])$, then the \emph{negative slice} $S_x^-$ is the subset \begin{equation}\label{eqn:neg-slice-def} S_x^- = \{ (a, \varphi) \in S_x \mid \lim_{t \rightarrow \infty} e^{i \beta t} \cdot (a, \varphi) = 0 \} . \end{equation} \end{definition}
To prove Lemma \ref{lem:slice-theorem} and Proposition \ref{prop:filtered-slice-theorem} below, one needs to first define the slice on the $L_1^2$ completion of the space of Higgs bundles with the action of the $L_2^2$ completion of the gauge group. The following lemma shows that if the critical point $x$ is $C^\infty$ then the elements in the slice $S_x$ are also $C^\infty$.
\begin{lemma}\label{lem:slice-smooth} Let $x = (\bar{\partial}_A, \phi)$ be a critical point of $\mathop{\rm YMH}\nolimits$ in the space of $C^\infty$ Higgs bundles, let $S_x$ be the set of solutions to the slice equations in the $L_1^2$ completion of the space of Higgs bundles and let $\delta x = (a, \varphi) \in S_x$. Then $\delta x$ is $C^\infty$. \end{lemma}
\begin{proof} The slice equations are \begin{align*} \bar{\partial}_A \varphi + [a, \phi] + [a, \varphi] & = 0 \\ \bar{\partial}_A^* a - *[\phi^*, *\varphi] & = 0 \end{align*} Since $(a, \varphi) \in L_1^2$ and $(\bar{\partial}_A, \phi)$ is $C^\infty$, then the second equation above implies that $\bar{\partial}_A^* a \in L_1^2$ and so $a \in L_2^2$ by elliptic regularity. After applying Sobolev multiplication $L_2^2 \times L_1^2 \rightarrow L^4$, then $[a, \varphi] \in L^4$ and so the first equation above implies that $\bar{\partial}_A \varphi \in L^4$, hence $\varphi \in L_1^4$. Repeating this again shows that $\varphi \in L_2^2$, and then one can repeat the process inductively to show that $\delta x = (a, \varphi)$ is $C^\infty$. \end{proof}
The following result gives a local description of the space of Higgs bundles in terms of the slice. The infinitesimal action of $\mathcal{G}^\mathbb C$ at $x \in \mathcal{B}$ is denoted by $\rho_x : \mathop{\rm Lie}\nolimits(\mathcal{G}^\mathbb C) \cong \Omega^0(\mathop{\rm End}\nolimits(E)) \rightarrow \Omega^{0,1}(\mathop{\rm End}\nolimits(E)) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E))$. Explicitly, for $x = (\bar{\partial}_A, \phi)$ and $u \in \Omega^0(\mathop{\rm End}\nolimits(E))$, we have $\rho_x(u) = -(\bar{\partial}_A u, [\phi, u])$. The $L^2$-orthogonal complement of $\ker \rho_x \subseteq \Omega^0(\mathop{\rm End}\nolimits(E))$ is denoted $(\ker \rho_x)^\perp$.
\begin{lemma}\label{lem:slice-theorem} Fix $x \in \mathcal{B}$. Then the map $\psi : (\ker \rho_x)^\perp \times S_x \rightarrow \mathcal{B}$ given by $\psi(u, \delta x) = \exp(u) \cdot (x + \delta x)$ is a local homeomorphism. \end{lemma}
\begin{proof} The result of \cite[Prop. 4.12]{Wilkin08} shows that the statement is true for the $L_1^2$ completion of the space of Higgs bundles and the $L_2^2$ completion of the gauge group, and so it only remains to show that it remains true on restricting to the space of $C^\infty$ Higgs bundles with the action of the group of $C^\infty$ gauge transformations. The proof of this statement follows from elliptic regularity using the same method as \cite[Cor. 4.17]{Wilkin08}. \end{proof}
Now let $x = (\bar{\partial}_A, \phi)$ be a critical point and let $\beta = \mu(x) := *(F_A + [\phi, \phi^*])$. The Lie algebra $\mathop{\rm Lie}\nolimits(\mathcal{G}^\mathbb C) \cong \Omega^0(\mathop{\rm End}\nolimits(E))$ decomposes into eigenbundles for the adjoint action of $e^{i \beta}$. We denote the positive, zero and negative eigenspaces respectively by $\Omega^0(\mathop{\rm End}\nolimits(E)_+)$, $\Omega^0(\mathop{\rm End}\nolimits(E)_0)$ and $\Omega^0(\mathop{\rm End}\nolimits(E)_-)$. The positive and negative eigenspaces are nilpotent Lie algebras with associated unipotent groups $\mathcal{G}_+^\mathbb C$ and $\mathcal{G}_-^\mathbb C$. The subgroups of $\mathcal{G}$ and $\mathcal{G}^\mathbb C$ consisting of elements commuting with $e^{i \beta}$ are denoted $\mathcal{G}_\beta$ and $\mathcal{G}_\beta^\mathbb C$ respectively. Since $\Omega^0(\mathop{\rm End}\nolimits(E)_0) \oplus \Omega^0(\mathop{\rm End}\nolimits(E)_+)$ is also a Lie algebra then there is a corresponding subgroup denoted $\mathcal{G}_*^\mathbb C$.
Let $\mathcal{G}_x$ and $\mathcal{G}_x^\mathbb C$ denote the respective isotropy groups of $x$ in $\mathcal{G}$ and $\mathcal{G}^\mathbb C$. There is an inclusion $(\mathcal{G}_x)^\mathbb C \subseteq \mathcal{G}_x^\mathbb C$, however at a non-minimal critical point the two groups may not be equal (in the context of reductive group actions on finite-dimensional affine spaces, this question has been studied by Sjamaar in \cite[Prop. 1.6]{Sjamaar95}). At a general critical point, the Higgs bundle $(E, \phi)$ splits into polystable Higgs sub-bundles $(E_1, \phi_1) \oplus \cdots \oplus (E_n, \phi_n)$, where we order by increasing slope. Then a homomorphism $u \in \mathop{\rm Hom}\nolimits(E_j, E_k)$ satisfying $u \phi_j = \phi_k u$ will be zero if $j > k$ since $(E_j, \phi_j)$ and $(E_k, \phi_k)$ are polystable and $\slope(E_j) > \slope(E_k)$, however if $j < k$ then the homomorphisms do not necessarily vanish in which case $(\mathcal{G}_x)^\mathbb C \subsetneq \mathcal{G}_x^\mathbb C$. Therefore $\ker \rho_x = \mathop{\rm Lie}\nolimits(\mathcal{G}_x^\mathbb C) \subset \Omega^0(\mathop{\rm End}\nolimits(E)_+) \oplus \Omega^0(\mathop{\rm End}\nolimits(E)_0)$, and so $\mathcal{G}_x^\mathbb C \subset \mathcal{G}_*^\mathbb C$.
The result of \cite[Thm. 2.16]{Daskal92} shows that the $L_2^2$ completion of the gauge group satisfies $\mathcal{G}^\mathbb C \cong \mathcal{G}_*^\mathbb C \times_{\mathcal{G}_\beta} \mathcal{G}$. We will use $(\ker \rho_x)_*^\perp$ to denote $(\ker \rho_x)^\perp \cap \left( \Omega^0(\mathop{\rm End}\nolimits(E)_+) \oplus \Omega^0(\mathop{\rm End}\nolimits(E)_0) \right)$. At a critical point $x$, the above argument shows that the isotropy group $\mathcal{G}_x^\mathbb C$ is contained in $\mathcal{G}_*^\mathbb C$, and so we have the following refinement of Lemma \ref{lem:slice-theorem}. \begin{proposition}\label{prop:filtered-slice-theorem} Let $x \in \mathcal{B}$ be a critical point of $\mathop{\rm YMH}\nolimits$. Then there exists a $\mathcal{G}$-invariant neighbourhood $U$ of $x$ and a neighbourhood $U'$ of $[\id, 0, 0]$ in $\mathcal{G} \times_{\mathcal{G}_\beta} \left( (\ker \rho_x)_*^\perp \times S_x \right)$ such that $\psi : U' \rightarrow U$ is a $\mathcal{G}$-equivariant homeomorphism. \end{proposition}
The results of Section \ref{sec:local-analysis} show that the negative slice $S_x^-$ is complex gauge-equivalent to the unstable set $W_x^-$ of a critical point. The next lemma gives a complete classification of the isomorphism classes in $S_x^-$. Together with the results of Section \ref{sec:local-analysis}, this is used in Section \ref{sec:hecke} to classify critical points connected by flow lines.
\begin{lemma}\label{lem:classify-neg-slice} Let $x = (E_1, \phi_1) \oplus \cdots \oplus (E_n, \phi_n)$ be a critical point of $\mathop{\rm YMH}\nolimits$ with curvature as in \eqref{eqn:critical-curvature} with the Higgs polystable subbundles ordered so that $\slope(E_j) < \slope(E_k)$ iff $j < k$. If $\delta x \in S_x^- \cap U$ then $x + \delta x$ has a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)})$ by Higgs subbundles such that the successive quotients are $(E^{(k)}, \phi^{(k)}) / (E^{(k-1)}, \phi^{(k-1)}) = (E_k, \phi_k)$. Conversely, there exists a neighbourhood $U$ of $x$ such that if a Higgs bundle $y = (E, \phi) \in U$ admits such a filtration then it is gauge equivalent to $x + \delta x$ for some $\delta x \in S_x^-$. \end{lemma}
\begin{proof} The first statement follows directly from the definition of the negative slice in \eqref{eqn:neg-slice-def}.
Let $\mathop{\rm End}\nolimits(E)_-$ be the subbundle of $\mathop{\rm End}\nolimits(E)$ corresponding to the negative eigenspaces of $i \beta$ and let $\rho_x^- : \Omega^0(\mathop{\rm End}\nolimits(E)_-) \rightarrow \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-)$ be the restriction of the infinitesimal action to the negative eigenspaces. Then \begin{equation*} \im \rho_x^- = \im \rho_x \cap \left( \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \right) \end{equation*} and \begin{equation}\label{eqn:negative-orthogonal} \ker (\rho_x^-)^* \supseteq \ker \rho_x^* \cap \left( \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \right) \end{equation} Since $\im \rho_x \oplus \ker \rho_x^* \cong \Omega^{0,1}(\mathop{\rm End}\nolimits(E)) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E))$ by \cite[Lem. 4.9]{Wilkin08} then \begin{align*} \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) & = \left( \im \rho_x \oplus \ker \rho_x^* \right) \cap \left( \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \right) \\
& \subseteq \left( \im \rho_x^- \oplus \ker (\rho_x^-)^* \right) \subseteq \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \end{align*} and so \eqref{eqn:negative-orthogonal} must be an equality, therefore $\Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \cong \im \rho_x^- \oplus \ker (\rho_x^-)^*$. Therefore the function \begin{align*} \psi^- : (\ker \rho_x^-)^\perp \times \ker (\rho_x^-)^* & \rightarrow \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \\
(u, \delta x) & \mapsto e^u \cdot (x + \delta x) \end{align*} is a local diffeomorphism at $0$. If $\delta x \in S_x^-$ then $x + \delta x \in \mathcal{B}$, and so $e^u \cdot (x + \delta x) \in \mathcal{B}$, since the complex gauge group preserves the space of Higgs bundles. Conversely, if $e^u \cdot (x + \delta x) \in \mathcal{B}$ then $x + \delta x \in \mathcal{B}$ and so $\delta x \in S_x^-$. Therefore $\psi^-$ restricts to a local homeomorphism $(\ker \rho_x^-)^\perp \times S_x^- \rightarrow \mathcal{B} \cap \left( \Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-) \right)$. \end{proof}
The next two results concern a sequence of points $g_t \cdot z$ in a $\mathcal{G}^\mathbb C$ orbit which approach a critical point $x$ in the $L_k^2$ norm and for which $\mathop{\rm YMH}\nolimits(z) < \mathop{\rm YMH}\nolimits(x)$. Since $x$ is critical and $\mathop{\rm YMH}\nolimits(z) < \mathop{\rm YMH}\nolimits(x)$ then $x \in \overline{\mathcal{G}^\mathbb C \cdot z} \setminus \mathcal{G}^\mathbb C \cdot z$, and therefore $\| g_t \|_{L_{k+1}^2} \rightarrow \infty$. The result below shows that the $C^0$ norm of the function $\sigma(h_t) = \mathop{\rm Tr}\nolimits(h_t) + \mathop{\rm Tr}\nolimits(h_t^{-1}) - 2 \rank (E)$ must also blow up.
\begin{lemma}\label{lem:GIT-C0-norm-blows-up} Let $x \in \mathcal{B}$ be a critical point of $\mathop{\rm YMH}\nolimits$ and let $z \in \mathcal{B}$ be any point such that $\mathop{\rm YMH}\nolimits(z) < \mathop{\rm YMH}\nolimits(x)$. Suppose that there exists a sequence of gauge transformations $g_t \in \mathcal{G}^\mathbb C$ such that $g_t \cdot z \rightarrow x$ in $L_k^2$. Then the change of metric $h_t = g_t^* g_t$ satisfies $\sup_X \sigma(h_t) \rightarrow \infty$. \end{lemma}
\begin{proof} Let $U$ be the neighbourhood of $x$ from Lemma \ref{lem:slice-theorem}. Since $g_t \cdot z \rightarrow x$, then there exists $T$ such that $g_t \cdot z \in U$ for all $t \geq T$. Therefore there exists $f_t$ in a neighbourhood of the identity in $\mathcal{G}^\mathbb C$ such that $f_t \cdot g_t \cdot z \in S_x$. The uniqueness of the decomposition from the slice theorem shows that if $t > T$, then $f_{t} \cdot g_{t} \cdot z = f_{t, T} \cdot f_{T} \cdot g_{T} \cdot z$ with $f_{t,T} \in \mathcal{G}_x^\mathbb C$. Therefore $t \rightarrow \infty$ implies that $f_{t,T}$ diverges in $\mathcal{G}_x^\mathbb C$. Fix a point $p$ on the surface $X$, and let $\mathcal{G}_0^\mathbb C$ be the normal subgroup of complex gauge transformations that are the identity at $p$, as in \cite[Sec. 13]{AtiyahBott83}. We have the following short exact sequence of groups \begin{equation*} 1 \rightarrow \mathcal{G}_0^\mathbb C \rightarrow \mathcal{G}^\mathbb C \rightarrow \mathsf{GL}(n, \mathbb C) \rightarrow 1 . \end{equation*} Since $\mathcal{G}_0^\mathbb C$ acts freely on the space of connections (and hence on $\mathcal{B}$), then restriction to the fibre over $p$ defines a bijective correspondence between $\mathcal{G}_x^\mathbb C \subset \mathcal{G}^\mathbb C$ and a subgroup of $\mathsf{GL}(n, \mathbb C)$ via the exact sequence above. Therefore $f_{t,T}$ diverges in $\mathcal{G}_x^\mathbb C$ implies that the restriction of $f_{t,T}$ to the fibre over $p$ diverges in $\mathsf{GL}(n, \mathbb C)$, and so the $C^0$ norm of $f_{t,T}$ diverges to $\infty$, and hence the same is true for $g_t = f_t^{-1} \cdot f_{t, T} \cdot f_T \cdot g_T$ since $g_T$ is fixed and both $f_t$ and $f_T$ are contained in a fixed neighbourhood of the identity in $\mathcal{G}^\mathbb C$. Therefore $\sup_X \sigma(h_t) \rightarrow \infty$. \end{proof}
\begin{corollary}\label{cor:bounded-metric-away-from-critical} Let $x$ be a critical point of $\mathop{\rm YMH}\nolimits$. Then for each neighbourhood $V$ of $x$ in the $L_k^2$ topology on $\mathcal{B}$ and each constant $C > 0$, there exists a neighbourhood $U$ of $x$ such that if $z \notin V$ and $\mathop{\rm YMH}\nolimits(z) < \mathop{\rm YMH}\nolimits(x)$, then $y = g \cdot z$ with $h = g^* g$ satisfying $\sup_X \sigma(h) \leq C$ implies that $y \notin U$. \end{corollary}
\begin{proof} If no such neighbourhood $U$ exists, then we can construct a sequence $y_t = g_t \cdot z$ converging to $x$ in $L_k^2$ such that $h_t = g_t^* g_t$ satisfies $\sup_X \sigma(h_t) \leq C$ for all $t$, however this contradicts the previous lemma. \end{proof}
\subsection{Modifying the $\mathop{\rm YMH}\nolimits$ flow in a neighbourhood of a critical point}
Let $x$ be a critical point, let $\beta = \mu(x) = *(F_A + [\phi, \phi^*])$, and let $\mathcal{G}_*^\mathbb C$ be the subgroup defined in the previous section. In this section we explain how to modify the $\mathop{\rm YMH}\nolimits$ flow near $x$ so that the gauge transformation generating the flow is contained in $\mathcal{G}_*^\mathbb C$. The reason for modifying the flow is so that we can apply the distance-decreasing formula of Lemma \ref{lem:modified-distance-decreasing}, which is used for the convergence result of Section \ref{subsec:inverse-construction}.
Let $U$ be a $\mathcal{G}$-invariant neighbourhood of $x$ such that $U$ is homeomorphic to a neighbourhood of $[\id, 0, 0]$ in $\mathcal{G} \times_{\mathcal{G}_\beta} \left( (\ker \rho_x)_*^\perp \times S_x \right)$ by Proposition \ref{prop:filtered-slice-theorem}. Let $V \subset U$ be the image of $(\ker \rho_x)_*^\perp \times S_x$ under the homeomorphism from Proposition \ref{prop:filtered-slice-theorem}. For each $y \in V$, let $\gamma_-(y)$ be the component of $\mu(y)$ in $\Omega^0(\mathop{\rm End}\nolimits(E)_-)$. Since $\mu$ is $\mathcal{G}$-equivariant then we can extend $\gamma_-$ equivariantly from $V$ to all of $U$ using the action of $\mathcal{G}$. Define the map $\gamma : U \rightarrow \mathop{\rm Lie}\nolimits(\mathcal{G})$ by \begin{equation}\label{eqn:def-gamma} \gamma(y) = \gamma_-(y) - \gamma_-(y)^* \end{equation}
\begin{definition}\label{def:modified-flow} The \emph{modified flow} with initial condition $y_0 \in U$ is the solution to \begin{equation}\label{eqn:modified-flow} \frac{dy}{dt} = - I \rho_y(\mu(y)) + \rho_y(\gamma(y)) . \end{equation} More explicitly, on the space of Higgs bundles $y = (\bar{\partial}_A, \phi)$ satisfies \begin{align*} \frac{\partial A}{\partial t} & = i \bar{\partial}_A * (F_A + [\phi, \phi^*]) - \bar{\partial}_A \gamma(\bar{\partial}_A, \phi) \\ \frac{\partial \phi}{\partial t} & = i [\phi, *(F_A + [\phi, \phi^*])] - [\phi, \gamma(\bar{\partial}_A, \phi)] \end{align*} \end{definition}
In analogy with \eqref{eqn:gauge-flow}, the modified flow is generated by the action of the gauge group $y_t = g_t \cdot y_0$, where $g_t$ satisfies the equation \begin{equation}\label{eqn:modified-gauge-flow} \frac{\partial g_t}{\partial t} g_t^{-1} = -i \mu(g_t \cdot y_0) + \gamma(g_t \cdot y_0), \quad g_0 = \id . \end{equation} As before, let $V \subset U$ be the image of $(\ker \rho_x)_*^\perp \times S_x$ under the homeomorphism from the slice theorem (Proposition \ref{prop:filtered-slice-theorem}). Note that if $y_0 \in V$ then $\frac{\partial g_t}{\partial t} g_t^{-1} \in \mathop{\rm Lie}\nolimits(\mathcal{G}_*^\mathbb C)$, so $g_t \in \mathcal{G}_*^\mathbb C$ and the solution to the modified flow remains in $V$ for as long as it remains in the neighbourhood $U$.
\begin{lemma}\label{lem:relate-flows} Let $y_t = g_t \cdot y_0$ be the solution to the $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:gauge-flow} with initial condition $y_0$. Then there exists $s_t \in \mathcal{G}$ solving the equation \begin{equation}\label{eqn:unitary-modification} \frac{ds}{dt} s_t^{-1} = \gamma(s_t \cdot y_t), \quad s_0 = \id \end{equation} such that $\tilde{y}_t = s_t \cdot y_t$ is a solution to the modified flow equation \eqref{eqn:modified-flow} with initial condition $y_0$. \end{lemma}
\begin{proof} Since $\gamma$ is $\mathcal{G}$-equivariant then \eqref{eqn:unitary-modification} reduces to \begin{equation*} \frac{ds}{dt} s_t^{-1} = \mathop{\rm Ad}\nolimits_{s_t} \gamma(y_t) . \end{equation*} Since $\gamma(y_t) \in \mathop{\rm Lie}\nolimits(\mathcal{G})$ is already defined by the gradient flow $y_t$, then this equation reduces to solving an ODE on the fibres of the bundle, and therefore existence of solutions follows from ODE existence theory. Let $\tilde{g}_t = s_t \cdot g_t$. A calculation shows that \begin{align*} \frac{d \tilde{g}_t}{dt} \tilde{g}_t^{-1} & = \frac{ds}{dt} s_t^{-1} + \mathop{\rm Ad}\nolimits_{s_t} \left( \frac{dg}{dt} g_t^{-1} \right) \\
& = \gamma(s_t \cdot y_t) - i \mathop{\rm Ad}\nolimits_{s_t} \mu(y_t) \\
& = \gamma(\tilde{y}_t) - i \mu(\tilde{y}_t) \\
& = \gamma(\tilde{g}_t \cdot y_0) - i \mu(\tilde{g}_t \cdot y_0) , \end{align*} and so $\tilde{y}_t = \tilde{g}_t \cdot y_0 = s_t \cdot y_t$ is a solution to the modified flow \eqref{eqn:modified-flow} with initial condition $y_0$. \end{proof}
As a corollary, we see that the change of metric is the same for the YMH flow \eqref{eqn:gauge-flow} and the modified flow \eqref{eqn:modified-gauge-flow}.
\begin{corollary}\label{cor:metrics-same} Let $y_t = g_t \cdot y_0$ be a solution to the Yang-Mills-Higgs flow equation \eqref{eqn:gauge-flow} and $\tilde{y}_t = \tilde{g}_t \cdot y_0$ be a solution to the modified flow equation \eqref{eqn:modified-gauge-flow}. Then $h_t = g_t^* g_t = \tilde{g}_t^* \tilde{g}_t$. \end{corollary}
Finally, we prove that convergence for the upwards $\mathop{\rm YMH}\nolimits$ flow implies convergence for the modified flow. \begin{lemma}\label{lem:unstable-sets-same} Let $x$ be a critical point and let $y_0 \in W_x^-$. Then the modified flow with initial condition $y_0$ exists for all $t \in (-\infty, 0]$ and converges in the $C^\infty$ topology to a point in $\mathcal{G} \cdot x$. \end{lemma}
\begin{proof} Let $y_t$ be the $\mathop{\rm YMH}\nolimits$ flow with initial condition $y_0$ and $\tilde{y}_t = s_t \cdot y_t$ the modified flow. By the definition of $W_x^-$ the $\mathop{\rm YMH}\nolimits$ flow exists for all $t \in (-\infty, 0]$ and $y_t \rightarrow x$ in the $C^\infty$ topology. Existence of the modified flow then follows from Lemma \ref{lem:relate-flows}. Proposition \ref{prop:exponential-convergence} shows that $y_t \rightarrow x$ exponentially in $L_k^2$ for all $k$, and so the same is true for $\gamma(y_t)$. Therefore the length of the modified flow line satisfies \begin{align*}
\int_{-\infty}^0 \| I \rho_{\tilde{y}_t}(\mu(\tilde{y}_t)) - \rho_{\tilde{y}_t}(\gamma(\tilde{y}_t)) \|_{L_k^2} \, dt & = \int_{-\infty}^0 \| I \rho_{y_t}(\mu(y_t)) - \rho_{y_t}(\gamma(y_t)) \|_{L_k^2} \, dt \\
& \leq \int_{-\infty}^0 \| \rho_{y_t}(\mu(y_t)) \|_{L_k^2} \, dt + \int_{-\infty}^0 \| \rho_{y_t}(\gamma(y_t)) \|_{L_k^2} \, dt \end{align*}
which is finite since the length $\int_{-\infty}^0 \| \rho_{y_t}(\mu(y_t)) \|_{L_k^2} \, dt$ of the $\mathop{\rm YMH}\nolimits$ flow line is finite, $y_t$ is bounded and $\gamma(y_t) \rightarrow 0$ exponentially. This is true for all $k$, and so the modified flow converges in the $C^\infty$ topology. \end{proof}
\subsection{Preliminary estimates for the $\mathop{\rm YMH}\nolimits$ flow in a neighbourhood of a critical point}
Given eigenvalues for $i \beta$ labelled by $\lambda_1 \leq \cdots \leq \lambda_k < 0 \leq \lambda_{k+1} \leq \cdots$, for any $y \in S_x^-$ and any norm, we have the Lipschitz bounds \begin{equation}\label{eqn:lipschitz-slice}
e^{\lambda_1 t} \| y - x \| \leq \| e^{i \beta t} \cdot y - x \| \leq e^{\lambda_k t} \| y - x \| . \end{equation}
\begin{lemma}\label{lem:moment-map-quadratic}
For any critical point $x$ there exists $C>0$ such that for any $y \in S_{x}^-$, we have $\| \mu(y) - \beta \|_{C^0} \leq C \| y - x \|_{C^0}^2$. \end{lemma}
\begin{proof} Let $y \in S_x^-$ and define $\delta y := y-x \in V \cong T_x V$. Then the defining equation for the moment map shows that for all $v \in \mathfrak{k}$, we have \begin{equation*} d \mu_x (\delta y) \cdot v = \omega(\rho_x(v), \delta y) = \left< I \rho_x(v), \delta y \right> \end{equation*} By the definition of the slice, each $\delta y \in S_x^-$ is orthogonal to the infinitesimal action of $\mathcal{G}^\mathbb C$ at $x$, and so $\left< I \rho_x(v), \delta y \right>=0$ for all $v \in \mathfrak{k}$. Therefore $d \mu_x(\delta y) = 0$. Since the moment map $\mu(\bar{\partial}_A, \phi) = *(F_A + [\phi, \phi^*])$ is quadratic, then we have \begin{equation*}
\| \mu(y) - \mu(x) \|_{C^0} \leq \| d\mu_x(\delta y) \|_{C^0} + C \| \delta y \|_{C^0}^2 = C \| \delta y \|_{C^0}^2 . \end{equation*} Since the moment map is $\mathcal{G}$-equivariant and the norms above are all $\mathcal{G}$-invariant, then the constant $C$ is independent of the choice of critical point in the orbit $\mathcal{G} \cdot x$. \end{proof}
Given $g \in \mathcal{G}^\mathbb C$, let $g^*$ denote the adjoint with respect to the Hermitian metric on $E$ and let $\mathcal{G}$ act on $\mathcal{G}^\mathbb C$ by left multiplication. In every equivalence class of the space of metrics $\mathcal{G}^\mathbb C/ \mathcal{G}$ there is a unique positive definite self-adjoint section $h$, which we use from now on to represent elements of $\mathcal{G}^\mathbb C/ \mathcal{G}$. Given $h = g^* g \in \mathcal{G}^\mathbb C/ \mathcal{G}$, define $\mu_h : \mathcal{B} \rightarrow \Omega^0(\mathop{\rm End}\nolimits(E)) \cong \mathop{\rm Lie}\nolimits (\mathcal{G}^\mathbb C)$ by \begin{equation}\label{eqn:def-muh} \mu_h(y) = \mathop{\rm Ad}\nolimits_{g^{-1}} \left( \mu(g\cdot y) \right) . \end{equation} Since the moment map is $\mathcal{G}$-equivariant, then for any $k \in \mathcal{G}$ we have \begin{equation*} \mathop{\rm Ad}\nolimits_{g^{-1}} \mathop{\rm Ad}\nolimits_{k^{-1}} \left( \mu(k \cdot g \cdot y) \right) = \mathop{\rm Ad}\nolimits_{g^{-1}} \left( \mu(g\cdot y) \right) \end{equation*} and so $\mu_h$ is well-defined on $\mathcal{G}^\mathbb C/ \mathcal{G}$. The length of a geodesic in the space of positive definite Hermitian matrices is computed in \cite[Ch. VI.1]{Kobayashi87}. Following \cite[Prop. 13]{Donaldson85} (see also \cite[Prop. 6.3]{Simpson88}), it is more convenient to define the distance function $\sigma : \mathcal{G}^\mathbb C/ \mathcal{G}\rightarrow \mathbb R$ \begin{equation}\label{eqn:def-sigma} \sigma(h) = \mathop{\rm Tr}\nolimits h + \mathop{\rm Tr}\nolimits h^{-1} - 2 \rank (E) . \end{equation} As explained in \cite{Donaldson85}, the function $\sup_X \sigma$ is not a norm in the complete metric space $\mathcal{G}^\mathbb C/ \mathcal{G}$, however we do have $h_t \stackrel{C^0}{\longrightarrow} h_\infty$ in $\mathcal{G}^\mathbb C/ \mathcal{G}$ if and only if $\sup_X \sigma(h_t h_\infty^{-1}) \rightarrow 0$. 
Note that if $h_1 = g_1^* g_1$ and $h_2 = g_2^* g_2$, then \begin{equation}\label{eqn:metric-difference} \sigma(h_1 h_2^{-1}) = \sigma \left( g_1^* g_1 g_2^{-1} (g_2^*)^{-1} \right) = \sigma \left( (g_1 g_2^{-1})^* g_1 g_2^{-1} \right) . \end{equation}
Recall from \cite{Donaldson85}, \cite{Simpson88} that we have the following distance-decreasing formula for a solution to the downwards $\mathop{\rm YMH}\nolimits$ flow. Since the change of metric is the same for the modified flow by Corollary \ref{cor:metrics-same}, then \eqref{eqn:distance-decreasing} is also valid for the modified flow.
\begin{lemma}\label{lem:distance-decreasing} Let $y_1, y_2 \in \mathcal{B}$ and suppose that $y_1 = g_0 \cdot y_2$ for some $g_0 \in \mathcal{G}^\mathbb C$. For $j = 1,2$, define $y_j(t)$ to be the solution of the $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:YMH-flow} with initial condition $y_j$. Define $g_t$ by $y_1(t) = g_t \cdot y_2(t)$ and let $h_t = g_t^* g_t$ be the associated change of metric. Then \begin{equation}\label{eqn:distance-decreasing} \left( \frac{\partial}{\partial t} + \Delta \right) \sigma(h_t) \leq 0 . \end{equation} \end{lemma}
Since $\mathop{\rm Lie}\nolimits(\mathcal{G}_*^\mathbb C) = \Omega^0(\mathop{\rm End}\nolimits(E))_0 \oplus \Omega^0(\mathop{\rm End}\nolimits(E))_+$ and the adjoint action of $e^{-i \beta t}$ is the identity on $\Omega^0(\mathop{\rm End}\nolimits(E))_0$ and strictly contracting on $\Omega^0(\mathop{\rm End}\nolimits(E))_+$, then we have the following lemma which is used in Section \ref{subsec:inverse-construction}.
\begin{lemma}\label{lem:modified-distance-decreasing} Given any $g_0 \in \mathcal{G}_*^\mathbb C$, let $g_t = e^{-i \beta t} g_0 e^{i \beta t}$ and $h_t = g_t^* g_t$. Then $\frac{\partial}{\partial t} \sigma(h_t) \leq 0$. \end{lemma}
As part of the proof of the distance-decreasing formula in \cite{Donaldson85} we also have the following inequalities. This result is used in the proof of Lemma \ref{lem:uniform-bound-sigma}. \begin{lemma}\label{lem:metric-inequalities} For any metric $h \in \mathcal{G}^\mathbb C / \mathcal{G}$ and any $y \in \mathcal{B}$, we have \begin{align*} -2i \mathop{\rm Tr}\nolimits \left( (\mu_h(y) - \mu(y)) h \right) + \Delta \mathop{\rm Tr}\nolimits(h) & \leq 0 \\ 2i \mathop{\rm Tr}\nolimits \left( (\mu_h(y) - \mu(y)) h^{-1} \right) + \Delta \mathop{\rm Tr}\nolimits(h) & \leq 0 . \end{align*} \end{lemma}
\subsection{Exponential convergence of the backwards flow}
In this section we prove that if a solution to the backwards $\mathop{\rm YMH}\nolimits$ flow converges to a critical point, then it must do so exponentially in each Sobolev norm.
\begin{proposition}\label{prop:exponential-convergence}
Let $y_t$ be a solution to the $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:YMH-flow} such that $\lim_{t \rightarrow -\infty} y_t = x$. Then for each positive integer $k$ there exist positive constants $C_1$ and $\eta$ such that $\| y_t - x \|_{L_k^2} \leq C_1 e^{\eta t}$ for all $t \leq 0$. \end{proposition}
The proof of the proposition reduces to the following lemmas. First recall from the slice theorem that there is a unique decomposition \begin{equation*} y = e^u \cdot (x + z) \end{equation*} for $u \in (\ker \rho_x)^\perp$ and $z \in S_x$. We can further decompose $z = z_{\geq 0} + z_-$, where $z_- \in S_x^-$ is the component of $z$ in the negative slice and $z_{\geq 0} = z - z_-$. At the critical point $x$ we have the decomposition $\mathop{\rm End}\nolimits(E) \cong \mathop{\rm End}\nolimits(E)_+ \oplus \mathop{\rm End}\nolimits(E)_0 \oplus \mathop{\rm End}\nolimits(E)_-$ according to the eigenspaces of $i \beta$ (cf. Sec. \ref{subsec:local-slice}). Then with respect to this decomposition $z_{\geq 0}$ is the component of $z$ in $\Omega^{0,1}(\mathop{\rm End}\nolimits(E)_+ \oplus \mathop{\rm End}\nolimits(E)_0) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_+ \oplus \mathop{\rm End}\nolimits(E)_0)$ and $z_-$ is the component in $\Omega^{0,1}(\mathop{\rm End}\nolimits(E)_-) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_-)$. In terms of the action of $\beta = \mu(x)$ we have $\lim_{t \rightarrow \infty} e^{i \beta t} \cdot z_- = 0$ and $\lim_{t \rightarrow \infty} e^{- i \beta t} \cdot z_{\geq 0} = z_0$, where $z_0$ is the component of $z$ in $\Omega^{0,1}(\mathop{\rm End}\nolimits(E)_0) \oplus \Omega^{1,0}(\mathop{\rm End}\nolimits(E)_0)$. Note that if $y = e^u \cdot (x + z)$ is a Higgs bundle, then $x+z$ is a Higgs bundle since $e^u \in \mathcal{G}^\mathbb C$ preserves the space of Higgs bundles, however $x+z_{\geq 0}$ may not be a Higgs bundle as the pair $(\bar{\partial}_{A_{\geq 0}}, \phi_{\geq 0})$ representing $x + z_{\geq 0}$ may not satisfy $\bar{\partial}_{A_{\geq 0}} \phi_{\geq 0} = 0$. Even though $\phi_{\geq 0}$ may not be holomorphic, we can still apply the principle that curvature decreases in subbundles and increases in quotient bundles and follow the same idea as \cite[Sec. 8 \& 10]{AtiyahBott83} to prove the following lemma.
\begin{lemma}\label{lem:non-holomorphic-extensions} \begin{enumerate}
\item $\mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0})) \geq \mathop{\rm YMH}\nolimits(x)$.
\item $\grad \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0}))$ is tangent to the set $\{z_- = 0\}$. \end{enumerate} \end{lemma}
The next lemma shows that the component in the negative slice is decreasing exponentially.
\begin{lemma}
Let $y_t = e^u \cdot (x + z_{\geq 0} + z_-)$ be a solution to the $\mathop{\rm YMH}\nolimits$ flow such that $\lim_{t \rightarrow -\infty} y_t = x$. Then there exist positive constants $K_1$ and $K_2$ such that $\| z_- \|_{L_1^2}^2 \leq K_1 e^{K_2 t}$ for all $t \leq 0$. \end{lemma}
\begin{proof} The proof follows the idea of \cite[Sec. 10]{Kirwan84}. The downwards gradient flow equation for $z_-$ is \begin{equation*} \frac{\partial z_-}{\partial t} = L z_- + N_-(u, z_{\geq 0}, z_-) \end{equation*}
where $L$ is a linear operator and the derivative of $N_-$ vanishes at the origin. Since $z_-$ is orthogonal to the $\mathcal{G}^\mathbb C$ orbit through $x$, then the Laplacian term in $\grad \mathop{\rm YMH}\nolimits$ vanishes on $z_-$ and so the linear part satisfies $e^{Lt} z_- = e^{-i \beta t} \cdot z_-$. Since $z_-$ is in the negative slice then there exists $\lambda_{min} > 0$ such that $\left< L z_- , z_- \right>_{L_1^2} \geq \lambda_{min} \| z_- \|_{L_1^2}^2$. Now Lemma \ref{lem:non-holomorphic-extensions} shows that the $\mathop{\rm YMH}\nolimits$ flow preserves the set $\{ z_- = 0 \}$, and so $N_-(u, z_{\geq 0}, 0) = 0$. Since $N_-$ is $C^1$ with vanishing derivative at the origin then for all $\varepsilon > 0$ there exists $\delta > 0$ such that if $\| y_t - x \|_{L_1^2} < \delta$ then \begin{equation*}
\| N_-(u, z_{\geq 0}, z_-) \|_{L_1^2} \leq \varepsilon \| z_- \|_{L_1^2} \end{equation*} Therefore \begin{equation*}
\frac{1}{2} \frac{\partial}{\partial t} \| z_- \|_{L_1^2}^2 = \left< L z_-, z_- \right>_{L_1^2} + \left< N_-(u, z_{\geq 0}, z_- ), z_- \right>_{L_1^2} \geq (\lambda_{min} - \varepsilon) \| z_- \|_{L_1^2}^2 , \end{equation*}
and so if $\varepsilon > 0$ is small enough (e.g. $\varepsilon < \frac{1}{2} \lambda_{min}$) then there exist positive constants $K_1$ and $K_2$ such that $\| z_- \|_{L_1^2}^2 \leq K_1 e^{K_2 t}$ for all $t \leq 0$. \end{proof}
The next lemma shows that the difference $\mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(y_t)$ is decreasing exponentially.
\begin{lemma}\label{lem:f-exponential} Let $y_t = e^u \cdot (x+z_{\geq 0} + z_-)$ be a solution to the $\mathop{\rm YMH}\nolimits$ flow such that $\lim_{t \rightarrow -\infty} y_t = x$. Then there exist positive constants $K_1'$ and $K_2'$ such that \begin{equation*}
\mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0} + z_-)) \leq K_1' e^{K_2' t} \end{equation*} for all $t \leq 0$. \end{lemma}
\begin{proof} Recall that the Morse-Kirwan condition from Lemma \ref{lem:non-holomorphic-extensions} implies \begin{equation*} \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0})) - \mathop{\rm YMH}\nolimits(x) \geq 0 \end{equation*}
Since $x$ is a critical point of $\mathop{\rm YMH}\nolimits$, then for all $\varepsilon > 0$ there exists $\delta > 0$ such that if $\| y_t - x \|_{L_1^2} < \delta$ we have \begin{equation*}
\mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0} + z_-)) - \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0})) \geq -\varepsilon \| z_- \|_{L_1^2} . \end{equation*} Therefore \begin{align*} \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0} + z_-)) - \mathop{\rm YMH}\nolimits(x) & = \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0} + z_-)) - \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0})) \\
& \quad \quad + \mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0})) - \mathop{\rm YMH}\nolimits(x) \\
& \geq - \varepsilon \|z_- \|_{L_1^2} \geq - \varepsilon \sqrt{K_1} e^{\frac{1}{2} K_2 t} \end{align*} Since $\mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0} + z_-))$ is monotone decreasing with $t$ and $\lim_{t \rightarrow -\infty} \mathop{\rm YMH}\nolimits(e^u \cdot (x+z_{\geq 0} + z_-)) = \mathop{\rm YMH}\nolimits(x)$, then $\mathop{\rm YMH}\nolimits(e^u \cdot (x + z_{\geq 0} + z_-)) \leq \mathop{\rm YMH}\nolimits(x)$, and so the above equation implies that \begin{equation*}
\left| \mathop{\rm YMH}\nolimits(y_t) - \mathop{\rm YMH}\nolimits(x) \right| \leq K_1' e^{K_2' t} \end{equation*} for positive constants $K_1' = \varepsilon \sqrt{K_1}$ and $K_2' = \frac{1}{2} K_2$. \end{proof}
\begin{lemma}\label{lem:interior-bound} Let $y_t$ be a solution to the $\mathop{\rm YMH}\nolimits$ flow such that $y_t \rightarrow x$ as $t \rightarrow -\infty$. Then for each positive integer $k$ there exists a constant $C$ and a constant $\tau_0 \in \mathbb R$ such that \begin{equation*}
\| y_\tau - x \|_{L_k^2} \leq C \int_{-\infty}^\tau \| \grad \mathop{\rm YMH}\nolimits(y_s) \|_{L^2} \, ds \end{equation*} for all $\tau \leq \tau_0$. \end{lemma}
\begin{proof} Recall the interior estimate from \cite[Lem. 7.3]{Rade92}, \cite[Prop. 3.6]{Wilkin08} which says that for all positive integers $k$ there exists a neighbourhood $U$ of $x$ in the $L_k^2$ topology and a constant $C$ such that if $y_t \in U$ for all $t \in [0, T]$ then \begin{equation*}
\int_1^T \| \grad \mathop{\rm YMH}\nolimits(y_t) \|_{L_k^2} \, dt \leq C \int_0^T \| \grad \mathop{\rm YMH}\nolimits(y_t) \|_{L^2} \, dt . \end{equation*} The constant $C$ is uniform as long as the initial condition satisfies a uniform bound on the derivatives of the curvature of the underlying holomorphic bundle and the flow line $y_t$ remains in the fixed neighbourhood $U$ of the critical point $x$ (cf. \cite[Prop. A]{Rade92}). In particular, the estimates of \cite[Lem. 3.14, Cor 3.16]{Wilkin08} show that this bound on the curvature is satisfied for any initial condition along a given flow line $y_t$. \emph{A priori} the constant depends on $T$, however it can be made uniform in $T$ using the following argument. Let $C$ be the constant for $T = 2$. For any $T \geq 2$, let $N$ be an integer greater than $T$ such that $y_t \in U$ for all $t \in [0, N]$. We then have \begin{align*}
\int_1^T \| \grad \mathop{\rm YMH}\nolimits(y_t) \|_{L_k^2} \, dt & \leq \sum_{n=1}^{N-1} \int_n^{n+1} \| \grad \mathop{\rm YMH}\nolimits(y_t) \|_{L_k^2} \, dt \\
& \leq C \sum_{n=1}^{N-1} \int_{n-1}^{n+1} \| \grad \mathop{\rm YMH}\nolimits(y_t) \|_{L^2} \, dt \\
& \leq 2 C \int_0^N \| \grad \mathop{\rm YMH}\nolimits(y_t) \|_{L^2} \, dt \end{align*}
Since $\lim_{t \rightarrow - \infty} y_t = x$ in the $C^\infty$ topology, then for any $\varepsilon > 0$ there exists $\tau_0$ such that $\tau \leq \tau_0$ implies that $\| y_t - x \|_{L_k^2} < \varepsilon$ for all $t \leq \tau$ and therefore by choosing $\varepsilon$ small we can apply the above interior estimate on any interval $[t, \tau]$ for $\tau \leq \tau_0$. Therefore we have the bound \begin{equation*}
\int_t^\tau \| \grad \mathop{\rm YMH}\nolimits(y_s) \|_{L_k^2} \, ds \leq 2C \int_{-\infty}^\tau \| \grad \mathop{\rm YMH}\nolimits(y_s) \|_{L^2} \, ds \end{equation*} For fixed $\tau$ the right-hand side of the above inequality is constant, and so \begin{equation*}
\| y_\tau - x \|_{L_k^2} \leq \int_{-\infty}^\tau \| \grad \mathop{\rm YMH}\nolimits(y_s) \|_{L_k^2} \, ds \leq 2C \int_{-\infty}^\tau \| \grad \mathop{\rm YMH}\nolimits(y_s) \|_{L^2} \, ds \end{equation*} \end{proof}
\begin{proof}[Proof of Proposition \ref{prop:exponential-convergence}] After possibly shrinking the neighbourhood $U$ from the previous lemma, we can apply the Lojasiewicz inequality (cf. \cite[Prop. 3.5]{Wilkin08}) which implies that \begin{equation*}
\int_{-\infty}^\tau \| \grad \mathop{\rm YMH}\nolimits(y_s) \|_{L^2} \, ds \leq \frac{1}{C\theta} \left( \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(y_\tau) \right)^\theta \end{equation*} for constants $C > 0$ and $\theta \in (0, \frac{1}{2}]$. Lemma \ref{lem:f-exponential} shows that \begin{equation*} \left( \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(y_\tau) \right)^\theta \leq (K_1')^\theta e^{\theta K_2' \tau} \end{equation*} for all $\tau \leq 0$. These two estimates together with the result of Lemma \ref{lem:interior-bound} show that \begin{equation*}
\| y_t - x \|_{L_k^2} \leq C_1 e^{\eta t} \end{equation*} for some positive constants $C_1, \eta$ and all $t \leq 0$. \end{proof}
\section{The isomorphism classes in the unstable set}\label{sec:local-analysis}
Given a critical point $x \in \mathcal{B}$, in this section we show that for each $y \in S_x^-$ there exists a smooth gauge transformation $g \in \mathcal{G}^\mathbb C$ such that $g \cdot y \in W_x^-$ (Proposition \ref{prop:convergence-group-action}), and conversely for each $y \in W_x^-$ there exists $g \in \mathcal{G}^\mathbb C$ such that $g \cdot y \in S_x^-$ (Proposition \ref{prop:unstable-maps-to-slice}). As a consequence, the isomorphism classes in the unstable set are in bijective correspondence with the isomorphism classes in the negative slice, and so we have a complete description of these isomorphism classes by Lemma \ref{lem:classify-neg-slice}. This leads to Theorem \ref{thm:algebraic-flow-line} which gives an algebraic criterion for two points to be connected by a flow line.
\subsection{Convergence of the scattering construction}\label{sec:scattering-convergence}
The goal of this section is to prove Proposition \ref{prop:convergence-group-action}, which shows that every point in the negative slice $S_x^-$ is complex gauge equivalent to a point in the unstable set $W_x^-$.
The construction involves flowing up towards the critical point on the slice using the linearisation of the $\mathop{\rm YMH}\nolimits$ flow and then flowing down using the $\mathop{\rm YMH}\nolimits$ flow. A similar idea is used by Hubbard in \cite{Hubbard05} for analytic flows around a critical point in $\mathbb C^n$, where the flow on the slice is defined by projecting the flow from the ambient space. Hubbard's construction uses the fact that the ambient space is a manifold to (a) define this projection to the negative slice, and (b) define local coordinates in which the nonlinear part of the gradient flow satisfies certain estimates in terms of the eigenvalues for the linearised flow \cite[Prop. 4]{Hubbard05}, which is necessary to prove convergence. This idea originated in the study of the existence of scattering states in classical and quantum mechanics. In the context of this paper, one can think of the linearised flow and the YMH flow as two dynamical systems and the goal is to compare their behaviour as $t \rightarrow - \infty$ (see \cite[Ch. XI.1]{ReedSimonVol3} for an overview). As noted in \cite{Hubbard05}, \cite{Nelson69} and \cite{ReedSimonVol3}, the eigenvalues of the linearised flow play an important role in comparing the two flows.
The method of this section circumvents the need for a local manifold structure by defining the flow on the slice using the linearised flow and then using the distance-decreasing property of the flow on the space of metrics from \cite{Donaldson85}, \cite{Simpson88} (cf. Lemma \ref{lem:distance-decreasing}) in place of the estimate of \cite[Prop. 4]{Hubbard05} on the nonlinear part of the flow. The entire construction is done in terms of the complex gauge group, and so it is valid on any subset preserved by $\mathcal{G}^\mathbb C$, thus avoiding any problems associated with the singularities in the space of Higgs bundles. Moreover, using this method it follows naturally from the Lojasiewicz inequality and the smoothing properties of the heat equation that the backwards $\mathop{\rm YMH}\nolimits$ flow with initial condition in the unstable set converges in the $C^\infty$ topology.
\subsubsection{A $C^0$ bound on the metric}
First we derive an \emph{a priori} estimate on the change of metric along the flow. Fix an initial condition $y_0 \in S_x^-$ and let $\beta = \mu(x) = \Lambda(F_A + [\phi, \phi^*]) \in \Omega^0(\mathop{\rm ad}\nolimits(E)) \cong \mathop{\rm Lie}\nolimits(\mathcal{G})$. In this section we also use the function $\mu_h(y) = \mathop{\rm Ad}\nolimits_{g^{-1}} \left( \mu(g\cdot y) \right)$ from \eqref{eqn:def-muh}. The linearised flow with initial condition $y_0$ has the form $e^{- i \beta t} \cdot y_0$, and the $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:gauge-flow} has the form $g_t \cdot y_0$. Let $f_t = g_t \cdot e^{i \beta t}$ and define $h_t = f_t^* f_t \in \mathcal{G}^\mathbb C / \mathcal{G}$. This is summarised in the diagram below.
\begin{figure}
\caption{Comparison of the gradient flow and the linearised flow.}
\end{figure}
\begin{lemma}\label{lem:derivative-difference} For any initial condition $y_0 \in S_x^-$, the induced flow on $\mathcal{G}^\mathbb C / \mathcal{G}$ satisfies \begin{equation*} \frac{dh_t}{dt} = -2 i h_t \, \mu_h(e^{-i \beta t} \cdot y_0) + i \beta h_t + h_t (i\beta) \end{equation*} \end{lemma}
\begin{proof} First compute \begin{equation}\label{eqn:group-scattering} \frac{df}{dt} f_t^{-1} = \frac{dg}{dt} g_t^{-1} + g_t (i\beta) e^{i\beta t} f_t^{-1} = -i \mu(g_t \cdot y_0) + f_t (i\beta) f_t^{-1} \end{equation} Then \begin{align*} \frac{dh}{dt} & = \frac{df^*}{dt} f_t + f_t^* \frac{df}{dt} \\
& = f_t^* \left( \frac{df}{dt} f_t^{-1} \right)^* f_t + f_t^* \left( \frac{df}{dt} f_t^{-1} \right) f_t \\
& = - f_t^* i \mu(g_t \cdot y_0) f_t + i \beta h_t - f_t^* i \mu(g_t \cdot y_0) f_t + h_t (i\beta) \\
& = -2 f_t^* i \mu(g_t \cdot y_0) f_t + i \beta h_t + h_t (i\beta) \\
& = -2 i h_t \mathop{\rm Ad}\nolimits_{f_t^{-1}} \left( \mu(g_t \cdot y_0) \right) + i \beta h_t + h_t (i\beta) \\
& = -2 i h_t \, \mu_h(e^{-i \beta t} \cdot y_0) + i \beta h_t + h_t (i\beta) \end{align*} where the last step follows from the definition of $\mu_h$ in \eqref{eqn:def-muh} and the fact that $e^{-i \beta t} = f_t^{-1} \cdot g_t$. \end{proof}
The next estimate gives a bound for $\sup_X \sigma(h_t)$ in terms of $\| y_0 - x \|_{C^0}$.
\begin{lemma}\label{lem:uniform-bound-sigma}
For every $\varepsilon > 0$ there exists a constant $C > 0$ such that for any initial condition $y_0 \in S_{x}^-$ with $\| e^{-i \beta T} \cdot y_0 - x \|_{C^0} < \varepsilon$ we have the estimate $\sup_X \sigma(h_t) \leq C \| e^{-i \beta T} \cdot y_0 - x \|_{C^0}^2$ for all $0 \leq t \leq T$. \end{lemma}
\begin{proof} Taking the trace of the result of Lemma \ref{lem:derivative-difference} gives us \begin{align*} \frac{d}{dt} \mathop{\rm Tr}\nolimits h_t = \mathop{\rm Tr}\nolimits \left( \frac{dh}{dt} \right) & = - 2i \mathop{\rm Tr}\nolimits \left( (\mu_h(e^{-i \beta t} \cdot y_0) - \beta) h_t \right) \\ \frac{d}{dt} \mathop{\rm Tr}\nolimits h_t^{-1} = - \mathop{\rm Tr}\nolimits \left( h_t^{-1} \frac{dh}{dt} h_t^{-1} \right) & = 2i \mathop{\rm Tr}\nolimits \left( h_t^{-1} (\mu_h(e^{-i \beta t} \cdot y_0) - \beta) \right) \end{align*}
Therefore \begin{equation*} \frac{d}{dt} \mathop{\rm Tr}\nolimits ( h_t ) = -2i \mathop{\rm Tr}\nolimits \left( (\mu_h(e^{-i \beta t} \cdot y_0) - \mu(e^{-i\beta t} \cdot y_0) ) h_t \right) - 2i \mathop{\rm Tr}\nolimits \left( (\mu(e^{-i \beta t} \cdot y_0) - \beta) h_t \right) \end{equation*} Lemma \ref{lem:metric-inequalities} together with the fact that $h_t$ is positive definite then shows that \begin{align*} \left( \frac{\partial}{\partial t} + \Delta \right) \mathop{\rm Tr}\nolimits(h_t) & \leq -2i \mathop{\rm Tr}\nolimits \left( (\mu(e^{-i \beta t} \cdot y_0) - \beta) h_t \right) \\
& \leq C_1 \| \mu(e^{-i \beta t} \cdot y_0) - \beta \|_{C^0} \mathop{\rm Tr}\nolimits (h_t) \\
& \leq C_1 \| e^{-i \beta t} \cdot y_0 - x \|_{C^0}^2 \mathop{\rm Tr}\nolimits (h_t) \quad \text{(by Lemma \ref{lem:moment-map-quadratic})} \end{align*} A similar calculation shows that \begin{equation*}
\left( \frac{\partial}{\partial t} + \Delta \right) \mathop{\rm Tr}\nolimits(h_t^{-1}) \leq C_1 \| e^{-i \beta t} \cdot y_0 - x \|_{C^0}^2 \mathop{\rm Tr}\nolimits (h_t^{-1}) \end{equation*}
If we label the eigenvalues of $i \beta$ as $\lambda_1 \leq \cdots \leq \lambda_k < 0 \leq \lambda_{k+1} \leq \cdots \leq \lambda_n$, then the estimate $\| e^{i \beta s} \cdot (y_0 - x) \|_{C^0}^2 \leq e^{2\lambda_k s} \| y_0 - x \|_{C^0}^2$ from \eqref{eqn:lipschitz-slice} gives us \begin{align}\label{eqn:sigma-sub-estimate} \begin{split} \left( \frac{\partial}{\partial t} + \Delta \right) \sigma(h_t) & = \left( \frac{\partial}{\partial t} + \Delta \right) \left( \mathop{\rm Tr}\nolimits (h_t) + \mathop{\rm Tr}\nolimits (h_t^{-1}) \right) \\
& \leq C_1 \| e^{-i \beta t} \cdot (y_0 - x) \|_{C^0}^2 \left( \mathop{\rm Tr}\nolimits (h_t) + \mathop{\rm Tr}\nolimits (h_t^{-1}) \right) \\
& = C_1 \| e^{i \beta (T-t)} \cdot e^{-i \beta T} \cdot (y_0 - x) \|_{C^0}^2 \left( \mathop{\rm Tr}\nolimits (h_t) + \mathop{\rm Tr}\nolimits (h_t^{-1}) \right) \\
& \leq C_1 e^{2\lambda_k (T-t)} \| e^{-i \beta T} \cdot (y_0 - x) \|_{C^0}^2 \sigma(h_t) + C_1 e^{2 \lambda_k (T-t)} \| e^{-i \beta T} \cdot (y_0 - x) \|_{C^0}^2 \rank(E) \end{split} \end{align}
Let $K_1 = C_1 \| e^{-i \beta T} \cdot y_0 - x \|_{C^0}^2$ and $K_2 = C_1 \| e^{-i \beta T} \cdot y_0 - x \|_{C^0}^2 \rank(E)$. Define \begin{equation*} \nu_t = \sigma(h_t) \exp\left( \frac{K_1}{2\lambda_k} e^{2 \lambda_k (T-t)} \right) - \int_0^t K_2 e^{2 \lambda_k (T-s)} \exp \left( \frac{K_1}{2\lambda_k} e^{2 \lambda_k (T-s)} \right) \, ds \end{equation*} Note that $\nu_0 = 0$ since $h_0 = \id$. A calculation using \eqref{eqn:sigma-sub-estimate} then shows that \begin{equation*} \left( \frac{\partial}{\partial t} + \Delta \right) \nu_t \leq 0 \end{equation*} and so $\sup_X \nu_t \leq \sup_X \nu_0 = 0$ by the maximum principle. Therefore \begin{align*} \sup_X \sigma(h_t) & \leq \exp \left(-\frac{K_1}{2\lambda_k} e^{2 \lambda_k (T-t)} \right) \int_0^t K_2 e^{2 \lambda_k (T-s)} \exp \left(\frac{K_1}{2\lambda_k} e^{2 \lambda_k (T-s)} \right) \, ds \\
& \leq \exp \left( - \frac{K_1}{2\lambda_k} \right) \int_0^t K_2 e^{2\lambda_k (T-s)} \, ds \leq C \| e^{-i \beta T} \cdot y_0 - x \|_{C^0}^2 \end{align*}
for some constant $C$, since $\lambda_k < 0$, $0 \leq s \leq t < T$, $K_1$ is bounded since $\| e^{-i \beta T} \cdot y_0 - x \|_{C^0} < \varepsilon$ by assumption and $K_2$ is proportional to $\| e^{-i \beta T} \cdot y_0 - x \|_{C^0}^2$. \end{proof}
\subsubsection{$C^\infty$ convergence in the space of metrics}\label{subsec:metric-convergence}
Now consider the case of a fixed $y_0 \in S_x^-$ and define $y_t = e^{i \beta t} \cdot y_0$. Define $g_s(y_t) \in \mathcal{G}^\mathbb C$ to be the unique solution of \eqref{eqn:gauge-flow} such that $g_s(y_t) \cdot y_t$ is the solution to the $\mathop{\rm YMH}\nolimits$ flow at time $s$ with initial condition $y_t$. Let $f_s(y_t) = g_s(y_t) \cdot e^{i \beta s} \in \mathcal{G}^\mathbb C$, and define $h_s(y_t) = f_s(y_t)^* f_s(y_t)$ to be the associated change of metric. The estimate from the previous lemma now becomes \begin{equation}\label{eqn:sigma-C0-estimate}
\sup_X \sigma(h_s(y_t)) \leq C \| y_t - x \|_{C^0}^2 = C \| e^{i \beta t} \cdot (y_0 - x) \|_{C^0}^2 \leq C e^{2 \lambda_k t} \| y_0 - x \|_{C^0}^2 \end{equation} This is summarised in the diagram below.
\begin{figure}
\caption{Comparison of $f_{t_1}(y_0) \cdot y_0$ and $f_{t_2}(y_0) \cdot y_0$.}
\label{fig:flow-up-flow-down}
\end{figure}
\begin{proposition}\label{prop:metrics-converge} $h_t(y_0) \stackrel{C^0}{\longrightarrow} h_\infty(y_0) \in \mathcal{G}^\mathbb C / \mathcal{G}$ as $t \rightarrow \infty$. The limit depends continuously on the initial condition $y_0$. The rate of convergence is given by \begin{equation}\label{eqn:metric-convergence-rate}
\sup_X \sigma(h_t(y_0) (h_\infty(y_0))^{-1}) \leq C_2 e^{2 \lambda_k t} \| y_0 - x \|_{C^0}^2 \end{equation} where $C_2 > 0$ is a constant depending only on the orbit $\mathcal{G} \cdot x$. \end{proposition}
\begin{proof} Let $t_1 > t_2 \geq T$. The estimate \eqref{eqn:sigma-C0-estimate} shows that \begin{equation*}
\sup_X \sigma(h_{t_1-t_2}(y_{t_2})) \leq C \| y_{t_2} - x \|_{C^0}^2 \leq C e^{2 \lambda_k t_2} \| y_0 - x \|_{C^0}^2 \leq C e^{2 \lambda_k T} \| y_0 - x \|_{C^0}^2 . \end{equation*} Recall from \eqref{eqn:metric-difference} that \begin{equation*} \sigma(h_{t_1}(y_0) h_{t_2}(y_0)^{-1}) = \sigma\left( ( f_{t_1}(y_0) f_{t_2}(y_0)^{-1} )^* f_{t_1}(y_0) f_{t_2}(y_0)^{-1} \right) . \end{equation*} The distance-decreasing formula of Lemma \ref{lem:distance-decreasing} shows that \begin{equation*} \sup_X \sigma\left( ( f_{t_1}(y_0) f_{t_2}(y_0)^{-1} )^* f_{t_1}(y_0) f_{t_2}(y_0)^{-1} \right) \leq \sup_X \sigma( h_{t_1-t_2}(y_{t_2}) ) . \end{equation*} Therefore the distance (measured by $\sigma$) between the two metrics $h_{t_1}(y_0)$ and $h_{t_2}(y_0)$ satisfies the following bound \begin{align*} \sup_X \sigma(h_{t_1}(y_0) h_{t_2}(y_0)^{-1}) & = \sup_X \sigma\left( ( f_{t_1}(y_0) f_{t_2}(y_0)^{-1} )^* f_{t_1}(y_0) f_{t_2}(y_0)^{-1} \right) \\
& \leq \sup_X \sigma( h_{t_1-t_2}(y_{t_2}) ) \leq C_2 e^{2 \lambda_k T} \| y_0 - x \|_{C^0}^2 \end{align*} and so $h_t(y_0)$ is a Cauchy sequence in $C^0$ with a unique limit $h_\infty \in \mathcal{G}^\mathbb C / \mathcal{G}$. The above equation shows that the rate of convergence is given by \eqref{eqn:metric-convergence-rate}.
Since the finite-time Yang-Mills-Higgs flow and linearised flow both depend continuously on the initial condition, then $h_t(y_0)$ depends continuously on $y_0$ for each $t > 0$. Continuous dependence of the limit then follows from the estimate \eqref{eqn:metric-convergence-rate}. \end{proof}
Now we can improve on the previous estimates to show that $h_t(y_0)$ converges in the smooth topology along a subsequence, and therefore the limit $h_\infty$ is $C^\infty$. Define $z_t = f_t(y_0) \cdot y_0$, where $y_0 \in S_x^-$ and $f_t(y_0) \in \mathcal{G}^\mathbb C$ are as defined in the previous proposition. Given a Higgs bundle $z_t = (\bar{\partial}_A, \phi)$, let $\nabla_A$ denote the covariant derivative with respect to the metric connection associated to $\bar{\partial}_A$.
\begin{lemma}
For each initial condition $y_0 \in S_x^-$, there is a uniform bound on $\sup_X | \nabla_A^\ell \mu(z_t) |$ and $\sup_X |\nabla_A^\ell \phi|$ for each $\ell \geq 0$. \end{lemma}
\begin{proof}
Since $\{ e^{i \beta t} \cdot y_0 \, : \, t \in [0, \infty] \}$ is a compact curve in the space of $C^\infty$ Higgs bundles connecting two $C^\infty$ Higgs bundles $y_0$ and $x$, then $\sup_X \left| \mu(e^{i \beta t} \cdot y_0) \right|$ and $\sup_X \left| \nabla_A \phi \right|$ are both uniformly bounded along the sequence $e^{i \beta t} \cdot y_0$. By construction, $z_t$ is the time $t$ $\mathop{\rm YMH}\nolimits$ flow with initial condition $e^{i \beta t} \cdot y_0$. Along the $\mathop{\rm YMH}\nolimits$ flow, for each $\ell$ the quantities $\sup_X \left| \nabla_A^\ell \mu \right|$ and $\sup_X \left| \nabla_A^\ell \phi \right|$ are both uniformly bounded by a constant depending on the value of $\sup_X \left| \mu \right|$ and $\sup_X \left| \nabla_A \phi \right|$ at the initial condition (cf. \cite[Sec. 3.2]{Wilkin08}). Since these quantities are uniformly bounded for the initial conditions, then the result follows. \end{proof}
\begin{corollary} There is a subsequence $t_n$ such that $h_{t_n} \rightarrow h_\infty$ in the $C^\infty$ topology. Therefore $h_\infty$ is $C^\infty$. \end{corollary}
\begin{proof}
Since $z_t$ is contained in the complex gauge orbit of $y_0$ for all $t$, then \cite[Lem. 3.14]{Wilkin08} shows that the uniform bound on $\left| \nabla_A^\ell \mu(z_t) \right|$ from the previous lemma implies a uniform bound on $\left| \nabla_A^\ell F_A \right|$ for all $\ell$. Therefore, since Proposition \ref{prop:metrics-converge} shows that $h_t$ converges in $C^0$, then the estimates of \cite[Lem. 19 \& 20]{Donaldson85} show that $h_t$ is bounded in $C^\ell$ for all $\ell$, and so there is a subsequence $h_{t_n}$ converging in the $C^\infty$ topology. \end{proof}
\subsubsection{$C^\infty$ convergence in the space of Higgs bundles}\label{sec:convergence-in-B}
In this section we show that the scattering construction converges in the $C^\infty$ topology on the space of Higgs bundles. As a consequence of the methods, we obtain an estimate that shows the solution to the reverse heat flow constructed in Section \ref{subsec:construct-reverse-solution} converges to the critical point $x$ in the smooth topology.
This section uses a slightly modified version of the flow from the previous section, defined as follows. Given $y_0 \in S_x^-$ and $t > 0$, let $x_s = g_s \cdot e^{i \beta t} \cdot y_0$ be the time $s$ solution to the $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:gauge-flow} with initial condition $e^{i \beta t} \cdot y_0$, let $s(t)$ be the unique point in time such that $\mathop{\rm YMH}\nolimits(x_{s(t)}) = \mathop{\rm YMH}\nolimits(y_0)$ and define $t' = \min \{ t, s(t) \}$. Since the critical values of $\mathop{\rm YMH}\nolimits$ are discrete, then $t'$ is well-defined for small values of $\mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(y_0)$.
\begin{center} \begin{pspicture}(0,-0.5)(8,5) \psline(4,5)(4,0) \psline(4,4)(4,1) \pscurve[arrowsize=5pt]{->}(4,4)(4.4,2.3)(5,1.2)(5.2,1)
\psline[linestyle=dashed](0,1)(8,1) \psdots[dotsize=3pt](4,5)(4,4)(4,1)(5.2,1) \uput{4pt}[180](4,5){\small{$x$}} \uput{4pt}[180](4,4){\small{$e^{i \beta t} \cdot y_0$}} \uput{4pt}[180](4,0.7){\small{$y_0$}} \uput{4pt}[0](4.5,0.7){\small{$z_t = g_{t'} \cdot e^{i \beta t} \cdot y_0$}} \uput{2pt}[180](4,0){$S_x^-$}
\uput{3pt}[30](4.4,2.3){\small{$g_{t'}$}} \uput{3pt}[180](4,2.3){\small{$e^{i \beta t}$}} \uput{3pt}[90](1,1){\small{$\mathop{\rm YMH}\nolimits^{-1}(\mathop{\rm YMH}\nolimits(y_0))$}} \end{pspicture} \end{center}
Now define $z_t = g_{t'} \cdot e^{i \beta t} \cdot y_0$ and $y_t = e^{i \beta (t-t')} \cdot y_0$. Note that $z_t = g_{t'} \cdot e^{i \beta t'} \cdot y_t$ and so the results of the previous section show that the $C^0$ norm of the change of metric connecting $y_t$ and $z_t$ is bounded. Therefore Corollary \ref{cor:bounded-metric-away-from-critical} shows that $y_t$ and $z_t$ are both uniformly bounded away from $x$.
\begin{lemma}\label{lem:bounded-away-from-critical} There exists $T > 0$ such that $t-t' \leq T$ for all $t$. \end{lemma}
\begin{proof}
If $s(t) \geq t$ then $t' = t$ and the desired inequality holds. Therefore the only non-trivial case is $s(t) < t$. Since $\mathop{\rm YMH}\nolimits(z_t) = \mathop{\rm YMH}\nolimits(y_0)$ and $\mathop{\rm YMH}\nolimits$ is continuous in the $L_1^2$ norm on $\mathcal{B}$, then there exists a neighbourhood $V$ of $x$ such that $z_t \notin V$ for all $t$. We also have $z_t = f_{t'} \cdot y_t$ with $f_{t'} = g_{t'} e^{i \beta t'}$ such that $h_t = f_{t'}^* f_{t'}$ satisfies $\sup_X \sigma(h_t) \leq C \| y_t - x \|_{C^0}^2 \leq C \| y_0 - x \|_{C^0}^2$ by Lemma \ref{lem:uniform-bound-sigma}, and so Corollary \ref{cor:bounded-metric-away-from-critical} shows that there exists a neighbourhood $U$ of $x$ in the $L_1^2$ topology on $\mathcal{B}$ such that $y_t \notin U$. Therefore there exists $\eta > 0$ such that $\| y_t - x \|_{L_1^2} \geq \eta$ and $\| z_t - x \|_{L_1^2} \geq \eta$.
Since $y_t = e^{i \beta (t-t')} \cdot y_0$ and $e^{i \beta s} \cdot y_0$ converges to $x$ as $s \rightarrow \infty$, then there exists $T$ such that $t-t' \leq T$ for all $t$, since otherwise $\| y_t - x \|_{L_1^2} < \eta$ for some $t$ which contradicts the inequality from the previous paragraph. \end{proof}
Next we use the Lojasiewicz inequality to derive a uniform bound on $\| z_t - x \|_{L_1^2}$.
\begin{lemma}\label{lem:L12-bound}
Given $\varepsilon > 0$ there exists $\delta > 0$ such that for each $y_0 \in S_x^-$ with $\| y_0 - x \|_{L_1^2} < \delta$ there exists a neighbourhood $U$ of $x$ in the $L_1^2$ topology such that $\| z_t - x \|_{L_1^2} < \varepsilon$ for all $t$ such that $e^{i \beta t} \cdot y_0 \in U$. \end{lemma}
\begin{proof} Recall from \cite[Prop. 3.5]{Wilkin08} that there exists $\varepsilon_1 > 0$ and constants $C > 0$ and $\theta \in \left( 0, \frac{1}{2} \right)$ such that the Lojasiewicz inequality \begin{equation}\label{eqn:lojasiewicz}
\| \grad \mathop{\rm YMH}\nolimits(z) \|_{L^2} \geq C \left| \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(z) \right|^{1-\theta} \end{equation}
holds for all $z$ such that $\| z - x \|_{L_1^2} < \varepsilon_1$. Recall the interior estimate \cite[Prop. 3.6]{Wilkin08} which says that for any positive integer $k$ there exists $\varepsilon_2 > 0$ and a constant $C_k'$ such that for any solution $x_s = g_s \cdot e^{i \beta t} \cdot y_0$ to the $\mathop{\rm YMH}\nolimits$ flow with initial condition $e^{i \beta t} \cdot y_0$ which satisfies $\| x_s - x \|_{L_k^2} < \varepsilon_2$ for all $0 \leq s \leq S$, then we have \begin{equation}\label{eqn:interior-estimate}
\int_1^S \| \grad \mathop{\rm YMH}\nolimits(x_s) \|_{L_k^2} \, dt \leq C_k' \int_0^S \| \grad \mathop{\rm YMH}\nolimits(x_s) \|_{L^2} \, dt . \end{equation}
where the constant $C_k'$ is uniform over all initial conditions in a given $\mathcal{G}^\mathbb C$ orbit and for all $S$ such that $\| x_s - x \|_{L_k^2} < \varepsilon_2$ for all $s \in [0, S]$ (cf. Lemma \ref{lem:interior-bound}). Define $\varepsilon' = \min \{ \varepsilon, \varepsilon_1, \varepsilon_2 \}$. A calculation using \eqref{eqn:lojasiewicz} (cf. \cite{Simon83}) shows that any flow line $x_s$ which satisfies $\| x_s - x \|_{L_1^2} < \varepsilon'$ for all $s \in [0,t']$ also satisfies the gradient estimate \begin{equation*}
C \theta \| \grad \mathop{\rm YMH}\nolimits(x_s) \|_{L^2} \leq \frac{\partial}{\partial s} \left| \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(x_s) \right|^\theta \end{equation*}
and so if $\| x_s - x \|_{L_1^2} < \varepsilon'$ for all $s < t'$ then \begin{align}\label{eqn:flow-length-estimate} \begin{split}
\int_0^{t'} \| \grad \mathop{\rm YMH}\nolimits(x_s) \|_{L^2} \, ds & \leq \frac{1}{C\theta} \left( |\mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(x_{t'}) |^\theta - |\mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(x_0)|^\theta \right) \\
& \leq \frac{1}{C\theta} \left| \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(x_{t'}) \right|^\theta \end{split} \end{align}
Let $k=1$ in \eqref{eqn:interior-estimate} and choose $\delta > 0$ so that $\| y_0 - x \|_{L_1^2} < \delta$ implies that $\frac{1}{C \theta} |\mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(y_0)|^\theta \leq \frac{\varepsilon'}{3C_1'}$, where $C$ and $\theta$ are the constants from the Lojasiewicz inequality \eqref{eqn:lojasiewicz} and $C_1'$ is the constant from \eqref{eqn:interior-estimate} for $k=1$. Therefore, since $\mathop{\rm YMH}\nolimits(y_0) = \mathop{\rm YMH}\nolimits(x_{t'}) < \mathop{\rm YMH}\nolimits(x_\tau) \leq \mathop{\rm YMH}\nolimits(x)$ for all $\tau < t'$, then \begin{equation}\label{eqn:energy-bound}
\frac{1}{C\theta} \left| \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(x_\tau) \right|^\theta \leq \frac{\varepsilon'}{3C_1'} \quad \text{for all $\tau < t'$}. \end{equation}
Since the finite-time $\mathop{\rm YMH}\nolimits$ flow depends continuously on the initial condition in the $L_1^2$ norm by \cite[Prop. 3.4]{Wilkin08}, then there exists a neighbourhood $U$ of $x$ such that $x_0 \in U$ implies that $\| x_1 - x \|_{L_1^2} < \frac{1}{3} \varepsilon'$. Choose $t$ large so that $e^{i \beta t} \cdot y_0 = e^{i \beta t'} \cdot y_t \in U$ and let $x_s = g_s \cdot e^{i \beta t} \cdot y_0$ be the solution to the $\mathop{\rm YMH}\nolimits$ flow at time $s$ with initial condition $x_0 = e^{i \beta t} \cdot y_0$. Note that $x_{t'} = z_t$. Define \begin{equation*}
\tau = \sup \{ s \mid \| x_r - x \|_{L_1^2} < \varepsilon' \, \, \text{for all $r \leq s$} \} \end{equation*} and note that $\tau > 0$. By definition of $\tau$, the Lojasiewicz inequality \eqref{eqn:lojasiewicz} and the interior estimate \eqref{eqn:flow-length-estimate} are valid for the flow line $x_s$ on the interval $[0,\tau]$. If $\tau < t'$, then \eqref{eqn:flow-length-estimate} and \eqref{eqn:energy-bound} imply that \begin{align*}
\| x_\tau - x \|_{L_1^2} & \leq \| x_1 - x \|_{L_1^2} + \| x_\tau - x_1 \|_{L_1^2} \\
& < \frac{1}{3} \varepsilon' + \int_1^\tau \| \grad \mathop{\rm YMH}\nolimits(x_s) \|_{L_1^2} \, ds \\
& \leq \frac{1}{3} \varepsilon' + C_1' \int_0^\tau \| \grad \mathop{\rm YMH}\nolimits(x_s) \|_{L^2} \, ds \\
& \leq \frac{1}{3} \varepsilon' + \frac{C_1'}{C \theta} \left| \mathop{\rm YMH}\nolimits(x) - \mathop{\rm YMH}\nolimits(x_\tau) \right|^\theta \leq \frac{1}{3} \varepsilon' + \frac{1}{3} \varepsilon' \end{align*}
contradicting the definition of $\tau$ as the supremum. Therefore $t' \leq \tau$ and the same argument as above shows that $\| x_{t'} - x_0 \|_{L_1^2} < \frac{2}{3} \varepsilon'$, so we conclude that $z_t = x_{t'}$ satisfies $\| z_t - x \|_{L_1^2} < \frac{2}{3} \varepsilon' < \varepsilon$ for all $t$ such that $e^{i \beta t} \cdot y_0 \in U$. \end{proof}
Now that we have a uniform $L_1^2$ bound on $z_t - x$, then we can apply the same idea using the interior estimate \eqref{eqn:interior-estimate} as well as continuous dependence on the initial condition in the $L_k^2$ norm from \cite[Prop. 3.4]{Wilkin08} to prove the following uniform $L_k^2$ bound on $z_t - x$.
\begin{lemma}\label{lem:Lk2-length}
Given $\varepsilon > 0$ and a positive integer $k$ there exists $\delta > 0$ such that if $\| y_0 - x \|_{L_1^2} < \delta$ then there exists a neighbourhood $U$ of $x$ in the $L_k^2$ topology such that $\| z_t - x \|_{L_k^2} < \varepsilon$ for all $t$ such that $e^{i \beta t} \cdot y_0 \in U$. \end{lemma}
Now we can prove that there is a limit $z_\infty$ in the space of $C^\infty$ Higgs bundles. In Section \ref{subsec:construct-reverse-solution} we will show that $z_\infty \in W_x^-$.
\begin{proposition}\label{prop:strong-convergence} For each $y_0 \in S_x^-$, let $z_t$ be the sequence defined above. Then there exists $z_\infty \in \mathcal{B}$ such that for each positive integer $k$ there exists a subsequence of $z_t$ converging to $z_\infty$ strongly in $L_k^2$. \end{proposition}
\begin{proof}
The previous estimate with $k=2$ shows that $\| z_t - x \|_{L_2^2}$ is bounded. Compactness of the embedding $L_{k+1}^2 \hookrightarrow L_k^2$ shows that there is a subsequence $\{ z_{t_n} \}$ converging strongly to a limit $z_\infty$ in $L_1^2$.
For any $k > 1$, the same argument applied to the subsequence $\{ z_{t_n} \}$ from the previous paragraph shows that there exists a further subsequence, which we denote by $\{ z_{t_{n_j}} \}$, which converges strongly in $L_k^2$. Since $z_{t_{n_j}} \stackrel{L_1^2}{\longrightarrow} z_\infty$ then the limit in $L_k^2$ of $z_{t_{n_j}}$ must be $z_\infty$ also. Therefore $z_\infty$ is a $C^\infty$ Higgs pair. \end{proof}
Finally, we can prove that $z_\infty$ is gauge-equivalent to $y_0$. Recall the constant $T$ from Lemma \ref{lem:bounded-away-from-critical} and let $\varphi(z_t,s)$ denote the time $s$ downwards $\mathop{\rm YMH}\nolimits$ flow \eqref{eqn:YMH-flow} with initial condition $z_t$. The gauge transformation $f_t(y_0) \in \mathcal{G}^\mathbb C$ from Proposition \ref{prop:metrics-converge} satisfies $f_t(y_0) \cdot y_0 = \varphi(z_t, t-t')$.
For any $k$, let $z_{t_n}$ be a subsequence converging strongly to $z_\infty$ in $L_k^2$. Such a subsequence exists by Proposition \ref{prop:strong-convergence}. Since $0 \leq t_n - t_n' \leq T$ for all $n$ then there exists $s \in [0, T]$ and a subsequence $\{t_{n_\ell} \}$ such that $t_{n_\ell} - t_{n_\ell}' \rightarrow s$. Since the finite-time $\mathop{\rm YMH}\nolimits$ flow depends continuously on the initial condition in $L_k^2$, then $f_{t_{n_\ell}} (y_0) \cdot y_0 = \varphi(z_{t_{n_\ell}}, t_{n_\ell} - t_{n_\ell}')$ converges to $z_\infty^0 := \varphi(z_\infty, s)$ strongly in $L_k^2$. After taking a further subsequence if necessary, the method of Section \ref{subsec:metric-convergence} shows that the change of metric associated to $f_{t_{n_\ell}}(y_0)$ converges strongly in $L_{k+1}^2$. Therefore, since the action of the Sobolev completion $\mathcal{G}_{L_{k+1}^2}^\mathbb C$ on $\mathcal{B}_{L_k^2}$ is continuous, then $\varphi(z_\infty, s)$ (and hence $z_\infty$) is related to $y_0$ by a gauge transformation in $\mathcal{G}_{L_{k+1}^2}^\mathbb C$. Since $y_0$ and $z_\infty$ are both smooth Higgs pairs then an elliptic regularity argument shows that this gauge transformation is smooth. Therefore we have proved the following result.
\begin{proposition}\label{prop:limit-in-group-orbit} Given any $y_0 \in S_x^-$, let $z_\infty$ be the limit from Proposition \ref{prop:strong-convergence}. Then there exists a smooth gauge transformation $g \in \mathcal{G}^\mathbb C$ such that $z_\infty = g \cdot y_0$. \end{proposition}
Since $z_\infty^0 = \varphi(z_\infty, s)$ is related to $z_\infty$ by the finite-time flow and $s$ is bounded, then we have the following estimate for $\| z_\infty^0 - x \|_{L_k^2}$. Note that this requires a bound on $\| y_0 - x \|_{L_1^2}$ for the estimates of this section to work, and a bound on $\| y_0 - x \|_{C^0}$ for the estimates of Lemma \ref{lem:uniform-bound-sigma} to work.
\begin{corollary}\label{cor:flow-bound}
For all $\varepsilon > 0$ there exists $\delta > 0$ such that $\| y_0 - x \|_{L_1^2} + \| y_0 - x \|_{C^0} < \delta$ implies $\| z_\infty^0 - x \|_{L_k^2} < \varepsilon$. \end{corollary}
\begin{remark} The previous proof uses the fact that the \emph{finite-time} flow depends continuously on the initial condition. The limit of the downwards $\mathop{\rm YMH}\nolimits$ flow as $t \rightarrow \infty$ depends continuously on initial conditions within the same Morse stratum (cf. \cite[Thm. 3.1]{Wilkin08}). It is essential that the constant $T$ from Lemma \ref{lem:bounded-away-from-critical} is finite (which follows from Corollary \ref{cor:bounded-metric-away-from-critical}) in order to guarantee that $z_\infty$ and $\varphi(z_\infty, s)$ are gauge equivalent. Without a bound on $T$, it is possible that $z_\infty$ may be in a different Morse stratum to $\lim_{t \rightarrow \infty} \varphi(z_t, t-t')$. \end{remark}
\subsubsection{Constructing a convergent solution to the backwards $\mathop{\rm YMH}\nolimits$ flow}\label{subsec:construct-reverse-solution}
In this section we show that the limit $z_\infty$ is in the unstable set $W_x^-$.
\begin{proposition}\label{prop:convergence-group-action} For each $y_0 \in S_x^-$ there exists $g \in \mathcal{G}^\mathbb C$ such that $g \cdot y_0 \in W_x^-$. \end{proposition}
\begin{proof} In what follows, fix any positive integer $k$. Given $y_0 \in S_x^-$, let $z_t^0 = f_t(y_0) \cdot y_0$, where $f_t$ is the complex gauge transformation from Proposition \ref{prop:metrics-converge}. Then Proposition \ref{prop:limit-in-group-orbit} shows that there exists $z_\infty^0 := \varphi(z_\infty, s)$ and a subsequence $\{ z_{t_n}^0 \}$ such that $z_{t_n}^0 \rightarrow z_\infty^0$ strongly in $L_k^2$.
For any $s > 0$, let $y_s = e^{i \beta s} \cdot y_0$ and define $z_t^{-s} = f_t(y_s) \cdot y_s$. By definition, $z_t^0$ is the downwards $\mathop{\rm YMH}\nolimits$ flow for time $s$ with initial condition $z_t^{-s}$. Applying Proposition \ref{prop:strong-convergence} to the subsequence $z_{t_n}^{-s}$ shows that there is a subsequence $z_{t_{n_j}}^{-s}$ converging in $L_k^2$ to some $z_\infty^{-s}$. Since the $\mathop{\rm YMH}\nolimits$ flow for finite time $s$ depends continuously on the initial condition (cf. \cite[Prop. 3.4]{Wilkin08}), the convergences $z_{t_{n_j}}^{-s} \rightarrow z_\infty^{-s}$ and $z_{t_{n_j}}^0 \rightarrow z_\infty^0$ imply that $z_\infty^0$ is the time $s$ flow with initial condition $z_\infty^{-s}$. Therefore, for any $s > 0$ we have constructed a solution to the $\mathop{\rm YMH}\nolimits$ flow on $[-s, 0]$ connecting $z_\infty^0$ and $z_\infty^{-s}$. Proposition \ref{prop:backwards-uniqueness} shows that this solution must be unique for each $s$, and therefore there is a well-defined solution on the time interval $(-\infty, 0]$.
Moreover, we also have the uniform bound from Corollary \ref{cor:flow-bound} which shows that for all $\varepsilon > 0$ there exists $\delta > 0$ such that $\| z_\infty^{-s} - x \|_{L_k^2} \leq \varepsilon$ for all $y_0$ such that $\| y_0 - x \|_{L_1^2} < \delta$. Therefore as $s \rightarrow \infty$, the sequence $z_\infty^{-s}$ converges strongly to $x$ in the $L_k^2$ norm for any $k$, and so $z_\infty^0 = g \cdot y_0 \in W_x^-$. Proposition \ref{prop:exponential-convergence} then shows that the convergence is exponential in each Sobolev norm. \end{proof}
\subsection{Convergence of the inverse process}\label{subsec:inverse-construction}
In this section we consider the inverse procedure to that of the previous section and prove that each point in the unstable set $W_x^-$ is gauge equivalent to a point in the negative slice $S_x^-$. The idea is similar to that of the previous section, except here we use the modified flow.
\subsubsection{A $C^0$ bound in the space of metrics}
Given $y_0 \in W_x^-$, let $y_t = g_t \cdot y_0$ be the solution to the modified flow \eqref{eqn:modified-flow} with initial condition $y_0$. Define $f_t = g_t \cdot e^{i\beta t}$ and let $h_t = f_t^* f_t$. This is summarised in the diagram below.
Using a calculation similar to that of the previous section, we have the same expression for the change of metric as in Lemma \ref{lem:derivative-difference}. \begin{lemma}\label{lem:reverse-derivative-difference} For any initial condition $y_0 \in W_x^-$, the induced flow on $\mathcal{G}^\mathbb C / \mathcal{G}$ satisfies \begin{equation}\label{eqn:metric-derivative} \frac{dh_t}{dt} = -2i h_t \mu_h(e^{-i \beta t} \cdot y_0) + i \beta h_t + i h_t \beta . \end{equation} \end{lemma}
\begin{proof} A similar calculation as in the proof of Lemma \ref{lem:derivative-difference} (this time using the modified flow \eqref{eqn:modified-flow}) shows that \begin{equation*} \frac{df_t}{dt} f_t^{-1} = -i\mu(g_t \cdot y_0) + \gamma(g_t \cdot y_0) + f_t (i \beta) f_t^{-1} . \end{equation*} Then \begin{align*} \frac{dh_t}{dt} & = f_t^* \left( \frac{df}{dt} f_t^{-1} \right)^* f_t + f_t^* \left( \frac{df}{dt} f_t^{-1} \right) f_t \\
& = f_t^* \left( -i \mu(g_t \cdot y_0) - \gamma(g_t \cdot y_0) + (f_t^*)^{-1} (i \beta) f_t^* - i \mu(g_t \cdot y_0) + \gamma(g_t \cdot y_0) + f_t (i \beta) f_t^{-1} \right) f_t \\
& = -2i h_t f_t^{-1} \mu(g_t \cdot y_0) f_t + i \beta h_t + i h_t \beta \\
& = -2i h_t \mu_h(e^{-i \beta t} \cdot y_0) + i \beta h_t + i h_t \beta . \qedhere \end{align*} \end{proof}
\begin{lemma}\label{lem:reverse-uniform-bound-sigma}
For every $\varepsilon > 0$ there exists a constant $C > 0$ such that for any initial condition $y_0 \in W_{x}^-$ with $\| e^{-i \beta T} \cdot y_0 - x \|_{C^1} + \| g_T \cdot y_0 - x \|_{C^1} < \varepsilon$ we have the estimate \begin{equation*}
\sup_X \sigma(h_t) \leq C \left( \| e^{-i \beta T} \cdot y_0 - x \|_{C^1} + \| g_T \cdot y_0 - x \|_{C^1} \right) \end{equation*} for all $0 \leq t \leq T$. \end{lemma}
\begin{proof}
In contrast to the proof of Lemma \ref{lem:uniform-bound-sigma}, $e^{-i \beta t} \cdot y_0$ is not in the slice $S_x$ and so it satisfies the inequality $\| \mu(e^{-i \beta t} \cdot y_0) - \beta \|_{C^0} \leq C' \| e^{-i \beta t} \cdot y_0 - x \|_{C^1}$ instead of the quadratic bound of Lemma \ref{lem:moment-map-quadratic}. Using this inequality, the same idea as in the proof of Lemma \ref{lem:uniform-bound-sigma} leads to the bound \begin{align}\label{eqn:heat-operator-bounded} \begin{split}
\left( \frac{\partial}{\partial t} + \Delta \right) \sigma(h_t) & \leq C_1 \| e^{-i \beta t} \cdot (y_0-x) \|_{C^1} \left( \mathop{\rm Tr}\nolimits(h_t) + \mathop{\rm Tr}\nolimits(h_t^{-1}) \right) \\
& = C_1 \| e^{-i \beta t} \cdot (y_0 - x) \|_{C^1} \sigma(h_t) + 2 C_1 \| e^{-i \beta t} \cdot (y_0 - x) \|_{C^1} \rank(E) \end{split} \end{align} In general, if the heat operator is bounded for all $t \geq 0$ \begin{equation*} \left( \frac{\partial}{\partial t} + \Delta \right) f(p,t) \leq C(t) f(p,t) + D(t), \quad p \in X, t \in [0, \infty) \end{equation*} for some nonnegative functions $C(t)$ and $D(t)$ independent of $p \in X$, then $f(p,t)$ satisfies the bound \begin{equation}\label{eqn:general-heat-bound} f(p,t) \leq \exp \left( \int_0^t C(s) \, ds \right) \int_0^t D(s) \, ds + f(p,0) \end{equation}
Therefore \eqref{eqn:heat-operator-bounded} implies that the problem reduces to finding a bound for $\int_0^t \| e^{-i \beta s} \cdot (y_0 - x) \|_{C^1} \, ds$. Proposition \ref{prop:exponential-convergence} shows that the backwards flow with initial condition in $W_x^-$ converges exponentially to $x$ in every Sobolev norm. Therefore there exists a neighbourhood $U$ of $x$ such that if $g_T \cdot y_0 \in U$ then there exist positive constants $C_1$ and $\eta$ such that the following estimate holds \begin{equation*}
\| y_0 - x \|_{C^1} \leq C_1 e^{-\eta T} \| g_T \cdot y_0 - x \|_{C^1} . \end{equation*}
Recall the eigenbundles $\mathop{\rm End}\nolimits(E)_-$, $\mathop{\rm End}\nolimits(E)_0$ and $\mathop{\rm End}\nolimits(E)_+$ from Section \ref{sec:preliminaries}. The above estimate shows that each component of $y_0 - x$ in $\mathop{\rm End}\nolimits(E)_-$, $\mathop{\rm End}\nolimits(E)_0$ and $\mathop{\rm End}\nolimits(E)_+$ is bounded by $C_1 e^{-\eta T} \| g_T \cdot y_0 - x \|_{C^1}$. Since the component of $e^{-i \beta t} \cdot (y_0 - x)$ in $\mathop{\rm End}\nolimits(E)_+$ is exponentially decreasing with $t$ then \begin{equation*}
\int_0^T \| (e^{-i \beta t} \cdot y_0 - x)_{\mathop{\rm End}\nolimits(E)_+} \|_{C^1} dt \leq C_1' \| y_0 - x \|_{C^1} \leq C_1 e^{-\eta T} \| g_T \cdot y_0 - x \|_{C^1} . \end{equation*} The component of $e^{-i \beta t} \cdot (y_0 - x)$ in $\mathop{\rm End}\nolimits(E)_0$ is constant with respect to $t$, and so \begin{equation*}
\int_0^T \| (e^{-i \beta t} \cdot y_0 - x)_{\mathop{\rm End}\nolimits(E)_0} \|_{C^1} dt \leq C_2' T \| y_0 - x \|_{C^1} \leq C_2 T e^{-\eta T} \| g_T \cdot y_0 - x \|_{C^1} . \end{equation*} Finally, the component of $e^{-i \beta t} \cdot (y_0 - x)$ in $\mathop{\rm End}\nolimits(E)_-$ is exponentially increasing, and so we have the bound \begin{equation*}
\int_0^T \| (e^{-i \beta t} \cdot y_0 - x)_{\mathop{\rm End}\nolimits(E)_-} \|_{C^1} \leq C_3 \| e^{-i \beta T} \cdot (y_0 - x) \|_{C^1} . \end{equation*}
Combining the estimates for the three components shows that the integral \begin{equation*}
I(t) = \int_0^t \| e^{-i \beta s} \cdot (y_0-x) \|_{C^1} \, ds \end{equation*} is bounded by \begin{equation*}
I(t) \leq C_1 e^{-\eta T} \| g_T \cdot y_0 - x \|_{C^1} + C_2 T e^{-\eta T} \| g_T \cdot y_0 - x \|_{C^1} + C_3 \| e^{-i \beta T} \cdot y_0 - x \|_{C^1} \end{equation*}
The inequality \eqref{eqn:general-heat-bound} together with the assumption $\| g_T \cdot y_0 - x \|_{C^1} + \| e^{-i \beta T} \cdot y_0 - x \|_{C^1} < \varepsilon$ shows that there exists a constant $C$ such that \begin{equation*}
\sup_X \sigma(h_t) \leq C \left( \| e^{-i \beta T} \cdot y_0 - x \|_{C^1} + \| g_T \cdot y_0 - x \|_{C^1} \right) \qedhere \end{equation*} \end{proof}
\subsubsection{Convergence in the space of Higgs bundles}
In this section we use a method analogous to that of Section \ref{sec:convergence-in-B} to show that the sequence converges in the space of Higgs bundles and that the limit is gauge equivalent to $y_0$. Given $y_0 \in W_x^-$ and $t \in (-\infty, 0]$, define $s < 0$ by $\| e^{i \beta s} \cdot g_t(y_0) \cdot y_0 - x \|_{L_k^2} = \| y_0 - x \|_{L_k^2}$. Note that this is well-defined for small values of $\| y_0 - x \|_{L_k^2}$ since Lemma \ref{lem:unstable-sets-same} shows that $g_t(y_0) \cdot y_0 \rightarrow x$ in the $C^\infty$ topology as $t \rightarrow - \infty$ and for $s < 0$ the action of $e^{i \beta s}$ exponentially increases the $C^0$ norm of the component of $g_t(y_0) \cdot y_0$ in $\mathop{\rm End}\nolimits(E)_-$. Now define $t' := \max \{ t, s \} < 0$, let $f_t(y_0) = e^{i \beta t'} \cdot g_{t'}(g_{t-t'}(y_0) \cdot y_0) $ and $z_t := e^{i \beta t'} \cdot g_t(y_0) \cdot y_0 = f_t(y_0) \cdot g_{t-t'}(y_0) \cdot y_0$. Let $h_t = f_t^* f_t$ be the associated change of metric.
\begin{center} \begin{pspicture}(0,-0.5)(8,5.5) \psline[arrowsize=5pt]{->}(4,4)(4,1) \pscurve(3.9,5)(4,4)(4.4,2.3)(5,1.2)(5.2,1)(6,0.4)
\psline[linestyle=dashed](0,1)(8,1) \psdots[dotsize=3pt](3.9,5)(4,4)(4,1)(5.2,1) \uput{4pt}[180](3.9,5){\small{$x$}} \uput{4pt}[180](4,4){\small{$g_t(y_0) \cdot y_0$}} \uput{5pt}[270](3.8,1){\small{$z_t = f_t \cdot y_0$}} \uput{5pt}[270](5.2,1){\small{$y_0$}} \uput{2pt}[270](6,0.4){$W_x^-$}
\uput{3pt}[30](4.4,2.3){\small{$g_{t}$}} \uput{3pt}[180](4,2.3){\small{$e^{i \beta t'}$}}
\uput{3pt}[90](8,1){\small{$\| y_0 - x \|_{L_k^2} = \text{constant}$}} \end{pspicture} \end{center}
Lemma \ref{lem:reverse-uniform-bound-sigma} then shows that $\sup_X \sigma(h_t) \leq C \left( \| z_t - x \|_{C^1} + \| g_{t-t'}(y_0) \cdot y_0 - x \|_{C^1} \right)$. Since either $\| z_t - x \|_{L_k^2} = \| y_0 - x \|_{L_k^2}$ (when $t < t'$) or $g_{t-t'}(y_0) \cdot y_0 = y_0$ (when $t'=t$), then Corollary \ref{cor:bounded-metric-away-from-critical} shows that $g_{t-t'}(y_0) \cdot y_0$ and $z_t$ are both bounded away from $x$ in the $L_k^2$ norm. As a consequence, $|t-t'|$ is uniformly bounded in the same way as Lemma \ref{lem:bounded-away-from-critical}. Therefore \begin{equation}\label{eqn:bounded-linear-flow}
\| e^{i \beta t} \cdot g_t(y_0) \cdot y_0 - x \|_{L_k^2} = \| e^{i \beta (t-t')} \cdot z_t - x \|_{L_k^2} \leq C' \| z_t - x \|_{L_k^2} = C' \| y_0 - x \|_{L_k^2} \end{equation} for some constant $C'$, which implies that there is a subsequence of $e^{i \beta t} \cdot g_t(y_0) \cdot y_0$ converging strongly to a limit $z_\infty^0$ in $L_{k-1}^2$. Since this is true for all $k$, then $z_\infty^0$ is a $C^\infty$ Higgs pair.
A special case of \eqref{eqn:bounded-linear-flow} is \begin{equation}\label{eqn:bounded-linear-flow-C1}
\| e^{i \beta t} \cdot g_t(y_0) \cdot y_0 - x \|_{C^1} \leq C \| e^{i \beta t} \cdot g_t(y_0) \cdot y_0 - x \|_{L_k^2} \leq C' \| y_0 - x \|_{L_k^2} \end{equation} for any $k$ such that $L_k^2 \hookrightarrow C^1$ is an embedding.
By modifying the method of Proposition \ref{prop:metrics-converge} we can now show that the change of metric converges in $C^0$. For $t \in (-\infty, 0]$, define $f_t(y_0) = e^{i \beta t} \cdot g_{t}(y_0)$ and let $t_1 \leq t_2 \leq T < 0$. This is summarised in the diagram below.
\begin{proposition}\label{prop:reverse-metrics-converge} $h_t(y_0)$ converges in the $C^0$ norm to a unique limit $h_\infty(y_0) \in \mathcal{G}^\mathbb C / \mathcal{G}$ as $t \rightarrow -\infty$. The limit depends continuously on the initial condition $y_0 \in W_x^-$. The rate of convergence is given by \begin{equation}\label{eqn:reverse-metric-convergence-rate}
\sup_X \sigma(h_t(y_0) (h_\infty(y_0))^{-1}) \leq C_2 e^{2 \eta t} \| y_0 - x \|_{L_k^2} \end{equation} where $C_2 > 0$ is a constant depending only on the orbit $\mathcal{G} \cdot x$, the constant $\eta$ is from Proposition \ref{prop:exponential-convergence} and $k$ is a positive integer chosen so that $L_k^2 \hookrightarrow C^1$ is a continuous embedding. \end{proposition}
\begin{proof} The result follows from the same procedure as the proof of Proposition \ref{prop:metrics-converge}, except now we use the estimate from Lemma \ref{lem:reverse-uniform-bound-sigma} instead of the estimate from Lemma \ref{lem:uniform-bound-sigma} and the distance-decreasing formula for the modified flow from Lemma \ref{lem:modified-distance-decreasing}.
Let $h_{t_1-t_2}(y_{t_2})$ be the change of metric connecting $y_{t_2} = g_{t_2}(y_0) \cdot y_0$ and $e^{i \beta (t_1-t_2)} \cdot y_{t_1}$. Lemma \ref{lem:reverse-uniform-bound-sigma} and the estimate \eqref{eqn:bounded-linear-flow-C1} above show that $h_{t_1-t_2}(y_{t_2})$ satisfies \begin{align*}
\sup_X \sigma(h_{t_1-t_2}(y_{t_2})) & \leq C \left( \| e^{i \beta (t_1-t_2)} \cdot y_{t_1} - x \|_{C^1} + \| y_{t_2} - x \|_{C^1} \right) \\
& \leq C C' \| y_{t_2} - x \|_{L_k^2} + C \| y_{t_2} - x \|_{C^1} \\
& \leq C'' \| y_T - x \|_{L_k^2} \leq C_2 e^{2 \eta T} \| y_0 - x \|_{L_k^2} . \end{align*} By the construction of the modified flow, the gauge transformation connecting $y_{t_2}$ and $e^{i \beta (t_1-t_2)} \cdot y_{t_1}$ is in $\mathcal{G}_*^\mathbb C$. The distance-decreasing formula for the action of $e^{i \beta (t_1 - t_2)}$ from Lemma \ref{lem:modified-distance-decreasing} then implies that \begin{equation*} \sigma(h_{t_1}(y_0) h_{t_2}(y_0)^{-1}) \leq \sigma(h_{t_1-t_2}(y_{t_2})) \end{equation*} and so the sequence $h_t(y_0)$ is Cauchy in the $C^0$ norm, by the same proof as Proposition \ref{prop:metrics-converge}. \end{proof}
Therefore $y_0$ is connected to $z_\infty^0$ by a $C^0$ gauge transformation. Elliptic regularity together with the fact that $z_\infty^0$ is a $C^\infty$ Higgs pair then shows that $y_0$ is gauge equivalent to $z_\infty^0$ by a $C^\infty$ gauge transformation.
The same method as the proof of Proposition \ref{prop:convergence-group-action} then allows us to explicitly construct a solution of the linearised flow $z_\infty^{-s} = e^{i\beta s} \cdot z_\infty^0$ converging to $x$ as $s \rightarrow +\infty$. Lemma \ref{lem:classify-neg-slice} then shows that $z_\infty^0$ is $\mathcal{G}^\mathbb C$ equivalent to a point in $S_x^-$, which is smooth by Lemma \ref{lem:slice-smooth}.
Therefore $y_0$ is $\mathcal{G}^\mathbb C$ equivalent to a point in $S_x^-$, and so we have proved the following converse to Proposition \ref{prop:convergence-group-action}.
\begin{proposition}\label{prop:unstable-maps-to-slice} For each $y_0 \in W_x^-$ there exists a $C^\infty$ gauge transformation $g \in \mathcal{G}^\mathbb C$ such that $g \cdot y_0 \in S_x^-$. \end{proposition}
\subsection{An algebraic criterion for the existence of flow lines}\label{sec:filtration-criterion}
The results of the previous two sections combine to give the following theorem. \begin{theorem}\label{thm:algebraic-flow-line} Let $E$ be a complex vector bundle over a compact Riemann surface $X$, and let $(\bar{\partial}_A, \phi)$ be a Higgs bundle on $E$. Suppose that $E$ admits a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)}) = (E, \phi)$ by Higgs subbundles such that the quotients $(Q_k, \phi_k) := (E^{(k)}, \phi^{(k)}) / (E^{(k-1)}, \phi^{(k-1)})$ are Higgs polystable and $\slope(Q_k) < \slope(Q_j)$ for all $k < j$. Then there exists $g \in \mathcal{G}^\mathbb C$ and a solution to the reverse Yang-Mills-Higgs heat flow equation with initial condition $g \cdot (\bar{\partial}_A, \phi)$ which converges to a critical point isomorphic to $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$.
Conversely, if there exists a solution of the reverse heat flow from the initial condition $(\bar{\partial}_A, \phi)$ converging to a critical point $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$ then $(\bar{\partial}_A, \phi)$ admits a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)}) = (E, \phi)$ whose graded object is isomorphic to $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$. \end{theorem}
\begin{proof} Suppose first that $(\bar{\partial}_A, \phi)$ admits a filtration $(E^{(1)}, \phi^{(1)}) \subset \cdots \subset (E^{(n)}, \phi^{(n)}) = (E, \phi)$ by Higgs subbundles such that the quotients $(Q_k, \phi_k) := (E^{(k)}, \phi^{(k)}) / (E^{(k-1)}, \phi^{(k-1)})$ are Higgs polystable and $\slope(Q_k) < \slope(Q_j)$ for all $k < j$. Let $x$ be a critical point isomorphic to $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$, and let $U$ be the neighbourhood of $x$ from Lemma \ref{lem:classify-neg-slice}. Then by applying the isomorphism $x \cong (Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$ and scaling the extension classes there exists a complex gauge transformation such that $g \cdot (\bar{\partial}_A, \phi)$ is in $U$. Applying Lemma \ref{lem:classify-neg-slice} shows that $(\bar{\partial}_A, \phi)$ is isomorphic to a point in $S_x^-$, and therefore Proposition \ref{prop:convergence-group-action} shows that $(\bar{\partial}_A, \phi)$ is isomorphic to a point in $W_x^-$.
Conversely, if $x = (Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$ is a critical point and $(\bar{\partial}_A, \phi) \in W_x^-$, then Proposition \ref{prop:unstable-maps-to-slice} shows that there exists $g \in \mathcal{G}^\mathbb C$ such that $g \cdot (\bar{\partial}_A, \phi) \in S_x^-$. Therefore Lemma \ref{lem:classify-neg-slice} shows that $(\bar{\partial}_A, \phi)$ admits a filtration whose graded object is isomorphic to $(Q_1, \phi_1) \oplus \cdots \oplus (Q_n, \phi_n)$. \end{proof}
\section{The Hecke correspondence via Yang-Mills-Higgs flow lines}\label{sec:hecke}
Let $(E, \phi)$ be a polystable Higgs bundle of rank $r$ and degree $d$, and let $(L_u, \phi_u)$ be a Higgs line bundle with $\deg L_u < \slope E$. Let $F$ be a smooth complex vector bundle $C^\infty$ isomorphic to $E \oplus L_u$ and choose a metric on $F$ such that the Higgs structure on $(E, \phi) \oplus (L_u,\phi_u)$ is a Yang-Mills-Higgs critical point in the space $\mathcal{B}(F)$ of Higgs bundles on $F$. The goal of this section is to show that Hecke modifications of the Higgs bundle $(E, \phi)$ correspond to Yang-Mills-Higgs flow lines in $\mathcal{B}(F)$ connecting the critical point $(E, \phi) \oplus (L_u, \phi_u)$ to lower critical points.
In Section \ref{sec:Higgs-hecke-review} we review Hecke modifications of Higgs bundles. Section \ref{sec:canonical-map} describes how the space of Hecke modifications relates to the geometry of the negative slice and Section \ref{sec:YMH-flow-hecke} contains the proof of Theorem \ref{thm:flow-hecke} which shows that Hecke modifications correspond to $\mathop{\rm YMH}\nolimits$ flow lines. In Section \ref{sec:secant-criterion} we give a geometric criterion for points to be connected by unbroken flow lines in terms of the secant varieties of the space of Hecke modifications inside the negative slice. In particular, this gives a complete classification of the $\mathop{\rm YMH}\nolimits$ flow lines for rank $2$ (cf. Corollary \ref{cor:rank-2-classification}). Throughout this section the notation $\mathcal{E}$ is used to denote the sheaf of holomorphic sections of the bundle $E$.
\subsection{Hecke modifications of Higgs bundles}\label{sec:Higgs-hecke-review}
The purpose of this section is to derive some basic results for Hecke modifications of Higgs bundles which will be used in Section \ref{sec:YMH-flow-hecke} to prove Theorem \ref{thm:flow-hecke}. In Section \ref{sec:secant-criterion} we extend these results to study unbroken YMH flow lines.
First recall that a Hecke modification of a holomorphic bundle $E$ over a Riemann surface $X$ is determined by points $p_1, \ldots, p_n \in X$ (not necessarily distinct) and nonzero elements $v_j \in E_{p_j}^*$ for $j = 1, \ldots, n$. This data determines a sheaf homomorphism $\mathcal{E} \rightarrow \oplus_{j=1}^n \mathbb C_{p_j}$ to the skyscraper sheaf supported at $p_1, \ldots, p_n$ with kernel a locally free sheaf $\mathcal{E}'$. This determines a holomorphic bundle $E' \rightarrow X$ which we call the \emph{Hecke modification of $E$ determined by $v = (v_1, \ldots, v_n)$}. \begin{equation*} 0 \rightarrow \mathcal{E}' \rightarrow \mathcal{E} \stackrel{v}{\longrightarrow} \bigoplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0 \end{equation*} Since the kernel sheaf $\mathcal{E}'$ only depends on the equivalence class of each $v_j$ in $\mathbb{P} E_{p_j}^*$ then from now on we abuse the notation slightly and also use $v_j \in \mathbb{P} E_{p_j}^*$ to denote the equivalence class of $v_j \in E_{p_j}^*$.
As explained in \cite[Sec. 4.5]{witten-hecke}, if $(E, \phi)$ is a Higgs bundle, then a Hecke modification of $(E, \phi)$ may introduce poles into the Higgs field and so there are restrictions on the allowable modifications which preserve holomorphicity of the Higgs field.
\begin{definition} Let $(E, \phi)$ be a Higgs bundle. A Hecke modification $E'$ of $E$ is \emph{compatible} with $\phi$ if the induced Higgs field on $E'$ is holomorphic. \end{definition}
The next result describes a basic condition for the modification to be compatible with the Higgs field.
\begin{lemma} Let $(E, \phi)$ be a Higgs bundle, and $0 \rightarrow \mathcal{E}' \rightarrow \mathcal{E} \stackrel{v}{\longrightarrow} \mathbb{C}_p \rightarrow 0$ a Hecke modification of $E$ induced by $v \in E_p^*$. Then the induced Higgs field $\phi'$ on $E'$ is holomorphic if and only if there exists an eigenvalue $\mu$ of $\phi(p)$ such that the composition $\mathcal{E} \otimes K^{-1} \stackrel{\phi - \mu \cdot \id}{\ensuremath{\relbar\mathrel{\mkern-4mu}\relbar\mathrel{\mkern-4mu}\longrightarrow}} \mathcal{E} \stackrel{v}{\rightarrow} \mathbb C_p$ is zero. \end{lemma}
\begin{proof} Let $\phi \in H^0(\mathop{\rm End}\nolimits(E) \otimes K)$. Then $\phi$ pulls back to a holomorphic Higgs field $\phi' \in H^0(\mathop{\rm End}\nolimits(E') \otimes K)$ if and only if for any open set $U \subset X$ and any section $s \in \mathcal{E}(U)$, the condition $s \in \ker (\mathcal{E}(U) \stackrel{v}{\rightarrow} \mathbb C_p(U))$ implies that $\phi(s) \in \ker ((\mathcal{E} \otimes K)(U) \stackrel{v}{\rightarrow} \mathbb C_p(U))$. After choosing a trivialisation of $K$ in a neighbourhood of $p$, we can decompose the Higgs field $\phi(p)$ on the fibre $E_p$ as follows \begin{equation}\label{eqn:fibre-extension} \xymatrix{
0 \ar[r] & \ker v \ar[r] \ar[d]^{\left. \phi(p) \right|_{\ker v}} & E_p \ar[r] \ar[d]^{\phi(p)} & \mathbb{C}_p \ar[r] \ar[d]^\mu & 0 \\ 0 \ar[r] & \ker v \ar[r] & E_p \ar[r] & \mathbb{C}_p \ar[r] & 0 } \end{equation} where scalar multiplication by $\mu$ is induced from the action of $\phi(p)$ on the quotient $\mathbb{C}_p = E_p / \ker v$. Therefore the endomorphism $\left( \phi(p) - \mu \cdot \id \right)$ maps $E_p$ into the subspace $\ker v$ and so $v \in E_p^*$ descends to a well-defined homomorphism $v' : \mathop{\rm coker}\nolimits \left( \phi(p) - \mu \cdot \id \right) \rightarrow \mathbb C$.
Conversely, given an eigenvalue $\mu$ of $\phi(p)$ and an element $v' \in \mathop{\rm coker}\nolimits(\phi(p) - \mu \cdot \id)^*$, one can choose a basis of $E_p$ and extend $v'$ to an element $v \in E_p^*$ such that $\im (\phi(p) - \mu \cdot \id) \subset \ker v$. Equivalently, $\phi(p)$ preserves $\ker v$ and so $v \in E_p^*$ defines a Hecke modification $E'$ of $E$ such that the induced Higgs field on $E'$ is holomorphic. \end{proof}
\begin{corollary}\label{cor:Higgs-compatible} Let $(E, \phi)$ be a Higgs bundle and let $0 \rightarrow \mathcal{E}' \rightarrow \mathcal{E} \stackrel{v}{\rightarrow} \mathbb C_p \rightarrow 0$ be a Hecke modification of $E$ induced by $v \in \mathbb{P} E_p^*$. The following conditions are equivalent \begin{enumerate} \item The induced Higgs field $\phi'$ on $E'$ is holomorphic.
\item There exists an eigenvalue $\mu$ of $\phi(p)$ such that $v(\phi(s)) = \mu v(s)$ for all sections $s$ of $E$.
\item There exists an eigenvalue $\mu$ of $\phi(p)$ such that $v$ descends to a well-defined $v' \in (\mathop{\rm coker}\nolimits (\phi(p) - \mu \cdot \id))^*$.
\end{enumerate} \end{corollary}
\begin{lemma}\label{lem:resolve-higgs-subsheaf} Let $(E, \phi)$ be a Higgs bundle and $(G, \varphi)$ a Higgs subsheaf. Then there exists a Higgs subbundle $(G', \varphi') \subset (E, \phi)$ such that $\rank(G) = \rank (G')$ and $(G, \varphi)$ is a Higgs subsheaf of $(G', \varphi')$. \end{lemma}
\begin{proof} Since $\dim_\mathbb C X = 1$ then a standard procedure shows that there is a holomorphic subbundle $G' \subset E$ with $\rank(G) = \rank (G')$ and $G$ is a subsheaf of $G'$, and so it only remains to show that this is a Higgs subbundle. The reverse of the construction above shows that the Higgs field $\varphi$ preserving $G$ extends to a meromorphic Higgs field $\varphi'$ preserving $G'$, and since this is the restriction of a holomorphic Higgs field $\phi$ on $E$ to the holomorphic subbundle $G'$, then $\varphi'$ must be holomorphic on $G'$. Therefore $G'$ is $\phi$-invariant. \end{proof}
\begin{definition}\label{def:m-n-stable} A Higgs bundle $(E, \phi)$ is \emph{$(m,n)$-stable (resp. $(m,n)$-semistable)} if for every proper $\phi$-invariant holomorphic subbundle $F \subset E$ we have \begin{equation*} \frac{\deg F + m}{\rank F} < \frac{\deg E - n}{\rank E} \quad \text{(resp. $\leq$)} . \end{equation*} \end{definition}
If $(E, \phi)$ is $(0,n)$-semistable then any Hecke modification $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}, \phi) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ is semistable.
\begin{definition} The \emph{space of admissible Hecke modifications} is the subset $\mathcal{N}_{\phi} \subset \mathbb{P} E^*$ corresponding to the Hecke modifications which are compatible with the Higgs field. \end{definition}
\begin{remark}\label{rem:hecke-explicit} \begin{enumerate}
\item If $\phi = 0$ then $\mathcal{N}_{0} = \mathbb{P} E^*$. If $E$ is $(0,1)$-stable then there is a well-defined map $\mathbb{P} E^* \rightarrow \mathbb{P} H^1(E^*)$. The construction of the next section generalises this to a map $\mathcal{N}_{\phi} \rightarrow \mathbb{P} \mathcal{H}^1(E^*)$ (cf. Remark \ref{rem:canonical-map}).
\item Note that the construction above is the reverse of that described in \cite{witten-hecke}, which begins with $E'$ and modifies the bundle to produce a bundle $E$ with $\deg E = \deg E' + 1$. Here we begin with $E$ and construct $E'$ via a modification $0 \rightarrow \mathcal{E}' \rightarrow \mathcal{E} \rightarrow \mathbb C_p \rightarrow 0$ since we want to interpret the compatible modifications in terms of the geometry of the negative slice (see Section \ref{sec:canonical-map}) in order to draw a connection with the results on gradient flow lines for the Yang-Mills-Higgs flow functional from Section \ref{sec:filtration-criterion}.
\item One can also see the above construction more explicitly in local coordinates as in \cite{witten-hecke} by choosing a local frame $\{ s_1, \ldots, s_n \}$ for $E$ in a neighbourhood $U$ of $p$ with local coordinate $z$ centred at $p$ and for which the evaluation map $\mathcal{E} \stackrel{v}{\rightarrow} \mathbb C_p$ satisfies $v(s_1) = s_1(0)$ and $v(s_j) = 0$ for all $j=2, \ldots, n$. Then over $U \setminus \{ p \}$, the functions $\{ \frac{1}{z} s_1(z), s_2(z) \ldots, s_n(z) \}$ form a local frame for $E'$. Equivalently, the transition function $g = \left( \begin{matrix} \frac{1}{z} & 0 \\ 0 & \id \end{matrix} \right)$ maps the trivialisation for $E$ to a trivialisation for $E'$ (note that this is the inverse of the transition function from \cite[Sec. 4.5.2]{witten-hecke} for the reason explained in the previous paragraph). In this local frame field on $E$ we write $\phi(z) = \left( \begin{matrix} A(z) & B(z) \\ C(z) & D(z) \end{matrix} \right)$. The action on the Higgs field is then \begin{equation*} g \left( \begin{matrix} A(z) & B(z) \\ C(z) & D(z) \end{matrix} \right) g^{-1} = \left( \begin{matrix} A(z) & \frac{1}{z} B(z) \\ z C(z) & D(z) \end{matrix} \right) \end{equation*} Therefore the induced Higgs field on $E'$ will have a pole at $p$ unless $B(0) = 0$. The scalar $A(0)$ in this local picture is the same as the scalar $\mu$ from \eqref{eqn:fibre-extension}, and we see that \begin{equation*} \phi(p) - \mu \cdot \id = \left( \begin{matrix} 0 & 0 \\ C(0) & D(0) - \mu \cdot \id \end{matrix} \right) \end{equation*} With respect to the basis of $E_p$ given by the choice of local frame, $v(\phi(p) - \mu \cdot \id) = 0$. Moreover, via this local frame $\mathop{\rm coker}\nolimits ( \phi(p) - \mu \cdot \id)$ is identified with a subspace of $E_p$ which contains the linear span of $s_1(0)$. Therefore we see in the local coordinate picture that $v \in E_p^*$ descends to an element of $(\mathop{\rm coker}\nolimits (\phi(p) - \mu \cdot \id))^*$. 
\end{enumerate} \end{remark}
The next result shows that the admissible Hecke modifications have an interpretation in terms of the spectral curve associated to the Higgs field. This extends the results of \cite{witten-hecke} to include the possibility that $p$ is a branch point of the spectral cover.
First recall Hitchin's construction of the spectral curve from \cite{Hitchin87-2}. Let $(E, \phi)$ be a Higgs pair. Then there is a projection map $\pi : K \rightarrow X$ and a bundle $\pi^* E$ over the total space of the canonical bundle together with a tautological section $\lambda$ of $\pi^* E$. The zero set of the characteristic polynomial of $\pi^* \phi$ defines a subvariety $S$ inside the total space of $K$. The projection $\pi$ restricts to a map $\pi : S \rightarrow X$, where for each $p \in X$ the fibre $\pi^{-1}(p)$ consists of the eigenvalues of the Higgs field $\phi(p)$. As explained in \cite{Hitchin87-2}, generically the discriminant of the Higgs field has simple zeros and in this case $S$ is a smooth curve called the \emph{spectral curve}. The induced projection $\pi : S \rightarrow X$ is then a ramified covering map with ramification divisor denoted $\mathcal{R} \subset S$.
The pullback of the Higgs field to the spectral curve is a bundle homomorphism $\pi^* E \rightarrow \pi^*(E \otimes K)$, and the eigenspaces correspond to $\ker (\pi^* \phi - \lambda \cdot \id)$, where $\lambda$ is the tautological section defined above. When the discriminant of the Higgs field has simple zeros then Hitchin shows in \cite{Hitchin87-2} that the eigenspaces form a line bundle $\mathcal{N} \rightarrow S$ and that the original bundle $E$ can be reconstructed as $\pi_* \mathcal{L}$, where the line bundle $\mathcal{L} \rightarrow S$ is formed by modifying $\mathcal{N}$ at the ramification points $0 \rightarrow \mathcal{N} \rightarrow \mathcal{L} \rightarrow \bigoplus_{p \in \mathcal{R}} \mathbb C_p \rightarrow 0$. One can reconstruct the Higgs field $\phi$ by pushing forward the endomorphism defined by the tautological section $\lambda : \mathcal{L} \rightarrow \mathcal{L} \otimes \pi^* K$.
\begin{lemma} If the discriminant of $\phi$ has simple zeros then an admissible Hecke modification of $(E, \phi)$ corresponds to a Hecke modification of the line bundle $\mathcal{L}$ over the spectral curve. \end{lemma}
\begin{proof} Consider the pullback bundle $\pi^* E \rightarrow S$. The pullback of the Higgs field induces a sheaf homomorphism $(\pi^* \phi - \lambda \cdot \id) : \pi^* \mathcal{E} \otimes (\pi^* K)^{-1} \rightarrow \pi^* \mathcal{E}$. As explained in \cite[Sec. 2.6]{witten-hecke}, when the discriminant of $\phi$ has simple zeros then the cokernel of this homomorphism is the line bundle $\mathcal{L} \rightarrow S$ such that $\mathcal{E} \cong \pi_* \mathcal{L}$.
For $\mu \in S$ such that $p = \pi(\mu)$, there is an isomorphism of the stalks of the skyscraper sheaves $\mathbb C_p \cong \pi_* (\mathbb C_\mu)$. Then a Hecke modification $\mathcal{L} \stackrel{v'}{\rightarrow} \mathbb C_\mu$ given by nonzero $v' \in \mathcal{L}_\mu^*$ induces a Hecke modification $v = v' \circ q \circ \pi^* : \mathcal{E} \rightarrow \mathbb C_p$, defined by the commutative diagram below. \begin{equation*} \xymatrix{ \pi^* \mathcal{E} \otimes (\pi^* K)^{-1} \ar[rr]^(0.6){\pi^*\phi - \lambda \cdot \id} & & \pi^* \mathcal{E} \ar[r]^(0.3)q & \mathop{\rm coker}\nolimits(\pi^* \phi - \lambda \cdot \id) \ar[dr]^(0.6){v'} \ar[r] & 0 \\ & & \mathcal{E} \ar[u]^{\pi^*} \ar[rr]^v & & \mathbb C_p \ar[r] & 0 } \end{equation*} The definition of $v$ implies that for any open set $U \subset X$ with a trivialisation of $K$ in a neighbourhood of $p$, and all $s \in \mathcal{E}(U)$ we have \begin{equation*} v(\phi s) = v' \circ q(\pi^* (\phi s)) = v' \circ q(\mu \, \pi^* (s)) = \mu \, v' \circ q \circ \pi^* (s) = \mu \, v(s) \end{equation*} and so $v$ is compatible with the Higgs field by Corollary \ref{cor:Higgs-compatible}.
Conversely, let $v \in E_p^*$ be compatible with the Higgs field $\phi$. Corollary \ref{cor:Higgs-compatible} shows that this induces a well-defined element of $\mathop{\rm coker}\nolimits(\phi - \mu \cdot \id)^*$. Consider the endomorphisms $\phi(p) - \mu \cdot \id$ on the fibre of $E$ over $p \in X$ and $\pi^* \phi(\mu) - \mu \cdot \id$ on the fibre of $\pi^* E$ over $\mu \in S$.
\begin{equation*} \xymatrix{ (\pi^* E \otimes \pi^* K^{-1})_\mu \ar[rr]^(0.6){\pi^* \phi - \mu \cdot \id} & & (\pi^* E)_\mu \ar[r] & \mathop{\rm coker}\nolimits (\pi^* \phi - \mu \cdot \id)_\mu \ar[r] & 0\\ (E \otimes K^{-1})_p \ar[rr]^(0.6){\phi - \mu \cdot \id} \ar[u] & & E_p \ar[r] \ar[u] & \mathop{\rm coker}\nolimits(\phi - \mu \cdot \id)_p \ar[r] \ar@{-->}[u] & 0 } \end{equation*}
The universal property of the cokernel defines a map $\mathop{\rm coker}\nolimits(\phi - \mu \cdot \id)_p \rightarrow \mathop{\rm coker}\nolimits(\pi^* \phi - \mu \cdot \id)_\mu$. Since the discriminant of the Higgs field has simple zeros, both fibres are one-dimensional, and so this map is an isomorphism. Therefore $v$ induces a well-defined homomorphism on the fibre $\mathop{\rm coker}\nolimits(\pi^* \phi - \mu \cdot \id)_\mu \rightarrow \mathbb C$, and hence a Hecke modification of $\mathcal{L}$ at $\mu \in S$. \end{proof}
\begin{remark} When $p \in X$ is not a branch point of $\pi : S \rightarrow X$ then this result is contained in \cite{witten-hecke}. \end{remark}
\begin{corollary} If the discriminant of $\phi$ has simple zeros then the space of Hecke modifications is $\mathcal{N}_{\phi} = S$. \end{corollary}
\subsection{Secant varieties associated to the space of Hecke modifications}\label{sec:canonical-map}
The purpose of this section is to connect the geometry of the space of Hecke modifications with the geometry of the negative slice at a critical point in order to prepare for the proof of Theorem \ref{thm:flow-hecke} in the next section.
Let $(E_1, \phi_1)$ and $(E_2, \phi_2)$ be Higgs bundles and let $\bar{\partial}_A$ denote the induced holomorphic structure on $E_1^* E_2$. Then there is an elliptic complex \begin{equation*} \Omega^0(E_1^* E_2) \stackrel{L_1}{\longrightarrow} \Omega^{0,1}(E_1^* E_2) \oplus \Omega^{1,0}(E_1^* E_2) \stackrel{L_2}{\longrightarrow} \Omega^{1,1}(E_1^* E_2) , \end{equation*} where $L_1(u) = (\bar{\partial}_A u, \phi_2 u - u \phi_1)$ and $L_2(a, \varphi) = (\bar{\partial}_A \varphi + [a, \phi])$. Let $\mathcal{H}^0 = \ker L_1$, $\mathcal{H}^1 = \ker L_1^* \cap \ker L_2$ and $\mathcal{H}^2 = \ker L_2^*$ denote the spaces of harmonic forms. Recall that if $(E_1, \phi_1)$ and $(E_2, \phi_2)$ are both Higgs stable and $\slope(E_2) < \slope(E_1)$ then $\mathcal{H}^0(E_1^* E_2) = 0$.
Now consider the special case where $(E_1, \phi_1)$ is $(0, n)$-stable and $(E_2, \phi_2)$ is a Higgs line bundle. Let $\mathcal{B}$ denote the space of Higgs bundles on the smooth bundle $E_1 \oplus E_2$ and choose a metric such that $(E_1, \phi_1) \oplus (E_2, \phi_2)$ is a critical point of $\mathop{\rm YMH}\nolimits : \mathcal{B} \rightarrow \mathbb R$. Definition \ref{def:slice} shows that $\mathcal{H}^1(E_1^* E_2) \cong S_x^-$ is the negative slice at this critical point.
Let $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}_1, \phi_1) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification defined by $v_1, \ldots, v_n \in \mathbb{P} E_1^*$. Applying the functor $\mathop{\rm Hom}\nolimits(\cdot, \mathcal{E}_2)$ to the short exact sequence $0 \rightarrow \mathcal{E}' \rightarrow \mathcal{E}_1 \rightarrow \oplus_j \mathbb C_{p_j} \rightarrow 0$ gives us an exact sequence of sheaves $0 \rightarrow \mathop{\rm Hom}\nolimits(\mathcal{E}_1, \mathcal{E}_2) \rightarrow \mathop{\rm Hom}\nolimits(\mathcal{E}', \mathcal{E}_2) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j}^* \rightarrow 0$, where the final term comes from the isomorphism $\mathop{\rm Ext}\nolimits^1(\oplus_j \mathbb C_{p_j}, \mathcal{E}_2) \cong \mathop{\rm Hom}\nolimits(\mathcal{E}_2, \oplus_j \mathbb C_{p_j} \otimes K)^* \cong \oplus_j \mathbb C_{p_j}^*$. Note that this depends on a choice of trivialisations of $E_2$ and $K$; however, the kernel of the map $\mathop{\rm Hom}\nolimits(\mathcal{E}', \mathcal{E}_2) \rightarrow \oplus_j \mathbb C_{p_j}^*$ is independent of these choices. This gives us the following short exact sequence of Higgs sheaves \begin{equation}\label{eqn:dual-short-exact} 0 \rightarrow \mathcal{E}_1^* \mathcal{E}_2 \rightarrow (\mathcal{E}')^* \mathcal{E}_2 \rightarrow \bigoplus_{j=1}^n \mathbb C_{p_j}^* \rightarrow 0 \end{equation}
There is an induced map $\Omega^0((E')^* E_2) \rightarrow \Omega^{1,0}((E')^* E_2)$ given by $s \mapsto \phi_2 s - s \phi'$. Recall from Corollary \ref{cor:Higgs-compatible} that there exists an eigenvalue $\mu_j$ for $\phi_1(p_j)$ such that $v_j(\phi_1(p_j) - \mu_j \cdot \id) = 0$ for each $j=1, \ldots, n$. From the above exact sequence there is an induced homomorphism $\Omega^{1,0}((E')^* E_2) \stackrel{ev^1}{\longrightarrow} \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$. The component of $ev^1(\phi_2 s - s \phi')$ in $\mathbb C_{p_j}$ is $(\phi_2(p_j) - \mu_j ) s$. In particular, $\phi_2 s - s \phi' \in \ker(ev^1)$ if and only if $\phi_2(p_j) = \mu_j$ for all $j=1, \ldots, n$.
\begin{definition}\label{def:Hecke-compatible} Let $(E_1, \phi_1)$ be a Higgs bundle, and $(E_2, \phi_2)$ a Higgs line bundle. The \emph{space of Hecke modifications compatible with $\phi_1$ and $\phi_2$}, denoted $\mathcal{N}_{\phi_1, \phi_2} \subset \mathcal{N}_{\phi_1}$, is the set of Hecke modifications compatible with $\phi_1$ such that $ev^1(\phi_2 s - s \phi') = 0$ for all $s \in \Omega^0((E')^* E_2)$. \end{definition}
\begin{remark}\label{rem:miniscule-compatible} Note that if $n = 1$ and $v \in \mathbb{P} E_1^*$ is a Hecke modification compatible with $\phi_1$, then the requirement that $v \in \mathcal{N}_{\phi_1, \phi_2}$ reduces to $\phi_2(p) = \mu$, where $\mu$ is the eigenvalue of $\phi_1(p)$ from Corollary \ref{cor:Higgs-compatible}. Such a $\phi_2 \in H^0(\mathop{\rm End}\nolimits(E_2) \otimes K) = H^0(K)$ always exists since the canonical linear system is basepoint free and therefore $\bigcup_{\phi_2 \in H^0(K)} \mathcal{N}_{\phi_1, \phi_2} = \mathcal{N}_{\phi_1}$. If $n > 1$ then $\phi_2$ with these properties may not exist for some choices of $\phi_1 \in H^0(\mathop{\rm End}\nolimits(E_1) \otimes K)$ and $v_1, \ldots, v_n \in \mathbb{P} E_1^*$ (the existence of $\phi_2$ depends on the complex structure of the surface $X$). If $\phi_1 = 0$, then we can choose $\phi_2 = 0$ and in this case $\mathcal{N}_{\phi_1, \phi_2} = \mathcal{N}_{\phi_1} = \mathbb{P} E_1^*$ (this corresponds to the case of the Yang-Mills flow in Theorem \ref{thm:flow-hecke}). \end{remark}
\begin{lemma} Let $(E_1, \phi_1)$ be Higgs polystable and $(E_2, \phi_2)$ be a Higgs line bundle. Let $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}_1, \phi_1) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification defined by distinct $v_1, \ldots, v_n \in \mathcal{N}_{\phi_1, \phi_2}$.
Then there is an exact sequence \begin{equation}\label{eqn:hyper-exact-sequence} 0 \rightarrow \mathcal{H}^0(E_1^* E_2) \rightarrow \mathcal{H}^0((E')^* E_2) \rightarrow \mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2) \end{equation} \end{lemma}
\begin{proof}
The short exact sequence \eqref{eqn:dual-short-exact} leads to the following commutative diagram of spaces of smooth sections \begin{equation*} \xymatrix@C=1.6em{ 0 \ar[r] & \Omega^0(E_1^* E_2) \ar[r]^{i^*} \ar[d]^{L_1} & \Omega^0((E')^* E_2) \ar[r]^{ev^0} \ar[d]^{L_1} & \bigoplus_{j=1}^n \mathbb C_{p_j} \ar[r] & 0 \\ 0 \ar[r] & \Omega^{0,1}(E_1^* E_2) \oplus \Omega^{1,0}(E_1^* E_2) \ar[r]^(0.47){i^*} & \Omega^{0,1}((E')^* E_2) \oplus \Omega^{1,0}((E')^* E_2) \ar[r]^(0.62){ev^1} & \bigoplus_{j=1}^n \mathbb C_{p_j} \oplus \mathbb C_{p_j} \ar[r] & 0 } \end{equation*} Since $\bar{\partial}_A s$ depends on the germ of a section around a point, then there is no well-defined map $\bigoplus_{j=1}^n \mathbb C_{p_j} \rightarrow \bigoplus_{j=1}^n \mathbb C_{p_j} \oplus \mathbb C_{p_j}$ making the diagram commute, so the exact sequence \eqref{eqn:hyper-exact-sequence} does not follow immediately from the standard construction, and therefore we give an explicit construction below.
First construct a map $\mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2)$ as follows. Given $z \in \mathbb C^n$, choose a smooth section $s' \in \Omega^0((E')^* E_2)$ such that $ev^0(s') = z$ and $ev^1(\bar{\partial}_A s') = 0$. Since $\phi_2(p_j) = \mu_j$, then $ev^1(\phi_2 s' - s' \phi') = 0$ and so $ev^1(L_1 s') = 0$. Therefore $(\bar{\partial}_A s', \phi_2 s' - s' \phi') = i^*(a, \varphi)$ for some $(a, \varphi) \in \Omega^{0,1}(E_1^* E_2) \oplus \Omega^{1,0}(E_1^* E_2)$. Let $[(a, \varphi)] \in \mathcal{H}^1(E_1^* E_2)$ denote the harmonic representative of $(a, \varphi)$. Define the map $\mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2)$ by $z \mapsto [(a, \varphi)]$.
To see that this is well-defined independent of the choice of $s' \in \Omega^0((E')^* E_2)$, note that if $s'' \in \Omega^0((E')^* E_2)$ is another section such that $ev^0(s'') = z$ and $ev^1(\bar{\partial}_A s'') = 0$, then $ev^0(s'' - s') = 0$, and so $s'' - s' = i^*(s)$ for some $s \in \Omega^0(E_1^* E_2)$. Therefore $L_1(s'' - s') = i^* L_1(s)$ with $[L_1(s)] = 0 \in \mathcal{H}^1(E_1^* E_2)$, and so $s'$ and $s''$ determine the same harmonic representative in $\mathcal{H}^1(E_1^* E_2)$.
To check exactness of \eqref{eqn:hyper-exact-sequence} at the term $\mathbb C^n$, note that if $z = ev^0(s')$ for some harmonic $s' \in \mathcal{H}^0((E')^* E_2)$, then $L_1(s') = 0 = i^* (0,0)$, and so $z \in \mathbb C^n$ maps to $0 \in \mathcal{H}^1(E_1^* E_2)$. Moreover, if $z$ maps to $0 \in \mathcal{H}^1(E_1^*E_2)$, then there exists $s' \in \Omega^0((E')^* E_2)$ such that $L_1(s') = i^*(a, \varphi)$ where $(a, \varphi) \in \Omega^{0,1}(E_1^* E_2) \oplus \Omega^{1,0}(E_1^* E_2)$ and $(a, \varphi) = L_1(s)$ for some $s \in \Omega^0(E_1^* E_2)$. Therefore $s'$ and $i^* s$ differ by a harmonic section of $\mathcal{H}^0((E')^* E_2)$. Since $ev^0(i^* s) = 0$ then $z$ is the image of this harmonic section under the map $\mathcal{H}^0((E')^* E_2) \rightarrow \mathbb C^n$.
To check exactness at $\mathcal{H}^1(E_1^* E_2)$, given $z \in \mathbb C^n$ construct $(a, \varphi)$ as above and note that $i^*(a, \varphi) = L_1 s'$ for some $s' \in \Omega^0((E')^* E_2)$. Therefore $i^*[(a, \varphi)] = 0 \in \mathcal{H}^1((E')^* E_2)$ and so the image of $\mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2)$ is contained in the kernel of $\mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2)$. Now suppose that the image of $[(a, \varphi)]$ is zero in $\mathcal{H}^1((E')^* E_2)$, i.e. $i^*(a, \varphi) = L_1 s'$ for some $s' \in \Omega^0((E')^* E_2)$. Let $z = ev^0(s')$. Note that $z = 0$ implies that $s' = i^* s$ for some $s \in \Omega^0(E_1^* E_2)$, and so $[(a, \varphi)] = 0$. If $z \neq 0$ then there exists $s'' \in \Omega^0((E')^* E_2)$ such that $ev^1(L_1(s'')) = 0$ and $ev^0(s'') = z$. Then $L_1(s'') = i^*(a'', \varphi'')$ for some $(a'', \varphi'') \in \Omega^{0,1}(E_1^* E_2) \oplus \Omega^{1,0}(E_1^* E_2)$. Moreover, $ev^0(s'' - s') = 0$, so $s'' - s' = i^* s$ for some $s \in \Omega^0(E_1^* E_2)$. Commutativity implies that $L_1 s = (a'', \varphi'') - (a, \varphi)$, and so the harmonic representatives $[(a, \varphi)]$ and $[(a'', \varphi'')]$ are equal. Therefore $[(a, \varphi)]$ is the image of $z$ by the map $\mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2)$, which completes the proof of exactness at $\mathcal{H}^1(E_1^* E_2)$.
Exactness at the rest of the terms in the sequence \eqref{eqn:hyper-exact-sequence} then follows from standard methods. \end{proof}
For any stable Higgs bundle $(E, \phi)$ with $d = \deg E$ and $r = \rank E$, define the \emph{generalised Segre invariant} by \begin{equation*} s_k(E, \phi) := k d - r \left( \max_{F \subset E, \rank F = k} \deg F \right) . \end{equation*} where the maximum is taken over all $\phi$-invariant holomorphic subbundles of rank $k$. Note that $s_k(E, \phi) \geq s_k(E, 0) =: s_k(E)$ and \begin{equation*} \frac{1}{rk} s_k(E, \phi) = \min_{F \subset E, \rank F = k} \left( \slope(E) - \slope(F) \right) \end{equation*} Note that any Hecke modification $(E', \phi') \hookrightarrow (E, \phi)$ with $\deg E - \deg E' = n$ has Segre invariant $s_k(E', \phi') \geq s_k(E, \phi) - nk$. As a special case, $(E', \phi')$ is stable if $n < \frac{1}{k} s_k(E, \phi)$ for all $k = 1, \ldots, r-1$.
A theorem of Lange \cite[Satz 2.2]{Lange83} shows that a general stable holomorphic bundle $E$ satisfies $s_k(E) \geq k(r - k)(g-1)$ for all $k = 1, \ldots, r - 1$. Since there is a dense open subset of stable Higgs bundles whose underlying holomorphic bundle is stable, Lange's theorem also gives the same lower bound on the Segre invariant for a general stable Higgs bundle.
\begin{lemma}\label{lem:segre-bound} Let $0 \rightarrow (E', \phi') \rightarrow (E, \phi) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification defined by distinct points $v_1, \ldots, v_n \in \mathbb{P} E^*$ such that $n < \frac{1}{k} s_k(E, \phi)$ for all $k = 1, \ldots, r-1$. Then $\slope(G) < \slope (E')$ for any proper non-zero Higgs subbundle $(G, \phi_G) \subset (E, \phi)$. In particular, this condition is satisfied if $(E, \phi)$ is a general stable Higgs bundle and $n < g-1$. \end{lemma}
\begin{proof} Let $k = \rank G$ and $h = \deg G$. Then the lower bound on the Segre invariant implies that \begin{align*} \slope(E') - \slope(G) = \frac{d - n}{r} - \frac{h}{k} & = \frac{1}{rk} \left(kd - kn - rh \right) \\
& \geq \frac{1}{rk} \left( s_k(E, \phi) - kn \right) \end{align*} Therefore if $n < \frac{1}{k} s_k(E, \phi)$ then $\slope(E') - \slope(G) > 0$ for any Higgs subbundle of rank $k$. If $n < g-1$ then \cite[Satz 2.2]{Lange83} shows that this condition is satisfied for general stable Higgs bundles. \end{proof}
\begin{corollary}\label{cor:n-dim-kernel} Let $(E_1, \phi_1)$ be a stable Higgs bundle, let $n < \frac{1}{k} s_k(E_1, \phi_1)$ for all $k=1, \ldots, \rank(E_1)-1$ and let $(E_2, \phi_2)$ be a Higgs line bundle such that $\deg E_2 < \frac{\deg E_1 - n}{\rank E_1}$. Then given any set of $n$ distinct points $\{ v_1, \ldots, v_n \} \subset \mathcal{N}_{\phi_1, \phi_2}$ there is a well-defined $n$-dimensional subspace $\ker (\mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2))$. \end{corollary}
\begin{proof} Let $(E', \phi')$ be the Hecke modification of $(E_1, \phi_1)$ determined by $\{ v_1, \ldots, v_n \} \subset \mathbb{P} E_1^*$. The lower bound on the Segre invariant implies that $(E', \phi')$ is Higgs stable, and therefore $\mathcal{H}^0((E')^* E_2) = 0$ since $\slope(E_2) < \slope(E') = \frac{\deg E_1 - n}{\rank E_1}$. The exact sequence \eqref{eqn:hyper-exact-sequence} then reduces to \begin{equation*} 0 \rightarrow \mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2) \end{equation*} and so $\ker (\mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2))$ is a well-defined $n$-dimensional subspace of $\mathcal{H}^1(E_1^* E_2)$ associated to $\{ v_1, \ldots, v_n \}$. \end{proof}
\begin{remark}\label{rem:canonical-map} As noted above, the maps $\mathbb C^n \rightarrow \mathcal{H}^1(E_1^* E_2)$ depend on choosing trivialisations, but different choices lead to the same map up to a change of basis of $\mathbb C^n$, and so the subspace $\ker (\mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2))$ is independent of these choices.
In the special case where $n=1$, then this construction gives a well-defined map $\mathcal{N}_{\phi_1, \phi_2} \rightarrow \mathbb{P} \mathcal{H}^1(E_1^* E_2)$. When $n < \frac{1}{k} s_k(E_1, \phi_1)$ for all $k$, then Corollary \ref{cor:n-dim-kernel} shows that any $n$ distinct points $v_1, \ldots, v_n$ span a nondegenerate copy of $\mathbb{P}^{n-1}$ in $\mathbb{P} \mathcal{H}^1(E_1^* E_2)$.
In the special case where $\phi_1 = \phi_2 = 0$ and $E_2$ is trivial, then $\mathcal{N}_{\phi_1, \phi_2} = \mathbb{P} E_1^*$ and $\mathcal{H}^1(E_1^*) \cong H^{0,1}(E_1^*) \oplus H^{1,0}(E_1^*)$. Then the map $\mathbb{P} E_1^* \rightarrow \mathcal{H}^1(E_1^*) \rightarrow H^{0,1}(E_1^*) \cong H^0(E_1 \otimes K)^*$ is the usual map defined for holomorphic bundles (cf. \cite[p804]{HwangRamanan04}). \end{remark}
\begin{definition}\label{def:secant-variety} The \emph{$n^{th}$ secant variety}, denoted $\Sec^n(\mathcal{N}_{\phi_1, \phi_2}) \subset \mathbb{P} \mathcal{H}^1(E_1^* E_2)$, is the union of the subspaces $\vecspan \{ v_1, \ldots, v_n \} \subset \mathbb{P} \mathcal{H}^1(E_1^* E_2)$ taken over all $n$-tuples of distinct points $v_1, \ldots, v_n \in \mathcal{N}_{\phi_1, \phi_2}$. \end{definition}
The next lemma is a Higgs bundle version of \cite[Lemma 3.1]{NarasimhanRamanan69}. Since the proof is similar to that in \cite{NarasimhanRamanan69}, it is omitted.
\begin{lemma}\label{lem:nr-higgs} Let $0 \rightarrow (E_2, \phi_2) \rightarrow (F, \tilde{\phi}) \rightarrow (E_1, \phi_1) \rightarrow 0$ be an extension of Higgs bundles defined by the extension class $[(a, \varphi)] \in \mathcal{H}^1(E_1^* E_2)$. Let $(E', \phi') \stackrel{i}{\longrightarrow} (E_1, \phi_1)$ be a Higgs subsheaf such that $i^*[(a, \varphi)] = 0 \in \mathcal{H}^1((E')^* E_2)$. Then $(E', \phi')$ is a Higgs subsheaf of $(F, \tilde{\phi})$. \end{lemma}
\begin{equation*} \xymatrix{
& & & (E', \phi') \ar[d]^i \ar@{-->}[dl] \\ 0 \ar[r] & (E_2, \phi_2) \ar[r] & (F, \tilde{\phi}) \ar[r] & (E_1, \phi_1) \ar[r] & 0 \\ } \end{equation*}
\begin{corollary}\label{cor:linear-span} Let $(E_1, \phi_1)$ be stable, let $n < \frac{1}{k} s_k(E_1, \phi_1)$ for all $k=1, \ldots, \rank(E_1)-1$, let $(E_2, \phi_2)$ be a Higgs line bundle and suppose that $\deg E_2 < \frac{\deg E_1 - n}{\rank E_1}$. Let $0 \rightarrow (E_2, \phi_2) \rightarrow (F, \tilde{\phi}) \rightarrow (E_1, \phi_1) \rightarrow 0$ be an extension of Higgs bundles with extension class $[(a, \varphi)] \in \mathcal{H}^1(E_1^* E_2)$. Let $0 \rightarrow (E', \phi') \stackrel{i}{\hookrightarrow} (E_1, \phi_1) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification determined by distinct points $\{v_1, \ldots, v_n\} \subset \mathcal{N}_{\phi_1, \phi_2}$.
Then $(E', \phi')$ is a subsheaf of $(F, \tilde{\phi})$ if $[(a, \varphi)] \in \vecspan\{ v_1, \ldots, v_n \} \subset \mathcal{H}^1(E_1^* E_2)$. \end{corollary}
\begin{proof} If $[(a, \varphi)] \in \vecspan\{ v_1, \ldots, v_n \}$ then $[(a, \varphi)] \in \ker (\mathcal{H}^1(E_1^* E_2) \rightarrow \mathcal{H}^1((E')^* E_2))$ by Corollary \ref{cor:n-dim-kernel}, and therefore $(E', \phi')$ is a subsheaf of $(F, \tilde{\phi})$ by Lemma \ref{lem:nr-higgs}. \end{proof}
The next lemma gives a condition on the extension class $[(a, \varphi)] \in \mathcal{H}^1(E_1^* E_2)$ for $(E', \phi')$ to be the subsheaf of largest degree which lifts to a subsheaf of $(F, \tilde{\phi})$. This is used to study unbroken flow lines in Section \ref{sec:secant-criterion}.
\begin{lemma}\label{lem:nondegenerate-maximal} Let $(E_1, \phi_1)$ be a stable Higgs bundle, choose $n$ such that $2n-1 < \frac{1}{k} s_k(E_1, \phi_1)$ for all $k=1, \ldots, \rank(E_1)-1$, let $(E_2, \phi_2)$ be a Higgs line bundle and suppose that $\deg E_2 < \frac{\deg E_1 - (2n-1)}{\rank E_1}$. Let $0 \rightarrow (E_2, \phi_2) \rightarrow (F, \tilde{\phi}) \rightarrow (E_1, \phi_1) \rightarrow 0$ be an extension of Higgs bundles with extension class $[(a, \varphi)] \in \Sec^n(\mathcal{N}_{\phi_1, \phi_2}) \setminus \Sec^{n-1}(\mathcal{N}_{\phi_1, \phi_2}) \subset \mathbb{P} \mathcal{H}^1(E_1^* E_2)$ and let $0 \rightarrow (E', \phi') \stackrel{i}{\hookrightarrow} (E_1, \phi_1) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification determined by distinct points $v_1, \ldots, v_n \in \mathcal{N}_{\phi_1, \phi_2}$ such that $i^* [(a, \varphi)] = 0$.
Let $(\mathcal{E}'', \phi'') \stackrel{i''}{\hookrightarrow} (\mathcal{E}_1, \phi_1)$ be a subsheaf such that $(i'')^* [(a, \varphi)] = 0 \in \mathcal{H}^1((E'')^* E_2)$ and $\rank E'' = \rank E_1$. Then $\deg(E'') \leq \deg(E')$. \end{lemma}
\begin{proof} Let $\{ v_1'', \ldots, v_m'' \} \subset \mathcal{N}_{\phi_1, \phi_2}$ be the set of distinct points defining the Hecke modification $(\mathcal{E}'', \phi'') \stackrel{i''}{\hookrightarrow} (\mathcal{E}_1, \phi_1)$. Then $i^* [(a, \varphi)] = 0$ and $(i'')^*[(a, \varphi)] = 0$ together imply that $[(a, \varphi)] \in \vecspan\{ v_1, \ldots, v_n \} \cap \vecspan \{ v_1'', \ldots, v_m''\}$. Either $m + n > 2n-1$ (and so $\deg E'' \leq \deg E'$) or $m + n \leq 2n-1$ in which case Corollary \ref{cor:n-dim-kernel} together with the lower bound $2n-1 < \frac{1}{k} s_k(E_1, \phi_1)$ implies that $\vecspan \{ v_1, \ldots, v_n \} \cap \vecspan \{v_1'', \ldots, v_m'' \}$ is the linear span of $\{ v_1, \ldots, v_n \} \cap \{ v_1'', \ldots, v_m'' \}$. Since $m+n \leq 2n-1$ then $\{ v_1, \ldots, v_n \} \cap \{ v_1'', \ldots, v_m'' \}$ is a strict subset of $\{ v_1, \ldots, v_n\}$, which is not possible since $[(a, \varphi)] \notin \Sec^{n-1}(\mathcal{N}_{\phi_1, \phi_2})$. Therefore $\deg E'' \leq \deg E'$. \end{proof}
\subsection{Constructing Hecke modifications of Higgs bundles via the Yang-Mills-Higgs flow.}\label{sec:YMH-flow-hecke}
Let $(E, \phi)$ be a stable Higgs bundle and $L_u$ a line bundle with $\deg L_u < \frac{\deg E - 1}{\rank E}$, and let $E'$ be a Hecke modification of $E$ which is compatible with the Higgs field \begin{equation*} 0 \rightarrow (\mathcal{E}', \phi') \stackrel{i}{\hookrightarrow} (\mathcal{E}, \phi) \stackrel{v}{\rightarrow} \mathbb C_p \rightarrow 0 . \end{equation*}
The goal of this section is to construct critical points $x_u = (L_u, \phi_u) \oplus (E, \phi)$ and $x_\ell = (L_\ell, \phi_\ell) \oplus (E', \phi')$ together with a broken flow line connecting $x_u$ and $x_\ell$. The result of Theorem \ref{thm:algebraic-flow-line} shows that this amounts to constructing a Higgs field $\phi_u \in H^0(K)$, a Higgs pair $(F, \tilde{\phi})$ in the unstable set of $x_u$ and a complex gauge transformation $g \in \mathcal{G}^\mathbb C$ such that $(E', \phi')$ is a Higgs subbundle of $g \cdot (F, \tilde{\phi})$.
\begin{lemma}\label{lem:construct-Higgs-extension} Let $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}, \phi) \stackrel{v}{\rightarrow} \mathbb C_p \rightarrow 0$ be a Hecke modification such that $(E, \phi)$ and $(E', \phi')$ are both Higgs semistable, and let $L_u$ be a line bundle with $\deg L_u < \slope(E') < \slope(E)$. Then there exists a Higgs field $\phi_u \in H^0(K)$ and a non-trivial Higgs extension $(F, \tilde{\phi})$ of $(L_u, \phi_u)$ by $(E, \phi)$ such that $(E', \phi')$ is a Higgs subsheaf of $(F, \tilde{\phi})$. \end{lemma}
\begin{proof} By Remark \ref{rem:miniscule-compatible}, there exists $\phi_u \in H^0(K)$ such that $v \in \mathcal{N}_{\phi, \phi_u}$. Since $(E', \phi')$ is semistable with $\slope(E') > \slope(L_u)$ then $\mathcal{H}^0((E')^* L_u) = 0$ and so the exact sequence \eqref{eqn:hyper-exact-sequence} shows that the Hecke modification $v \in \mathbb{P} E^*$ determines a one-dimensional subspace of $\mathcal{H}^1(E^* L_u)$, and that any non-trivial extension class in this subspace is in the kernel of the map $\mathcal{H}^1(E^* L_u) \rightarrow \mathcal{H}^1((E')^* L_u)$. Let $0 \rightarrow (L_u, \phi_u) \rightarrow (F, \tilde{\phi}) \rightarrow (E, \phi) \rightarrow 0$ be such an extension. Then Lemma \ref{lem:nr-higgs} shows that $(E', \phi')$ is a Higgs subsheaf of $(F, \tilde{\phi})$. \end{proof}
We can now use this result to relate Hecke modifications at a single point with $\mathop{\rm YMH}\nolimits$ flow lines.
\begin{theorem}\label{thm:flow-hecke}
\begin{enumerate}
\item Let $0 \rightarrow (E', \phi') \rightarrow (E, \phi) \stackrel{v}{\rightarrow} \mathbb C_p \rightarrow 0$ be a Hecke modification such that $(E, \phi)$ is stable and $(E', \phi')$ is semistable, and let $L_u$ be a line bundle with $\deg L_u + 1 < \slope(E') < \slope(E)$. Then there exist sections $\phi_u, \phi_\ell \in H^0(K)$, a line bundle $L_\ell$ with $\deg L_\ell = \deg L_u + 1$ and a metric on $E \oplus L_u$ such that $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E_{gr}', \phi_{gr}') \oplus (L_\ell, \phi_\ell)$ are critical points connected by a $\mathop{\rm YMH}\nolimits$ flow line, where $(E_{gr}', \phi_{gr}')$ is isomorphic to the graded object of the Seshadri filtration of $(E', \phi')$.
\item Let $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E', \phi') \oplus (L_\ell, \phi_\ell)$ be critical points connected by a $\mathop{\rm YMH}\nolimits$ flow line such that $L_u, L_\ell$ are line bundles with $\deg L_\ell = \deg L_u + 1$, $(E, \phi)$ is stable and $(E', \phi')$ is polystable with $\deg L_u + 1 < \slope(E') < \slope(E)$. If $(E', \phi')$ is Higgs stable then it is a Hecke modification of $(E, \phi)$. If $(E', \phi')$ is Higgs polystable then it is the graded object of the Seshadri filtration of a Hecke modification of $(E, \phi)$.
\end{enumerate}
\end{theorem}
\begin{proof}[Proof of Theorem \ref{thm:flow-hecke}] Given a Hecke modification $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}, \phi) \rightarrow \mathbb C_p \rightarrow 0$ as in Lemma \ref{lem:construct-Higgs-extension}, choose $\phi_u \in H^0(K)$ such that $v \in \mathcal{N}_{\phi, \phi_u}$ and apply a gauge transformation to $E \oplus L_u$ such that $x_u = (E, \phi) \oplus (L_u, \phi_u)$ is a critical point of $\mathop{\rm YMH}\nolimits$. The harmonic representative of the extension class $[(a, \varphi)] \in \mathcal{H}^1(E^* L_u)$ from Lemma \ref{lem:construct-Higgs-extension} defines an extension $0 \rightarrow (L_u, \phi_u) \rightarrow (F, \tilde{\phi}) \rightarrow (E, \phi) \rightarrow 0$ such that $y = (F, \tilde{\phi})$ is in the negative slice of $x_u$, and therefore flows down to a limit isomorphic to the graded object of the Harder-Narasimhan-Seshadri filtration of $(F, \tilde{\phi})$.
Lemma \ref{lem:construct-Higgs-extension} also shows that $(E', \phi')$ is a Higgs subsheaf of $(F, \tilde{\phi})$. Lemma \ref{lem:resolve-higgs-subsheaf} shows that this has a resolution as a Higgs subbundle of $(F, \tilde{\phi})$, however since the Harder-Narasimhan type of $(F, \tilde{\phi})$ is strictly less than that of $(E, \phi) \oplus (L_u, \phi_u)$, $\rank(E') = \rank(F) - 1$ and $\deg E' = \deg E - 1$, then $(E', \phi')$ already has the maximal possible slope for a semistable Higgs subbundle of $(F, \tilde{\phi})$, and therefore $(E', \phi')$ must be the maximal semistable Higgs subbundle. Since $\rank(E') = \rank(F) - 1$, then the graded object of the Harder-Narasimhan-Seshadri filtration of $(F, \tilde{\phi})$ is $(E_{gr}', \phi_{gr}') \oplus (L_\ell, \phi_\ell)$, where $(L_\ell, \phi_\ell) = (F, \tilde{\phi}) / (E', \phi')$. Theorem \ref{thm:algebraic-flow-line} then shows that $(E, \phi) \oplus (L_u, \phi_u)$ and $(E_{gr}', \phi_{gr}') \oplus (L_\ell, \phi_\ell)$ are connected by a flow line.
Conversely, if $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E', \phi') \oplus (L_\ell, \phi_\ell)$ are critical points connected by a flow line, then Theorem \ref{thm:algebraic-flow-line} shows that there exists a Higgs pair $(F, \tilde{\phi})$ in the negative slice of $x_u$ such that $(E', \phi')$ is the graded object of the Seshadri filtration of the maximal semistable Higgs subbundle of $(F, \tilde{\phi})$. If $(E', \phi')$ is Higgs stable, then since $\slope(E') > \slope (L_u)$ we see $(E', \phi')$ is a Higgs subsheaf of $(E, \phi)$ with $\rank(E) = \rank(E')$ and $\deg(E') = \deg(E) - 1$. Therefore $(E', \phi')$ is a Hecke modification of $(E, \phi)$. If $(E', \phi')$ is Higgs polystable then the same argument shows that $(E', \phi')$ is the graded object of the Seshadri filtration of a Hecke modification of $(E, \phi)$. \end{proof}
In general, for any flow one can define the space $\mathcal{F}_{\ell, u}$ of flow lines connecting upper and lower critical sets $C_u$ and $C_\ell$, and the space $\mathcal{P}_{\ell, u} \subset C_u \times C_\ell$ of pairs of critical points connected by a flow line. These spaces are equipped with projection maps to the critical sets defined by the canonical projection taking a flow line to its endpoints. \begin{equation} \xymatrix{
& \mathcal{F}_{\ell, u} \ar[d] \ar@/_/[ddl] \ar@/^/[ddr] & \\
& \mathcal{P}_{\ell, u} \ar[dl] \ar[dr] & \\ C_\ell & & C_u } \end{equation}
For the Yang-Mills-Higgs flow, given critical sets $C_u$ and $C_\ell$ of respective Harder-Narasimhan types $(\frac{d}{r}, \deg L_u)$ and $(\frac{d-1}{r}, \deg L_u + 1)$ as in Theorem \ref{thm:flow-hecke} above, there are natural projection maps to the moduli space $C_u \rightarrow \mathcal{M}_{ss}^{Higgs}(r, d)$ and $C_\ell \rightarrow \mathcal{M}_{ss}^{Higgs}(r, d-1)$. Since the flow is $\mathcal{G}$-equivariant, then there is an induced correspondence variety $\mathcal{M}_{\ell, u} \subset \mathcal{M}_{ss}^{Higgs}(r, d-1) \times \mathcal{M}_{ss}^{Higgs}(r, d)$. \begin{equation}\label{eqn:flow-hecke-diagram} \xymatrix{
& \mathcal{P}_{\ell, u} \ar[dl] \ar[dr] \ar[d] & \\ C_\ell \ar[d] & \mathcal{M}_{\ell,u} \ar[dl] \ar[dr] & C_u \ar[d] \\ \mathcal{M}_{ss}^{Higgs}(r, d-1) & & \mathcal{M}_{ss}^{Higgs}(r,d) } \end{equation}
Theorem \ref{thm:flow-hecke} shows that $\left( (E', \phi'), (E, \phi) \right) \in \mathcal{M}_{\ell, u}$ if and only if $(E', \phi')$ is a Hecke modification of $(E, \phi)$ and both Higgs pairs are semistable. If $r$ and $d$ are coprime then $\mathcal{M}_{ss}^{Higgs}(r,d)$ consists of stable Higgs pairs and so every Hecke modification of $(E, \phi)$ is semistable. Therefore we have proved \begin{corollary} $\mathcal{M}_{\ell,u}$ is the Hecke correspondence. \end{corollary}
For Hecke modifications defined at multiple points (non-minuscule Hecke modifications in the terminology of \cite{witten-hecke}), we immediately have the following result.
\begin{corollary}\label{cor:broken-hecke} Let $(E, \phi)$ be a $(0,n)$-stable Higgs bundle and consider a Hecke modification $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}, \phi) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ defined by $n > 1$ distinct points $\{ v_1, \ldots, v_n \} \subset \mathbb{P} E^*$. If there exists $\phi_u \in H^0(K)$ such that $v_1, \ldots, v_n \in \mathcal{N}_{\phi, \phi_u}$, then there is a broken flow line connecting $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E_{gr}', \phi_{gr}') \oplus (L_\ell, \phi_\ell)$, where $(E_{gr}', \phi_{gr}')$ is the graded object of the Seshadri filtration of the semistable Higgs bundle $(E', \phi')$. \end{corollary}
\begin{proof} Inductively apply Theorem \ref{thm:flow-hecke}. \end{proof}
\subsection{A geometric criterion for unbroken $\mathop{\rm YMH}\nolimits$ flow lines}\label{sec:secant-criterion}
Corollary \ref{cor:broken-hecke} gives a criterion for two $\mathop{\rm YMH}\nolimits$ critical points $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E', \phi') \oplus (L_\ell, \phi_\ell)$ to be connected by a broken flow line. It is natural to ask whether they are also connected by an \emph{unbroken} flow line. The goal of this section is to answer this question by giving a geometric construction for points in the negative slice of $x_u$ which correspond to unbroken flow lines connecting $x_u$ and $x_\ell$ in terms of the secant varieties $\Sec^n(\mathcal{N}_{\phi, \phi_u})$. For holomorphic bundles, the connection between secant varieties and Hecke modifications has been studied in \cite{LangeNarasimhan83}, \cite{ChoeHitching10} and \cite{Hitching13}.
Given a $\mathop{\rm YMH}\nolimits$ critical point $x_u = (E, \phi) \oplus (L_u, \phi_u)$ with $(E, \phi)$ stable and $\rank L_u =1$, consider an extension $0 \rightarrow (L_u, \phi_u) \rightarrow (F, \tilde{\phi}) \rightarrow (E, \phi) \rightarrow 0$ with extension class $[(a, \varphi)] \in \mathcal{H}^1(E^* L_u) = S_{x_u}^-$. Let $0 \rightarrow (E', \phi') \rightarrow (E, \phi) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification of $(E, \phi)$ as in the previous lemma, such that $\deg L_u < \slope(E')$.
\begin{lemma}\label{lem:subbundle-slope-bound} If $(G, \phi_G)$ is a semistable Higgs subbundle of $(F, \tilde{\phi})$ with $\slope(G) > \deg L_u$ and $\rank(G) < \rank (E)$, then there is a Higgs subbundle $(G', \phi_G') \subset (E, \phi)$ with $\slope(G') \geq \slope(G)$ and $\rank(G) = \rank(G')$. \end{lemma}
\begin{proof} If $(G, \phi_G)$ is a semistable Higgs subbundle of $(F, \tilde{\phi})$ with $\slope(G) > \deg L_u$, then $\mathcal{H}^0(G^* L_u) = 0$, and so $(G, \phi_G)$ is a Higgs subsheaf of $(E, \phi)$. \begin{equation*} \xymatrix{
& & & (G, \phi_G) \ar[dl] \ar@{-->}[d] \\ 0 \ar[r] & (L_u, \phi_u) \ar[r] & (F, \tilde{\phi}) \ar[r] & (E, \phi) \ar[r] & 0 } \end{equation*} Lemma \ref{lem:resolve-higgs-subsheaf} shows that the subsheaf $(G, \phi_G)$ can be resolved to form a Higgs subbundle $(G', \phi_G')$ of $(E, \phi)$ with $\slope(G') \geq \slope(G)$. \end{proof}
\begin{theorem}\label{thm:unbroken-criterion} Let $(E, \phi)$ be a stable Higgs bundle with Segre invariant $s_k(E, \phi)$ and choose $n$ such that $0 < 2n-1 < \min_{1 \leq k \leq r-1} \left( \frac{1}{k} s_k(E, \phi) \right)$. Let $0 \rightarrow (\mathcal{E}', \phi') \rightarrow (\mathcal{E}, \phi) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0$ be a Hecke modification of $(E, \phi)$ defined by distinct points $v_1, \ldots, v_n \in \mathbb{P} E^*$, and let $(L_u, \phi_u)$ be a Higgs line bundle such that $v_1, \ldots, v_n \in \mathcal{N}_{\phi, \phi_u}$. Choose a metric such that $x_u = (E, \phi) \oplus (L_u, \phi_u)$ is a $\mathop{\rm YMH}\nolimits$ critical point.
Then any extension class $[(a, \varphi)] \in \vecspan \{ v_1, \ldots, v_n \} \cap \left( \Sec^n(\mathcal{N}_{\phi, \phi_u}) \setminus \Sec^{n-1}(\mathcal{N}_{\phi, \phi_u}) \right) \subset \mathbb{P} \mathcal{H}^1(E^* L_u)$ is isomorphic to an unbroken flow line connecting $x_u = (E, \phi) \oplus (L_u, \phi_u)$ and $x_\ell = (E', \phi') \oplus (L_\ell, \phi_\ell)$. \end{theorem}
\begin{proof} Let $(F, \tilde{\phi})$ be a Higgs bundle determined by the extension class $[(a, \varphi)] \in \mathbb{P} \mathcal{H}^1(E^* L_u)$. The choice of bundle is not unique, but the isomorphism class of $(F, \tilde{\phi})$ is unique. The proof reduces to showing that $(E', \phi')$ is the maximal semistable Higgs subbundle of $(F, \tilde{\phi})$.
Since $[(a, \varphi)] \notin \Sec^{n-1}(\mathcal{N}_{\phi, \phi_u})$, then Lemma \ref{lem:nondegenerate-maximal} shows that $(E', \phi')$ is the subsheaf of $(E, \phi)$ with maximal degree among those that lift to a subsheaf of $(F, \tilde{\phi})$. Any semistable Higgs subbundle $(E'', \phi'')$ of $(F, \tilde{\phi})$ with $\rank(E'') = \rank(E)$ either has $\slope(E'') \leq \deg L_u < \slope(E')$, or it is a subsheaf of $(E, \phi)$ and so must have $\slope(E'') \leq \slope(E')$.
The previous lemma shows that if $(G, \phi_G)$ is any semistable Higgs subbundle of $(F, \tilde{\phi})$ with $\slope(G) > \deg L_u$ and $\rank(G) < \rank(E)$, then there is a Higgs subbundle $(G', \phi_G')$ of $(E, \phi)$ with $\slope(G') \geq \slope(G)$. The upper bound on $n = \deg E - \deg E'$ in terms of the Segre invariant then implies that $\slope(E') > \slope(G') \geq \slope(G)$ by Lemma \ref{lem:segre-bound}.
Therefore the subbundle $(\tilde{E}', \tilde{\phi}')$ resolving the subsheaf $(E', \phi') \subset (F, \tilde{\phi})$ is the maximal semistable Higgs subbundle of $(F, \tilde{\phi})$. Since $(\tilde{E}', \tilde{\phi}')$ is semistable and $\slope(\tilde{E}') \geq \slope(E') > \deg L_u$, then $\mathcal{H}^0((\tilde{E}')^* L_u) = 0$, and so $(\tilde{E}', \tilde{\phi}')$ is a Higgs subsheaf of $(E, \phi)$ that lifts to a subbundle of $(F, \tilde{\phi})$. Since $\deg E'$ is maximal among all such subsheaves, then we must have $(E', \phi') = (\tilde{E}', \tilde{\phi}')$ and so $(E', \phi')$ is the maximal semistable subbundle of $(F, \tilde{\phi})$. Therefore Theorem \ref{thm:algebraic-flow-line} shows that $x_u$ and $x_\ell$ are connected by an unbroken flow line. \end{proof}
If $\rank(F) = 2$ (so that $E$ is a line bundle), then the condition on the Segre invariant $s_k(E, \phi)$ becomes vacuous. Moreover, $\mathbb{P} E^* \cong X$ and so Hecke modifications of $E$ are determined by a subset $\{ v_1, \ldots, v_n \} \subset X$. Therefore in the case $\rank(F) = 2$, we have a complete classification of the $\mathop{\rm YMH}\nolimits$ flow lines on the space of Higgs bundles $\mathcal{B}(F)$.
\begin{corollary}\label{cor:rank-2-classification} Let $F \rightarrow X$ be a $C^\infty$ Hermitian vector bundle with $\rank(F) = 2$. Let $x_u = (L_1^u, \phi_1^u) \oplus (L_2^u, \phi_2^u)$ and $x_\ell = (L_1^\ell, \phi_1^\ell) \oplus (L_2^\ell, \phi_2^\ell)$ be non-minimal critical points with $\mathop{\rm YMH}\nolimits(x_u) > \mathop{\rm YMH}\nolimits(x_\ell)$. Suppose without loss of generality that $\deg L_1^u > \deg L_1^\ell > \deg L_2^\ell > \deg L_2^u$. Let $n = \deg L_1^u - \deg L_1^\ell$.
Then $x_u$ and $x_\ell$ are connected by a broken flow line if and only if there exists $\{ v_1, \ldots, v_n \} \subset \mathcal{N}_{\phi_1^u, \phi_2^u}$ such that \begin{align*} 0 \rightarrow (L_1^\ell, \phi_1^\ell) \rightarrow (L_1^u, \phi_1^u) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0 \\ 0 \rightarrow (L_2^u, \phi_2^u) \rightarrow (L_2^\ell, \phi_2^\ell) \rightarrow \oplus_{j=1}^n \mathbb C_{p_j} \rightarrow 0 \end{align*} are both Hecke modifications determined by $\{ v_1, \ldots, v_n \}$. They are connected by an unbroken flow line if the previous condition holds and $\{ v_1, \ldots, v_n \} \in \Sec^n(\mathcal{N}_{\phi_1^u, \phi_2^u}) \setminus \Sec^{n-1}(\mathcal{N}_{\phi_1^u, \phi_2^u})$. \end{corollary}
\appendix
\section{Uniqueness for the reverse Yang-Mills-Higgs flow}\label{sec:uniqueness}
The methods of Donaldson \cite{Donaldson85} and Simpson \cite{Simpson88} show that the Yang-Mills-Higgs flow resembles a nonlinear heat equation, and therefore the backwards flow is ill-posed. In Section \ref{sec:scattering-convergence} we prove existence of solutions to the backwards heat flow that converge to a critical point. To show that these solutions are well-defined we prove in this section that if a solution to the reverse $\mathop{\rm YMH}\nolimits$ flow exists then it must be unique.
Using the Hermitian metric, let $d_A$ be the Chern connection associated to $\bar{\partial}_A$ and let $\psi = \phi + \phi^* \in \Omega^1(i \mathop{\rm ad}\nolimits(E))$. The holomorphicity condition $\bar{\partial}_A \phi = 0$ becomes the pair of equations $d_A \psi = 0$, $d_A^* \psi = 0$ which also imply that $[F_A, \psi] = d_A^2 \psi = 0$, and the Yang-Mills-Higgs functional is $\| F_A + \psi \wedge \psi \|_{L^2}^2$.
\begin{proposition}\label{prop:backwards-uniqueness} Let $(d_{A_1}, \psi_1)(t)$, $(d_{A_2}, \psi_2)(t)$ be two solutions of the Yang-Mills-Higgs flow \eqref{eqn:YMH-flow-general} on a compact Riemann surface with respective initial conditions $(d_{A_1}, \psi_1)(0)$ and $(d_{A_2}, \psi_2)(0)$. If there exists a finite $T > 0$ such that $(d_{A_1}, \psi_1)(T) = (d_{A_2}, \psi_2)(T)$ then $(d_{A_1}, \psi_1)(t) = (d_{A_2}, \psi_2)(t)$ for all $t \in [0, T]$. \end{proposition}
The result of Proposition \ref{prop:backwards-uniqueness} is valid when the base manifold is a compact Riemann surface, since we use the estimates of \cite[Sec. 3.2]{Wilkin08} to prove that the constant $C$ in Lemma \ref{lem:heat-inequalities} is uniform. In the case of the Yang-Mills flow on a compact K\"ahler manifold the estimates of Donaldson in \cite{Donaldson85} show that we can make this constant uniform on a finite time interval $[0,T]$ and so the result also applies in this setting. The setup described in the previous paragraph consisting of Higgs pairs $(d_A, \psi)$ satisfying $d_A \psi = 0$, $d_A^* \psi = 0$ is valid on any Riemannian manifold, and so the result of Proposition \ref{prop:backwards-uniqueness} will also apply to any class of solutions for which one can prove that the connection, Higgs field, the curvature and all of their derivatives are uniformly bounded on the given finite time interval $[0, T]$.
Let $\nabla_A$ denote the covariant derivative associated to the connection $d_A$. The complex connection associated to the pair $(d_A, \psi)$ is $D_{(A, \psi)} \eta = d_A \eta + [\psi, \eta]$ and the Laplacian is $\Delta_{(A, \psi)} \eta = D_{(A, \psi)}^* D_{(A, \psi)} \eta + D_{(A, \psi)} D_{(A, \psi)}^* \eta$ for any form $\eta \in \Omega^p(\mathop{\rm End}\nolimits(E))$. The equation $d_A \psi = 0$ implies that the curvature of the complex connection is $D_{(A, \psi)} D_{(A, \psi)} \eta = [F_A + \psi \wedge \psi, \eta]$.
We have the following identities which will be useful in what follows. The notation $a \times b$ is used to denote various bilinear expressions with constant coefficients.
\begin{align} 0 & = d_A(F_A + \psi \wedge \psi) , \quad 0 = [\psi, F_A + \psi \wedge \psi] \label{eqn:Higgs-Bianchi} \\ \Delta_{(A, \psi)} \eta & = \nabla_A^* \nabla_A \eta + (F_A + \psi \wedge \psi) \times \eta + R_M \times \eta + \psi \times \psi \times \eta + \nabla_A \psi \times \psi \times \eta \quad \label{eqn:Higgs-Weitzenbock} \\ 0 & = D_{(A, \psi)}^* D_{(A, \psi)}^* (F_A + \psi \wedge \psi) \label{eqn:compose-adjoint} \end{align} The first identity follows from the Bianchi identity and the equation $d_A \psi = 0$. Equation \eqref{eqn:Higgs-Weitzenbock} is the Weitzenb\"ock identity for a Higgs pair, which follows from the usual identity for $\nabla_A$ (see for example \cite{BourguignonLawson81}) together with the fact that $(\psi \wedge \psi) \times \eta$ and the remaining terms in the Laplacian are of the form $\psi \times \psi \times \eta + \nabla_A \psi \times \psi \times \eta$. To see the identity \eqref{eqn:compose-adjoint}, take the inner product of the right hand side with an arbitrary $\eta \in \Omega^0(\mathop{\rm End}\nolimits(E))$. We have (cf. \cite[(2.2)]{Rade92} for the case $\psi = 0$) \begin{align*} \left< D_{(A, \psi)}^* D_{(A, \psi)}^* (F_A + \psi \wedge \psi), \eta \right> & = \left< F_A + \psi \wedge \psi, D_{(A, \psi)} D_{(A, \psi)} \eta \right> \\
& = \left< F_A + \psi \wedge \psi, [F_A + \psi \wedge \psi, \eta] \right> = 0 \end{align*}
Consider the Yang-Mills-Higgs flow equations \begin{equation}\label{eqn:YMH-flow-general} \frac{\partial A}{\partial t} = - d_A^* (F_A + \psi \wedge \psi), \quad \frac{\partial \psi}{\partial t} = * [\psi, *(F_A + \psi \wedge \psi)] \end{equation} After using the metric to decompose $\Omega^1(\mathop{\rm End}\nolimits(E)) \cong \Omega^1(\mathop{\rm ad}\nolimits(E)) \oplus \Omega^1(i \mathop{\rm ad}\nolimits(E))$, the flow equation can be written more compactly as \begin{equation*} \frac{\partial}{\partial t} (d_A + \psi) = - D_{(A, \psi)}^* (F_A + \psi \wedge \psi) \end{equation*} We then have \begin{align*} \frac{\partial}{\partial t} (F_A + \psi \wedge \psi) & = d_A \left( \frac{\partial A}{\partial t} \right) + \frac{\partial \psi}{\partial t} \wedge \psi + \psi \wedge \frac{\partial \psi}{\partial t} \\
& = - d_A d_A^* (F_A + \psi \wedge \psi) + \left[ \psi, *[\psi, *(F_A + \psi \wedge \psi)] \right] \\
& = - \Delta_{(A, \psi)} (F_A + \psi \wedge \psi) - d_A*[\psi, *(F_A + \psi \wedge \psi)] + [\psi, d_A^*(F_A + \psi \wedge \psi)] \end{align*} where in the last step we use the Bianchi identity \eqref{eqn:Higgs-Bianchi}. We also have \begin{align*} \frac{\partial}{\partial t} \left( d_A^* (F_A + \psi \wedge \psi) \right) & = -*\left[ \frac{\partial A}{\partial t}, *(F_A + \psi \wedge \psi) \right] + d_A^* \left( \frac{\partial}{\partial t} (F_A + \psi \wedge \psi) \right) \\
& = * \left[ d_A^*(F_A + \psi \wedge \psi), F_A + \psi \wedge \psi \right] - d_A^* d_A d_A^* (F_A + \psi \wedge \psi) \\
& \quad \quad + d_A^* [\psi, *[\psi, *(F_A + \psi \wedge \psi)]] \end{align*} and \begin{align*} \frac{\partial}{\partial t} \left( -*[\psi, *(F_A + \psi \wedge \psi)] \right) & = -* \left[ \frac{\partial \psi}{\partial t}, *(F_A + \psi \wedge \psi) \right] - *\left[ \psi, \frac{\partial}{\partial t} *(F_A + \psi \wedge \psi) \right] \\
& = * \left[ - *[\psi, *(F_A + \psi \wedge \psi)], *(F_A + \psi \wedge \psi) \right] + * \left[ \psi, * d_A d_A^* (F_A + \psi \wedge \psi) \right] \\
& \quad \quad - * \left[ \psi, *[\psi, *[\psi, *(F_A + \psi \wedge \psi)]] \right] \end{align*} Adding these two results gives us \begin{align*} \frac{\partial}{\partial t} \left( D_{(A, \psi)}^* (F_A + \psi \wedge \psi) \right) & = * \left[ D_{(A, \psi)}^* (F_A + \psi \wedge \psi), F_A + \psi \wedge \psi \right] - D_{(A, \psi)}^* D_{(A, \psi)} D_{(A, \psi)}^* (F_A + \psi \wedge \psi) \\
& = * \left[ D_{(A, \psi)}^* (F_A + \psi \wedge \psi), F_A + \psi \wedge \psi \right] - \Delta_{(A, \psi)} D_{(A, \psi)}^* (F_A + \psi \wedge \psi) \end{align*} where the last step uses \eqref{eqn:compose-adjoint}. Let $\mu_{(A, \psi)} = F_A + \psi \wedge \psi$ and $\nu_{(A, \psi)} = D_{(A, \psi)}^* (F_A + \psi \wedge \psi)$. The above equations become \begin{align} \left( \frac{\partial}{\partial t} + \Delta_{(A, \psi)} \right) \mu_{(A, \psi)} & = -d_A*[\psi, *(F_A + \psi \wedge \psi)] + [\psi, d_A^*(F_A + \psi \wedge \psi)] \label{eqn:mu-evolution} \\ \left( \frac{\partial}{\partial t} + \Delta_{(A, \psi)} \right) \nu_{(A, \psi)} & = *[\nu_{(A, \psi)}, *\mu_{(A, \psi)}] \label{eqn:nu-evolution} \end{align}
Now consider two solutions $(A_1, \psi_1)(t)$ and $(A_2, \psi_2)(t)$ to the Yang-Mills-Higgs flow equations \eqref{eqn:YMH-flow-general} on the time interval $[0,T]$ such that $(A_1, \psi_1)(T) = (A_2, \psi_2)(T)$. We will show below that this implies $(A_1, \psi_1)(0) = (A_2, \psi_2)(0)$.
Define $(a_t, \varphi_t) = (A_2, \psi_2)(t) - (A_1, \psi_1)(t)$, $m_t = \mu_{(A_2, \psi_2)} - \mu_{(A_1, \psi_1)}$ and $n_t = \nu_{(A_2, \psi_2)} - \nu_{(A_1, \psi_1)}$. In terms of $(a_t, \varphi_t)$ we can write \begin{align*} m_t = \mu_{(A_2, \psi_2)} - \mu_{(A_1, \psi_1)} = d_{A_1} a_t + a_t \wedge a_t + [\psi_1, \varphi_t] + \varphi_t \wedge \varphi_t \end{align*} and for any $\eta \in \Omega^p(\mathop{\rm End}\nolimits(E))$ the difference of the associated Laplacians has the form \begin{equation}\label{eqn:laplacian-difference} \left(\Delta_{(A_2, \psi_2)} - \Delta_{(A_1, \psi_1)} \right) \eta = \nabla_A a \times \eta + a \times \nabla_A \eta + a \times a \times \eta + \psi \times \varphi \times \eta + \varphi \times \varphi \times \eta \end{equation} where again $\omega_1 \times \omega_2$ is used to denote a bilinear expression in $\omega_1$ and $\omega_2$ with constant coefficients. By definition of $\nu_{(A, \psi)}$ as the gradient of the Yang-Mills-Higgs functional at $(d_A, \psi)$ we immediately have \begin{equation*} \frac{\partial}{\partial t} (a_t + \varphi_t) = n_t , \quad \text{and} \quad \frac{\partial}{\partial t} (\nabla_A a_t + \nabla_A \varphi_t) = \left( \frac{\partial A}{\partial t} \times a_t, \frac{\partial A}{\partial t} \times \varphi_t \right) + \nabla_A n_t \end{equation*} Equation \eqref{eqn:mu-evolution} then becomes \begin{align*} \left( \frac{\partial}{\partial t} + \Delta_{(A_1, \psi_1)} \right) m_t & = - \left( \Delta_{(A_2, \psi_2)} - \Delta_{(A_1, \psi_1)} \right) \mu_{(A_2, \psi_2)} \\
& \quad \quad + a_t \times \psi_1 \times (F_{A_1} + \psi_1 \wedge \psi_1) + \nabla_{A_1} \varphi_t \times (F_{A_1} + \psi_1 \wedge \psi_1) \\
& \quad \quad + \nabla_{A_1} \psi_1 \times m_t + \psi_1 \times n_t \end{align*} and equation \eqref{eqn:nu-evolution} becomes \begin{align*} \left( \frac{\partial}{\partial t} + \Delta_{(A_1, \psi_1)} \right) n_t & = *[\nu_{(A_2, \psi_2)}, * \mu_{(A_2, \psi_2)}] - *[\nu_{(A_1, \psi_1)}, *\mu_{(A_1, \psi_1)}] - \left( \Delta_{(A_2, \psi_2)} - \Delta_{(A_1, \psi_1)} \right) \nu_{(A_2, \psi_2)} \\
& = *[n_t, *\mu_{(A_2, \psi_2)}] +*[\nu_{(A_1, \psi_1)}, *m_t] - \left( \Delta_{(A_2, \psi_2)} - \Delta_{(A_1, \psi_1)} \right) \nu_{(A_2, \psi_2)} \end{align*} Using \eqref{eqn:laplacian-difference} and the Weitzenb\"ock formula \eqref{eqn:Higgs-Weitzenbock}, we then have the following inequalities. In the case where $X$ is a compact Riemann surface, the estimates of \cite[Sec. 2.2]{Wilkin08} show that all of the derivatives of the connection, the Higgs field and the curvature $F_A$ are uniformly bounded along the flow and so the constant can be chosen uniformly on the interval $[0,T]$. \begin{lemma}\label{lem:heat-inequalities} For any pair of solutions $(d_{A_1}, \psi_1)(t)$ and $(d_{A_2}, \psi_2)(t)$ to the Yang-Mills-Higgs flow \eqref{eqn:YMH-flow-general} there exists a positive constant $C$ (possibly depending on $t$) such that the following inequalities hold \begin{align}
\left| \left( \frac{\partial}{\partial t} + \nabla_{A_1}^* \nabla_{A_1} \right) m_t \right| & \leq C \left( |a_t| + |\varphi_t| + | \nabla_{A_1} a_t| +| \nabla_{A_1} \varphi_t |+ | m_t | + |n_t| \right) \label{eqn:m-evolution} \\
\left| \left( \frac{\partial}{\partial t} + \nabla_{A_1}^* \nabla_{A_1} \right) n_t \right| & \leq C \left( |a_t| + |\varphi_t| + | \nabla_{A_1} a_t| +| \nabla_{A_1} \varphi_t |+ | m_t | + |n_t| \right) \label{eqn:n-evolution} \\
\left| \frac{\partial}{\partial t} (a_t + \varphi_t) \right| & = | n_t | \label{eqn:a-evolution} \\
\left| \frac{\partial}{\partial t} (\nabla_A a_t + \nabla_A \varphi_t) \right| & \leq C \left( |a_t| + |\varphi_t| + | \nabla_A n_t | \right) \label{eqn:nabla-a-evolution} \end{align} Moreover, if $X$ is a compact Riemann surface then the constant $C$ can be chosen uniformly on any finite time interval $[0, T]$. \end{lemma}
For simplicity of notation, in the following we use $\nabla := \nabla_{A_1}$ and $\square := \nabla_{A_1}^* \nabla_{A_1}$. Let $X := (m_t, n_t)$ and $Y := (a_t, \varphi_t, \nabla a_t, \nabla \varphi_t)$. The previous lemma implies that there exists a positive constant $C$ such that the following inequalities hold \begin{align}\label{eqn:coupled-system} \begin{split}
\left| \frac{\partial X}{\partial t} + \square X \right| & \leq C \left( | X | + | \nabla X| + | Y | \right) \\
\left| \frac{\partial Y}{\partial t} \right| & \leq C \left( |X| + |\nabla X| + |Y| \right) \end{split} \end{align} A general result of Kotschwar in \cite[Thm 3]{kotschwar-uniqueness} shows that any system satisfying \eqref{eqn:coupled-system} on the time interval $[0,T]$ for which $X(T) = 0$, $Y(T)=0$, must also satisfy $X(t) = 0$, $Y(t) = 0$ for all $t \in [0, T]$. In the context of the Yang-Mills-Higgs flow \eqref{eqn:YMH-flow-general}, this gives us the proof of Proposition \ref{prop:backwards-uniqueness}.
\end{document} |
\begin{document}
\title{DropCompute: simple and more robust distributed synchronous training via compute variance reduction}
\begin{abstract} \textbf{Background.} Distributed training is essential for large scale training of deep neural networks (DNNs). The dominant methods for large scale DNN training are synchronous (e.g. \textit{All-Reduce}), but these require waiting for all workers in each step. Thus, these methods are limited by the delays caused by straggling workers.\\ \textbf{Results.} We study a typical scenario in which workers are straggling due to variability in compute time. We find an analytical relation between compute time properties and scalability limitations, caused by such straggling workers. With these findings, we propose a simple yet effective decentralized method to reduce the variation among workers and thus improve the robustness of synchronous training. This method can be integrated with the widely used \textit{All-Reduce}. Our findings are validated on large-scale training tasks using 200 Gaudi Accelerators. A reference implementation\footnote[2]{\url{https://github.com/paper-submissions/dropcompute}} is provided. \end{abstract}
\section{Introduction}
Deep Neural Networks (DNNs) training continues to scale over size and computational footprint, as a result of a higher number of trainable parameters, wider and deeper models, and growing amounts of training data. As improvements in model quality (as measured by test loss, for example) \citep{scalingnlp} lead over hardware capabilities \citep{hwlottery}, this scale-up translates into a need for a growing number of training devices working in tandem \citep{chowdhery2022palm}, turning distributed training to the standard approach for training DNNs on a large scale.
\begin{figure}\label{fig:abstract}
\end{figure}
Distributed training typically refers to three parallelism paradigms --- data parallel, model parallel and layer pipelining \citep{DemystifyingDistributed}. Several variants and hybrid solutions exist in modern implementations such as tensor parallel \citep{Megatron2021} and parameter sharding \citep{rajbhandari2020zero, rasley2020deepspeed}. These can be used separately or combined as they are orthogonal to each other. Mainly, data parallelism is straightforward, where the data is sharded among workers, and all workers share the same global model state. At each step, workers compute gradients locally and then aggregate them before taking an optimization step. When training synchronously, workers update their parameters in lockstep. This ensures that all workers hold a consensus on the same model and that gradients are averaged over all workers before being applied to the model. This approach is easy to implement and allows for good convergence properties, and correspondingly is the prevalent optimization method.
Although state-of-the-art models use synchronous optimization for training, synchronous methods scale poorly, as stragglers and communication overhead might severely deteriorate system utilization. These issues are exacerbated as the required scale grows, even in homogeneous high-performance computing clusters. We are interested in cases where significant computing variance between the workers exists. This includes (but is not limited to) straggling workers. For instance, certain learning tasks entail heterogeneity in the required computation of data, such as varying sentence lengths in language processing, or different image sizes and frame numbers in computer vision. In addition, recent state-of-the-art models use all three parallelism paradigms (data (DP), tensor (TP), and pipeline (PP) parallelism), thus each data parallel node is a set of processing units (accelerators) communicating between them (via TP and PP) to calculate the model gradients collectively. This could potentially intensify compute variance between data parallel workers. Furthermore, slowdowns due to changes over time in hardware state or load might occur and cause variance in compute and specifically lead to straggling workers. As compute variance grows, the utilization deteriorates, such that more workers remain idle waiting for slower workers to complete calculating their gradients \cite{chen2016revisiting,chen2019round,ji2022ep4ddl}.
In this paper, we suggest a simple, yet effective method called \textit{DropCompute} to improve the robustness and scalability of synchronous optimization in the face of compute variance. We model the compute time as a random variable and show that under reasonable assumptions, a tail of straggling workers slows down the system at a rate that is not proportional to the contributed compute by these straggling workers. We harness the gradient accumulation method widely used in Large Language Models (LLMs) \citep{ott-etal-2018-scaling, roberta} to implement the method in a few lines of code on a relevant large-scale learning task.
The contributions of our work include: \begin{itemize}
\item \textit{DropCompute}: a novel, decentralized method to better handle heterogeneity or stragglers without additional hyper-parameters. \textit{DropCompute} is hardware and framework agnostic, runs on top of existing optimizers, and can also be combined with other methods that improve other aspects of robustness such as communication overhead.
\item A theoretical convergence proof of SGD with stochastic batch size, as in \textit{DropCompute}.
\item A theoretical runtime analysis on standard synchronous training and the proposed method, with an approximation of the expected speedup using \textit{DropCompute}.
\item Empirical evaluation of the proposed method on a relevant large scale task, using up to 200 accelerators connected with high bandwidth communication. \end{itemize}
\section{Related Work}
The challenge of training deep neural networks on a large scale has been extensively explored. With rapidly growing models and data sizes, numerous works tackled the weaknesses in synchronous DNN training on a large scale and suggested methods to alleviate these weaknesses.
\textbf{Redundancy methods.} This line of work addresses the straggling worker problem using a redundancy mechanism. Redundant workers or redundant data are used such that straggling workers will not slow down the entire system \citep{chen2016revisiting, bitar2020stochastic}. These methods provide better robustness to synchronous training, even in the event of a complete failure of a subset of the workers or considerable communication slowdown. However, the robustness is limited by the redundancy factor, and more generally, more compute resources are required and full utilization cannot be achieved. In addition, some coordination method is required to keep the training synchronous, i.e., keeping consensus between the model replicas of the workers. In particular, \citet{chen2016revisiting, bitar2020stochastic} use a centralized approach of a parameter server to determine which workers are left out at each iteration. Modern large-scale systems use decentralized variants of \textit{All-Reduce} \citep{baidu_ringreduce, patarasuk2009bandwidth}, so it is not trivial to determine which workers should be considered at each step, given that each worker can see a different subset of straggling workers. Moreover, combining redundancy with communication primitive collectives (e.g., \textit{All-Reduce}) requires adaptation to existing underlying frameworks \citep{sanders2019sequential}.
\textbf{Asynchronous optimization.} Another approach is introducing asynchrony to the optimization. Asynchronous training is inherently more scalable than synchronous training by being robust to all kinds of workers and communication faults. This includes periodic synchronization by exchanging parameters every $\tau$ optimization steps \citep{stich2018local, Lin2020Don't, wang2021cooperative, zhang2015deep, Wang2020SlowMo, li2020federated}, approximate distributed averaging where each worker communicates with a subset of workers each step \citep{jiang2017collaborative, lian2017can, assran2019stochastic, yang2020mitigating}, and many more. These works provide better scale-up properties and improve time performance. The main drawback is asynchronous optimization itself. In practice, the convergence is less stable, and more optimization steps are needed to generalize as well as synchronous training. In addition, hyperparameters should be chosen more precisely to guarantee convergence and generalization properties \citep{giladi2019stability, mitliagkas2016asynchrony}. Due to these issues, asynchronous methods are less commonly used on a large scale.
\textbf{Sparse and compressed communication.} Alternatively, several works addressed only the communication overhead. A common technique is to reduce the amount of data exchanged by the workers at each step. This can be done by gradient pruning \citep{xu2021deepreduce}, gradient compression \citep{1bitsgd, chen2020scalecom, 1bitadam} or low-rank approximation \citep{vogels2019powersgd}. These works reduce the communication overhead in a deterministic form while ignoring any compute variance across processing workers. This makes these works orthogonal to ours and potentially can be combined with our method.
\section {Reducing Compute Variance} \label{section:distributed_training} This paper proposes a method called \textit{DropCompute} that improves the robustness of synchronous training by reducing compute variance. First, we describe the vanilla synchronous training framework. Then, we introduce the proposed approach.
\subsection{Problem setup} \label{subsection:problem_setup}
We start with a model formulation for data-parallel synchronous SGD training with $N$ workers, where each worker holds a replica of the model parameters $\theta$. Given a dataset $\mathcal{D}$, we are interested in minimizing the empirical loss \begin{equation*}
\mathcal{L}(\mathcal{D},\theta)=\frac{1}{\abs{\mathcal{D}}}\sum_{z\in\mathcal{D}}\ell(z,\theta), \end{equation*} where $\ell(z,\theta)$ is the loss with respect to data-point $z$ and the model parameters $\theta$. At each step, the workers calculate gradients based on a local batch and then aggregate the gradients before taking an optimization step. An equivalent strategy would be that the workers aggregate the parameters after taking an optimization step based on the local gradients. The aggregation is done in a decentralized fashion, such as \textit{All-Reduce}.
We also consider the use of gradient accumulation to increase the size of the local batch. Gradient accumulation breaks down each worker's local batch into $M$ micro-batches for computation. This enables reaching a large global batch size beyond hardware capacity. In each iteration $i$ and accumulation $m$, the gradients of the loss function with respect to the model parameters are computed, denoted by \begin{equation*}
g_{n}^{(m)}(\theta_i) = \nabla \mathcal{L}(\mathcal{D}_{i,n}^{(m)}, \theta_i)\,, \end{equation*}
where $\mathcal{D}_{i,n}^{(m)}\subseteq \mathcal{D}$ is the micro-batch $m$ of worker $n$, sampled without replacement from $\mathcal{D}$, and we assume a constant micro-batch size $|\mathcal{D}_{i,n}^{(m)}|$.
The gradients accumulated by worker $n$ at step $i$ are \begin{equation*}
g_{n}(\theta_i) = \frac{1}{M} \sum_{m=1}^{M} g_{n}^{(m)}(\theta_i) \,. \end{equation*} Finally, the workers aggregate and average the computed gradients to update the model parameters \begin{equation}
\theta_{i+1} = \theta_i - \eta g(\theta_i);\quad g(\theta_i) = \frac{1}{N} \sum_{n=1}^{N} g_{n}(\theta_i) \,,
\label{eq:theta} \end{equation} where $\eta$ is the learning rate. Equation \ref{eq:theta} requires all workers to receive all gradients before updating the parameters. This communication restriction is what makes the training synchronous and ensures the model parameters are the same for all workers. However, due to this restriction, the slowest worker at each step dictates the iteration time. More generally, any variation in computation time among workers will lead to workers with idle time. Therefore, to improve the efficiency and robustness of the synchronous training process, we need to reduce the computation time variance among workers (namely, the compute variance).
\subsection{Our method: \textit{DropCompute}} \label{sec:our_method} To mitigate the effect of compute variance and to improve the robustness of synchronous training, we propose a simple yet effective method called \textit{DropCompute}. \textit{DropCompute} reduces the compute variance by introducing a \textit{compute threshold} after which workers have to stop calculating local gradients and move directly to the communication phase, i.e., Equation \ref{eq:theta}.
In each iteration $i$, each worker $n$ measures the time while calculating its local batch, and compares it against a given threshold $\tau$, which is set as described in section \ref{section:automatic_threshold}. If that time exceeds the \textit{compute threshold}, the worker stops and sends the gradients it has calculated so far. The rest of the training remains unchanged, thus synchronous training is intact. The pseudo-code of \textit{DropCompute} is shown in Algorithm \ref{algorithm:drop_compute}.
This method maintains a decentralized approach while improving its robustness to compute variance and stragglers in particular. Since our method drops samples, the batch size is no longer deterministic. Specifically, the total batch size (the total number of samples used in one iteration by all workers) can be smaller than the maximal total batch size $b_{\max}=NM|\mathcal{D}_{i,n}^{(m)}|$. Nevertheless, we show both theoretically and empirically that this makes no difference to convergence or generalization. Next, we analyze this method in section \ref{section:analysis} and evaluate it in section \ref{section:experiments}.
\begin{algorithm}[h!]
\caption{DropCompute on worker $n$ at iteration $i$}
\label{algorithm:drop_compute}
\begin{algorithmic}[1]
\Input
\State model parameters $\theta_i$; total number of micro-batches $M$; compute threshold $\tau$
\State local batch data $\mathcal{D}_{i,n}=\{\mathcal{D}_{i,n}^{(1)}, \mathcal{D}_{i,n}^{(2)}, \cdots, \mathcal{D}_{i,n}^{(M)}\}$; step time $T_n$ to compute local batch $\mathcal{D}_{i,n}$
\EndInput
\State \textbf{Initialize step time:} $T_n=0$
\State \textbf{Initialize accumulated gradients:} $g_{n}(\theta_i)=0$
\InParallel
\NumFor{1}{$m=1, \dots, M$}
\State \hskip1.5em $g_{n}^{(m)}(\theta_i) = \nabla \mathcal{L}(\mathcal{D}_{i,n}^{(m)}, \theta_i)$ \Comment{Compute gradient}
\State \hskip1.5em $g_{n}(\theta_i) \leftarrow g_{n}(\theta_i) + g_{n}^{(m)}(\theta_i)/M$ \Comment{Accumulate gradients}
\EndNumFor
\State \textbf{(2) wait for $T_n>\tau$ and break for loop (1)}
\EndInParallel
\State \textbf{Output: }$g_{n}(\theta_i)$ \Comment{\textit{AllReduce}}
\end{algorithmic} \end{algorithm}
\section{Method Analysis} \label{section:analysis}
After establishing the notion of compute variance and the formulation of the proposed method, we analyze synchronous training and the potential value of the proposed method on the training time performance. To do so, we start with convergence guarantees when using \textit{DropCompute}. Then, we theoretically analyze the computation time with synchronous training and when using \textit{DropCompute}. Through this analysis, we estimate the potential speedup over vanilla synchronous training.
\subsection{Convergence analysis of \textit{DropCompute}} \label{subsec:convergence} Using \textit{DropCompute}, the batch size is no longer fixed, but a random variable, which can also potentially depend on the data samples. To the best of our knowledge, this setting is somewhat different from existing convergence guarantees. Therefore, in this section, we provide convergence guarantees for \textit{DropCompute} by analyzing the convergence of SGD with stochastic batch size.
\begin{assumption} \label{assumption:convergence}
Following the notations in section \ref{subsection:problem_setup}, consider a possibly non-convex smooth loss function $\mathcal{L}(\mathcal{D},\theta)$, with a global minimum $\theta^*\in{\mathbb R}^d$, and the following assumptions
\begin{enumerate}
\item \textbf{L-smooth}: All functions $\mathcal{L}(\cdot,\theta)$ are with L-Lipschitzian gradients.
\item \textbf{Unbiased estimation}: Each $\nabla\ell(z,\theta_i) ,z\in\mathcal{D}$ is an unbiased estimate of $\nabla \mathcal{L}(\mathcal{D},\theta_i)$ which is the true (full batch) gradient at $\theta_i$\footnote{This is easily satisfied when all workers can access all data.}. Namely,
$\forall i: \E[\nabla\ell(z,\theta_i)\vert \theta_i] = \nabla\mathcal{L}(\mathcal{D},\theta_i)\,.$
\item \textbf{Bounded variance}: The variance of the stochastic gradient is bounded by a constant $\sigma$,
$$\forall i: \E[\|\nabla\ell(z,\theta_i)-\nabla\mathcal{L}(\mathcal{D},\theta_i)\|^2\vert \theta_i] \leq \sigma^2\,.$$
\end{enumerate} \end{assumption}
\begin{theorem} \label{theorem:non_convex}
Under the above assumption, applying SGD with \textit{DropCompute} (Algorithm \ref{algorithm:drop_compute}), ensures
\begin{equation} \label{eq:NonCvxSGD_GUARANTEES_final}
\mathbb{E}\|\nabla\mathcal{L}(\mathcal{D},\bar{\theta})\|^2 \leq \frac{2L b_{\max} (\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*))}{K} + \frac{2\sigma\sqrt{L(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) }}{\sqrt{K}}~,
\end{equation}
where $b_{\max}$ is the maximal total batch size, $K$ is the total number of samples that are used throughout the training, $\theta_1$ is the initial model, and $\bar{\theta}$ is a random sample of $\theta_i$ from the trajectory obtained by Algorithm \ref{algorithm:drop_compute}, where $i$ is selected with probability proportional to the total batch size at iteration $i$. The expectation is with respect to the randomization introduced due to sampling from $\mathcal{D}$ throughout the optimization process and with respect to choosing $\bar{\theta}$. \end{theorem}
The bound in Theorem~\ref{theorem:non_convex} is in a similar spirit to existing fixed batch-size guarantees~\citep{dekel2012optimal}. The second term in the bound does not degrade with the batch sizes, and behaves like $O(1/\sqrt{K})$, while the first term behaves like $O(b_{\max} /{K})$. This implies that as long as $b_{\max} \leq O(\sqrt{K})$, the second term is dominant and we are in the regime of linear speedup. Thus, we show that one can attain a linear speedup despite using changing batch sizes, as long as the maximal batch size is bounded.
Similarly, in Theorem \ref{theorem:convex_case} in appendix \ref{appendix:convex_proof} we show that the loss itself converges in the convex case, along with a proof. The proof of Theorem \ref{theorem:non_convex} is provided in appendix \ref{appendix:non_convex_proof}. Lastly, we discuss the impact of the stochastic batch size on generalization in appendix \ref{app:generalization_discussion}.
\subsection{Iteration time in standard synchronous distributed training} \label{sec:analysis_iteration_time}
We start with finding a closed-form expression for the cumulative distribution function (CDF) of the iteration time, denoted as $T$, defined as \begin{equation*}
T=\max(T_1,T_2,...,T_N)\,, \end{equation*} where $T_n$ represents the time taken by worker $n$ to compute its local batch, which follows some cumulative distribution function $F_{T_n}(x)=\mathbb{P}(T_n\leq x),$ which we assume is independent for each worker. Let $F_T(x)$ represent the cumulative distribution function and $f_T(x)$ represent the probability density function of the maximum iteration time $T$. The relation between $F_T(x)$ and $F_{T_n}(x)$ is
\begin{equation*}
F_T(x) = \mathbb{P}\left(\max \left(T_1, \ldots, T_N\right) \leq x\right) =\prod_{n=1}^N F_{T_n}(x)\,. \end{equation*}
Differentiating with respect to $x$ and applying the chain rule gives: \begin{equation*}
f_T(x)=\frac{d F_T}{d x}(x)=\sum_{n=1}^N f_{T_n}(x)\prod_{n'\neq n}^{N}F_{T_{n'}}(x) \,. \end{equation*}
In the special case where all of the workers' iteration time distributions are identically and independently distributed (i.i.d.), this reduces to the well-known formula: \begin{equation}
\label{pdf}
f_T(x)=N \cdot f_{T_n}(x) \cdot F_{T_n}(x)^{N-1} \end{equation}
If the iteration time of each worker is distributed normally ($\sim \mathcal{N}(\mu, \sigma^2)$), the expected value of $T$ can be approximated as shown by \citet{bailey2014pseudomathematics}: \begin{equation} \mathbb{E}(T) \approx \sigma\cdot \left( (1-\gamma)\cdot \Phi^{-1}\left(1-\frac{1}{N} \right) + \gamma\cdot \Phi^{-1}\left(1-\frac{1}{e\cdot N}\right)\right) + \mu \label{eq:estimate_T} \end{equation} where $\Phi$ is the CDF of the standard normal distribution, and $\gamma$ is the Euler--Mascheroni constant. When the number of micro-batches $M\gg1$, we can make a similar approximation under Central Limit Theorem (CLT) conditions. More details are in appendix \ref{appendix:speedup_analysis}.
\subsection{Iteration time and number of micro-batches with \textit{DropCompute}}
When using \textit{DropCompute} with a constant threshold $\tau$, each worker is preempted at $\tilde{T}_n=\min\left\{\tau,T_n\right\}$ and joins the \textit{AllReduce}. Therefore, the total iteration time with \textit{DropCompute} is $$ \tilde{T} + T^c = \min\left\{\tau,T\right\}+T^c \, ,$$ where $T^c$ is a serial latency present in each iteration, which includes the \textit{AllReduce} step. This upper limit serves to clip extreme values of $T_n$, effectively constraining the range of potential outcomes for $\tilde{T}$. As a result, the compute time variability decreases, leading to a narrower distribution and enhanced compute efficiency. These effects are illustrated in Figure \ref{fig:experiement_vs_theory}.
As a consequence of preempting each worker at $\tilde{T}_n$, the number of micro-batches computed in each step varies. Denote as $t_n^{(m)}$ the compute latency of a single micro-batch $m$ for worker $n$, and $T_n^{(m)}=\sum_{j=1}^mt_n^{(j)}$. We can define the average number of micro-batches computed by each worker before reaching threshold $\tau$ as $$\tilde{M}(\tau)=\frac{1}{N}\sum_{n=1}^N\sum_{m=1}^M \left\{
\begin{array}{lr}
1, & \text{if } T_n^{(m)}<\tau\\
0, & \text{otherwise }
\end{array}\right\}~.$$ Under CLT conditions, the expected value for $\tilde{M}(\tau)$ can be approximated in a closed form: \begin{equation}
\mathbb{E}[\tilde{M}(\tau)] \approx \sum_{m=1}^M \Phi\left(\frac{\tau - m\cdot \mu}{\sqrt{m\cdot \sigma^2}} \right)
\label{eq:estimate_M} \end{equation} where $\mu, \sigma^2$ are the mean and variance for a single micro-batch $t_n^{(m)}$ compute latency, and $\Phi$ is the CDF of the standard normal distribution. This approximation closely fits the real value of $\tilde{M}$ and can be used to analyze the expected gain from \textit{DropCompute}. More details are in appendix \ref{appendix:speedup_analysis}. \begin{figure}\label{fig:experiement_vs_theory}
\end{figure} \subsection{Choosing the threshold} \label{section:automatic_threshold} The throughput of the system can be seen as the number of micro-batches computed per second. For $N$ workers, this can be written as $NM/(T+T^c)$. To evaluate the effectiveness of \textit{DropCompute}, we consider the difference in throughput between the baseline and when using \textit{DropCompute}. Doing so, we can define the effective speedup for $\tau$ as: \begin{equation}
S_{\mathrm{eff}}(\tau)= \frac{\text{DropCompute Throughput}}{\text{Baseline Throughput}} = \frac{N\tilde{M}(\tau)/ (\min\left\{\tau,T\right\}+T^c)}{NM/(T+T^c)} = \frac{\tilde{M}(\tau)(T+T^c)}{M(\min\left\{\tau,T\right\}+T^c)}
\label{eq:auto_eff} \end{equation} Given the statistical characteristics of the training setting, it is possible to estimate analytically the expected value of the effective speedup $\mathbb{E}[S_\mathrm{eff}]$ by using Equations \ref{eq:estimate_M} and \ref{eq:estimate_T}.
As shown in Figure \ref{fig:estimate_s_eff_b}, Equation \ref{eq:estimate_T} is less accurate when samples deviate from a normal distribution. \begin{figure}\label{fig:estimate_s_eff_a}
\label{fig:estimate_s_eff_b}
\label{threshold_choosing}
\label{fig:estimate_s_eff}
\end{figure} To find the optimal compute threshold, we synchronize the empirical distribution of micro-batch compute latency between all workers after a few iterations. Given this distribution, we find $T$ and $\tilde{M}(\tau)$, and search in a decentralized way for the threshold $\tau^*$ that maximizes the effective speedup $S_{\mathrm{eff}}(\tau)$ defined in Equation \ref{eq:auto_eff}.
Lowering the threshold leads to reduced compute time but higher compute drop rates. Figure \ref{threshold_choosing} highlights this trade-off and the optimal $\tau^*$ is marked.
\section{Experiments} \label{section:experiments} To be useful, \textit{DropCompute} must possess two properties. First, it should not compromise the accuracy of the trained model. This property is put to test in section \ref{sec:generalization_perf} where we fully train BERT-Large and ResNet-50 \citep{devlin2018bert, resnet}, each on a different task, with different drop rates to compare accuracy. Second, \textit{DropCompute} should maintain a high level of runtime performance, especially when compute variance or straggling workers exist and vanilla synchronous training time deteriorates. Section \ref{sec:runtime_perf} tests runtime performance of \textit{DropCompute} by training a 1.5 billion parameter language model, BERT1.5B \citep{devlin2018bert} with additive noise to the compute time of each worker.
\textbf{Experimental setup.} The analysis of all BERT models is performed on the same dataset as \citet{devlin2018bert}, which is a concatenation of Wikipedia and BooksCorpus with 2.5B and 800M words respectively. The finetuning of the pretrained models is performed on SQuAD-v1 \citep{rajpurkar-etal-2016-squad}. We verify the generality of \textit{DropCompute} by additional evaluation of a ResNet-50 model for image classification on ImageNet \citep{imagenet_cvpr09}. The experiments depicted in section \ref{sec:runtime_perf} and section \ref{sec:generalization_perf} are executed on Habana Gaudi-1 and Gaudi-2 accelerators, respectively, with high performance network \citep{habana2020-whitepaper}.
\subsection{Generalization performance} \label{sec:generalization_perf} The sole difference in the optimization when \textit{DropCompute} is applied is that the batch size is not deterministic, but stochastic, as explained in section \ref{sec:our_method}. To complement Theorem \ref{theorem:non_convex}, we examine the generalization performance achieved with a stochastic batch size on two popular tasks.
\textbf{Image classification.} To evaluate the generality of stochastic batch size and \textit{DropCompute} in particular, we evaluate the Top-1 accuracy of a ResNet-50 model on the Imagenet dataset using our method. Since it is not common to use gradient accumulation in large scale training of this task, we simulate the drops such that each worker randomly drops its local batch, so the total batch size is stochastic. This simulated environment enables us to examine the extent of drop rate we can use without compromising accuracy. Figure \ref{fig:resnet50_accuracy} in appendix \ref{appendix:image_classification} shows that up to $10\%$ drop rate, which is more than what \textit{DropCompute} operates on, there is a negligible deterioration in accuracy.
\begin{wraptable}{r}{0.4\textwidth}
\centering
\caption{\textbf{Maintaining accuracy of BERT-Large pretraining.} Fine-tuning results on SQuAD v1.1, where the F1 score is obtained by the pretrained model, each with a different drop rate during pretraining.}
\begin{tabular}{| c c |}
\hline
\% Drop rate & F1 score on dev set \\ [0.5ex]
\hline\hline
0\% & 91.32 $\pm$ 0.15 \\
\hline
2.5-3\% & 91.34 $\pm$ 0.04 \\
\hline
5.5-6\% & 91.44 $\pm$ 0.02 \\
\hline
10-11\% & 91.13 $\pm$ 0.02 \\
\hline
\end{tabular}
\label{table:accuracy}
\end{wraptable}
\textbf{Large language model.} Training LLMs is resource intensive, typically using large batch sizes, which makes \textit{DropCompute} appealing. We evaluate the \textit{DropCompute} method on this task by fully pretraining a BERT-Large model several times, each with a different drop rate. We follow the optimization regime described in \citet{you2019large} with a batch size of 64K for phase-1 and 32K for phase-2 (more details are provided in appendix \ref{appendix:generalization}). Each of the pretrained models is fine-tuned on the SQuAD task 3 times with different initializations. Fine-tuning is performed without drops, as it is not a large scale resource consuming task. Table \ref{table:accuracy} shows the average accuracy ($\pm$ standard deviation) obtained for each drop rate. As shown, \textit{DropCompute} at drop rates of up to $10\%$ has a negligible accuracy difference. Higher values measured up to $20\%$ of dropped gradients provide acceleration with a small yet discernible hit on accuracy. We note that these results are for a fixed budget of steps. In the presence of compute variance, the effective speedup indicates that additional steps can be executed while still maintaining competitive runtime performance. This notion is demonstrated in section \ref{sec:runtime_perf}.
\subsection{Runtime performance} \label{sec:runtime_perf} The main purpose of our proposed method is to maintain runtime performance when compute variance is present. We examine this by measuring the speedup of \textit{DropCompute} over standard synchronous training in several settings. First, we measure the potential speedup for different drop rates and training settings by post analysis of synchronous training without drops. In addition, we introduce compute variance by training with additive noise, and measure actual speedups using \textit{DropCompute}. The experiments in this section are performed on BERT1.5B. Details are provided in appendix \ref{appendix:runtime_performace}.
\textbf{Training with different number of workers and micro-batches.} We evaluate the potential speedup of \textit{DropCompute} on several training settings with natural heterogeneity and no drops. For each setting, we post analyze what would have been the speedup for different drop rates. As can be seen in Figure \ref{fig:speedup}, \textit{DropCompute} exhibits increasing benefits with a growing number of workers and compute requirements. However, there are diminishing returns in terms of speedup with more accumulations. This could possibly be explained by the amortization time of a large number of micro-batches.
\begin{figure}\label{fig:speedup}
\end{figure}
\textbf{Simulated delay environment.} Although \textit{DropCompute} may have value when the workers' compute latency variance is low, its significance becomes crucial when the workers' compute latency exhibits high variability. To evaluate our method, we introduce a delay environment where random latency is added to each micro-batch computation. This additive noise follows a bounded log-normal distribution. Detailed information and motivation regarding the additive noise are in appendix \ref{appendix:runtime_performace}. The experiments are executed with 12 gradient accumulations and a local batch size of 192. In Figure \ref{fig:abstract}, the negative impact of compute variance on scalability is demonstrated and mitigated using \textit{DropCompute}. The results in Figure \ref{fig:abstract} also correspond to section \ref{section:analysis} and Equation \ref{eq:eff_speedup}, where a theoretical extrapolation follows the same trend line. When utilizing \textit{DropCompute} in this setup, achieving the same training loss as the baseline might require additional training steps; however, it leads to a notable reduction in overall training time. Figure \ref{fig:time_to_train} demonstrates it in a training session with 64 workers, where approximately 3\% more steps are needed to reach the same loss, in 13\% less time.
\begin{figure}\label{fig:time_to_train}
\end{figure}
\section{Discussion} \textbf{Summary.} Efficient scalable systems are a key component to enable the continued development of deep learning models. To this day, state-of-the-art models rely on synchronous distributed optimization. The challenge to maintain synchronous training as an efficient solution grows larger with the quickly growing model sizes and data. Therefore, improving the robustness and scalability of distributed synchronous training is an important endeavor. This paper tackles the challenge of maintaining synchronous training scalable in the face of compute variance. We propose \textit{DropCompute} to improve the robustness of synchronous training. Workers drop their remaining compute when they reach a compute threshold, determined by exchanging and analyzing the compute latency distribution. We find that for a small percentage of dropped data, a much larger percentage of time can be saved, depending on the compute latency distribution of the workers. In addition, we provide theoretical convergence guarantees and runtime predictions.
\textbf{Limitations.} While \textit{DropCompute} is simple and straightforward, it deals with system efficiency, and as such, the user-level implementation provided is not optimal. Most of all, the provided implementation is limited by using gradient accumulations and integrating compute timeout in between accumulations. This can be solved by integrating this method in low-level accelerator code to enable execution more efficiently.
\textbf{Future directions.} \textit{DropCompute} is described and analyzed in this paper as a method built on top of synchronous training. However, this method can be integrated with other possibly asynchronous methods such as periodic synchronization. In appendix \ref{app:local_sgd}, we implement \textit{DropCompute} on top of Local-SGD \citep{Lin2020Don't} and show that \textit{DropCompute} can also improve the robustness of Local-SGD to stragglers.
\section*{Acknowledgments} We thank Itay Hubara for technical advising and valuable comments on the manuscript. The research of DS was Funded by the European Union (ERC, A-B-C-Deep, 101039436). Views and opinions expressed are however those of the author only and do not necessarily reflect those of the European Union or the European Research Council Executive Agency (ERCEA). Neither the European Union nor the granting authority can be held responsible for them. DS also acknowledges the support of Schmidt Career Advancement Chair in AI.
\appendix
\part*{Appendix}
\section{Experiments} \label{appendix:experiments}
\subsection{Runtime performance experiments} \label{appendix:runtime_performace} In this section, we provide details for the experiments of section \ref{sec:runtime_perf}.
\textbf{Experiment details.} As mentioned in the paper in section \ref{sec:runtime_perf}, we pre-train BERT1.5B following \citet{habana-deepspeed-bert}. The experiments in this section use up to 200 Gaudi accelerators with high bandwidth inter-connectivity. The training is done with a maximum input sequence length of 128 and 80 maximum predictions per sequence. The training regime consists of a local batch size of 196, 12 gradient accumulations, LANS optimizer \citep{zheng2020accelerated}, and a learning rate of 0.0015. Due to the large capacity of the model, we used ZeRO optimizer stage 1 to fit the model in memory \citep{rajbhandari2020zero}.
\textbf{Simulated delay.} Many frameworks use padding to allow for constant input length which improves hardware efficiency \citep{kosec2021packing}. However, some learning tasks inherently involve dynamic shapes, such as translation \citep{ott-etal-2018-scaling} and multi-task sequences \citep{2020t5}. These use cases motivate us to explore scenarios of dynamic length via simulation. To demonstrate the value of \textit{DropCompute} in dealing with compute variance we added to each micro-batch compute time an additional random waiting time. The additive noise is based on a Log-normal distribution since it is typical for user post lengths in internet discussions \citep{sobkowicz2013lognormal}, which are used as training data in recent language models \citep{radford2019language}. To make this setting more realistic, we scale down and bound the noise so that each accumulation takes $\times1.5$ longer on average, and, in extreme cases, can take up to 6 times longer. This allows us to simulate stragglers and high compute variance while keeping a conservative limit on iteration time. Thus, the additive noise takes the form of $$\epsilon=\min\left(\frac{1}{\alpha}Z, \beta\right) ,\;\;\;\;\;\;\;\;\; Z\sim \mathrm{LogNormal}(4,1) \,.$$ This noise was added to each accumulation $$t_n^{(m)}\gets t_n^{(m)} + \mu\cdot \epsilon \,,$$ where $\mu$ is the mean value for $t_n^{(m)}$, $\alpha=2\exp(4.5)$ and $\beta=5.5$ are the scaling and bounding constants, and the log-normal parameters (4,1) fit user post lengths, as seen in \citet{sobkowicz2013lognormal}. As illustrated in Figure \ref{fig:epsilon_dist}, the noise distribution leads to each micro-batch latency increased by up to $6\mu$, while the majority of accumulations have low latency. Further analysis on the effect of noise properties is discussed in appendix \ref{appendix:noise_analysis}.
\begin{figure}
\caption{\textbf{The latency distribution in a simulated delay environment.} (left) The distribution of the additive noise $\epsilon$, added to each accumulation. (right) The distribution for iteration time $T_n$, with 12 accumulations, each with added noise, in BERT1.5B training.}
\label{fig:epsilon_dist}
\end{figure}
\subsection{Generalization experiments} \label{appendix:generalization} In this section, we provide details for the experiments of section \ref{sec:generalization_perf}.
\subsubsection{Large language models}
Here we provide more details about how the LLM experiment was executed as well as additional graphs related to the LLM experiment described in section \ref{sec:generalization_perf}.
\textbf{Experiment details.} As mentioned in the paper, in section \ref{sec:generalization_perf} we follow the optimization regime of \citet{you2019large} with the LAMB optimizer. Specifically, for phase-1 where the sequence length is 128 tokens per sample, we use a batch size of 64K, the learning rate is 0.006, the warmup ratio is 0.2843, and the steps number is 7038. For phase-2 where the sequence length is 512, we use a batch size of 32K, the learning rate is 0.004, the warmup ratio is 0.128 and the steps number is 1563. The experiments were executed on 64 workers.
\textbf{Batch size distribution.} As explained in section \ref{sec:generalization_perf} we fully pretrain a BERT-Large model with \textit{DropCompute} several times, each with a different drop rate. Figure \ref{fig:batch_dist} shows the empirical batch distribution of each of the drop rates in phase-1.
\begin{figure}
\caption{\textbf{Batch size distribution.} BERT-Large phase-1 pretraining batch size distribution when using \textit{DropCompute} and drop rate of (a) 2.5\% , (b) 5.5\%,
and (c) 11.5\%}
\label{fig:batch_dist}
\end{figure}
\textbf{Convergence loss.} In addition to the results depicted in Table \ref{table:accuracy}, we show the convergence of the training loss with the different drop rates in Figure \ref{fig:loss_1563}.
\begin{figure}
\caption{\textbf{Train loss convergence.} BERT-Large phase-1 (left) and phase-2 (right) pretraining train loss for different drop rates.}
\label{fig:loss_1563}
\end{figure}
\subsubsection{Image classification} \label{appendix:image_classification} This section provides the details of the image classification experiment described in section \ref{sec:generalization_perf} as well as Figure \ref{fig:resnet50_accuracy} which is referenced from the paper.
\textbf{Experiment details.} To simulate \textit{DropCompute}, at each training step, the gradients of each worker are set to zero with a probability of $P_{\text {drop }}$. We repeat each training process 3 times with different initializations. To examine the generalization of \textit{DropCompute} over different optimizers, we implement our method on two popular training regimes of ResNet50. First, we follow the optimization regime described in \citet{goyal2017accurate} that uses SGD with 32 workers and a global batch size of 4096. Second, we follow \citet{mlperf} that uses LARS \citep{you2017large} with 8 workers and a global batch size of 2048.
\begin{figure}
\caption{\textbf{Generalization over varying drop rates.} Top-1 validation accuracy of ResNet50 trained on ImageNet with varying simulated drop rates. The dashed line is the baseline accuracy without drops. The solid line is the average over 3 runs, and the blue area is the standard deviation. (left) Training regime with SGD \citep{goyal2017accurate}. (right) Training regime with LARS \citep{mlperf}. Up to a 10\% drop rate, there is a negligible accuracy deterioration.}
\label{fig:resnet50_accuracy}
\end{figure}
\textbf{Learning rate correction.} Previous works showed that the learning rate should be scaled with respect to the batch size \citep{hoffer2017train, goyal2017accurate}. With a stochastic batch size and specifically \textit{DropCompute}, it is possible that a learning rate correction should be considered to maintain accuracy with the same number of steps. We examine such corrections when training with stochastic batch size. First, we decrease the learning rate by a constant factor, equal to the average drop rate. Specifically, for an average drop rate $P_{\text {drop}}\in[0,1]$ we multiply the learning rate by $(1-P_{\text {drop}})$. A different correction we consider is a stochastic correction, such that in each step we divide the gradients by the computed batch size, instead of the original batch size. This results in a different normalization in each step depending on the actual dropped samples. We note that for the latter, the workers have to synchronize the computed batch of each worker at each step. This can generally be done during the \textit{AllReduce}, with negligible overhead. We repeat the training of ResNet50 on ImageNet as described in \citet{goyal2017accurate} to evaluate the generalization without correction and with the two suggested corrections. We use 128 workers, batch size 8192, and use ghost batch norm (GBN) \citep{hoffer2017train} to match batch normalization of 32 samples and restore the results in \citet{goyal2017accurate}. As can be seen in Figure \ref{fig:resnet50_lr_correction}, for low drop rates, there is no superior correction method, and no correction generally achieves the same generalization. Yet, it is possible that a learning rate correction could potentially improve generalization on a different task or with a different optimizer.
\begin{figure}
\caption{\textbf{Learning rate correction is not necessary for low drop rates.} Top-1 validation accuracy of ResNet50 trained on ImageNet with varying simulated drop rates, using the learning-rate correction methods described in \ref{appendix:image_classification}. The dashed line is the baseline accuracy without drops. Each solid line is the average over 3 runs, and the area around it is the standard deviation. Up to a 10\% drop rate, there is a negligible accuracy deterioration regardless of the correction applied.}
\label{fig:resnet50_lr_correction}
\end{figure}
\subsection{Local-SGD} \label{app:local_sgd} Periodic synchronization methods, such as Local-SGD, provide better scalability properties than synchronous methods. By exchanging parameters less frequently, communication overhead is mitigated. For compute variance and straggling workers in particular, the robustness of these methods greatly depends on the distribution of the compute time between workers. For example, when straggling workers appear randomly with homogeneous distribution, Local-SGD can mitigate the straggling workers' slowdowns to some extent; this is because of the amortization effect in synchronizing periodically once every several steps. On the other hand, if straggling workers appear from a small set of workers such as a single server, a realistic scenario, Local-SGD acts more closely to synchronous training as the worst-case scenario is when a single worker is always straggling behind. \textit{DropCompute} can be easily integrated with Local-SGD by leveraging periodic synchronization instead of gradient accumulations. We implement \textit{DropCompute} on top of Local-SGD by comparing the compute time with a threshold at each local step. We show that when stragglers are apparent, \textit{DropCompute} can improve the robustness of Local-SGD. We randomly slow down workers to simulate stragglers in two scenarios as described in Figure \ref{fig:local_sgd}. The experiment setting is 32 workers training on ResNet50 and ImageNet. At each local step, each worker is selected to be a straggler with a $4\%$ chance. This way, there is at least 1 straggler for each local step on average. We measure relative speedup compared to synchronous training in terms of step time, both for Local-SGD and with \textit{DropCompute} on top of Local-SGD. As can be seen, with \textit{DropCompute} (set to a $\sim 6.2\%$ drop rate in this experiment) we improve the robustness of Local-SGD. \begin{figure}\label{fig:local_sgd}
\end{figure}
\section{Analyzing the effective speedup using \textit{DropCompute}} In this section, we provide more details on the process of choosing the threshold $\tau^*$ that will maximize the effective speedup. We begin by giving technical details on the process used during training, given samples drawn from the empirical distribution of the compute latency. Next, we continue to explore and establish the analytic connection between the latency statistics and the effective speedup.
\subsection{Automatic selection of the drop threshold} \label{appendix:auto_threshold}
In Algorithm \ref{algorithm:optimal_threshold} below we present the algorithm used for automatic selection of the drop threshold $\tau$. In this algorithm, $t_{i,n}^{(m)}$ is the time it takes worker $n$ to process micro-batch $m$ at step $i$ and $T_i^c$ is the time spent on communication at step $i$. This data is measured by each worker, and then synchronized between all workers after $I$ iterations. After the synchronization step each worker will have the same drop threshold $\tau$, which depends on both its own speed and the compute latency of the other workers. Since $T_i^c$ is used in the normalization of the effective speedup, the chosen threshold takes into account both compute and communication time.
\begin{algorithm}[h!]
\caption{Automatic choice for the optimal threshold $\tau$}
\label{algorithm:optimal_threshold}
\begin{algorithmic}
\Statex \textbf{Input}:
\State number of workers $N$
\State number of iterations $I$
\State number of micro-batches per step $M$
\State micro-batch time samples $\{\{t\}_{i,n}^{(m)}\}^{i\in[1:I], n\in[1:N], m\in[1:M]}$
\State communication time for each iteration $T_i^c$
\State potential thresholds $[\tau_0, \tau_1, ...]$
\For{$\tau \in [\tau_0,\tau_1,...]$}
\For{$i = 1$ to $I$}
\State Initialize completed micro-batch count: $\tilde{M}_i(\tau)=0$
\State Initialize compute step latency for all workers: $T_i=0$
\For{$n = 1$ to $N$}
\State Initialize single worker step compute latency: $T_{i,n}=0$
\For{$m = 1$ to $M$}
\State $T_{i,n} \gets T_{i,n} + t_{i,n}^{(m)} $
\State $\tilde{M}_i(\tau) \gets \tilde{M}_i(\tau) + \frac{1}{N}\cdot\left\{
\begin{array}{lr}
1, & \text{if } T_{i,n}<\tau\\
0, & \text{otherwise }
\end{array}\right\}$
\EndFor
\State $T_i \gets \text{max}(T_i, T_{i,n})$ \Comment{compute time of the slowest worker at step (i)}
\EndFor
\State $S_i(\tau) = \frac{T_i + T_i^c}{\min(\tau,T_i)+T_i^c}\cdot \frac{\tilde{M}_i(\tau)}{M}$ \Comment{Effective speedup for step (i)}
\EndFor
\State $S_{\mathrm{eff}}(\tau) = \frac{1}{I}\sum_{i=1}^{I}{S_i(\tau)}$ \Comment{Mean speedup for threshold ($\tau$)}
\EndFor
\State $\tau^* \gets \text{argmax}_\tau\left(S_{\mathrm{eff}}(\tau) \right)$
\end{algorithmic} \end{algorithm}
\subsection{DropCompute speedup analytic analysis} \label{appendix:speedup_analysis} In this section we further explore the relation between the compute latency distribution and the effective speedup. We will derive a closed-form representation of the effective speedup by making certain assumptions. First we assume that \begin{assumption} \label{assu: iid} $t_n^{(m)}$ is i.i.d.\ with finite mean $\mu$ and finite variance $\sigma^2$.\end{assumption} Note that in the assumption above, for simplicity of presentation we assume all workers are identical and so $\mu$ and $\sigma^2$ are identical. However, it is possible to derive similar properties with nonidentical workers, each with their own $\mu_n$, $\sigma_n$. Next, denote the time for completion of micro-batch $m$ as $T_n^{(m)}=\sum_{j=1}^m t_n^{(j)}$. Then, we assume \begin{assumption} \label{assume:gaussian} $T_n^{(m)}\text{ is Gaussian }\sim\mathcal{N}(m\mu,m\sigma^2)$ for $m>\sqrt{M}\,.$\end{assumption} This assumption holds in the limit $M\to\infty$ given Assumption \ref{assu: iid}, from the Central Limit Theorem (CLT). Lastly, denoting $\tau$ as the threshold used, we assume \begin{assumption} \label{assume:tau_limit} $\tau>\frac{M\mu}{2}\,.$\end{assumption} This bound can be considered as the minimum threshold allowed, in order for \textit{DropCompute} to be effective. Taking a lower threshold will result in an unacceptably high drop rate.
Using these assumptions we first derive analytical expression for the iteration time $\mathbb{E}[T]$ and the mean completed number of gradient accumulations $\mathbb{E}[\tilde{M}]$ with \textit{DropCompute}. Then, we combine these expressions to obtain an expression for the mean effective speed $\mathbb{E}[S_{\mathrm{eff}}]$.
Figure \ref{fig:estimate_s_eff_b} shows an example of how close the derived expression of $\mathbb{E}[S_{\mathrm{eff}}]$ is to the value calculated by using the algorithm described in section \ref{appendix:auto_threshold}. The `analytical' curve is slightly off due to the inaccuracy of the Gaussian Assumption \ref{assume:gaussian} in calculating $\mathbb{E}[T]$, as we discuss below. We therefore added another curve `analytical given $\mathbb{E}(T)$', which uses the same derived expression for $\mathbb{E}[S_{\mathrm{eff}}]$ but replaces the value of $\mathbb{E}[T]$ with the empirical mean: $\overline{T}=\frac{1}{I}\sum_{i=1}^I \max_n\left\{T_{n,i}^{(M)} \right\}$ where $i\in[1:I]$ ranges over the measured iterations.
\textbf{Iteration time.} as written in section \ref{sec:analysis_iteration_time}, the iteration time for all workers is $$T=\max_n \left(T_n^{(M)}\right) = T^c + \max_n \left( \sum_{m=1}^M t_n^{(m)} \right)\,.$$ When $T_n^{(M)}\sim\mathcal{N}(M\mu, M\sigma^2)\,.$ the expected value of $T$ can be approximated as \citep{bailey2014pseudomathematics}: \begin{equation} \label{eq:approximate_t}
\mathbb{E}[T] \approx \sqrt{M\sigma^2}\cdot\left( (1-\gamma)\cdot \Phi^{-1}\left(1-\frac{1}{N} \right) + \gamma\cdot \Phi^{-1}\left(1-\frac{1}{e\cdot N} \right)\right) + M\mu + T^c\,. \end{equation} It is worth noting that the distribution of $T$ is mostly affected by the tail of distribution of $T_n=T_n^{(M)}$ (as a consequence of Equation \ref{pdf}). Therefore, in practice the Gaussian Assumption \ref{assume:gaussian}, and therefore Equation \ref{eq:approximate_t}, can be inaccurate, especially for small $M$ and large $N$. It is therefore more accurate to use the real value of $\mathbb{E}[T]$, measured without \textit{DropCompute}, to estimate the potential effective speedup. An example for the inaccuracy of this approximation can be seen in Figure \ref{fig:estimate_s_eff_b}, when $T_n^{(M)}$ does not follow a normal distribution.
\textbf{Completed micro-batches.} The average number of micro-batch computed by a single worker $n$ when using \textit{DropCompute} with threshold $\tau$, is: $$\tilde{M}(\tau)=\frac{1}{N}\sum_{n=1}^N\sum_{m=1}^M \left\{
\begin{array}{lr}
1, & \text{if } T_n^{(m)}<\tau\\
0, & \text{otherwise }
\end{array}\right\} \,.$$ Its expected value can be written as: $$ \mathbb{E}[\tilde{M}(\tau)]=\sum_{m=1}^M P\left(T_n^{(m)}<\tau\right) \,.$$
In order to use assumption \ref{assume:gaussian}, we can split $\tilde{M}$ into 2 sums and derive a closed-form formula for the expected value: \begin{equation} \label{eq:tilde_m_split}
\tilde{M}(\tau)=\sum_{m=1}^{ \lfloor{{\sqrt{M}}}\rfloor} P\left(T_n^{(m)}<\tau\right) + \sum_{m=\lceil{{\sqrt{M}}}\rceil}^M P\left(T_n^{(m)}<\tau\right)\,. \end{equation} For the right term we can use assumption \ref{assume:gaussian} so that $P(T_n^{(m)}<\tau)=\Phi\left(\frac{\tau - m\cdot \mu}{\sqrt{m\cdot \sigma^2}} \right)$. For the left term, when $m<\sqrt{M}$ we use Markov inequality and assumption \ref{assume:tau_limit} to show that $$ 0\leq P(T_n^{(m)}>\tau)\leq \frac{\mathbb{E}[T_n^{(m)}]}{\tau}=\frac{m\mu}{\tau}\leq \frac{2m}{M}\,.$$
In other words, when using \textit{DropCompute} with low drop rates, $P(T_n^{(m)}<\tau)$ is very high for $m<\sqrt{M}$. The Gaussian approximation for $m<\sqrt{M}$ diminishes exponentially when increasing $M$, as seen by applying Chernoff bound: $$ 0\leq P(Z^{(m)}>\tau)\leq e^{-\frac{(\tau-m\mu)^2}{2m\sigma^2}}\leq e^{-\frac{(M/2-m)^2\mu^2}{2m\sigma^2}}$$ where $Z^{(m)}\sim\mathcal{N}(m\mu, m\sigma^2)$. Therefore, the error resulting in replacing $T_n^{(m)}$ with a Gaussian approximation, is bounded: $$ P(T_n^{(m)}<\tau) - P(Z^{(m)}<\tau) = P(Z^{(m)}>\tau) -P(T_n^{(m)}>\tau) $$ $$\Downarrow$$ $$ -\frac{2m}{M} \leq -P(T_n^{(m)}>\tau) \leq P(T_n^{(m)}<\tau) - P(Z^{(m)}<\tau) \leq P(Z^{(m)}>\tau)\leq e^{-\frac{(M/2-m)^2\mu^2}{2m\sigma^2}}$$ $$\Downarrow$$ $$ -\frac{2m}{M} + P(Z^{(m)}<\tau) \leq P(T_n^{(m)}<\tau) \leq P(Z^{(m)}<\tau) + e^{-\frac{(M/2-m)^2\mu^2}{2m\sigma^2}}$$ Plugging these inequalities into the left term in equation \ref{eq:tilde_m_split} gives us:
\begin{equation} \label{eq:tilde_m_split_ineq} \begin{split} \sum_{m=1}^{ \lfloor{{\sqrt{M}}}\rfloor} P\left(T_n^{(m)}<\tau\right) \leq& \sum_{m=1}^{ \lfloor{{\sqrt{M}}}\rfloor} \left(P\left(Z^{(m)}<\tau\right)+e^{-\frac{(M/2-m)^2\mu^2}{2m\sigma^2}} \right) \\ \\ \sum_{m=1}^{ \lfloor{{\sqrt{M}}}\rfloor} P\left(T_n^{(m)}<\tau\right) \geq& \sum_{m=1}^{ \lfloor{{\sqrt{M}}}\rfloor}\left( P\left(Z^{(m)}<\tau\right) -\frac{2m}{M}\right) = O(1) +\sum_{m=1}^{ \lfloor{{\sqrt{M}}}\rfloor} P\left( Z^{(m)}<\tau\right) \end{split} \end{equation}
Combining equations \ref{eq:tilde_m_split},\ref{eq:tilde_m_split_ineq} we can write the expected value as \begin{equation} \label{eq:approximate_m} \mathbb{E}[\tilde{M}(\tau)]=\sum_{m=1}^M P\left(T_n^{(m)}<\tau\right) = O(1)+ \sum_{m=1}^M \Phi\left(\frac{\tau - m\cdot \mu}{\sqrt{m\cdot \sigma^2}} \right)\,. \end{equation}
\textbf{Effective speedup.} As seen in section \ref{section:automatic_threshold}, we define the effective speedup as $$ S_{\mathrm{eff}}(\tau)= \frac{\tilde{M}(\tau)(T+T^c)}{M\cdot(\min\left\{\tau,T\right\}+T^c)}\,. $$ We are interested in calculating the expected value for the effective speedup, and in order to use the formulations in equations \ref{eq:approximate_t},\ref{eq:approximate_m} we first need to show that $\mathbb{E}[\tilde{M}(\tau)\cdot T] \approx \mathbb{E}[\tilde{M}(\tau)]\cdot \mathbb{E}[T]$. We examine $$ \mathbb{E}[\tilde{M}T]=\mathbb{E}[T(\mathbb{E}[\tilde{M}] + \tilde{M}- \mathbb{E}[\tilde{M}])]=\mathbb{E}[\tilde{M}]\mathbb{E}[T]+\mathbb{E}[T(\tilde{M}-\mathbb{E}[\tilde{M}])]$$ Applying Cauchy–Schwarz inequality we get
$$|\mathbb{E}[T(\tilde{M}-\mathbb{E}[\tilde{M}])]| \leq \sqrt{\mathbb{E}[T^2]\mathbb{E}[(\tilde{M}-\mathbb{E}[\tilde{M}])^2]}= \sqrt{\mathbb{E}[T^2]\frac{\sigma_{\tilde{M}_n}^2}{N}}=O(N^{-\frac{1}{2}}) \,,$$
where $\sigma_{\tilde{M}_n}^2$ denotes the variance of $\tilde{M}_n(\tau)=\sum_{m=1}^M \left\{ \begin{array}{lr} 1, & \text{if } T_n^{(m)}<\tau\\ 0, & \text{otherwise } \end{array}\right\}$. Hence: $$ \mathbb{E}[\tilde{M}T] = \mathbb{E}[\tilde{M}]\mathbb{E}[T] + O(N^{-\frac{1}{2}})$$ We can now write the expected value for the effective speedup as: \begin{equation} \label{eq:eff_speedup}
\mathbb{E}[S_\mathrm{eff}] =
\frac{\sum_{m=1}^M \Phi\left(\frac{\tau - m\cdot \mu}{\sqrt{m\sigma^2}} \right) }{M} \cdot
\frac{\mathbb{E}[T]}{\min(\tau, \mathbb{E}[T])+T^c}+ O(M^{-1}+M^{-1}N^{-\frac{1}{2}}) \end{equation} \begin{equation} \label{eq:T_approximation}
\mathbb{E}[T]\approx\sqrt{M\sigma^2} \left( (1-\gamma)\cdot \Phi^{-1}\left(1-\frac{1}{N} \right) + \gamma\cdot \Phi^{-1}\left(1-\frac{1}{e N}\right)\right) + M\mu + T^c \end{equation} As mentioned above, when the Gaussian Assumption \ref{assume:gaussian} is inaccurate it may be useful to plug instead in the empirical value for $\mathbb{E}[T]$ in equation \ref{eq:eff_speedup} in order to get a more accurate estimation of $\mathbb{E}[S_\mathrm{eff}]$.
\textbf{Finding $\tau^*$.} The optimal threshold $\tau^*$ can be chosen as: $$ \tau^* = \text{argmax}_\tau \mathbb{E}[S_\mathrm{eff}(\tau)] =\text{argmax}_\tau \left(\frac{1}{\tau + T^c}\cdot \sum_{m=1}^M \Phi\left(\frac{\tau - m\cdot \mu}{\sqrt{m\cdot \sigma^2}} \right)\right)$$
By using the above derivations, we can utilize $\mu$, $\sigma$, $T^c$ to understand the potential value of \textit{DropCompute}. This can be done without actually training and measuring the empiric distribution of $t_n^{(m)}$ as done in appendix section \ref{appendix:auto_threshold}. We note that finding $\tau^*$ does not require any estimation of $T$ and can be done without any statistics that originate from a large scale training session.
\subsection{Additive noise analysis} \label{appendix:noise_analysis} As a conclusion of the previous sections, we understand that the effectiveness of \textit{DropCompute} is mostly due to the behavior of the stochastic latency of each worker. To analyze this phenomenon we simulate a training of multiple workers using the scheme presented in section \ref{appendix:runtime_performace} with various additive noise types. As shown in figures \ref{fig:noise_type}, \ref{fig:noise_scale}, the ratio $\mathbb{E}[T]/\mathbb{E}[T_i]$ is a good indicator for determining the potential of \textit{DropCompute} on a given training setting. High ratios indicate a gap between the step time for a single worker and the step time for multiple workers, which can be compensated by using \textit{DropCompute}.
\begin{figure}\label{fig:noise_type}
\end{figure}
\begin{figure}\label{fig:noise_scale}
\end{figure}
\section{Convergence with stochastic batch size} \label{appendix:proof} In this section, we provide proof of theorem \ref{theorem:non_convex} (\ref{appendix:non_convex_proof}), as well as convergence proof for the loss itself in the convex case (\ref{appendix:convex_proof}). We also discuss the generalization properties with a stochastic batch in section \ref{app:generalization_discussion}.
\subsection{Proof for convex case} \label{appendix:convex_proof}
\begin{theorem} \label{theorem:convex_case} Under assumption \ref{assumption:convergence} and the specific case where $f$ is a convex function, for SGD with \textit{DropCompute} (Algorithm \ref{algorithm:drop_compute}), given $N$ workers and a local batch size $b_{local}$, we have that \al \label{eq:SGD_GUARANTEES_final} \E[\mathcal{L}(\mathcal{D},\bar{\theta}) - \mathcal{L}(\mathcal{D},\theta^*)] \leq
\frac{8Lb_\mathrm{max}\|\theta_1-\theta^*\|^2}{K} + \frac{6\sigma \|\theta_1-\theta^*\|}{\sqrt{K}}~. \eal where $K$ is the total number of samples used throughout the algorithm \footnote{We assume that the batch sizes may be stochastic but $K$ is predefined and deterministic}, and the expectation is with respect to the randomization introduced due to sampling from $\mathcal{D}$ throughout the optimization process. \end{theorem}
\textbf{Proof of theorem \ref{theorem:convex_case}}
\textbf{Notation:} During the proof we denote by $b_i$ the total batch size (summed over all workers) that we employ at iteration $i$. $K = \sum_{i=1}^S b_i$ is the total number of samples along all $S$ iterations. At iteration $i$ we maintain a weight vector $\theta_i\in{\mathbb R}^d$ and query a gradient estimate $g_i$ based on a batch size of $b_i$ samples. Thus, we set $$ g_i = \frac{1}{b_i} \sum_{s=1}^{b_i} g_i^s $$
where $g_i^s=\nabla\ell(z_i^s,\theta_i)$, and $z_i^s$ is randomly sampled from $\mathcal{D}$. We also maintain importance weights, $\alpha_i = b_i$.
Note that there exists $b_{\max}$ such that $\alpha_i\leq b_{\max}~;\forall i$
Now, the update rule: \al \label{eq:Update} \theta_{i+1} = \theta_i - \eta \alpha_i g_i \eal Eventually, we output $$ \bar{\theta} = \frac{1}{\alpha_{1:S}}\sum_{i=1}^S \alpha_i \theta_i $$ where $\alpha_{1:S}:=\sum_{i=1}^S \alpha_i.$
We assume that the batch sizes $b_i$ are stopping times w.r.t.~the natural filtration induced by the samples we draw during the optimization process. Informally, this means that the value of $b_i$ depends only on the history of the samples that we have seen prior to setting $b_i$.
We can therefore prove the following lemma, \begin{lemma} \label{lem:Unbiased}
Upon choosing $\alpha_i = b_i$ the following holds,
\als
\E [\alpha_i (g_i - \nabla \mathcal{L}(\mathcal{D},\theta_i))\vert \theta_i]
= \E [\sum_{s=1}^{b_i} (g_i^s - \nabla\mathcal{L}(\mathcal{D},\theta_i))\vert \theta_i] = 0~.
\eals \end{lemma}
Now, using standard analysis for Online Gradient Descent \citep{hazan2016introduction} with the update rule of \eqref{eq:Update} gives, $$
\sum_{i=1}^S \alpha_i g_i\cdot(\theta_i-\theta^*) \leq \frac{\|\theta_1-\theta^*\|^2}{\eta} + \eta \sum_{i=1}^S \alpha_i^2 \|g_i\|^2 $$ Taking expectation and using Lemma~\ref{lem:Unbiased} gives, $$
\E\sum_{i=1}^S \alpha_i \nabla\mathcal{L}(\mathcal{D},\theta_i)\cdot(\theta_i-\theta^*) \leq \frac{\|\theta_1-\theta^*\|^2}{\eta} + \eta \E\sum_{i=1}^S \alpha_i^2 \|g_i\|^2 $$ From convexity we know that $0\leq \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*)\leq \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot(\theta_i-\theta^*)$, therefore the above implies, \al \label{eq:SGD_GUARANTEES}
\E\sum_{i=1}^S \alpha_i( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*) ) \leq \frac{\|\theta_1-\theta^*\|^2}{\eta} + \eta \E\sum_{i=1}^S \alpha_i^2 \|g_i\|^2 \eal
Now, we write $g_i = \nabla \mathcal{L}(\mathcal{D},\theta_i) + \xi_i$ where $\xi_i = g_i - \nabla \mathcal{L}(\mathcal{D},\theta_i)$ and note that $$\alpha_i \xi_i = \sum_{s=1}^{b_i}(g_i^s - \nabla \mathcal{L}(\mathcal{D},\theta_i)) = \sum_{s=1}^{b_i} \xi_i^s$$ where we denote $\xi_i^s := g_i^s - \nabla \mathcal{L}(\mathcal{D},\theta_i)$. Next, we shall use the following lemma, \begin{lemma}\label{lem:BoundG1} The following holds, \al \label{eq:VarStep2_FinalA}
\E \alpha_i^2 \|g_i\|^2
&\leq 2 b_{\max} \E \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 + 2\sigma^2 \E b_i~. \eal Moreover, due to the $L$-smoothness of $f(\cdot)$, and global optimality of $\theta^*$, the following holds, \al \label{eq:VarStep2_FinalB}
\E \alpha_i^2 \|g_i\|^2 &\leq 4 b_{\max}L \E \alpha_i ( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*)) + 2\sigma^2 \E b_i~. \eal \end{lemma}
\paragraph{Final Bound:} Plugging the above lemma back into Eq.~\eqref{eq:SGD_GUARANTEES} gives, \al \label{eq:SGD_GUARANTEES2} \E\sum_{i=1}^S \alpha_i( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*) ) &\leq
\frac{\|\theta_1-\theta^*\|^2}{\eta} + 4\eta b_{\max} \E\sum_{i=1}^S\alpha_i L( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*)) + 8\eta\sigma^2 \E\sum_{i=1}^S b_i \nonumber\\ &\leq
\frac{\|\theta_1-\theta^*\|^2}{\eta} + 4\eta b_{\max}L \E\sum_{i=1}^S\alpha_i ( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*)) + 8\eta\sigma^2 K ~, \eal where we used $K = \sum_{i=1}^S b_i$.
Now if we pick $\eta$ such that $4\eta b_{\max}L \leq 1/2$ then we can move the second term in the RHS to the LHS and obtain, \al \label{eq:SGD_GUARANTEES3} \frac{1}{2}\E\sum_{i=1}^S \alpha_i( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*) ) &\leq
\frac{\|\theta_1-\theta^*\|^2}{\eta} + 8\eta\sigma^2 K ~,
\eal Thus, choosing $\eta = \min\left\{ \frac{\|\theta_1-\theta^*\|}{\sigma\sqrt{8K}} , \frac{1}{8L b_{\max}} \right\}$ gives the following bound, \al \label{eq:SGD_GUARANTEES4} \E\sum_{i=1}^S \alpha_i( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*) ) &\leq
8Lb_{\max}\|\theta_1-\theta^*\|^2 + 6\sigma \|\theta_1-\theta^*\|\sqrt{K} \eal Now, recalling that $K : = \sum_{i=1}^S b_i= \sum_{i=1}^S \alpha_i$ and using Jensen's inequality together with $\bar{\theta} = \frac{1}{\alpha_{1:S}}\sum_{i=1}^S \alpha_i \theta_i$ yields, \al \label{eq:SGD_GUARANTEES5} \E[\mathcal{L}(\mathcal{D},\bar{\theta}) - \mathcal{L}(\mathcal{D},\theta^*)] &\leq \E\sum_{i=1}^S \frac{\alpha_i}{\alpha_{1:S}}( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*) ) \leq
\frac{8Lb_{\max}\|\theta_1-\theta^*\|^2 + 6\sigma \|\theta_1-\theta^*\|\sqrt{K}}{\alpha_{1:S}}\nonumber\\ &\leq
\frac{8Lb_{\max}\|\theta_1-\theta^*\|^2}{K} + \frac{6\sigma \|\theta_1-\theta^*\|}{\sqrt{K}} \eal where we used $\alpha_{1:S} = K$. \hspace{0.5cm}\qedsymbol{}
\subsection{Proof for non-convex case} \label{appendix:non_convex_proof}
\textbf{Proof of theorem \ref{theorem:non_convex}}
We use the same notation for $b_i$ and $g_i$ as before. And again used weights, $$ \alpha_i = b_i $$ We also assume that $b_i\leq b_{\max}~,\forall i$.
The update rule is the following, $$ \theta_{i+1} = \theta_i - \eta \alpha_i g_i $$ And the output is $\bar{\theta}$, where we define, $$ \bar{\theta} = \theta_i~;\quad \text{w.p.}~~ \frac{\alpha_i}{\alpha_{1:S}} $$ Thus,
$$ \E \|\nabla \mathcal{L}(\mathcal{D},\bar{\theta})\|^2=\frac{1}{\alpha_{1:S}} \sum_{i=1}^S \alpha_i \E\|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 $$
Using smoothness, \als \mathcal{L}(\mathcal{D},\theta_{i+1}) &\leq
\mathcal{L}(\mathcal{D},\theta_i) - \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot (\theta_{i+1}-\theta_i) + \frac{L}{2}\|\theta_{i+1}-\theta_i \|^2 \nonumber\\ &\leq
\mathcal{L}(\mathcal{D},\theta_i) - \eta \alpha_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot g_i + \frac{L\eta^2}{2}\|\alpha_i g_i \|^2 \nonumber\\ &\leq
\mathcal{L}(\mathcal{D},\theta_i) - \eta \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2-\eta \alpha_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i)) + \frac{L\eta^2}{2}\|\alpha_i g_i \|^2 \eals Re-arranging the above yields, \als
\eta \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 &\leq
\mathcal{L}(\mathcal{D},\theta_i) - \mathcal{L}(\mathcal{D},\theta_{i+1}) -\eta \alpha_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i)) + \frac{L\eta^2}{2}\|\alpha_i g_i \|^2 \eals Summing the above, and dividing by $\eta$, we obtain, \al \label{eq:NonCvx1}
\sum_{i=1}^S \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 &\leq
\frac{1}{\eta}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta_{S+1})) - \sum_{i=1}^S\alpha_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i)) + \frac{L\eta}{2}\sum_{i=1}^S\|\alpha_i g_i \|^2 \nonumber\\ &\leq
\frac{1}{\eta}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) - \sum_{i=1}^S\alpha_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i)) + \frac{L\eta}{2}\sum_{i=1}^S\|\alpha_i g_i \|^2 \eal where we used $\mathcal{L}(\mathcal{D},\theta^*) \leq \mathcal{L}(\mathcal{D},\theta_{S+1})$ since $\theta^*$ is the global minimum of $\mathcal{L}(\mathcal{D},\cdot)$.
Now recall that from Lemma~\ref{lem:Unbiased} we have, \al \label{eq:Doob2B} \E[\alpha_i (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i))\vert \theta_i] = 0~. \eal And from Lemma~\ref{lem:BoundG1} we have, \al \label{eq:VarStep2B}
\E \alpha_i^2 \|g_i\|^2
&\leq 2 b_{\max} \E \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 + 2\sigma^2 \E b_i~. \eal
Thus, taking expectation in Eq.~\eqref{eq:NonCvx1}, and plugging Eq.~\eqref{eq:Doob2B} and \eqref{eq:VarStep2B}, yields, \al \label{eq:NonCvx2}
\E \sum_{i=1}^S \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 &\leq
\frac{1}{\eta}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) - \sum_{i=1}^S\E \alpha_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\cdot (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i)) + \frac{L\eta}{2}\sum_{i=1}^S \E\|\alpha_i g_i \|^2 \nonumber\\ &\leq
\frac{1}{\eta}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) + L\eta b_{\max} \sum_{i=1}^S \E\alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 +L\eta \sigma^2 \E \sum_{i=1}^S b_i \nonumber\\ &\leq
\frac{1}{\eta}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) + L\eta b_{\max} \sum_{i=1}^S \E\alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 +L\eta \sigma^2 \cdot K \eal where the last line uses $K:= \sum_{i=1}^S b_i$.
Now if we pick $\eta$ such that $\eta b_{\max}L \leq 1/2$ then we can move the second term in the RHS to the LHS and obtain, \al \label{eq:NonCvx3}
\frac{1}{2}\E \sum_{i=1}^S \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 &\leq \frac{1}{\eta}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) +L\eta \sigma^2 \cdot K \eal Thus, choosing $\eta = \min\left\{ \frac{\sqrt{\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)}}{\sigma\sqrt{LK}} , \frac{1}{2L b_{\max}} \right\}$ gives the following bound, \al \label{eq:NonCvx4}
\E \sum_{i=1}^S \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 &\leq 2L b_{\max}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) +2\sigma\sqrt{L(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) } \cdot \sqrt{K} \eal Dividing by $K:=\alpha_{1:S}$ and using the definition of $\bar{\theta}$ yields, \al \label{eq:NonCvx5}
\E\|\nabla \mathcal{L}(\mathcal{D},\bar{\theta})\|^2 =
\E \frac{1}{\alpha_{1:S}}\sum_{i=1}^S \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 &\leq \frac{2L b_{\max}(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*))}{K} +\frac{2\sigma\sqrt{L(\mathcal{L}(\mathcal{D},\theta_1) - \mathcal{L}(\mathcal{D},\theta^*)) }}{\sqrt{K}} \eal
\subsection{Remaining Proofs} \subsubsection{Proof of Lemma~\ref{lem:BoundG1}}
\begin{proof}[Proof of Lemma~\ref{lem:BoundG1}] We can write, \al \label{eq:VarStep1}
\alpha_i^2 \|g_i\|^2 &=
\|b_i \nabla \mathcal{L}(\mathcal{D},\theta_i) + \sum_{s=1}^{b_i}\xi_i^s\|^2 \nonumber\\ &\leq
2\|b_i \nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 + 2\|\sum_{s=1}^{b_i}\xi_i^s\|^2 \nonumber\\ &=
2 b_i^2 \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 + 2\|\sum_{s=1}^{b_i}\xi_i^s\|^2 \nonumber\\
&\leq 2 b_{\max} \alpha_i \|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 + 2\|\sum_{s=1}^{b_i}\xi_i^s\|^2 \nonumber\\
\eal where the second line uses $\|a+b\|^2 \leq 2\|a\|^2+2\|b\|^2$; the fourth line uses $b_i\leq b_{\max}$ as well as $\alpha_i = b_i$ implying that $b_i^2 \leq b_{\max}\alpha_i$.
\paragraph{Bounding $E\|\sum_{s=1}^{b_i}\xi_i^s\|^2$:}
Given $i$ and $\theta_i$, let us define the following sequence, $Q_0=0$, $Q_1 = \|\xi_i^1\|^2-\sigma^2$, and for any $k>1$ $$
Q_k = \sum_{s=1}^k\|\xi_i^s\|^2-\sigma^2 \cdot k + 2\sum_{s=1}^k \sum_{n=s+1}^k \xi_i^s\cdot \xi_i^n $$ It can be directly shown that $\{Q_k\}_k$ is a supermartingale sequence, and that $$
\|\sum_{s=1}^{b_i}\xi_i^s\|^2 = Q_{b_i}+ \sigma^2 \cdot b_i $$ Thus, since $b_i$ is a bounded stopping time, we can use Doob's optional stopping theorem which implies that, $$
\E \|\sum_{s=1}^{b_i}\xi_i^s\|^2 = \E Q_{b_i}+ \sigma^2 \E b_i \leq \E Q_0 + \sigma^2 \E b_i = 0+\sigma^2 \E b_i $$ Plugging the above back into Eq.~\eqref{eq:VarStep1} yields, \al \label{eq:VarStep2_Pre}
\E \alpha_i^2 \|g_i\|^2
&\leq 2 b_{\max} \E \alpha_i \| \nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 + 2\sigma^2 \E b_i~. \eal
Now, since $\mathcal{L}(\mathcal{D},\cdot)$ is $L$-smooth and $\theta^*$ is its global minima, then the following holds: $\|\nabla \mathcal{L}(\mathcal{D},\theta_i)\|^2 \leq 2L( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*))$; See e.g.~\citet{levy2017online} for the proof. Plugging this into the above equation we obtain, \al \label{eq:VarStep2_Post}
\E \alpha_i^2 \|g_i\|^2 &\leq 4 b_{\max} \E \alpha_i L( \mathcal{L}(\mathcal{D},\theta_i)-\mathcal{L}(\mathcal{D},\theta^*)) + 2\sigma^2 \E b_i~. \eal \end{proof}
\subsubsection{Proof of Lemma~\ref{lem:Unbiased}} \begin{proof}[Proof of Lemma~\ref{lem:Unbiased}] We can define the following Martingale sequence for each step $s$: $M_0 = 0$, and $M_j = \sum_{s=1}^j (g_i^s - \nabla \mathcal{L}(\mathcal{D},\theta_i))$ for any $j=1,2,\ldots$.
Thus, since the stopping time $b_i$ is bounded by $b_{\max}$, according to Doob's optional stopping theorem \citep{levin2017markov} we have that, \al \label{eq:Doobs} \E[M_{b_i}\vert \theta_i] = \E [\sum_{s=1}^{b_i} (g_i^s - \nabla \mathcal{L}(\mathcal{D},\theta_i))\vert \theta_i] = \E [M_0\vert \theta_i] = 0~. \eal
Now, notice that for any $i$, we have $\alpha_i (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i)) = M_{b_i}$, and therefore, \al \label{eq:Doob2} \E[\alpha_i (g_i-\nabla \mathcal{L}(\mathcal{D},\theta_i))\vert \theta_i] =\E[ M_{b_i}\vert \theta_i] = 0~. \eal
\end{proof}
\subsection{Generalization discussion} \label{app:generalization_discussion} An interesting observation arising from our results is the small impact of gradient dropping as measured in final test accuracy. One explanation for this can be based on viewing \textit{DropCompute} as noise induced over gradients. Optimization using variants of SGD is inherently noisy due to the use of data samples used to evaluate the intermediate error. The stochastic nature of computed weight gradients was previously found to provide generalization benefits for the final trained model, although this is still part of ongoing debate \citep{geiping2022stochastic}. Nevertheless, several works found generalization benefits with the \emph{injection} of noise into the weights' gradients \citep{gradnoise} or their use in computed update rule \citep{lrdropout}.
\end{document} |
\begin{document}
\title{Key polynomials and pseudo-convergent sequences} \author{Josnei Novacoski} \author{Mark Spivakovsky}
\keywords{Key polynomials, Pseudo-convergent sequences, Valuations} \subjclass[2010]{Primary 13A18} \begin{abstract} In this paper we introduce a new concept of key polynomials for a given valuation $\nu$ on $K[x]$. We prove that such polynomials have many of the expected properties of key polynomials as those defined by MacLane and Vaqui\'e, for instance, that they are irreducible and that the truncation of $\nu$ associated to each key polynomial is a valuation. Moreover, we prove that every valuation $\nu$ on $K[x]$ admits a sequence of key polynomials that completely determines $\nu$ (in the sense which we make precise in the paper). We also establish the relation between these key polynomials and pseudo-convergent sequences defined by Kaplansky. \end{abstract}
\maketitle \section{Introduction} Given a valuation $\nu$ of a field $K$, it is important to understand what the possible extensions of $\nu$ to $K[x]$ are. Many different theories have been developed in order to understand such extensions. For instance, in \cite{Mac}, MacLane develops the theory of key polynomials. He proves that given a discrete valuation $\nu$ of $K$, every extension of $\nu$ to $K[x]$ is uniquely determined by a sequence (with order type at most $\omega$) of key polynomials. Recently, M. Vaqui\'e developed a more general theory of key polynomials (see \cite{Vaq}), which extends the results of MacLane for a general valued field (that is, the given valuation of $K$ is no longer assumed to be discrete). At the same time, F.H. Herrera Govantes, W. Mahboub, M.A. Olalla Acosta and M. Spivakovsky developed another definition of key polynomials (see \cite{HOS}). This definition is an adaptation of the concept of generating sequences introduced by Spivakovsky in \cite{Spi1}. A comparison between these two definitions of key polynomials is presented in \cite{Mahboud}.
Roughly speaking, for a given valuation $\mu$ of $K[x]$, a MacLane -- Vaqui\'e key polynomial $\phi\in K[x]$ for $\mu$ is a polynomial that allows us to obtain a new valuation $\mu_1$ of $K[x]$ with $\mu_1(\phi)=\gamma_1>\mu(\phi)$ and $\mu(p)=\mu_1(p)$ for every $p\in K[x]$ with $\deg(p)<\deg(\phi)$ (in this case we denote $\mu_1$ by $[\mu;\mu_1(\phi)=\gamma_1]$). Then, for any valuation $\nu$ of $K[x]$ one tries to obtain a sequence of valuations $\mu_0,\mu_1,\ldots,\mu_n,\ldots$ with $\mu_0$ a monomial valuation and $\mu_{i+1}=[\mu_i;\mu_{i+1}(\phi_{i+1})=\gamma_{i+1}]$ for a key polynomial $\phi_{i+1}$ for $\mu_i$, such that \begin{equation} \nu=\lim \mu_i\label{eq:nu=lim} \end{equation} (in the sense that will be defined precisely below). This process does not work in general, that is, the equality (\ref{eq:nu=lim}) may not hold even after one constructs an infinite sequence $\{\mu_i\}$. This leads one to introduce the concept of ``limit key polynomial".
It is known that valuations which admit limit key polynomials are more difficult to handle. For instance, it was proved by J.-C. San Saturnino (see Theorem 6.5 of \cite{JCSS}), that if a valuation $\nu$ is centered on a noetherian local domain and $\nu$ does not admit limit key polynomials (on any sub-extension $R'\subseteq R'[x]\subseteq R$ with $\dim\ R'=\dim\ R-1$), then it has the local uniformization property (where we assume, inductively, that local uniformization holds for $R'$).
In this paper, we introduce a new concept of key polynomials. Let $K$ be a field and $\nu$ a valuation on $K[x]$. Let $\Gamma$ denote the value group of $K$ and $\Gamma'$ the value group of $K[x]$. For a positive integer $b$, let $\partial_b:=\frac{1}{b!}\frac{\partial^b}{\partial x^b}$ (this differential operator of order $b$ is sometimes called \textbf{the $b$-th formal derivative}). For a polynomial $f\in K[x]$ let \[ \epsilon(f)=\max_{b\in \mathbb{N}}\left\{\frac{\nu(f)-\nu(\partial_bf)}{b}\right\}. \] A monic polynomial $Q\in K[x]$ is said to be a \textbf{key polynomial} (of level $\epsilon (Q)$) if, for every $f\in K[x]$, the inequality $\epsilon(f)\geq \epsilon(Q)$ implies $\deg(f)\geq\deg(Q)$.
This new definition offers many advantages. For instance, it gives a criterion to determine, for a given valuation $\nu$ of $K[x]$, whether any given polynomial is a key polynomial for $\nu$. This has a different meaning than in the approach of MacLane-Vaqui\'e. In their approach, a key polynomial allows us to ``extend the given valuation'' and here a key polynomial allows us to ``truncate the given valuation''. For instance, our definition of key polynomials treats the limit key polynomials on the same footing as the non-limit ones. Moreover, we present a characterization of key polynomials (Theorem \ref{definofkeypol}) which allows us to determine whether a given key polynomial is a limit key polynomial. A more precise comparison between the concept of key polynomial introduced here and that of MacLane -- Vaqui\'e will be explored in a forthcoming paper by Decaup, Mahboub and Spivakovsky.
Given two polynomials $f,q\in K[x]$ with $q$ monic, we call the \textbf{$q$-standard expansion of $f$} the expression \[ f(x)=f_0(x)+f_1(x)q(x)+\ldots+f_n(x)q^n(x) \] where for each $i$, $0\leq i\leq n$, $f_i=0$ or $\deg(f_i)<\deg(q)$. For a polynomial $q(x)\in K[x]$, the \textbf{$q$-truncation of $\nu$} is defined as \[ \nu_q(f):=\min_{0\leq i\leq n}\{\nu(f_iq^i)\} \] where $f=f_0+f_1q+\ldots+f_nq^n$ is the $q$-standard expansion of $f$. In Section 2, we present an example that shows that $\nu_q$ does not need to be a valuation. We also prove (Proposition \ref{proptruncakeypolval}) that if $Q$ is a key polynomial, then $\nu_Q$ is a valuation. A set $\Lambda$ of key polynomials is said to be a \textbf{complete set of key polynomials for $\nu$} if for every $f\in K[x]$, there exists $Q\in \Lambda$ such that $\nu_Q(f)=\nu(f)$. One of the main results of this paper is the following:
\begin{Teo}\label{Theoremexistencecompleteseqkpol} Every valuation $\nu$ on $K[x]$ admits a complete set of key polynomials. \end{Teo}
Another way of describing extensions of valuations from $K$ to $K[x]$ is the theory of pseudo-convergent sequences developed by Kaplansky in \cite{Kap}. He uses this theory to determine whether a maximal immediate extension of the valued field $(K,\nu)$ is unique (up to isomorphism). For a valued field $(K,\nu)$, a \textbf{pseudo-convergent sequence} is a well-ordered subset $\{a_{\rho}\}_{\rho<\lambda}$ of $K$, without last element, such that \[ \nu(a_\sigma-a_\rho)<\nu(a_\tau-a_\sigma)\mbox{ for all }\rho<\sigma<\tau<\lambda. \] For a given pseudo-convergent sequence $\{a_{\rho}\}_{\rho<\lambda}$ it is easy to show that either $\nu(a_\rho)<\nu(a_\sigma)$ for all $\rho<\sigma<\lambda$ or there is $\rho<\lambda$ such that $\nu(a_\sigma)=\nu(a_\rho)$ for every $\rho<\sigma<\lambda$. If we set $\gamma_\rho:=\nu(a_{\rho+1}-a_\rho)$, then $\nu(a_\sigma-a_\rho)=\gamma_\rho$ for every $\rho<\sigma<\lambda$. Hence, the sequence $\{\gamma_\rho\}_{\rho<\lambda}$ is an increasing subset of $\Gamma$. An element $a\in K$ is said to be a \textbf{limit} of the pseudo-convergent sequence $\{a_\rho\}_{\rho<\lambda}$ if $\nu(a-a_\rho)=\gamma_\rho$ for every $\rho<\lambda$.
One can prove that for every polynomial $f(x)\in K[x]$, there exists $\rho_f<\lambda$ such that either \begin{equation}\label{condforpscstotra} \nu(f(a_\sigma))=\nu(f(a_{\rho_f}))\mbox{ for every }\rho_f\leq \sigma<\lambda, \end{equation} or \begin{equation}\label{condforpscstoalg} \nu(f(a_\sigma))>\nu(f(a_{\rho}))\mbox{ for every }\rho_f\leq \rho< \sigma<\lambda. \end{equation} If case (\ref{condforpscstotra}) happens, we say that the value of $f$ is fixed by $\{a_\rho\}_{\rho<\lambda}$ (or that $\{a_\rho\}_{\rho<\lambda}$ fixes the value of $f$). A pseudo-convergent sequence $\{a_\rho\}_{\rho<\lambda}$ is said to be of \textbf{transcendental type} if for every polynomial $f(x)\in K[x]$ the condition (\ref{condforpscstotra}) holds. Otherwise, $\{a_\rho\}_{\rho<\lambda}$ is said to be of \textbf{algebraic type}, i.e., if there exists at least one polynomial for which condition (\ref{condforpscstoalg}) holds.
The concept of key polynomials appears in the approach to local uniformization by Spivakovsky.
On the other hand, the concept of pseudo-convergent sequence plays an important role in the work of Knaf and Kuhlmann (see \cite{KK_1}) on the local uniformization problem. In this paper, we present a comparison between the concepts of key polynomials and pseudo-convergent sequences. More specifically, we prove the following:
\begin{Teo}\label{compthemkppsc} Let $\nu$ be a valuation on $K[x]$ and let $\{a_\rho\}_{\rho<\lambda}\subset K$ be a pseudo-convergent sequence, without a limit in $K$, for which $x$ is a limit. If $\{a_\rho\}_{\rho<\lambda}$ is of transcendental type, then $\Lambda:=\{x-a_\rho\mid \rho<\lambda\}$ is a complete set of key polynomials for $\nu$. On the other hand, if $\{a_\rho\}_{\rho<\lambda}$ is of algebraic type, then every polynomial $q(x)$ of minimal degree among the polynomials not fixed by $\{a_\rho\}_{\rho<\lambda}$ is a limit key polynomial for $\nu$. \end{Teo}
\section{Key polynomials} We will assume throughout this paper that $K$ is a field, $\nu$ a valuation of $K[x]$, non-trivial on $K$ with $\nu(x)\geq 0$. We begin by making some remarks. \begin{Obs}\label{exlinearnonlinepolarepsk} \begin{description} \item[(i)] Every linear polynomial $x-a$ is a key polynomial (of level $\epsilon(x-a)=\nu(x-a)$).
\item[(ii)] Take a polynomial $f(x)\in K[x]$ of degree greater than one and assume that there exists $a\in K$ such that $\nu(\partial_bf(a))=\nu(\partial_bf(x))$ for every $b\in\mathbb{N}$ (note that such an $a$ always exists if the assumptions of Theorem \ref{compthemkppsc} hold and the pseudo-convergent sequence is transcendental or is algebraic and $\deg(f)\leq\deg(q)$). Write \[ f(x)=f(a)+\sum_{i=1}^n\partial_if(a)(x-a)^i \] and take $h\in\{1,\ldots,n\}$ such that \[ \nu(\partial_hf(x))+h\nu(x-a)=\min_{1\leq i\leq n}\{\nu(\partial_if(x))+i\nu(x-a)\}. \] If $\nu(f(a))<\nu(\partial_hf(x))+h\nu(x-a)$, then $\nu(f(x))=\nu(f(a))$ and hence \[ \frac{\nu(f(x))-\nu(\partial_if(x))}{i}<\nu(x-a) \] for every $i$, $1\leq i\leq n$. Consequently, $\epsilon(f)<\nu(x-a)=\epsilon(x-a)$ and hence $f$ is not a key polynomial. On the other hand, if $$ \nu(\partial_hf(x))+h\nu(x-a)\leq\nu(f(a)), $$ then \begin{equation}\label{eqtaylorexpwithpcs} \nu(f(x))\geq\nu(\partial_hf(x))+h\nu(x-a) \end{equation} and if the equality holds in (\ref{eqtaylorexpwithpcs}), then \[ \epsilon(f)=\frac{\nu(f(x))-\nu(\partial_hf(x))}{h}=\nu(x-a)=\epsilon(x-a) \] and hence $f$ is not a key polynomial. In other words, the only situation when $f$ may be a key polynomial is when $$ \nu(f(x))>\min_{1\leq i\leq n}\{\nu(f(a)),\nu(\partial_if(x))+i\nu(x-a)\}. $$ \end{description} \end{Obs}
\begin{Obs} We observe that if $Q$ is a key polynomial of level $\epsilon:=\epsilon(Q)$, then for every polynomial $f\in K[x]$ with $\deg(f)<\deg(Q)$ and every $b\in\mathbb{N}$ we have \begin{equation}\label{eqpolyndegsmallkeypol} \nu(\partial_b(f))>\nu(f)-b\epsilon. \end{equation} Indeed, from the definition of key polynomial we have that $\epsilon>\epsilon(f)$. Hence, for every $b\in\mathbb{N}$ we have \[ \frac{\nu(f)-\nu(\partial_b(f))}{b}\leq\epsilon(f)<\epsilon \] and this implies (\ref{eqpolyndegsmallkeypol}). \end{Obs} Let \[
I(f)=\left\{b\in\mathbb{N}\left|\frac{\nu(f)-\nu(\partial_bf)}{b}=\epsilon(f)\right.\right\} \] and $b(f)=\min I(f)$. \begin{Lema}\label{lemaonkeypollder} Let $Q$ be a key polynomial and take $f,g\in K[x]$ such that $$ \deg(f)<\deg(Q) $$ and $$ \deg(g)<\deg(Q). $$ Then for $\epsilon:=\epsilon(Q)$ and any $b\in\mathbb{N}$ we have the following: \begin{description} \item[(i)] $\nu(\partial_b(fg))>\nu(fg)-b\epsilon$; \item[(ii)] If $\nu_Q(fQ+g)<\nu(fQ+g)$ and $b\in I(Q)$, then $\nu(\partial_b(fQ+g))=\nu(fQ)-b\epsilon$; \item[(iii)] If $h_1,\ldots,h_s$ are polynomials such that $\deg(h_i)<\deg(Q)$ for every $i=1,\ldots, s$ and $\displaystyle\prod_{i=1}^sh_i=qQ+r$ with $\deg(r)<\deg(Q)$, then \[ \nu(r)=\nu\left(\prod_{i=1}^sh_i\right)<\nu(qQ). \] \end{description} \end{Lema}
\begin{proof} \textbf{(i)} Since $\deg(f)<\deg(Q)$ and $\deg(g)<\deg(Q)$, for each $j\in \mathbb{N}$, we have \[ \nu(\partial_jf)>\nu(f)-j\epsilon\mbox{ and }\nu(\partial_jg)>\nu(g)-j\epsilon. \] This, and the fact that \[ \partial_b(fg)=\sum_{j=0}^b\partial_jf\partial_{b-j}g, \] imply that \[ \nu(\partial_b(fg))\geq\min_{0\leq j\leq b}\{\nu(\partial_jf)+\nu(\partial_{b-j}g)\}>\nu(fg)-b\epsilon. \]
\textbf{(ii)} If $\nu_Q(fQ+g)<\nu(fQ+g)$, then $\nu(fQ)=\nu(g)$. Hence, \[ \nu(\partial_bg)>\nu(g)-b\epsilon=\nu(fQ)-b\epsilon. \] Moreover, for every $j\in\mathbb{N}$, we have \[ \nu(\partial_j f\partial_{b-j}Q)=\nu(\partial_jf)+\nu(\partial_{b-j}Q)>\nu(f)-j\epsilon+\nu(Q)-(b-j)\epsilon=\nu(fQ)-b\epsilon. \] Therefore, \[ \nu(\partial_b(fQ+g))=\nu\left(f\partial_bQ+\sum_{j=1}^b\partial_jf\partial_{b-j}Q+\partial_bg\right)=\nu(fQ)-b\epsilon. \]
\textbf{(iii)} We proceed by induction on $s$. If $s=1$, then $h_1=qQ+r$ with $$ \deg(h_1)<\deg(Q), $$ which implies that $h_1=r$ and $q=0$. Our result follows immediately.
Next, consider the case $s=2$. Take $f,g\in K[x]$ such that $\deg(f)<\deg(Q)$, $\deg(g)<\deg(Q)$ and write $fg=qQ+r$ with $\deg(r)<\deg(Q)$. Then $$ \deg(q)<\deg(Q) $$ and for $b\in I(Q)$ we have \[ \nu\left(\partial_b(qQ)\right)=\nu\left(\sum_{j=0}^b\partial_jq\partial_{b-j}Q\right)=\nu(qQ)-b\epsilon. \] This and part \textbf{(i)} imply that \begin{displaymath} \begin{array}{rcl} \nu(qQ)-b\epsilon &=& \nu\left(\partial_b(qQ)\right)= \nu(\partial_b(fg)-\partial_b(r))\\
&\geq &\min\{\nu\left(\partial_b(fg)\right),\nu\left(\partial_b(r)\right)\}\\
&>&\min\{\nu(fg),\nu(r)\}-b\epsilon.
\end{array} \end{displaymath} and consequently \begin{equation}\label{equationwithepsilon} \nu(r)=\nu(fg)<\nu(qQ). \end{equation}
Assume now that $s>2$ and define $\displaystyle h:=\prod_{i=1}^{s-1}h_i$. Write $h=q_1Q+r_1$ with $\deg(r_1)<\deg(Q)$. Then by the induction hypothesis we have $$ \nu(r_1)=\nu(h)<\nu(q_1Q) $$ and hence \[ \nu\left(\prod_{i=1}^sh_i\right)=\nu(r_1h_s)<\nu(q_1h_sQ). \] Write $r_1h_s=q_2Q+r_2$. Then, by equation (\ref{equationwithepsilon}) we have \[ \nu(r_2)=\nu(r_1h_s)<\nu(q_2Q). \] If $\displaystyle \prod_{i=1}^sh_i=qQ+r$ with $\deg(r)<\deg(Q)$, then \[ qQ+r=\prod_{i=1}^sh_i=hh_s=(q_1Q+r_1)h_s=q_1h_sQ+r_1h_s=q_1h_sQ+q_2Q+r_2 \] and hence $q=q_1h_s+q_2$ and $r=r_2$. Therefore, \[ \nu(qQ)\geq\min\{\nu(q_1h_sQ),\nu(q_2Q)\}>\nu(r_1h_s)=\nu(r)=\nu\left(\prod_{i=1}^sh_i\right). \] This is what we wanted to prove. \end{proof}
We denote by $p$ the \textbf{exponent characteristic} of $K$, that is, $p=1$ if $\mbox{\rm char}(K)=0$ and $p=\mbox{\rm char}(K)$ if $\mbox{\rm char}(K)>0$. \begin{Prop}\label{propaboutpseudkeyool} Let $Q\in K[x]$ be a key polynomial and set $\epsilon:=\epsilon(Q)$. Then the following hold: \begin{description} \item[(i)] Every element in $I(Q)$ is a power of $p$; \item[(ii)] $Q$ is irreducible. \end{description} \end{Prop} \begin{proof} \textbf{(i)} Take $b\in I(Q)$ and assume, aiming for contradiction, that $b$ is not a power of $p$. Write $b=p^tr$ where $r>1$ is prime to $p$. Then, by Lemma 6 of \cite{Kap}, $\binom{b}{p^t}$ is prime to $p$ and hence $\nu\binom{b}{p^t}=0$. Since $\binom{b}{p^t}\partial_b=\partial_{p^t}\circ\partial_{b'}$ for $b'=b-p^t$, we have \[ \nu(\partial_{b'}Q)-\nu(\partial_bQ)=\nu(\partial_{b'}Q)-\nu(\partial_{p^t}(\partial_{b'}Q))\leq p^t\epsilon(\partial_{b'}(Q))<p^t\epsilon \] because $\deg(\partial_{b'}Q)<\deg(Q)$ and $Q$ is a key polynomial. Hence, \[ b\epsilon=\nu(Q)-\nu(\partial_bQ)=\nu(Q)-\nu(\partial_{b'}Q)+\nu(\partial_{b'}Q)-\nu(\partial_bQ)< b'\epsilon+p^t\epsilon=b\epsilon, \] which gives the desired contradiction.
\textbf{(ii)} If $Q=gh$ for non-constant polynomials $g,h\in K[x]$, then by Lemma \ref{lemaonkeypollder} \textbf{(i)}, we would have for $b\in I(Q)$ that \[ \nu(\partial_bQ)>\nu(Q)-b\epsilon, \] which is a contradiction to the definition of $b$ and $\epsilon$. \end{proof}
We present an example to show that $\nu_q$ does not need to be a valuation for a general polynomial $q(x)\in K[x]$. \begin{Exa} Consider a valuation $\nu$ in $K[x]$ such that $\nu(x)=\nu(a)=1$ for some $a\in K$. Take $q(x)=x^2+1$ (which can be irreducible, for instance, if $K=\mathbb{R}$ or $K=\mathbb F_p$ and $-1$ is not a quadratic residue $\mod p$). Since $x^2-a^2=(x^2+1)-(a^2+1)$ we have \[ \nu_q(x^2-a^2)=\min\{\nu(x^2+1),\nu(a^2+1)\}=0. \] On the other hand, $\nu_q(x+a)=\nu(x+a)\geq\min\{\nu(a),\nu(x)\}=1$ (and the same holds for $\nu_q(x-a)$). Hence \[ \nu_q(x^2-a^2)=0<1+1\leq\nu_q(x-a)+\nu_q(x+a) \] which shows that $\nu_q$ is not a valuation. \end{Exa}
If $f=f_0+f_1q+\ldots+f_nq^n$ is the $q$-standard expansion of $f$, we set \[ S_q(f):=\{i\in\{0,\ldots, n\}\mid \nu(f_iq^i)=\nu_q(f)\}\mbox{ and }\delta_q(f)=\max S_q(f). \] \begin{Prop}\label{proptruncakeypolval} If $Q$ is a key polynomial, then $\nu_Q$ is a valuation of $K[x]$. \end{Prop} \begin{proof} One can easily see that $\nu_Q(f+g)\geq\min\{\nu_Q(f),\nu_Q(g)\}$ for every $f,g\in K[x]$. It remains to prove that $\nu_Q(fg)=\nu_Q(f)+\nu_Q(g)$ for every $f,g\in K[x]$. Assume first that $\deg(f)<\deg(Q)$ and $\deg(g)<\deg(Q)$ and let $fg=aQ+c$ be the $Q$-standard expansion of $fg$. By Lemma \ref{lemaonkeypollder} \textbf{(iii)} we have \[ \nu(fg)=\nu(c)<\nu(aQ) \] and hence \[ \nu_Q(fg)=\min\{\nu(aQ),\nu(c)\}=\nu(c)=\nu(fg)=\nu(f)+\nu(g)=\nu_Q(f)+\nu_Q(g). \]
Now assume that $f,g\in K[x]$ are any polynomials and consider the $Q$-expansions \[ f=f_0+\ldots+f_nQ^n\mbox{ and }g=g_0+\ldots+g_mQ^m \] of $f$ and $g$. Then, using the first part of the proof, we obtain \[ \nu_Q(fg)\geq\min_{i,j}\{\nu_Q(f_ig_jQ^{i+j})\}=\min_{i,j}\{\nu_Q(f_iQ^i)+\nu_Q(g_jQ^j)\}=\nu_Q(f)+\nu_Q(g). \] For each $i\in\{0,\ldots,n\}$ and $j\in\{0,\ldots,m\}$, let $f_ig_j=a_{ij}Q+c_{ij}$ be the $Q$-standard expansion of $f_ig_j$. Then, by Lemma \ref{lemaonkeypollder} \textbf{(iii)}, we have \[ \nu(f_iQ^i)+\nu(g_jQ^j)=\nu(f_ig_j)+\nu(Q^{i+j})=\nu(c_{ij})+\nu(Q^{i+j})=\nu(c_{ij}Q^{i+j}). \] Let \[ i_0=\min\{i\mid\nu_Q(f)=\nu(f_iQ^i)\}\mbox{ and }j_0=\min\{j\mid\nu_Q(g)=\nu(g_jQ^j)\}, \] and set $k_0:=i_0+j_0$. Then for every $i<i_0$ or $j<j_0$ we have \begin{equation}\label{eqnatnaksdjs} \min\left\{\nu(a_{ij}Q^{i+j+1}),\nu(c_{ij}Q^{i+j})\right\}=\nu(f_iQ^i)+\nu(g_jQ^j)>\nu(c_{i_0j_0}Q^{k_0}). \end{equation} Let $fg=a_0+a_1Q+\ldots+a_rQ^r$ be the $Q$-standard expansion of $fg$. Then \[ a_{k_0}=\sum_{i+j+1=k_0}a_{ij}+\sum_{i+j=k_0}c_{ij}. \] This and equation (\ref{eqnatnaksdjs}) give us that \[ \nu(a_{k_0}Q^{k_0})=\nu(c_{i_0j_0}Q^{k_0})=\nu(f_{i_0}Q^{i_0})+\nu(g_{j_0}Q^{j_0})=\nu_Q(f)+\nu_Q(g). \] Therefore, \[ \nu_Q(fg)=\min_{0\leq k\leq r}\{\nu(a_kQ^k)\}\leq \nu_Q(f)+\nu_Q(g), \] which completes the proof.
\end{proof}
\begin{Prop}\label{Propdificil} Let $Q\in K[x]$ be a key polynomial and set $\epsilon:=\epsilon(Q)$. For any $f\in K[x]$ the following hold: \begin{description} \item[(i)] For any $b\in\mathbb{N}$ we have \begin{equation}\label{eqthatcompvalutrunc} \frac{\nu_Q(f)-\nu_Q(\partial_bf)}{b}\leq \epsilon; \end{equation} \item[(ii)] If $S_Q(f)\neq\{0\}$, then the equality in (\ref{eqthatcompvalutrunc}) holds for some $b\in\mathbb{N}$;
\item[(iii)] If for some $b\in\mathbb{N}$, the equality in (\ref{eqthatcompvalutrunc}) holds and $\nu_Q(\partial_bf)=\nu(\partial_bf)$, then $\epsilon(f)\geq\epsilon$. If in addition, $\nu(f)>\nu_Q(f)$, then $\epsilon(f)>\epsilon$. \end{description} \end{Prop} Fix a key polynomial $Q$ and $h\in K[x]$ with $\deg(h)<\deg(Q)$. Then, for every $b\in\mathbb{N}$ the Leibniz rule for derivation gives us that \begin{equation} \partial_b(hQ^n)=\sum_{b_0+\ldots+b_r=b}T_b(b_0,\ldots,b_r) \end{equation} where \[ T_b(b_0,\ldots, b_r):=\partial_{b_0}h\left(\prod_{i=1}^r\partial_{b_i}Q\right)Q^{n-r}. \]
In order to prove Proposition \ref{Propdificil}, we will need the following result: \begin{Lema}\label{Lemamagic3} Let $Q$ be a key polynomial, $h\in K[x]$ with $\deg(h)<\deg(Q)$ and set $\epsilon:=\epsilon(Q)$. For any $b\in\mathbb{N}$ we have \[ \nu_Q(T_b(b_0,\ldots,b_r))\geq \nu(hQ^n)-b\epsilon. \] Moreover, if either $b_0>0$ or $b_i\notin I(Q)$ for some $i=1,\ldots, r$, then \[ \nu_Q(T_b(b_0,\ldots,b_r))> \nu(hQ^n)-b\epsilon. \] \end{Lema} \begin{proof} Since $\deg(h)<\deg(Q)$ and $Q$ is a key polynomial we have $\epsilon(h)<\epsilon$. Hence, if $b_0>0$ we have \[ \nu(\partial_{b_0}h)\geq \nu(h)-b_0\epsilon(h)>\nu(h)-b_0\epsilon. \] On the other hand, for every $i=1,\ldots, r$, by definition of $\epsilon$ we have \[ \nu(\partial_{b_i}Q)\geq \nu(Q)-b_i\epsilon, \] and if $b_i\notin I(Q)$ we have \[ \nu(\partial_{b_i}Q)> \nu(Q)-b_i\epsilon. \] Since $\nu_Q(\partial_{b_0}h)=\nu(\partial_{b_0}h)$ and $\nu_Q(\partial_{b_i}Q)=\nu(\partial_{b_i}Q)$, we have \begin{displaymath} \begin{array}{rcl} \nu_Q(T_b(b_0,\ldots,b_r))&=&\displaystyle\nu_Q\left(\partial_{b_0}h\left(\prod_{i=1}^r\partial_{b_i}Q\right)Q^{n-r}\right)\\
&=&\displaystyle\nu_Q(\partial_{b_0}h)+\sum_{i=1}^r\nu_Q(\partial_{b_i}Q)+(n-r)\nu_Q(Q)\\
&\geq &\displaystyle\nu(h)-b_0\epsilon+\sum_{i=1}^r\left(\nu(Q)-b_i\epsilon\right)+(n-r)\nu(Q)\\
&\geq &\nu(hQ^n)-b\epsilon. \end{array} \end{displaymath} Moreover, if $b_0>0$ or $b_i\notin I(Q)$ for some $i=1,\ldots, r$, then the inequality above is strict. \end{proof}
\begin{Cor}\label{Coroaboutderib} For every $b\in\mathbb{N}$ we have $\nu_Q\left(\partial_b(aQ^n)\right)\geq\nu(aQ^n)-b\epsilon$. \end{Cor}
\begin{proof}[Proof of Proposition \ref{Propdificil}] \textbf{(i)} Take any $f\in K[x]$ and consider its $Q$-standard expansion $f=f_0+f_1Q+\ldots+f_nQ^n$. For each $i=0,\ldots,n$, Corollary \ref{Coroaboutderib} gives us that \[ \nu_Q\left(\partial_b(f_iQ^i)\right)\geq \nu (f_iQ^i)-b\epsilon. \] Hence, \[ \nu_Q\left(\partial_b(f)\right)\geq\min_{0\leq i\leq n}\{\nu_Q(f_iQ^i)\}\geq\min_{0\leq i\leq n}\{\nu(f_iQ^i)-b\epsilon\}=\nu_Q(f)-b\epsilon. \] \textbf{(ii)} Assume that $S_Q(f)\neq \{0\}$ and set $j_0=\min S_Q(f)$. Then $j_0=p^er$ for some $e\in\mathbb{N}\cup\{0\}$ and some $r\in\mathbb{N}$ with $(r,p)=1$. We set $b:=p^eb(Q)$ and will prove that $\nu_Q(\partial_b(f))=\nu_Q(f)-b\epsilon$.
Write \[ f_{j_0}\left(\partial_{b(Q)}Q\right)^{p^e}=rQ+h \] for some $r,h\in K[x]$ and $\deg(h)<\deg(Q)$ (note that $h\ne0$ because $Q$ is irreducible and $Q\nmid f_{j_0}$ and $Q\nmid \partial_{b(Q)}Q$). Then Lemma \ref{lemaonkeypollder} \textbf{(iii)} gives us that \[ \nu(h)=\nu\left(f_{j_0}(\partial_{b(Q)}Q)^{p^e}\right). \] This implies that \begin{equation}\label{equationboa} \nu\left(hQ^{j_0-p^e}\right)=\nu_Q(f)-b\epsilon. \end{equation} Indeed, we have \begin{displaymath} \begin{array}{rcl} \nu\left(hQ^{j_0-p^e}\right)&=& \nu(h)+\nu\left(Q^{j_0-p^e}\right)=\nu\left(f_{j_0}(\partial_{b(Q)}Q)^{p^e}\right)+ \nu\left(Q^{j_0-p^e}\right)\\
&=&\nu(f_{j_0})+p^e\nu\left(\partial_{b(Q)}Q\right)+(j_0-p^e)\nu(Q)\\
&=&\nu(f_{j_0})+p^e\left(\nu\left(Q\right)-b(Q)\epsilon\right)+(j_0-p^e)\nu(Q)\\
&=&\nu(f_{j_0})+j_0\nu(Q)-p^eb(Q)\epsilon\\
&=&\nu(f_{j_0}Q^{j_0})-p^eb(Q)\epsilon=\nu_Q(f)-b\epsilon.\\
\end{array} \end{displaymath}
Since $f=f_0+f_1Q+\ldots+f_nQ^n$, we have $\partial_b(f)=\partial_b(f_0)+\partial_b(f_1Q)+\ldots+\partial_b(f_nQ^n)$. For each $j=0,\ldots, n$, if $j\notin S_Q(f)$, then \[ \nu_Q\left(\partial_b(f_jQ^j)\right)\geq \nu_Q(f_jQ^j)-b\epsilon>\nu_Q(f)-b\epsilon. \] We set \[ h_1=\sum_{j\notin S_Q(f)}\partial_b\left(f_jQ^j\right). \] Then $\nu_Q(h_1)>\nu_Q(f)-b\epsilon$.
For each $j\in S_Q(f)$ the term $\partial_b(f_jQ^j)$ can be written as a sum of terms of the form $T_b(b_0,\ldots,b_r)$. For each $T_b(b_0,\ldots,b_r)$ we have the following cases:
\textbf{Case 1:} $b_0>0$ or $b_i\notin I(Q)$ for some $i$.\\ In this case, by Lemma \ref{Lemamagic3} we have $\nu_Q(T_b(b_0,\ldots,b_r))>\nu_Q(f)-b\epsilon$. In particular, if $h_2$ is the sum of all these terms, then $\nu_Q(h_2)>\nu_Q(f)-b\epsilon$.
\textbf{Case 2:} $b_0=0$ and $b_i\in I(Q)$ for every $i=1,\ldots,r$ but $b_{i_0}\neq b(Q)$ for some $i_0=1,\ldots,r$.\\ This implies, in particular, that $j\geq j_0$ and since $b=p^eb(Q)$ we must have $r<p^e$. Hence \[ T_b(b_0,b_1,\ldots,b_r)=\partial_{b_0}f_j\left(\prod_{i=1}^r\partial_{b_i}Q\right)Q^{j-r}=sQ^{j_0-p^e+1} \] for some $s\in K[x]$.
\textbf{Case 3:} $b_0=0$, $j>j_0$ and $b_i=b(Q)$ for every $i=1,\ldots,r$.\\ Since $b=p^eb(Q)$, $b_i=b(Q)$ and $\displaystyle\sum_{i=1}^rb_i=b$ we must have $r=p^e$. Hence \[ T_b(b_0,b_1,\ldots,b_r)=f_j\left(\partial_{b(Q)}Q\right)^{p^e}Q^{j-p^e}=s'Q^{j_0-p^e+1} \] for some $s'\in K[x]$.\\
\textbf{Case 4:} $b_0=0$, $j=j_0$ and $b_i=b(Q)$ for every $i=1,\ldots,r$.\\ In this case we have \begin{equation}\label{caseintport} \begin{array}{rcl} T_b(b_0,b_1,\ldots,b_r)&=&f_{j_0}\left(\partial_{b(Q)}Q\right)^{p^e}Q^{j_0-p^e}\\
&=&\left(h-rQ\right)Q^{j_0-p^e}\\
&=&hQ^{j_0-p^e}-rQ^{j_0-p^e+1}. \end{array} \end{equation}
Observe that the number of times that the term (\ref{caseintport}) appears in $\partial_b(f_{j_0}Q^{j_0})$ is $\binom{j_0}{p^e}$, that is, the number of ways that one can choose a subset with $p^e$ elements in a set of $j_0$ elements.
Therefore, we can write \[ \partial_b(f)=\binom{j_0}{p^e}hQ^{j_0-p^e}+\left(s+s'-\binom{j_0}{p^e}r\right)Q^{j_0-p^e+1}+h_1+h_2 \] Since $p\nmid \binom{j_0}{p^e}$ the equation (\ref{equationboa}) gives us that \[ \nu\left(\binom{j_0}{p^e}hQ^{j_0-p^e}\right)=\nu_Q(f)-b\epsilon. \] Then \[ \nu_Q\left(\binom{j_0}{p^e}hQ^{j_0-p^e}+\left(s+s'-\binom{j_0}{p^e}r\right)Q^{j_0-p^e+1}\right)\leq \nu_Q(f)-b\epsilon. \] This and the fact that $\nu_Q(h_1+h_2)>\nu_Q(f)-b\epsilon$ imply that $\nu_Q\left(\partial_b(f)\right)\leq\nu_Q(f)-b\epsilon$. This concludes the proof of \textbf{(ii)}.
\textbf{(iii)} The assumptions on $b$ give us \[ \frac{\nu_Q(f)-\nu_Q(\partial_bf)}{b}= \epsilon \] and \[ \nu_Q(\partial_bf)=\nu(\partial_bf). \] Consequently, \[ \epsilon(f)\geq \frac{\nu(f)-\nu(\partial_bf)}{b}\geq\frac{\nu_Q(f)-\nu_Q(\partial_bf)}{b}= \epsilon. \] In the inequality above, one can see that if $\nu(f)>\nu_Q(f)$, then $\epsilon(f)>\epsilon$.
\end{proof}
\begin{Prop}\label{Propcompkeypol} For two key polynomials $Q,Q'\in K[x]$ we have the following: \begin{description} \item[(i)] If $\deg(Q)<\deg(Q')$, then $\epsilon(Q)<\epsilon(Q')$; \item[(ii)] If $\epsilon(Q)<\epsilon(Q')$, then $\nu_Q(Q')<\nu(Q')$; \item[(iii)] If $\deg(Q)=\deg(Q')$, then \begin{equation}\label{eqwhdegsame} \nu(Q)<\nu(Q')\Longleftrightarrow \nu_Q(Q')<\nu(Q')\Longleftrightarrow \epsilon(Q)<\epsilon(Q'). \end{equation} \end{description} \end{Prop} \begin{proof} Item \textbf{(i)} follows immediately from the definition of key polynomial (in fact, the same holds if we substitute any $f\in K[x]$ for $Q$).
In order to prove \textbf{(ii)} we set $\epsilon:=\epsilon(Q)$ and $b':=b(Q')$. By \textbf{(i)} of Proposition \ref{Propdificil}, we have \[ \nu_Q(Q')\leq \nu_Q(\partial_{b'}Q')+b'\epsilon. \] Since $\epsilon(Q)<\epsilon(Q')$, we also have \[ \nu(\partial_{b'}Q')+b'\epsilon< \nu(\partial_{b'}Q')+b'\epsilon(Q')=\nu(Q'). \] This, and the fact that $\nu_Q(\partial_{b'}Q')\leq \nu(\partial_{b'}Q')$, imply that $\nu_Q(Q')<\nu(Q')$.
Now assume that $\deg(Q)=\deg(Q')$ and let us prove (\ref{eqwhdegsame}). Since $$ \deg(Q)=\deg(Q') $$ and both $Q$ and $Q'$ are monic, the $Q$-standard expansion of $Q'$ is given by $$ Q'=Q+(Q-Q'). $$ Hence \[ \nu_Q(Q')=\min\{\nu(Q),\nu(Q-Q')\}. \] The first equivalence follows immediately from this. In view of part \textbf{(ii)}, it remains to prove that if $\nu_Q(Q')<\nu(Q')$, then $\epsilon(Q)<\epsilon(Q')$. Since $\nu_Q(Q')<\nu(Q')$ we have $S_Q(Q')\neq \{0\}$. Hence, by Proposition \ref{Propdificil} \textbf{(ii)}, the equality holds in (\ref{eqthatcompvalutrunc}) (for $f=Q'$) for some $b\in\mathbb{N}$. Moreover, since $\deg(Q)=\deg(Q')$, we have $\deg(\partial_bQ')<\deg(Q)$ and consequently $\nu_Q(\partial_bQ')=\nu(\partial_bQ')$. Then Proposition \ref{Propdificil} \textbf{(iii)} implies that $\epsilon(Q)<\epsilon(Q')$. \end{proof}
For a key polynomial $Q\in K[x]$, let \[ \alpha(Q):=\min\{\deg(f)\mid \nu_Q(f)< \nu(f)\} \] (if $\nu_Q=\nu$, then set $\alpha(Q)=\infty$) and \[ \Psi(Q):=\{f\in K[x]\mid f\mbox{ is monic},\nu_Q(f)< \nu(f)\mbox{ and }\alpha(Q)=\deg (f)\}. \] \begin{Lema}\label{lemmapsikeypoly} If $Q$ is a key polynomial, then every element $Q'\in\Psi(Q)$ is also a key polynomial. Moreover, $\epsilon(Q)<\epsilon(Q')$. \end{Lema} \begin{proof} By assumption, we have $\nu_Q(Q')<\nu(Q')$, hence $S_{Q}(Q')\neq \{0\}$. This implies, by Proposition \ref{Propdificil} \textbf{(ii)}, that there exists $b\in\mathbb{N}$ such that \[ \nu_Q(Q')-\nu_Q(\partial_b Q')=b\epsilon(Q). \] Since $\deg(\partial_b Q')<\deg(Q')=\alpha(Q)$, we have $\nu_Q(\partial_b Q')=\nu(\partial_b Q')$. Consequently, by Proposition \ref{Propdificil} \textbf{(iii)}, $\epsilon(Q)<\epsilon(Q')$.
Now take any polynomial $f\in K[x]$ such that $\deg(f)<\deg(Q')=\alpha(Q)$. In particular, $\nu_Q(f)=\nu(f)$. Moreover, for every $b\in\mathbb{N}$, $\deg(\partial_b f)<\deg(Q')= \alpha(Q)$ which implies that $\nu_Q(\partial_bf)=\nu(\partial_b f)$. Then, for every $b\in\mathbb{N}$, \[ \frac{\nu(f)-\nu(\partial_bf)}{b}=\frac{\nu_Q(f)-\nu_Q(\partial_bf)}{b}\leq \epsilon(Q)<\epsilon(Q'). \] This implies that $\epsilon(f)<\epsilon(Q')$, which shows that $Q'$ is a key polynomial.
\end{proof} \begin{Teo}\label{definofkeypol} A polynomial $Q$ is a key polynomial if and only if there exists a key polynomial $Q_-\in K[x]$ such that $Q\in \Psi(Q_-)$ or the following conditions hold: \begin{description} \item[(K1)] $\alpha(Q_-)=\deg (Q_-)$ \item[(K2)] the set $\{\nu(Q')\mid Q'\in\Psi(Q_-)\}$ does not contain a maximal element \item[(K3)] $\nu_{Q'}(Q)<\nu(Q)$ for every $Q'\in \Psi(Q_-)$ \item[(K4)] $Q$ has the smallest degree among polynomials satisfying \textbf{(K3)}. \end{description} \end{Teo}
\begin{proof} We will prove first that if such $Q_-$ exists, then $Q$ is a key polynomial. The case when $Q\in \Psi(Q_-)$ follows from Lemma \ref{lemmapsikeypoly}. Assume now that \textbf{(K1) - (K4)} hold. Take $f\in K[x]$ such that $\deg(f)<\deg(Q)$. This implies that $\deg(\partial_bQ)<\deg(Q)$ and $\deg(\partial_bf)<\deg(Q)$ for every $b\in\mathbb{N}$. Hence, by \textbf{(K4)}, there exists $Q'\in\Psi(Q_-)$ such that \[ \nu_{Q'}(f)=\nu(f), \nu_{Q'}(\partial_bf)=\nu(\partial_bf) \mbox{ and }\nu_{Q'}(\partial_bQ)=\nu(\partial_bQ)\mbox{ for every }b\in\mathbb{N}. \] We claim that $\epsilon(Q')<\epsilon(Q)$. If not, by Proposition \ref{Propcompkeypol} \textbf{(i)}, we would have $\deg(Q)\leq\deg(Q')$. Since $\nu_{Q'}(Q)<\nu(Q)$, this implies that $\deg(Q)=\deg(Q')$. This and Proposition \ref{Propcompkeypol} \textbf{(iii)} give us that $\epsilon(Q')<\epsilon(Q)$ which is a contradiction.
Now, \[ \epsilon(f)\leq \frac{\nu(f)-\nu(\partial_bf)}{b}=\frac{\nu_{Q'}(f)-\nu_{Q'}(\partial_bf)}{b}\leq\epsilon(Q')<\epsilon(Q). \] Hence $Q$ is a key polynomial.
For the converse, take a key polynomial $Q\in K[x]$ and consider the set \[ \mathcal S:=\{Q'\in K[x]\mid Q'\mbox{ is a key polynomial and }\nu_{Q'}(Q)<\nu(Q)\}. \] Observe that $\mathcal S\neq\emptyset$. Indeed, if $\deg(Q)>1$, then every key polynomial of the form $x-a$ belongs to $\mathcal S$. If $Q=x-a$, then there exists $b\in K$ such that $\nu(b)<\min\{\nu(a),\nu(x)\}$. Therefore, $x-b\in \mathcal S$.
If there exists a key polynomial $Q_-\in \mathcal S$ such that $\deg(Q)=\deg(Q_-)$, then we have $Q\in \Psi(Q_-)$ and we are done. Hence, assume that every polynomial $Q'\in \mathcal S$ has degree smaller than $\deg(Q)$.
Assume that there exists $Q_-\in \mathcal S$ such that for every $Q'\in \mathcal S$ we have \begin{equation}\label{eqmaxphi} (\deg(Q_-),\nu(Q_-))\geq (\deg(Q'),\nu(Q')) \end{equation} in the lexicographical ordering. We claim that $Q\in \Psi(Q_-)$. If not, there would exist a key polynomial $Q''$ such that $\nu_{Q_-}(Q'')<\nu(Q'')$ and $\deg(Q'')<\deg(Q)$. Since $\deg(Q'')<\deg(Q)$, Proposition \ref{Propcompkeypol} \textbf{(i)} and \textbf{(ii)} give us that $\nu_{Q''}(Q)<\nu(Q)$. Hence $Q''\in \mathcal S$. The inequality (\ref{eqmaxphi}) gives us that $\deg(Q'')\leq\deg(Q_-)$. On the other hand, since $\nu_{Q_-}(Q'')<\nu(Q'')$ we must have $\deg(Q_-)=\deg(Q'')$. Hence, Proposition \ref{Propcompkeypol} \textbf{(iii)} gives us that $\nu(Q_-)<\nu(Q'')$ and this is a contradiction to the inequality (\ref{eqmaxphi}).
Now assume that for every $Q'\in \mathcal S$, there exists $Q''\in \mathcal S$ such that \begin{equation}\label{eqmaxphihas} (\deg(Q'),\nu(Q'))<(\deg(Q''),\nu(Q'')) \end{equation} in the lexicographical ordering. Take $Q_-\in\mathcal S$ such that $\deg(Q_-)\geq \deg(Q')$ for every $Q'\in\mathcal S$. We will show that the conditions \textbf{(K1) - (K4)} are satisfied. By (\ref{eqmaxphihas}), there exists $Q''\in \mathcal S$ such that \begin{equation}\label{eqbanolimit} (\deg(Q_-),\nu(Q_-))<(\deg(Q''),\nu(Q'')). \end{equation} In particular, $\deg(Q_-)=\deg(Q'')$ and $\nu(Q_-)<\nu(Q'')$. Proposition \ref{Propcompkeypol} \textbf{(iii)} gives us that $\nu_{Q_-}(Q'')<\nu(Q'')$. Hence $\alpha(Q_-)=\deg(Q_-)$ and we have proved \textbf{(K1)}. If $Q'\in\Psi(Q_-)$, then $\deg(Q')=\deg(Q_-)<\deg(Q)$ and hence $\nu_{Q'}(Q)<\nu(Q)$. This implies that $Q'\in\mathcal S$. The equation (\ref{eqbanolimit}) tells us that $\{\nu(Q')\mid Q'\in\Psi(Q_-)\}$ has no maximum, so we have proved \textbf{(K2)}. Now take any element $Q'\in\Psi(Q_-)$. Then $\deg(Q')<\deg(Q)$ and Proposition \ref{Propcompkeypol} \textbf{(i)} and \textbf{(ii)} give us that $\nu_{Q'}(Q)<\nu(Q)$. This proves \textbf{(K3)}. Take a polynomial $\widetilde{Q}$ with $\nu_{Q'}(\widetilde{Q})<\nu(\widetilde{Q})$ for every $Q'\in\Psi(Q_-)$ with minimal degree possible. We want to prove that $\deg(\widetilde Q)=\deg(Q)$. Assume, aiming for a contradiction, that $\deg(\widetilde{Q})<\deg(Q)$. The first part of the proof gives us that $\widetilde Q$ is a key polynomial. Fix $Q'\in\Psi(Q_-)$. Then $\nu_{Q'}(\widetilde Q)<\nu(\widetilde Q)$ and consequently $\deg(\widetilde{Q})=\deg(Q')=\deg(Q_-)$. Therefore $\nu(Q')<\nu(\widetilde Q)$ for every $Q'\in \Psi(Q_-)$, which is a contradiction to (\ref{eqmaxphihas}). This concludes our proof. \end{proof}
\begin{Def} When conditions \textbf{(K1) - (K4)} of Theorem \ref{definofkeypol} are satisfied, we say that $Q$ is a \textbf{limit key polynomial}. \end{Def}
\begin{Obs} Observe that as a consequence of the proof we obtain that $$ \epsilon(Q_-)<\epsilon(Q). $$ \end{Obs}
\begin{proof}[Proof of Theorem \ref{Theoremexistencecompleteseqkpol}] Consider the set \[ \Gamma_0:=\{\nu(x-a)\mid a\in K\}. \] We have two possibilities:
\begin{itemize} \item $\Gamma_0$ has a maximal element \end{itemize} Set $Q_0:=x-a_0$ where $a_0\in K$ is such that $\nu(x-a_0)$ is a maximum of $\Gamma_0$. If $\nu=\nu_{Q_0}$ we are done, so assume that $\nu\neq\nu_{Q_0}$. If the set \[ \{\nu(Q)\mid Q\in \Psi(Q_0)\} \] has a maximum, choose $Q_1\in \Psi(Q_0)$ such that $\nu(Q_1)$ is this maximum. If not, choose $Q_1$ as any polynomial in $\Psi(Q_0)$. Set $\Lambda_1:=\{Q_0,Q_1\}$ (ordered by $Q_0<Q_1$).
\begin{itemize} \item $\Gamma_0$ does not have a maximal element \end{itemize} For every $\gamma\in \Gamma_0$ set $Q_\gamma:=x-a_\gamma$ for some $a_\gamma\in K$ such that $\nu(x-a_\gamma)=\gamma$. If for every $f\in K[x]$, there exists $\gamma\in \Gamma_0$ such that $\nu(f)=\nu_{Q_\gamma}(f)$ we are done. If not, let $Q$ be a polynomial of minimal degree among all the polynomials for which $\nu_{Q_\gamma}(Q)<\nu(Q)$ for every $\gamma\in \Gamma_0$. If $\alpha(Q)=\deg(Q)$ and the set $\{\nu(Q')\mid Q'\in \Psi(Q)\}$ contains a maximal element, choose $Q_1\in \Psi(Q)$ such that $\nu(Q_1)\geq \nu(Q')$ for every $Q'\in \Psi(Q)$. If not, set $Q_1:=Q$. Set $\Lambda_1:=\{Q_\gamma\mid \gamma\in \Gamma_0\}\cup\{Q_1\}$ (ordered by $Q_1>Q_\gamma$ for every $\gamma\in \Gamma$ and $Q_\gamma>Q_{\gamma'}$ if $\gamma>\gamma'$).
Observe that in either case, $\deg(Q_1)>\deg(Q_0)$ and for $Q,Q'\in \Lambda_1$, $Q<Q'$ if and only if $\epsilon(Q)<\epsilon(Q')$. Moreover, if $\alpha(Q_1)=\deg(Q_1)$, then $\{\nu(Q)\mid Q\in\Psi(Q_1)\}$ does not have a maximum.
Assume that for some $i\in\mathbb{N}$, there exists a totally ordered set $\Lambda_i$ consisting of key polynomials with the following properties: \begin{description} \item[(i)] there exist $Q_0,Q_1,\ldots,Q_i\in \Lambda_i$ such that $Q_i$ is the last element of $\Lambda_i$ and $\deg(Q_0)<\deg(Q_1)<\ldots<\deg(Q_i)$. \item[(ii)] if $\alpha(Q_i)=\deg(Q_i)$, then $\Gamma_i:=\{\nu(Q)\mid Q\in \Psi(Q_i)\}$ does not have a maximum. \item[(iii)] for $Q,Q'\in \Lambda_i$, $Q<Q'$ if and only if $\epsilon(Q)<\epsilon(Q')$. \end{description} If $\nu_{Q_i}\neq \nu$, then we will construct a set $\Lambda_{i+1}$ of key polynomials having the same properties (changing $i$ by $i+1$).
Since $\nu_{Q_i}\neq \nu$, the set $\Psi(Q_i)$ is not empty. We have two cases: \begin{itemize} \item $\alpha(Q_i)>\deg(Q_i)$. \end{itemize} If $\Gamma_i$ has a maximum, take $Q_{i+1}\in\Psi(Q_i)$ such that $\nu(Q_{i+1})\geq \Gamma_i$. Otherwise, choose $Q_{i+1}$ to be any element of $\Psi(Q_i)$. Observe that if $\alpha(Q_{i+1})=\deg(Q_{i+1})$, then $\Gamma_{i+1}$ does not have a maximum. Set $\Lambda_{i+1}=\Lambda_i\cup\{Q_{i+1}\}$ with the extension of the order in $\Lambda_i$ obtained by setting $Q_{i+1}>Q$ for every $Q\in \Lambda_i$. \begin{itemize} \item $\alpha(Q_i)=\deg(Q_i)$. \end{itemize} By assumption, the set $\Gamma_i$ does not have a maximum. For each $\gamma\in\Gamma_i$, choose a polynomial $Q_\gamma\in \Psi(Q_i)$ such that $\nu(Q_\gamma)=\gamma$. If for every $f\in K[x]$, there exists $\gamma\in \Gamma_i$ such that $\nu_{Q_\gamma}(f)=\nu(f)$, then we are done. Otherwise, choose a monic polynomial $Q$, of smallest degree possible, such that $\nu_{Q'}(Q)<\nu(Q)$ for every $Q'\in\Psi(Q_i)$. If $\alpha(Q)=\deg(Q)$ and $\{\nu(Q')\mid Q'\in\Psi(Q)\}$ has a maximum, we choose $Q_{i+1}$ such that $\nu(Q_{i+1})\geq\{\nu(Q')\mid Q'\in\Psi(Q)\}$. Otherwise we set $Q_{i+1}=Q$. Then set \[ \Lambda_{i+1}:=\Lambda_i\cup\{Q_\gamma\mid \gamma\in\Gamma_i\}\cup\{Q_{i+1}\}, \] with the extension of the order of $\Lambda_i$ given by $$ Q_{i+1}>Q'\mbox{ for every }Q'\in \Lambda_{i+1}\setminus\{Q_{i+1}\}, $$ $Q_\gamma> Q'$ for every $\gamma\in \Gamma_i$ and $Q'\in\Lambda_i$ and $Q_\gamma>Q_{\gamma'}$ for $\gamma,\gamma'\in \Gamma_i$ with $\gamma>\gamma'$.
In all cases, the set $\Lambda_{i+1}$ has the properties \textbf{(i)}, \textbf{(ii)} and \textbf{(iii)}.
Assume now that for every $i\in\mathbb{N}$ the sets $\Lambda_i$ and $\Lambda_{i+1}$ can be constructed. Then we can construct a set \[ \Lambda_\infty:=\bigcup_{i=1}^\infty\Lambda_i \] of key polynomials having the property that for $Q,Q'\in \Lambda_\infty$, $Q<Q'$ if and only if $\epsilon(Q)<\epsilon(Q')$ and there are polynomials $Q_0,\ldots,Q_i,\ldots\in \Lambda_\infty$ such that $$ \deg(Q_{i+1})>\deg(Q_i) $$ for every $i\in\mathbb{N}$. This means that for every $f\in K[x]$ there exists $i\in\mathbb{N}$ such that $\deg(f)<\deg(Q_i)$, which implies that $\nu_{Q_i}(f)=\nu(f)$. Therefore, $\Lambda_\infty$ is a complete set of key polynomials for $\nu$. \end{proof}
Observe that at each stage, the same construction would work if we replaced $\Gamma_i$ by any cofinal subset $\Gamma_i'$ of $\Gamma_i$. Hence, if the rank of $\nu$ is equal to 1, then we can choose $\Gamma_i'$ to have order type at most $\omega$. Then, from the construction of the sets $\Lambda_i$ and $\Lambda_\infty$, we can conclude the following: \begin{Cor} If the rank of $\nu$ is equal to one, then there exists a complete sequence of key polynomials of $\nu$ with order type at most $\omega\times\omega$. \end{Cor}
\section{Pseudo-convergent sequences} The next two theorems justify the definitions of algebraic and transcendental pseudo-convergent sequences. \begin{Teo}[Theorem 2 of \cite{Kap}] If $\{a_\rho\}_{\rho<\lambda}$ is a pseudo-convergent sequence of transcendental type, without a limit in $K$, then there exists an immediate transcendental extension $K(z)$ of $K$ defined by setting $\nu(f(z))$ to be the value $\nu(f(a_{\rho_f}))$ as in condition (\ref{condforpscstotra}). Moreover, for every valuation $\mu$ in some extension $K(u)$ of $K$, if $u$ is a pseudo-limit of $\{a_\rho\}_{\rho<\lambda}$, then there exists a value preserving $K$-isomorphism from $K(u)$ to $K(z)$ taking $u$ to $z$. \end{Teo}
\begin{Teo}[Theorem 3 of \cite{Kap}]\label{thmonalgimmext} Let $\{a_\rho\}_{\rho<\lambda}$ be a pseudo-convergent sequence of algebraic type, without a limit in $K$, $q(x)$ a polynomial of smallest degree for which (\ref{condforpscstoalg}) holds and $z$ a root of $q(x)$. Then there exists an immediate algebraic extension of $K$ to $K(z)$ defined as follows: for every polynomial $f(x)\in K[x]$, with $\deg f<\deg q$ we set $\nu(f(z))$ to be the value $\nu(f(a_{\rho_f}))$ as in condition (\ref{condforpscstotra}). Moreover, if $u$ is a root of $q(x)$ and $\mu$ is an extension of $\nu$ to $K(u)$ making $u$ a pseudo-limit of $\{a_\rho\}_{\rho<\lambda}$, then there exists a value preserving $K$-isomorphism from $K(u)$ to $K(z)$ taking $u$ to $z$. \end{Teo}
For the rest of this paper, let $\{a_\rho\}_{\rho<\lambda}$ be a pseudo-convergent sequence for the valued field $(K,\nu)$, without a limit in $K$. For each $\rho<\lambda$, we denote $\nu_\rho=\nu_{x-a_\rho}$. For a polynomial $f(x)\in K[x]$ and $a\in K$ we consider the Taylor expansion of $f$ at $a$ given by \[ f(x)=f(a)+\partial_1f(a)(x-a)+\ldots+\partial_nf(a)(x-a)^n. \] Assume that $\{a_\rho\}_{\rho<\lambda}$ fixes the value of the polynomials $\partial_if(x)$ for every $1\leq i\leq n$. We denote by $\beta_i$ this fixed value. \begin{Lema}[Lemma 8 of \cite{Kap}]\label{lemmakaplvalpol} There is an integer $h$, which is a power of $p$, such that for sufficiently large $\rho$ \[ \beta_i+i\gamma_\rho>\beta_h+h\gamma_\rho\mbox{ whenever }i\ne h\mbox{ and } \nu(f(a_\rho))=\beta_h+h\gamma_\rho. \] \end{Lema}
\begin{Cor}\label{correlanurhowithnu} If $\{a_\rho\}_{\rho<\lambda}$ fixes the value of $f(x)$, then $\nu_\rho(f(x))=\nu(f(x))$. On the other hand, if $\{a_\rho\}_{\rho<\lambda}$ does not fix the value of $f(x)$, then $\nu_\rho(f(x))<\nu(f(x))$ for every $\rho<\lambda$. \end{Cor} \begin{proof} By definition of $\nu_\rho$ we have \[ \nu_\rho(f(x))=\min_{0\leq i\leq n}\{\nu(\partial_if(a_\rho)(x-a_\rho)^i)\}=\min_{0\leq i\leq n}\{\beta_i+i\gamma_\rho\}, \] where $\beta_0:=\nu(f(a_\rho))$. This implies, using the lemma above, that \[ \nu_\rho(f(x))=\nu(f(a_\rho)). \] If $\{a_\rho\}_{\rho<\lambda}$ fixes the value of $f(x)$, then $\nu(f(a_\rho))=\nu(f(x))$ for $\rho$ sufficiently large. Thus $\nu_\rho(f(x))=\nu(f(x))$. On the other hand, if $\{a_\rho\}_{\rho<\lambda}$ does not fix the value of $f(x)$, then $\nu(f(x))>\nu(f(a_\rho))=\nu_\rho(f(x))$ for every $\rho<\lambda$. \end{proof}
\begin{proof}[Proof of Theorem \ref{compthemkppsc}] If $\{a_\rho\}_{\rho<\lambda}$ is of transcendental type it fixes, for any polynomial $f(x)\in K[x]$, the values of the polynomials $\partial_if(x)$ for every $0\leq i\leq n$ (here $\partial_0f:=f$). Hence, Corollary \ref{correlanurhowithnu} implies that $\nu_\rho(f(x))=\nu(f(x))$ for sufficiently large $\rho<\lambda$, which is what we wanted to prove.
Now assume that $\{a_\rho\}_{\rho<\lambda}$ is of algebraic type. Take $\rho<\lambda$ such that $$ \nu(q(a_\tau))>\nu(q(a_\sigma)) $$ for every $\rho<\sigma<\tau<\lambda$ and set $Q_-=x-a_\rho$. Then \[ \nu_{Q_-}(x-a_\sigma)=\nu_{Q_-}(x-a_\rho+a_\rho-a_\sigma)=\nu(x-a_\rho)<\nu(x-a_\sigma) \] for every $\rho<\sigma<\lambda$. This implies that $\alpha(Q_-)=1$ and then $\alpha(Q_-)=\deg (Q_-)$. Consequently, \textbf{(K1)} is satisfied. Moreover, \[ \Psi(Q_-)=\{x-a\mid \nu_{Q_-}(x-a)<\nu(x-a)\}. \] In order to prove \textbf{(K2)} assume, aiming for a contradiction, that $\nu(\Psi(Q_-))$ has a maximum, let us say $\nu(x-a)$. Then, in particular, $\nu(x-a)>\nu(x-a_\sigma)$ for every $\rho<\sigma<\lambda$. This implies that $a\in K$ is a limit of $\{a_\rho\}_{\rho<\lambda}$, which is a contradiction. Conditions \textbf{(K3)} and \textbf{(K4)} follow immediately from Corollary \ref{correlanurhowithnu} and the fact that $\{\nu(x-a_\rho)\mid \rho<\lambda\}$ is cofinal in $\nu(\Psi(Q_-))$. \end{proof}
\end{document} |
\begin{document}
\title{ \bf Inverse problem on a tree-shaped network}
\maketitle
\footnotetext[1]{CNRS ; LAAS ; 7 avenue du colonel Roche, F-31077 Toulouse, France ;\\ Universit\'e de Toulouse ; UPS, INSA, INP, ISAE, UT1, UTM, LAAS ; F-31077 Toulouse, France.\\
E-mail: {\tt [email protected]}} \footnotetext[2]{Graduate School of Mathematical Sciences, University of Tokyo, 3-8-1 Komaba, Tokyo, 153-8914 Japan.\\
E-mail: {\tt [email protected]}}
\abstract{ In this article, we prove a uniqueness result for coefficient inverse problems regarding a wave, a heat or a Schr\"odinger equation set on a tree-shaped network, as well as the corresponding stability result of the inverse problem for the wave equation. The objective is the determination of the potential on each edge of the network from the additional measurement of the solution at all but one of the external end-points. Our idea for proving the uniqueness is to use a traditional approach in coefficient inverse problems by Carleman estimate. Afterwards, using an observability estimate on the whole network, we apply a compactness-uniqueness argument and prove the stability for the wave inverse problem. }
{\bf Keywords:} networks, inverse problem, Carleman estimate.
{\bf AMS subject classifications:} 35R30, 93C20, 34B45
\renewcommand{\fnsymbol{footnote}}{\fnsymbol{footnote}}
\section{Introduction and main results}
Let $\Lambda$ be a tree-shaped network composed of $ N+1$ open segments $(e_j)_{j=0, 1, ..., N}$ of length~$\ell_j$, linked by $N_1$ internal node points belonging to the set $\Pi_1$ and let us denote by $\Pi_2$ the set of $N_2$ exterior end-points where only one segment starts. Here we note that $N+1 = N_1+N_2$. By ``tree-shaped network'', we mean that $\Lambda$ does not contain any closed loop.
For any function $f: \Lambda\rightarrow\mathbb{R}$ and any internal node $P\in\Pi_1$ where $n_P$ segments, say $e_1, ..., e_{n_P}$, meet, we set $$ f_j=f\vert_{e_j}: \mbox{the restriction of $f$ to the edge $e_j$}, \mbox{ and} \quad \left[ f \right]_P := \sum_{j=1}^{n_P}f_{j}(P). $$ We consider on this plane $1$-d tree-shaped network $\Lambda$ either wave or heat or even Schr\"odinger equations, with a different potential term $x\mapsto p_j(x)$ on each segment.
Our first, and main, system of interest is the following $1$-d wave equation on the network $\Lambda$: \begin{equation}\label{NW} \left\{ \begin{array}{lll} \partial_t^2 u_j - \partial_x^2 u_j + p_j(x)u_j = 0\quad &\quad\forall j\in\{0,1,..., N\}, (x,t)\in e_j \times (0,T),\\ u(Q,t)=h(t),&\quad \,\forall Q\in \Pi_2, t\in(0,T),\\ u(x,0) = u^0(x), \partial_t u(x,0)=u^1(x),&\quad x\in\Lambda, \end{array} \right. \end{equation} assuming some compatibility condition between the boundary and initial data. Moreover we assume the continuity and what is called the Kirchhoff law at any internal node $P\in\Pi_1$, which are given by \begin{equation}\label{C} u_j(P,t)=u_i(P,t)=:u(P,t),\quad\forall i,j\in\left\{1,...,n_P\right\},\, 0<t<T,\\ \end{equation} \begin{equation}\label{K} \left[ u_x (t)\right]_P :=\sum_{j=1}^{n_P} \partial_x u_{j}(P,t)=0,\quad 0<t<T. \end{equation} Henceforth we choose an orientation of $\Lambda$ such that to two endpoints of each segment $e$, correspond an initial node $I(e)$ and a terminal node $T(e)$. We further define the outward normal derivative $\partial_{n_e}u_j$ at a node $P$ of $e_j$ by $$ \partial_{n_e}u_j(P,t) = \left\{ \begin{array}{lll} -\partial_xu_j(P,t), \quad &\mbox{if $P\in I(e_j)$}, \\ \partial_xu_j(P,t), \quad &\mbox{if $P\in T(e_j)$}. \\ \end{array} \right. $$ Henceforth we set $$ u = (u_0, ..., u_N), \quad u_j = u\vert_{e_j}, \quad\hbox{ and } \quad p = (p_0, ..., p_N), \quad p_j = p\vert_{e_j} \quad \mbox{for $j \in \{0, 1, ..., N\}$}. $$ Let us also mention that at a node point, at least three segments $e_j$ meet. If only two segments, say $e_1, e_2$, meet at a node point, then by \eqref{C} and \eqref{K}, setting $u = u_1$ and $p=p_1$ in $e_1$ and $u = u_2$, $p=p_2$ in $e_2$, we have $\partial_t^2u - \partial_x^2 u + p u = 0$ in $e_1 \cup e_2$. 
Therefore we can regard $e_1 \cup e_2$ as one open segment.\\ Since one can prove the unique existence of solution to \eqref{NW} under the node conditions \eqref{C}-\eqref{K} in a suitable function space (e.g., Lions and Magenes \cite{LionsMagenesBook}), we denote the solution by $u[p](x,t)$, and we set $u[p] = (u[p]_0, ..., u[p]_N)$.
Moreover we consider the following heat system on the same network~$\Lambda$ \begin{equation}\label{NH} \left\{ \begin{array}{lll} \partial_t u_j - \partial_x^2 u_j + p_j(x)u_j = 0\quad &\quad\forall j\in\{0,1,..., N\}, \forall (x,t)\in e_j \times (0,T),\\ \partial_xu(Q,t)=0,&\quad \forall Q\in \Pi_2, \forall t\in(0,T),\\ u(x,0) = u^0(x), &\quad \forall x\in\Lambda, \end{array} \right. \end{equation} and the Schr\"odinger system on the network $\Lambda$ \begin{equation}\label{NS} \left\{ \begin{array}{lll} i\partial_t u_j - \partial_x^2 u_j + p_j(x)u_j = 0\quad &\quad\forall j\in\{0,1,..., N\}, \forall (x,t)\in e_j \times (0,T),\\ u(Q,t)=h(t),&\quad \forall Q\in \Pi_2, \forall t\in(0,T),\\ u(x,0) = u^0(x), &\quad \forall x\in\Lambda, \end{array} \right. \end{equation} both under the same node conditions \eqref{C} and \eqref{K}. Here and henceforth we set $i = \sqrt{-1}$. If there is no possible confusion, by the same notation $u[p]$ we denote the solution to \eqref{NH} or \eqref{NS}, under \eqref{C} and \eqref{K}.\\
\noindent\textbf{Inverse Problem}: Is it possible to retrieve the potential $p$ everywhere in the whole network~$\Lambda$ from measurements at all external nodes except one?\\
In our article, we address the following two fundamental theoretical questions concerning coefficient inverse problems: \\
\noindent\textbf{Uniqueness}: Do the equalities of the measurements $\partial_x u[p] (Q,t) = \partial_x u[q] (Q,t)$ for all $t\in(0,T)$ and $Q\in \Pi_2\setminus {\{Q_{N_2}\}}$ imply $p = q$ on $\Lambda$?\\
\noindent\textbf{Stability}: Can we estimate, in appropriate norms, the difference of two potentials $p - q$ on $\Lambda$ by the difference of the corresponding measurements $\partial_x u[p] (Q,t) - \partial_x u[q] (Q,t)$ for all $t\in(0,T)$ and $Q\in \Pi_2\setminus {\{Q_{N_2}\}}$ ?\\
\begin{figure}
\caption{A star-shaped network with $10$ edges ($N=9$, $N_1 = 4$, $N_2= 7$).
}
\label{fig}
\end{figure}
This inverse problem is nonlinear and we will give here the proof of the uniqueness of the solution with an argument which does not use a global Carleman estimate. Very recent papers on coefficient inverse problems on networks, such as Baudouin, Cr\'epeau and Valein \cite{BaudouinCrepeauValein11} for the wave equation, and Ignat, Pazoto and Rosier \cite{IgnatPazRosier12} for the heat and the Schr\"odinger equations, give indeed \textit{stability} and therefore \textit{uniqueness} from appropriate global Carleman estimates. Our first goal is to prove the uniqueness of the potential on the tree-shaped network from measurements only at all the exterior end-points of the network, except one. The argument for the uniqueness will work for either the wave or the heat or the Schr\"odinger equations on the network. The question of the proof of the Lipschitz stability in the case of the wave equation will be addressed afterwards, using a compactness-uniqueness argument, and relies on the observability estimate on the whole network which was already proved in the literature in several situations.
Concerning the precise topic which we are considering, the bibliography lies in two different domains, namely coefficient inverse problems for partial differential equation on the one hand and control and stabilization in networks on the other hand.
Therefore one can begin by mentioning the book of Isakov \cite{Isakov} which addresses some techniques linked to the study of inverse problems for several partial differential equations. Actually, as the first answer to the uniqueness for a coefficient inverse problem with a single measurement, we refer to Bukhgeim and Klibanov \cite{BuKli81}, and see also Klibanov \cite{Klibanov92} and Yamamoto \cite{Yam99} for example. Here we do not intend to give an exhaustive list of references. After the proof of uniqueness using the basic $1$-d result on the basis of local Carleman estimates, the idea beneath this article is to take advantage of an observability estimate to obtain the Lipschitz stability of the inverse problem with a compactness-uniqueness argument. Nowadays, many results on the stability of inverse problems are derived directly from global Carleman estimates, see e.g., \cite{BaudouinCrepeauValein11} and \cite{IgnatPazRosier12}. One should also know that studies on inverse problems and controllability of partial differential equations share some technical materials such as Carleman estimates and observability inequalities. In the particular network setting, we would like to make use of classical results such as well-known $1$-d local Carleman estimates, observability estimates on the network borrowed from control studies, in order to obtain uniqueness and stability results. We can also give some more references on inverse problems for hyperbolic equations such as Baudouin, Mercado and Osses \cite{BMO07}, Imanuvilov and Yamamoto \cite{ImYamIP01}, \cite{ImYamCom01}, Puel and Yamamoto \cite{PuelYam97}, Yamamoto and Zhang \cite{YamZhang03}, which are all based upon local or global Carleman estimates.
Besides, the control, observation and stabilization problems of networks have been the object of recent and intensive researches such as e.g., D\'ager and Zuazua \cite{DagZua06}, Lagnese, Leugering and Schmidt \cite{LaLeSch94}, Zuazua \cite{ZuaSurveyNetworks}. More specifically, the control being only applied at one single end of the network, the articles D\'ager \cite{Dag04}, D\'ager and Zuazua \cite{DagZua00,DagZua06} prove controllability results for the wave equation on networks, using observability inequalities under assumptions about the irrationality properties
of the ratios of the lengths of the strings. We can also underline that many results of controllability on networks concern only the wave equation without lower order terms (see \cite{LaLeSch94}, Schmidt \cite{Schmidt92} for instance). However, for the inverse problem it is difficult to work with measurements at a more restricted set of nodes, and we do not consider measurements at fewer external nodes. \\
In the sequel, we shall use the following notations: \begin{eqnarray*} L^{\gamma}(\Lambda)&=&\left\{f; \thinspace f_j\in L^{\gamma}(e_j), \,\forall j\in\{0,1,..., N\}\right\}, \quad \gamma \ge 1,\\ H^1_0(\Lambda)&=& \Big\{f; \thinspace f_j\in H^1(e_j),\,\forall j\in\{0,1,..., N\}, \, f_j(P)=f_k(P) \thinspace \mbox{if $e_j$ and $e_k$ meet at $P$},\\
&&\, \forall P \in\Pi_1,\, \textnormal{and } f(Q)=0, \, \forall Q\in \Pi_2 \Big\}. \end{eqnarray*} For shortness, for $f\in L^1(\Lambda)$, we often write, \begin{equation*} \int_{\Lambda}f dx=\sum_{j=0}^N \int_{e_j} f_j(x)dx, \end{equation*} where the integral on $e_j$ is oriented from $I(e_j)$ to $T(e_j)$. Then the norms of the Hilbert spaces $L^2(\Lambda)$ and $H_0^1(\Lambda)$ are defined by $$
\left\|f\right\|_{L^2(\Lambda)}^2=\int_{\Lambda}\left|f\right|^2dx
\hbox{ and }\left\|f\right\|_{H_0^1(\Lambda)}^2=\int_{\Lambda}\left|\partial_x f\right|^2dx. $$ For $M\ge 0$, we introduce the set $$ L^\infty_M(\Lambda) = \left\{q=(q_0,..., q_N); \thinspace q_j\in L^\infty(e_j),\, \forall j\in\{0,1,..., N\}\thinspace
\mbox{such that $\|q\|_{L^\infty(\Lambda)} \leq M$} \right\}. $$
We are ready to state our first main result:
\begin{theorem}[\bf Uniqueness]\label{Thm1}
Let $r>0$ be an arbitrary constant. Assume that $p, q \in L^{\infty}(\Lambda)$ and the initial value $u^0$ satisfies $$
|u^0(x)|\geq r> 0, \quad \mbox{a.e. in $\Lambda$}. $$ Assume further that the solutions $u[p], u[q]$ of \eqref{NW}-\eqref{C}-\eqref{K} belong to $$H^3(0,T; L^{\infty}(\Lambda)) \cap H^1(0,T; H^2(\Lambda)).$$ Then there exists $T_0>0$ such that for all $T\geq T_0$, if $$ \partial_x u[p] (Q,t) = \partial_x u[q] (Q,t) \quad \mbox{for each $t\in(0,T)$ and $Q\in\Pi_2\setminus\{Q_{N_2}\}$}, $$ then we have $p=q$ in $\Lambda$. \end{theorem}
The proof of this result in Section 2 relies on a 1-d result of uniqueness for the determination of potential in the wave equation and an ``undressing'' argument.\\
It is worth mentioning that our argument gives the uniqueness for the inverse problems of determination of potentials on tree-shaped networks also for the heat and the Schr\"odinger equations using only measurements at $N_2 - 1$ exterior end-points. In fact, our arguments in proving the uniqueness for the wave and the Schr\"odinger equations are essentially the same and are based on local Carleman estimates, while the uniqueness for the inverse heat problem is reduced to the uniqueness for the corresponding inverse wave problem (in a sense to be detailed later).
\begin{theorem}[\bf Uniqueness for the heat inverse problem]\label{Thm1-1}
Assume that $p, q \in L^{\infty}(\Lambda)$, the initial value $u^0$ satisfies $$
|u^0(x)|\geq r> 0, \quad \mbox{a.e. in $\Lambda$} $$ for some constant $r$, and the solutions $u[p]$ and $u[q]$ to \eqref{NH}-\eqref{C}-\eqref{K}, belong to $$ H^2(0,T; L^{\infty}(\Lambda)) \cap H^1(0,T;H^2(\Lambda)).$$ Then there exists $T>0$ such that if $$ u[p] (Q,t) = u[q] (Q,t) \quad \mbox{for each $t\in(0,T)$ and $Q\in\Pi_2\setminus\{Q_{N_2}\}$}, $$ then we have $p=q$ in $\Lambda$. \end{theorem}
\begin{theorem}[\bf Uniqueness for the Schr\"odinger inverse problem]\label{Thm1-2}
Assume that $p, q \in L^{\infty}(\Lambda)$, the initial value $u^0$ satisfies $$
|u^0(x)|\geq r> 0, \quad \mbox{a.e. in $\Lambda$} $$ for some constant $r$, and the solutions $u[p]$ and $u[q]$ to \eqref{NS}-\eqref{C}-\eqref{K}, belong to $$H^2(0,T; L^{\infty}(\Lambda)) \cap H^1(0,T;H^2(\Lambda)).$$ Then there exists $T>0$ such that if $$ \partial_xu[p] (Q,t) = \partial_xu[q] (Q,t) \quad \mbox{for each $t\in(0,T)$ and $Q\in\Pi_2\setminus\{Q_{N_2}\}$}, $$ then we have $p=q$ in $\Lambda$. \end{theorem}
One can refer to \cite{BaudouinCrepeauValein11} for the same inverse problem in the wave equation on a network where the proof is detailed in a star-shaped network but is actually generalizable to tree-shaped networks. Reference \cite{IgnatPazRosier12} discusses the inverse heat problem on tree-shaped network. Moreover the paper \cite{IgnatPazRosier12} treats the Schr\"odinger case in a star-shaped network and needs measurements at all external nodes. We do not know any uniqueness result for non-tree graphs, which are graphs containing a closed cycle. For observability inequality on general graph, see e.g., \cite{DagZua06}.\\
For the inverse problem in the wave equation case, we state
\begin{theorem}[\bf Stability]\label{Thm2} Let $M>0$ and $r>0$. Assume that $p\in L^\infty_M(\Lambda)$ and the solutions $u[p]$ and $u[q]$ to \eqref{NW}-\eqref{C}-\eqref{K} satisfy $$ u[p], u[q] \in H^3(0,T;L^{\infty}(\Lambda)) \cap H^1(0,T;H^2(\Lambda)). $$ Assume also that the initial value $u^0$ satisfies $$
|u^0(x)|\geq r> 0, \quad \mbox{a.e. in $\Lambda$}. $$ Then there exists $T_0>0$ such that for all $T\geq T_0$, there exists $C=C(T,r,M, \ell_0,..., \ell_N)>0$ such that \begin{equation}\label{stabpi}
|| q-p||_{L^2(\Lambda)}\leq C \sum_{j=1}^{N_2-1}\left\| \partial_xu_{j}[p](Q_j)
- \partial_xu_{j}[q](Q_j)\right\|_{H^1(0,T)}. \end{equation} \end{theorem}
This paper is composed of five sections. The proof of uniqueness in the inverse problem in the wave equation case (Theorem~\ref{Thm1}) is presented in Section 2. The cases of Schr\"odinger and heat equations are studied in Section 3, devoted to the proofs of Theorems~\ref{Thm1-1} and \ref{Thm1-2}. Theorem~\ref{Thm2} is finally proven in Section 5 by a compactness-uniqueness argument and an observability estimate on the whole network.\\
We conclude this section with a classical result on the existence and regularity of solutions of the wave system and provide the corresponding energy estimates for the solution which we will need later.
\begin{lemma}\label{Energy} Let $\Lambda$ be a tree-shaped network and assume that $p\in L^{\infty}_M(\Lambda)$, $g \in L^1(0,T;L^2(\Lambda))$, $u^0\in H_0^1(\Lambda)$ and $u^1\in L^2(\Lambda)$. We consider the 1-d wave equation on the network with the conditions \eqref{C} and \eqref{K}: \begin{equation}\label{e} \left\{\begin{array}{lll} \partial_t^2u-\partial_x^2 u+p(x)u=g(x,t),&\quad\mbox{in $\Lambda\times(0,T)$},\\
u(Q,t)=0,&\quad \mbox{in $(0,T),\, Q\in\Pi_2$},\\ u_j(P,t)=u_k(P,t),&\quad \mbox{in $(0,T),\, P\in\Pi_1,\, j,k\in \{ 1, ..., n_P \}$}, \\ \left[ \partial_x u (t)\right]_P=0,&\quad \mbox{in $(0,T)$, $P\in\Pi_1$},\\ u(0) = u^0, \quad \partial_t u(0)=u^1,&\quad \mbox{in $\Lambda$}. \end{array}\right. \end{equation} The Cauchy problem is well-posed and equation \eqref{e} admits a unique weak solution $$ u\in C([0,T],H_0^1(\Lambda)) \cap C^1([0,T],L^2(\Lambda)). $$ Moreover there exists a constant $C=C(\Lambda,T,M)>0$ such that for all $t\in(0,T)$, the energy $$
E(t) = ||\partial_t u(t)||^2_{L^2(\Lambda)}+||\partial_x u(t)||^2_{L^2(\Lambda)} $$ of the system \eqref{e} satisfies \begin{equation}\label{estimeenergy}
E(t) \leq C\left(||u^0||^2_{H_0^1(\Lambda)} + ||u^1||^2_{L^2(\Lambda)} +
||g||^2_{L^1(0,T,L^2(\Lambda))} \right) \end{equation} and we also have the following trace estimate \begin{equation}\label{hiddenregularity}
\sum_{j=1}^{N_2}\left\|\partial_x u_j(Q_j)\right\|_{L^2(0,T)}^2
\leq C \left(||u^0||^2_{H_0^1(\Lambda)}
+ ||u^1||^2_{L^2(\Lambda)} + ||g||^2_{L^1(0,T,L^2(\Lambda))} \right). \end{equation} \end{lemma}
The proof of the unique existence of solution to equation~\eqref{e} can be read in \cite[Chap. 3]{LionsMagenesBook}. Estimate \eqref{estimeenergy} is a classical result which can be formally obtained by multiplying the main equation in \eqref{e} by $\partial_t u_{j}$, summing up for $j\in\left\{0,...,N\right\}$ the integral of this equality on $(0,T)\times e_j$ and using some integrations by parts. Estimate~\eqref{hiddenregularity} is a hidden regularity result which can be obtained by multipliers technique (we refer to \cite[Chapter 1]{Lions}). Formally, for the particular case of a star-shaped network of vertex $P=0$ for example, it comes from the multiplication of \eqref{e} by $m(x)\partial_x u_{j}$, where $m \in C^1(\bar{\Lambda})$ with $m(0)=0$ and $m_j(l_j) = 1$, summing up the integrals of this equality on $(0,T)\times (0,l_j)$ over $j\in \{0,...,N\}$ and using integrations by parts.
\section{Uniqueness of the inverse problem - wave network case}
As already evoked in the introduction, the proof of Theorem~\ref{Thm1} will use a well-known 1-d result of uniqueness for the inverse problem. We recall it in the following lemma.
\begin{lemma}\label{1d} Let $r>0$, $p\in L^\infty(0,\ell)$ and $T > 2\ell$. Consider the 1-d wave equation in $[0,\ell]$ with homogeneous Dirichlet boundary data as follows: \begin{equation}\label{1D} \left\{ \begin{array}{lll} \partial_t^2 y - \partial_x^2 y + p(x)y = f(x)R(x,t), \qquad& (x,t) \in (0,\ell) \times (0,T),\\ y(\ell,t)=0,& t\in(0,T),\\ y(x,0) = 0, \partial_{t}y(x,0)=0 ,& x\in(0,\ell), \end{array} \right. \end{equation} where $f\in L^2(0,\ell)$ and $R\in H^1(0,T;L^\infty(0,\ell))$ satisfies
$ |R(x,0)|\geq r> 0$ a.e. in $(0,\ell)$.\\ If $\, \partial_x y (\ell,t) = 0 \,$ for all $t\in(0,T)$, then we have $f\equiv 0$ in $(0,\ell)$ and $y\equiv 0$ in $(0,\ell) \times (0,T)$. \end{lemma}
This lemma is a classical uniqueness result for the inverse source problem in a wave equation and the proof can be done by the method in \cite{BuKli81} on the basis of a 1-d Carleman estimate and the even extension of $y$ to negative times $t$. We further refer to Imanuvilov and Yamamoto \cite{ImYamIP01}, \cite{ImYamCom01}, Klibanov \cite{Klibanov92}, Klibanov and Timonov \cite{KliTiBook} for example, and we omit details of the proof. \\
\noindent {\bf Proof of Theorem~\ref{Thm1}.} We define the following operation of ``removing'' segments from the tree-shaped network $\Lambda$, starting from all the external nodes where we make measurements, except one. We divide the proof into several steps. \\
{\bf Step 1.} From Lemma~\ref{1d}, we can easily prove that if $e_j$ is a segment of $\Lambda$ which ends at an external node $Q_j\in \Pi_2$, and if the solutions $u[p]$ and $u[q]$ to \eqref{NW} satisfy $\partial_x u[p](Q_j,t) = \partial_x u[q](Q_j,t)$ for all $t\in(0,T)$, then $p= q$ on the segment~$e_j$ and $u[p](x,t) = u[q](x,t)$ for all $x\in e_j$ and for all $ t\in(0,T)$. Indeed, if we set $y = u_j[p_j] - u_j[q_j]$, then \begin{equation}\label{1Dj} \left\{ \begin{array}{lll} \partial_t^2 y - \partial_x^2 y + p_j(x)y = (q_j-p_j)(x)u_j[q_j](x,t)\qquad &\quad (x,t)\in (0,\ell) \times (0,T),\\ y(Q_j,t)=0,&\quad t\in(0,T),\\ y(x,0) = 0, \partial_{t}y(x,0)=0 ,&\quad x\in(0,\ell), \end{array} \right. \end{equation} and noting that $T>0$ is sufficiently large, we can apply Lemma~\ref{1d} since $\partial_x y (Q_j,t) = 0$ for
all $t\in(0,T)$, $u_j[q_j] \in H^1(0,T;L^{\infty}(\Lambda))$ and $|u_j^0(x)| \geq r> 0$ on $e_j$. We obtain that $p_j \equiv q_j$ on $e_j$ and consequently $u_j[p_j](x,t) = u_j[q_j](x,t)$ in $e_j\times(0,T_1)$, where $T_1 \in (0,T)$ is some constant. \\ Therefore, for any segment $e$ with the end-points $P$ and $Q$ such that $Q \in \Pi_2 \setminus \{ Q_{N_2}\}$, we see that $p=q$ on $e$ and $(u[p]\vert_e)(P,t) = (u[q]\vert_e)(P,t)$, $(\partial_xu[p]\vert_e)(P,t) = (\partial_xu[q]\vert_e)(P,t)$ for $0 < t < T_1$. Let $\Pi^2_1$ be all the interior node points $P$ of segments of $\Lambda$ having their other end-point in $\Pi_2 \setminus \{Q_{N_2}\}$. We note that $\Pi_1^2 \subset \Pi_1$. Applying the above argument to all the exterior end-points except for $Q_{N_2}$, we have $$ u[p]_j(P,t) = u[q]_j(P,t), \quad \partial_xu[p]_j(P,t) = \partial_xu[q]_j(P,t) $$ for each $P \in \Pi_1^2$, $0 < t < T_1$ and $j \in \{1, ..., N_3\}$. Here by $e_1, ..., e_{N_3}$, we enumerate the segments connecting a point in $\Pi_1^2$ and a point in $\Pi_2 \setminus \{Q_{N_2}\}$. \\
{\bf Step 2.} Let $P \in \Pi_1$ be a given node such that $n_P$ segments, say, $e_1, ..., e_{n_P}$ meet at $P$ and $e_1, ..., e_{n_P-1}$ connect $P$ with exterior end-points, say, $Q_1, ..., Q_{n_P-1} \in \Pi_2$ and \begin{equation}\label{star} \begin{array}{c} u[p]_j(P,t) = u[q]_j(P,t), \\ \partial_x u[p]_j(P,t) = \partial_x u[q]_j(P,t), \quad j \in \{1, ..., n_P-1\}, \thinspace 0<t<T. \end{array} \end{equation} Using the continuity \eqref{C} and the Kirchhoff law \eqref{K} at node $P$, we can deduce that $$ \begin{array}{c} u[p]_{n_P}(P,t) = u[q]_{n_P}(P,t), \\ \partial_x u[p]_{n_P}(P,t) = \partial_x u[q]_{n_P}(P,t), \quad 0 < t < T. \end{array} $$
{\bf Step 3.} Let $\Lambda^2$ be the graph generated from $\Lambda$ by removing $e_1, ..., e_{N_3}$. Therefore, since $T_1>0$ is still sufficiently large, we can apply the same argument as in Step 1 to the graph $\Lambda^2$.
We repeat this operation to obtain the sets $\Lambda^3$, then $\Lambda^4$,..., $\Lambda^n$. Hence, let $L^k$ be the set of all the open segments of $\Lambda_k$, $\Pi^k_1$ the set of the interior node points of $\Lambda_k$, $\Pi^k_2$ the set of external endpoints of $\Lambda_k$. Setting $\Lambda^1 = \Lambda$, we note that $L^1 = \{ e_0, ..., e_{N} \}$, $\Pi^1_1 = \{ P_1, ..., P_{N_1}\}$, $\Pi^1_2 = \{ Q_1, ..., Q_{N_2}\}$.
By \eqref{C} and \eqref{K}, we see that $$ \Pi^{k-1}_1 \supset \Pi^k_1, \qquad \forall k \in \mathbb N $$ and $$ \Lambda_k = L^k \cup \Pi^k_1 \cup \Pi^k_2, \quad L^k \cap \Pi^k_1 = L^k \cap \Pi^k_2 = \Pi^k_1 \cap \Pi^k_2 = \emptyset, \quad \forall k\in \mathbb N. $$ In order to complete the proof, it is sufficient to prove that there exists $n \in \mathbb N$ such that \begin{equation}\label{emptylambda} \Lambda_n = \emptyset. \end{equation} Assume contrarily that $\Lambda_n \ne \emptyset$ for all $n \in \mathbb N$. Since every segment with exterior end-point in $\Pi_2 \setminus \{ Q_{N_2}\}$ can be removed (meaning that $u[p] = u[q]$ on the segment) by the above operation, we obtain that there exists $n_0 \in \mathbb N$ such that $\Lambda_{n_0} = L^{n_0} \cup \Pi_1^{n_0}$, \textit{i.e.,} $\Pi^{n_0}_2 = \emptyset$. Then $\Lambda_{n_0}$ must be a closed cycle since it possesses no external endpoint. By assumption, there exist no closed cycles in a tree-shaped network. This is a contradiction and thus the proof of \eqref{emptylambda}, and therefore, the one of Theorem~\ref{Thm1} is completed.
{}$\square$
\section{Uniqueness for the inverse problem - Schr\"odinger and heat network cases}
\subsection{Proof of Theorem \ref{Thm1-1} - Heat case.}
We apply an argument similar to the proof of Theorem 4.7 in \cite{Klibanov92} which is based on the reduction of the inverse heat problem to an inverse wave problem by a kind of Laplace transform called the Reznitzkaya transform (e.g., \cite{Isakov}, \cite{LRS}, \cite{RomBook}).
First we define an operator $\Delta_{\Lambda}$ in $L^2(\Lambda)$ by $\Delta_{\Lambda}u = \partial_x^2 u_j$ in $e_j$, for all $j\in \{0, 1, ..., N\}$ with $$ \mathcal{D}(\Delta_{\Lambda}) = \big\{ u = (u_0, ..., u_N); \, \mbox{$u_j \in H^2(e_j)$, $\partial_xu(Q) = 0$ for $Q \in \Pi_2$, $u_j$ satisfying \eqref{C} and \eqref{K}}\big\}. $$ Here, $e_j$ is oriented from $I(e_j)$ to $T(e_j)$ when defining $\partial_x^2$. Then, similarly to \cite{IgnatPazRosier12}, we can prove that $\Delta_{\Lambda}$ is self-adjoint and $(\Delta_{\Lambda}u, u)_{L^2(\Lambda)} := \sum_{j=0}^N (\partial_x^2u_j,u_j)_{L^2(e_j)} \le 0$. Therefore $\Delta_{\Lambda}$ generates an analytic semigroup $e^{t\Delta_{\Lambda}}$, $t>0$ (e.g., Pazy \cite{Pazy}, Tanabe \cite{TanabeBook}). Since $p \in L^{\infty}(\Lambda)$, the perturbed operator $\Delta_{\Lambda} + p$ generates an analytic semigroup (e.g., Theorem 2.1 in \cite{Pazy}, p.80). Therefore by the semigroup theory (e.g. \cite{Pazy}, \cite{TanabeBook}), we know that the solutions $u[p](x,t)$ and $u[q](x,t)$ of equation (4) are analytic in $t$ for any fixed $x\in \Lambda$. More precisely, $u[p], u[q]: (0,\infty) \longrightarrow H^2(\Lambda)$ are analytic in $t>0$.
By $u^H[p]$ we denote the solution of the heat system \eqref{NH} and by $u^H[q]$ the corresponding solution when the potential is $q$. By the analyticity in $t$ and the assumption in the theorem, we have \begin{equation}\label{assumHeat} u^H[p](Q,t) = u^H[q](Q,t), \quad \forall Q \in\Pi_2\setminus \{Q_{N_2}\}, \thinspace \forall t > 0. \end{equation} On the other hand, denote by $\widetilde u[p]$ the solution of the wave system \begin{equation}\label{NWH} \left\{ \begin{array}{lll} \partial_t^2 u_j - \partial_x^2 u_j + p_j(x)u_j = 0,\quad &\quad\forall j\in\{0,1,..., N\}, \forall (x,t)\in e_j \times (0,\infty),\\ \partial_x u[p](Q,t)=0,&\quad \forall Q\in \Pi_2, \forall t\in(0,\infty),\\ u[p](x,0) = 0, \thinspace \partial_t u(x,0)=u^0(x),&\quad \forall x\in\Lambda \end{array} \right. \end{equation} and by $\widetilde u[q]$ the corresponding solution when the potential is $q$. Then we obtain (e.g., \cite[pp.251-252]{LRS}) that $$ \frac{1}{2\sqrt{\pi t^3}}\int^{\infty}_0 \tau e^{-\frac{\tau^2}{4t}} \widetilde u[p](x,\tau) d\tau $$ satisfies (4). The uniqueness of solution to equation (4) implies $$ u^H[p](x,t) = \frac{1}{2\sqrt{\pi t^3}}\int^{\infty}_0 \tau e^{-\frac{\tau^2}{4t}}\widetilde u[p](x,\tau) d\tau, \quad \forall x \in \Lambda, \forall t > 0 $$ and the same equality with $q$. By assumption \eqref{assumHeat}, we obtain $$ \frac{1}{2\sqrt{\pi t^3}}\int^{\infty}_0 \tau e^{-\frac{\tau^2}{4t}}(\widetilde u[p] - \widetilde u[q])(Q,\tau) d\tau = 0, \quad \forall Q\in \Pi_2 \setminus \{Q_{N_2}\}, \forall t>0. $$ By the change of variables $s = \frac{1}{4t}$ and $\tau^2 = \eta$, we obtain $$ \int^{\infty}_0 e^{-s\eta}(\widetilde{u}[p] - \widetilde{u}[q])(Q,\sqrt{\eta}) d\eta = 0, \quad \forall Q\in \Pi_2 \setminus \{Q_{N_2}\}, \forall s>0 $$ and the injectivity of the Laplace transform yields \begin{equation} (\widetilde{u}[p] - \widetilde{u}[q])(Q,\sqrt\eta) = 0, \quad \forall Q\in \Pi_2 \setminus \{Q_{N_2}\}, \forall \eta>0. 
\end{equation} Applying the same argument as in Section 2 for the wave system, we prove $p=q$ in $\Lambda$. Thus the proof of Theorem~\ref{Thm1-1} is completed.
\subsection{Proof of Theorem~\ref{Thm1-2} - Schr\"odinger case.}
It is sufficient to prove the following lemma. \\ \begin{lemma}\label{schrodi} Let $r>0$ and $p \in L^{\infty}(0,\ell)$, $f\in L^2(0,\ell)$ be real-valued, and $T>0$ be arbitrarily fixed. We consider a 1-d Schr\"odinger equation: $$ \left\{ \begin{array}{lll} i\partial_t y - \partial_x^2 y + p(x)y = f(x)R(x,t), \qquad &\forall (x,t) \in (0,\ell) \times (0,T), \\ y(\ell,t) = 0, &\forall t \in (0, T),\\ y(x,0) = 0, &\forall x \in (0, \ell), \end{array} \right. $$ where $R \in H^1(0,T; L^{\infty}(0,\ell))$ satisfies $\vert R(x,0)\vert \ge r > 0$ a.e. in $(0,\ell)$. \\
If $\, \partial_xy(\ell,t) = 0\,$ for all $ t \in (0, T)$, then we have $f=0$ in $(0,\ell)$ and $y=0$ in $(0,\ell) \times (0,T)$. \end{lemma}
Using the same method as the one for the proof of Lemma~\ref{1d}, this lemma is proved by means of the following Carleman estimate: \\ \begin{lemma} For $x_0 \not\in [0,\ell]$ and $\beta > 0$ arbitrarily fixed, we set $$ Sv = i\partial_tv - \partial_x^2v, \quad \varphi(x,t) = e^{\gamma(\vert x-x_0\vert^2 - \beta t^2)}, \quad (x,t) \in (0,\ell) \times (0,T). $$ Then there exists a constant $\gamma_0 > 0$ such that for arbitrary $\gamma\ge \gamma_0$ we can choose $s_0 > 0$ satisfying, for a constant $C>0$, $$ \int^T_0\int^{\ell}_0 (s\vert \partial_xv\vert^2 + s^3\vert v\vert^2) e^{2s\varphi} dxdt \le C\int^T_0\int^{\ell}_0 \vert Sv\vert^2 e^{2s\varphi} dxdt $$ for all $s > s_0$ and all $v \in L^2(0,T;H^2_0(0,\ell)) \cap H^1_0(0,T;L^2(0,\ell))$. \end{lemma}
This is a Carleman estimate with regular weight function $\gamma(\vert x-x_0\vert^2 - \beta t^2)$ and for the proof, we refer to e.g. \cite[Lemma 2.1]{YuYam-AA07} (see also \cite{YuYam-CAM10}). Concerning a Carleman estimate for Schr\"odinger equation in a bounded domain $\Omega \subset \mathbb R^n$ with singular weight function $\varphi$, we can refer for example to \cite{BaudouinPuel02,MOR08}.
On the basis of this lemma, the proof of Lemma~\ref{schrodi} is done by a usual method by Bukhgeim and Klibanov \cite{BuKli81} by using the extension of $y$ to $-T<t<0$ by $y(\cdot,t) = \overline{y(\cdot,-t)}$ and a cut-off argument. We omit the details of the proof.
\section{Observability in the wave network}
The proof of the stability result will rely strongly on the classical result of observability that we are now presenting and proving. One should specifically mention the survey \cite{ZuaSurveyNetworks} and the books \cite{DagZua06}, \cite{LaLeSch94}, where the question of observability in networks of strings (or wave equations) is widely explored in different cases.
We concentrate here on the case where the observation available comes from all but one external nodes, in a setting with a system of wave equations with potential. Since most of the literature on string networks focus only on the wave equation without lower order terms (see \cite{LaLeSch94} or \cite{DagZua06} for instance), we detail here how to obtain the observability result for the wave equation with potential. In some other cases, we can prove the observability inequality directly by a global Carleman estimate (e.g. \cite{BaudouinCrepeauValein11}).
\begin{theorem}[\bf Observability inequality]\label{Thm3} On the tree-shaped network $\Lambda$, assuming $p\in L^\infty(\Lambda)$, let us consider the system of 1-d wave equations under the continuity and Kirchhoff law's assumptions \eqref{C} and \eqref{K}: \begin{equation}\label{eqobs}
\left\{\begin{array}{lll} \partial_t^2u-\partial_x^2 u+p(x)u=0,&\quad\mbox{in $\Lambda\times(0,T)$},\\
u(Q,t)=0,&\quad \mbox{in $(0,T),\, \forall Q\in\Pi_2$},\\ u_j(P,t)=u_k(P,t),&\quad \mbox{in $(0,T),\,\forall P\in\Pi_1,\, \forall j,k\in \{1, ..., n_P\}$}, \\ \left[ \partial_x u (t)\right]_P=0,&\quad \mbox{in $(0,T), \forall P\in\Pi_1$},\\ u(x,0) = 0, \quad \partial_t u(x,0)=a(x),&\quad \mbox{in $\Lambda$}, \end{array}\right. \end{equation} Then there exists a minimal time $T_0$ such that for all $T > T_0$, the observability estimate \begin{equation}\label{obs}
\int_\Lambda | a(x)|^2 dx
\le C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_xu_j(Q_j,t)|^2 dt \end{equation} holds for a solution $u$ of \eqref{eqobs}. \end{theorem}
\noindent {\bf Proof of Theorem~\ref{Thm3}.} Let $v$ be the solution of the system $$ \left\{ \begin{array}{lll} \partial_t^2 v - \partial_x^2 v= -pu \quad &\quad\forall (x,t)\in \Lambda \times (0,T),\\ v(Q,t)=0,&\quad \forall Q\in \Pi_2, t\in(0,T),\\ v_j(x,0) = 0, \partial_t v_j(x,0)=0,&\quad\forall j\in\{0,1,..., N\}, \quad x\in e_j, \end{array} \right. $$ under conditions \eqref{C} and \eqref{K}. Then \eqref{hiddenregularity} in Lemma~\ref{Energy} and $p\in L^\infty(\Lambda)$ yields \begin{equation}\label{eqeq}
\sum_{j=1}^{N_2} \int^T_0 | \partial_xv_j(Q_j,t)|^2dt
\le C \int^T_0\int_\Lambda | pu|^2 dxdt
\le C \int^T_0\int_\Lambda | u|^2 dxdt. \end{equation} Setting $w = u - v$, we still have \eqref{C} and \eqref{K} satisfied by $w$, along with the following equation $$ \left\{ \begin{array}{lll} \partial_t^2 w - \partial_x^2 w= 0 \quad &\quad\forall (x,t)\in \Lambda \times (0,T),\\ w(Q,t)=0,&\quad \forall Q\in \Pi_2, t\in(0,T),\\ w_j(x,0) = 0, \partial_t w_j(x,0)=a(x),&\quad\forall x\in \Lambda. \end{array} \right. $$ Therefore, using a classical observability inequality in the case where $p=0$ (e.g., \cite{DagZua06,LaLeSch94}), we have $$
\int_\Lambda | a(x)|^2 dx
\le C \sum_{j=1}^{N_2-1} \int^T_0 | \partial_xw_j(Q_j,t)|^2 dt. $$ Hence, by \eqref{eqeq}, we have \begin{eqnarray}
&& \int_\Lambda | a(x)|^2 dx
\le C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_xu_j(Q_j,t)|^2 dt
+ C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_xv_j(Q_j,t)|^2 dt\nonumber\\
&&\le C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_xu_j(Q_j,t)|^2 dt
+ C \int^T_0\int_\Lambda | u|^2dxdt.\label{unicompact} \end{eqnarray} Therefore a usual compactness-uniqueness argument yields the observability inequality \eqref{obs}. Indeed, if \eqref{obs} is not satisfied, then we can assume that there exists $a^n\in L^2(\Lambda), n\in \mathbb N$ such that \begin{equation}\label{hypcontr}
\|a^n\|_{L^2(\Lambda)} = 1, \, \forall n\in\mathbb N \quad \hbox{ and } \quad
\lim_{n\to + \infty}\sum_{j=1}^{N_2-1} \int^T_0 | \partial_xu_j^n(Q_j,t)|^2 dt = 0. \end{equation} Using the energy estimate \eqref{estimeenergy} of Lemma~\ref{Energy} on the solution $u^n$ of system \eqref{eqobs} with initial data $a^n$, we obtain $$
|| u^n(t)||^2_{H^1_0(\Lambda)} = ||\partial_x u^n(t)||^2_{L^2(\Lambda)}
\leq C||a^n||^2_{L^2(\Lambda)} \leq C. $$ Since the embedding $H^1_0(\Lambda) \subset L^2(\Lambda)$ is compact, we can extract a subsequence, denoted again by the same notation and we have $(u^n)_{n\in\mathbb N^*}$ convergent in $L^2(\Lambda)$. Therefore, using \eqref{unicompact}, we obtain \begin{align*}
\int_\Lambda | a^n-a^m|^2 dx
\le & ~C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_xu_j^n(Q_j,t) |^2 dt
+ C\sum_{j=1}^{N_2-1} \int^T_0 |\partial_xu_{j}^m(Q_j,t)|^2 dt \\ &
+~ C \int^T_0\int_\Lambda | u^n - u^m|^2dxdt \end{align*} so that \eqref{hypcontr} and $
\displaystyle\lim_{n,m\to \infty} \|u^n - u^m\|_{L^2(\Lambda)} = 0 $ imply $
\displaystyle\lim_{n,m\to \infty} ||a^n-a^m||^2_{L^2(\Lambda)} = 0. $ Consequently, there exists a limit $a_0$ such that $\displaystyle\lim_{n\to +\infty}a^n = a_0$ in $L^2(\Lambda)$ and from
\eqref{hypcontr}, we have $\|a_0\|_{L^2(\Lambda)} = 1$. Moreover, the solution $u[a_0]$ of system \eqref{eqobs} with initial data $a_0$ is such that $$ \partial_xu_{j}[a_0](Q,t) = 0, \quad \forall t\in(0,T), \forall Q\in\Pi_2. $$ Hence we apply a classical unique continuation result for a wave equation to obtain that $u[a_0]$ vanishes everywhere so that $a_0 = 0$, which contradicts
$\|a_0\|_{L^2(\Lambda)} = 1$. Here, the unique continuation can be proved for instance by a Carleman estimate (e.g. \cite{Isakov}, \cite{KliTiBook}). This ends the proof of Theorem~\ref{Thm3}.
\section{Proof of the stability for the wave network inverse problem}
This section is devoted to the proof of Theorem~\ref{Thm2}. The proof relies on a compactness-uniqueness argument and the observability estimate (Theorem~\ref{Thm3}) on the whole network. \\
Let us denote by $u[p]$ the solution of \eqref{NW} under the assumptions \eqref{C} and \eqref{K}. Henceforth we always assume the conditions \eqref{C} and \eqref{K}. We consider $y = \partial_t\left(u[p] - u[q] \right)$ that satisfy \begin{equation}\label{eqy} \left\{ \begin{array}{lll} \partial_t^2 y - \partial_x^2 y + q(x)y = (q-p) \partial_t u[p]\quad &\quad\forall (x,t)\in \Lambda \times (-T,T),\\ y(Q,t)=0,&\quad \forall Q\in \Pi_2, t\in(0,T),\\ y(x,0) = 0, \partial_t y(x,0)=(q-p)u^0(x),&\quad \forall x\in\Lambda, \end{array} \right. \end{equation} We define $\psi$ and $\phi$ as the solutions of \begin{equation}\label{eqpsi} \left\{ \begin{array}{lll} \partial_t^2 \psi - \partial_x^2 \psi + q(x)\psi = (q-p) \partial_t u[p]\quad &\quad\forall (x,t)\in \Lambda \times (-T,T),\\ \psi(Q,t)=0,&\quad \forall Q\in \Pi_2, t\in(0,T),\\ \psi(x,0) = 0, \partial_t \psi(x,0)=0,&\quad \forall x\in\Lambda, \end{array} \right. \end{equation} and \begin{equation}\label{eqphi} \left\{ \begin{array}{lll} \partial_t^2 \phi - \partial_x^2 \phi + q(x)\phi = 0\quad &\quad \forall (x,t)\in \Lambda \times (-T,T),\\ \phi(Q,t)=0,&\quad \forall Q\in \Pi_2, t\in(0,T),\\ \phi(x,0) = 0, \partial_t \phi(x,0)=(q-p)u^0(x),&\quad \forall x\in\Lambda. \end{array} \right. \end{equation} such that $y = \psi + \phi$. We can apply Theorem~\ref{Thm3} to equation \eqref{eqphi} so that \begin{equation}\label{obsphi}
\int_\Lambda | (q-p)u^0|^2 dx
\le C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_x\phi_j(Q_j,t)|^2 dt. \end{equation} On the other hand, a regularity result of Lemma~\ref{Energy} applied to a time derivative of equation \eqref{eqpsi} gives \begin{eqnarray}
\sum_{j=1}^{N_2}\left\|\partial_x \psi_j(Q_j)\right\|_{H^1(0,T)}^2
&\leq& C \left( ||(q-p) \partial_{t}^2 u[p]||^2_{L^1(0,T,L^2(\Lambda))}
+ ||(q-p) u^1||^2_{L^2(\Lambda)}\right)\nonumber\\
&\leq& 2CK^2 ||q-p||^2_{L^2(\Lambda)} \label{hiddenregularitypsi} \end{eqnarray} as soon as we have $ u[p]\in H^2(0,T,L^\infty(\Lambda))$ which yields $\partial_tu[p]\in C([0,T];L^\infty(\Lambda))$ so that
$u^1\in L^\infty(\Lambda)$ with $\|u[p]\|_{H^2(0,T,L^\infty(\Lambda))} \leq K$. The compact embedding $H^1 (0,T) \subset L^2(0,T)$ then allows us to conclude that the operator $\Psi : L^2(\Lambda) \to L^2(0,T)$ defined by $$ \Psi(p-q)(t) = \sum_{j=1}^{N_2} \partial_x \psi_j(Q_j,t), \qquad 0<t<T $$ is compact.
Therefore, since we have $|u^0(x)|\geq r> 0$ almost everywhere in $\Lambda$, by \eqref{obsphi} and \eqref{hiddenregularitypsi}, we obtain \begin{eqnarray}
|| q-p||^2_{L^2(\Lambda)}
&\leq& C \int_\Lambda | (q-p)u^0|^2 dx~ \leq ~ C\sum_{j=1}^{N_2-1} \int^T_0 |
\partial_x\phi_j(Q_j,t)|^2 dt \nonumber\\
&\leq& C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_x y_j(Q_j,t)|^2 dt
+ C\sum_{j=1}^{N_2} \int^T_0 | \partial_x\psi_j(Q_j,t)|^2 dt
\nonumber\\
&\leq& C\sum_{j=1}^{N_2-1} \int^T_0 | \partial_x y_j(Q_j,t)|^2 dt
+ C ||\Psi(q-p)||^2_{L^2(0,T)} \label{estimfinale}\\
&\leq& C \sum_{j=1}^{N_2-1}\left\| \partial_xu_{j}[p](Q_j)- \partial_xu_{j}[q](Q_j)
\right\|^2_{H^1(0,T)} + C ||\Psi(q-p)||^2_{L^2(0,T)}.
\nonumber \end{eqnarray} We aim at proving that we can get rid of the second term on the right-hand side of the last estimate in order to obtain \eqref{stabpi}. Again, a compactness-uniqueness argument will be the key and it relies here on the compactness of $\Psi$ and the uniqueness result of Theorem~\ref{Thm1}.
Indeed, we set $f= q-p$. We assume that $$
|| f||_{L^2(\Lambda)}\leq C \sum_{j=1}^{N_2-1}\left\| \partial_x y_j(Q_j)
\right\|_{L^2(0,T)}, $$ which is equivalent to \eqref{stabpi}, does not hold. Then one can assume that there exists $f^n\in L^2(\Lambda), n\in\mathbb N$ such that \begin{equation}\label{hypcontr1}
\|f^n\|_{L^2(\Lambda)} = 1, \, \forall n\in\mathbb N
\quad \hbox{ and } \quad
\lim_{n\to + \infty} \sum_{j=1}^{N_2-1}\left\| \partial_x y_j^n(Q_j)
\right\|_{L^2(0,T)} = 0. \end{equation}
First, since the sequence $(f^n)_{n\in\mathbb N}$ is bounded in $L^2(\Lambda)$, we can extract a subsequence denoted again by $(f^n)_{n\in\mathbb N}$ such that it converges towards some $f^0\in L^2(\Lambda)$ weakly in $L^2(\Lambda)$. Since $\Psi$ is a compact operator, we obtain therefore the strong convergence result \begin{equation}\label{strongcv}
\lim_{n,m\to \infty} \|\Psi(f^n) - \Psi(f^m)\|_{L^2(0,T)} = 0. \end{equation} Then, from \eqref{estimfinale} we can write $$
|| f^n - f^m||_{L^2(\Lambda)}
\leq C \sum_{j=1}^{N_2-1}\left\| \partial_x y_j^n(Q_j)\right\|_{L^2(0,T)}
+ C \sum_{j=1}^{N_2-1}\left\| \partial_x y_j^m(Q_j)\right\|_{L^2(0,T)}
+ C ||\Psi(f^n) - \Psi(f^m)||^2_{L^2(0,T)} $$ and deduce from \eqref{hypcontr1} and \eqref{strongcv} that $\displaystyle\lim_{n,m\to \infty}
\|f^n - f^m\|_{L^2(\Lambda)} = 0$, so that $\displaystyle\lim_{n\to \infty}
\|f^n - f^0\|_{L^2(\Lambda)} = 0$ with \begin{equation}\label{f0}
\|f^0\|_{L^2(\Lambda)} = 1. \end{equation}
Moreover, using the trace estimate \eqref{hiddenregularity} of Lemma~\ref{Energy} for the solution $y^n$ of system \eqref{eqy} with initial data $f^n u^0$ and source term $f^n\partial_tu[p]$, we obtain $$
\sum_{j=1}^{N_2-1}\left\|\partial_x y_j^n(Q_j)\right\|_{L^2(0,T)}^2
\leq C\left( ||f^n u^0||^2_{L^2(\Lambda)}
+ ||f^n\partial_tu[p]||^2_{L^1(0,T,L^2(\Lambda))} \right)
\leq 2CK^2 \|f^n\|^2_{L^2(\Lambda)}. $$ Thus we can write $$
\lim_{n\to \infty} \sum_{j=1}^{N_2-1}\left\|\partial_x y_j^n(Q_j)
- \partial_x y_j^0(Q_j)\right\|_{L^2(0,T)}^2
\leq 2CK^2 \lim_{n\to \infty} \|f^n - f^0\|_{L^2(\Lambda)} = 0, $$ which, combined with \eqref{hypcontr1}, gives $$ \partial_x y_j^0(Q,t) = 0, \qquad \forall Q\in \Pi_2\setminus {\{Q_{N_2}\}}, \forall t\in(0,T). $$ We finally apply Theorem~\ref{Thm1} and obtain $f^0 = 0$ in $L^2(\Lambda)$, which contradicts \eqref{f0}. Thus the proof of Theorem~\ref{Thm2} is complete.
\providecommand{\MR}[1]{}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Computation of isotopisms of algebras over finite fields by means of graph invariants} \author{O. J. Falc\'on$^{1}$} \ead{[email protected]} \author{R. M. Falc\'on$^2$} \ead{[email protected]} \author{J. N\'u\~nez$^1$} \ead{[email protected]} \author{A. M. Pacheco$^{3}$} \ead{[email protected]} \author{M. T. Villar$^1$} \ead{[email protected]} \address{$^1$ Department of Geometry and Topology. University of Seville, Spain.\\ $^2$ Department of Applied Mathematics I. University of Seville, Spain.\\ $^3$ Department of Quantitative Methods. Loyola University Andalusia, Spain. }
\begin{abstract} In this paper we define a pair of faithful functors that map isomorphic and isotopic finite-dimensional algebras over finite fields to isomorphic graphs. These functors reduce the cost of computation that is usually required to determine whether two algebras are isomorphic. In order to illustrate their efficiency, we determine explicitly the classification of two- and three-dimensional partial quasigroup rings. \end{abstract}
\begin{keyword} Graph theory \sep finite field \sep isomorphism \sep Latin square. \MSC 05C25 \sep 05C30 \sep 05B15. \end{keyword} \end{frontmatter}
\section{Introduction}
Graph invariants constitute an interesting tool in Chemistry, Communication or Engineering \cite{Dobrynin2001, Khalifeh2010, Yousefi2011}. In Mathematics, one of the topics for which graph invariants have revealed to play an important role is the classical problem of deciding whether two algebras are isomorphic. This problem is usually dealt with by computing the reduced Gr\"obner basis of the system of polynomial equations that is uniquely related to the structure constants of both algebras. This computation is, however, very sensitive to the number of variables \cite{Gao2009} and gives rise to distinct problems of computation time and memory usage even for low-dimensional algebras \cite{Falcon2016a, Graaf2005}. This paper deals with Graph Theory in order to reduce this cost of computation.
Graph invariants have been proposed in the last years as an efficient alternative to study isomorphisms of distinct types of algebras \cite{Bocian2014, Ceballos2016, Kaveh2011}. Nevertheless, the problem of identifying a functor that relates the category of algebras with that of graphs remains still open. Based on a proposal of McKay et al. \cite{McKay2007} for identifying isotopisms of Latin squares with isomorphisms of vertex-colored graphs, we describe in Section 3 a pair of graphs that enable us to find faithful functors between finite-dimensional algebras over finite fields and these types of graphs. These functors map isomorphic and isotopic algebras to isomorphic graphs. Reciprocally, any pair of isomorphic graphs is uniquely related to a pair of algebras so that there exists a multiplicative map between them. The main advantage of our proposal, apart from the reduction of the mentioned cost of computation, is the feasibility of studying the possible isomorphism between two given finite-dimensional algebras defined over the same field, whatever the types of both algebras are. As an illustrative example, we focus in Section 4 on the classification of partial quasigroup rings according to the known isotopism classes of partial Latin squares on which they are based.
\section{Preliminaries}
In this section we expose some basic concepts and results on Graph Theory, isotopisms of algebras, partial Latin squares and Computational Algebraic Geometry that we use throughout the paper. For more details about these topics we refer, respectively, to the manuscripts \cite{Harary1969, Albert1942, Denes1974, Cox1998}.
\subsection{Graph Theory}
A {\em graph} is a pair $G=(V,E)$ formed by a set $V$ of {\em vertices} and a set $E$ of $2$-subsets of $V$ called {\em edges}. Two vertices defining an edge are said to be {\em adjacent}. The {\em degree} of a vertex $v$ is the number $d(v)$ of edges containing $v$. The graph $G$ is {\em vertex-colored} if there exists a partition of $V$ into color sets. The color of a vertex $v$ is denoted as $\mathrm{color}(v)$. An {\em isomorphism} between two vertex-colored graphs $G$ and $G'$ is any bijective map $f$ between their sets of vertices that preserves adjacency and color sets, that is, such that it maps edges to edges and $\mathrm{color}(f(v))=\mathrm{color}(v)$, for every vertex $v$ in $G$.
\subsection{Isotopisms of algebras}
Two algebras $A$ and $A'$ over a field $\mathbb{K}$ are said to be {\em isotopic} if there exist three non-singular linear transformations $f$, $g$ and $h$ from $A$ to $A'$ such that $f(u)g(v)=h(uv)$, for all $u,v\in A$. The triple $(f,g,h)$ is an {\em isotopism} between $A$ and $A'$. If $f=g=h$, then this constitutes an {\em isomorphism}.
The {\em structure constants} of an $n$-dimensional algebra $A$ over a field $\mathbb{K}$ of basis $\{e_1,\ldots,e_n\}$ are the numbers $c_{ij}^k\in \mathbb{K}$ such that $e_ie_j = \sum_{k=1}^n c_{ij}^k e_k$, for all $i, j \leq n$. If all of them are zeros, then $A$ is {\em abelian}. In particular, the $n$-dimensional abelian algebra is not isotopic to any other $n$-dimensional algebra.
The {\em left annihilator} of a vector subspace $S$ of the algebra $A$ is the set $\mathrm{Ann}_{A^-}(S)=\{u\in A\mid\, uv=0, \text { for all } v\in S\}$. Its {\em right annihilator} is the set $\mathrm{Ann}_{A^+}(S)=\{u\in A\mid\, vu=0, \text { for all } v\in S\}$. The intersection of both sets is the {\em annihilator} $\mathrm{Ann}_A(S)$.
\begin{lemm} \label{lemm_annihilator} Let $(f,g,h)$ be an isotopism between two $n$-dimensional algebras $A$ and $A'$, and let $S$ be a vector subspace of $A$. Then, \begin{enumerate}[a)] \item $f(\mathrm{Ann}_{A^-}(S)) = \mathrm{Ann}_{{A'}^-}(g(S))$. \item $g(\mathrm{Ann}_{A^+}(S)) = \mathrm{Ann}_{{A'}^+}(f(S))$. \item $f(\mathrm{Ann}_{A^-}(S))\cap g(\mathrm{Ann}_{A^+}(S)) = \mathrm{Ann}_{A'}(f(S)\cap g(S)).$ \end{enumerate} \end{lemm}
\begin{proof} Let us prove assertion (a). Assertion (b) follows similarly and assertion (c) is a consequence of (a) and (b). Let $u\in g(S)$ and $v\in f(\mathrm{Ann}_{A^-}(S))$. Then, $vu=f(f^{-1}(v))g(g^{-1}(u))=h(f^{-1}(v)g^{-1}(u))=h(0)=0$, because $g^{-1}(u)\in S$ and $f^{-1}(v)\in \mathrm{Ann}_{A^-}(S)$. Hence, $f(\mathrm{Ann}_{A^-}(S))$ $\subseteq \mathrm{Ann}_{{A'}^-}(g(S))$. Now, let $u\in \mathrm{Ann}_{{A'}^-}(g(S))$ and $v\in S$. From the regularity of $f$, we have that $h(f^{-1}(u)v)=ug(v)=0$. The regularity of $h$ involves that $f^{-1}(u)v=0$. Thus, $u\in f(\mathrm{Ann}_{A^-}(S))$ and hence, $\mathrm{Ann}_{{A'}^-}(g(S))\subseteq f(\mathrm{Ann}_{A^-}(S))$. \end{proof}
The {\em derived algebra} of $A$ is the subalgebra $A^2=\{uv\mid\, u,v\in A\}\subseteq A$.
\begin{lemm} \label{lemm_M1} Let $(f,g,h)$ be an isotopism between two $n$-dimensional algebras $A$ and $A'$. Then, $h(A^2)=A'^2$. \end{lemm}
\begin{proof} The regularity of $f$ and $g$ involves that $f(A)=g(A)=A'$ and hence, $A'^2=f(A)g(A)=h(A^2)$. \end{proof}
Let $\cdot$ be a partial binary operation over the set $[n]=\{1,\ldots,n\}$. The pair $([n],\cdot)$ is called a {\em partial magma} of {\em order} $n$. It is {\em isotopic} to a partial magma $([n],\circ)$ if there exist three permutations $\alpha$, $\beta$ and $\gamma$ in the symmetric group $S_n$ such that $\alpha(i)\circ\beta(j)=\gamma(i\cdot j)$, for all $i,j\leq n$ such that $i\cdot j$ exists. If $\alpha=\beta=\gamma$, then the partial magmas are said to be {\em isomorphic}. The triple $(\alpha,\beta,\gamma)$ is an {\em isotopism} of partial magmas (an {\em isomorphism} if $\alpha=\beta=\gamma$).
A {\em partial magma algebra} $A^{\cdot}$ {\em based on} a partial magma $([n],\cdot)$ is an $n$-dimensional algebra over a field $\mathbb{K}$ such that there exists a basis $\{e_1,\ldots,e_n\}$ satisfying that, if $i\cdot j$ exists for some pair of elements $i,j\leq n$, then $e_ie_j=c_{ij}e_{i\cdot j}$ for some non-zero structure constant $c_{ij}\in \mathbb{K}\setminus\{0\}$. If all the structure constants are equal to $1$, then this is called a {\em partial magma ring}.
\begin{lemm}\label{lemm_partial_magma} Two partial magma rings are isotopic (isomorphic, respectively) if their respective partial magmas on which they are based are isotopic (isomorphic, respectively). \end{lemm}
\begin{proof} Let $A^{\cdot}$ and $A^{\circ}$ be two partial magma rings based, respectively, on two isotopic partial magmas $([n],\cdot)$ and $([n],\circ)$. Let $\{e_1,\ldots,e_n\}$ and $\{e'_1,\ldots,e'_n\}$ be the respective bases of these two algebras and let $(f,g,h)$ be an isotopism between their corresponding partial magmas. For each $\alpha\in\{f,g,h\}$, let us define the map $\overline{\alpha}(e_i)=e'_{\alpha(i)}$. Then, $\overline{f}(e_i)\overline{g}(e_j)=e'_{f(i)}e'_{g(j)}= e'_{f(i)\circ g(j)}=e'_{h(i\cdot j)} =\overline{h}(e_{i\cdot j})= \overline{h}(e_ie_j)$. From linearity, the triple $(\overline{f},\overline{g},\overline{h})$ determines an isotopism between $A^{\cdot}$ and $A^{\circ}$. If $f=g=h$, then this constitutes an isomorphism. \end{proof}
The reciprocal of Lemma \ref{lemm_partial_magma} is not true in general. Thus, for instance, the two partial magmas $([2],\cdot)$ and $([2],\circ)$ that are respectively described by the non-zero products $1\cdot 1=1$ and $1\circ 1 = 1 = 2\circ 1$ are not isotopic. Nevertheless, the partial magma rings $A^{\cdot}$ and $A^{\circ}$, with respective bases $\{e_1,e_2\}$ and $\{e'_1,e'_2\}$, are isotopic by means of the isotopism $(f,\mathrm{Id},\mathrm{Id})$, where the linear transformation $f$ is described by $f(e_1)=e'_1$ and $f(e_2) = e'_2-e'_1$.
\subsection{Partial Latin squares}
A {\em partial quasigroup} is a partial magma $([n],\cdot)$ such that if the equations $ix=j$ and $yi=j$, with $i,j\in [n]$, have solutions for $x$ and $y$ in $[n]$, then these solutions are unique. The concepts of {\em partial quasigroup algebras} and {\em partial quasigroup rings} arise similarly to those of partial magma algebras and rings. Lemma \ref{lemm_partial_magma} also holds analogously for partial quasigroup rings. Every partial quasigroup of order $n$ constitutes the multiplication table of a {\em partial Latin square} of order $n$, that is, an $n \times n$ array in which each cell is either empty or contains one element chosen from the set $[n]$, such that each symbol occurs at most once in each row and in each column. Every isotopism of a partial quasigroup is uniquely related to a permutation of the rows, columns and symbols of the corresponding partial Latin square. The distribution of partial Latin squares into isotopism classes is known for order up to six \cite{Falcon2013, Falcon2015a}. In this paper we make use of graph invariants to study which ones of the known non-isotopic classes of partial Latin squares of order $n\leq 3$ give rise to isotopic classes of partial quasigroup rings over the finite fields $\mathbb{F}_2$ and $\mathbb{F}_3$. In this regard, it is straightforwardly verified that there exist only two one-dimensional partial quasigroup rings: the abelian one and the one described by the product $e_1e_1=e_1$. They constitute distinct isotopism classes.
Let $L=(l_{ij})$ be a partial Latin square of order $n$ without empty cells (that is, a {\em Latin square}). McKay et al. \cite{McKay2007} defined the vertex-colored graph $G(L)$ with $n^2+3n$ vertices $\{r_i\mid\, i\leq n\}\cup\{c_i\mid\, i\leq n\}\cup\{s_i\mid\, i\leq n\}\cup \{t_{ij}\mid\, i,j\leq n\}$, where each of the four subsets (related to the rows ($r_i$), columns ($c_i$), symbols ($s_i$) and cells ($t_{ij}$) of the Latin square $L$) has a different color, and $3n^2$ edges $\{r_it_{ij},c_jt_{ij},s_{l_{ij}}t_{ij}\mid\, i,j\leq n\}$ (see Figure \ref{Fig_LS}, where we have used distinct styles ($\circ$, $\blacktriangle$, $\blacktriangleright$, $\blacktriangleleft$ and $\bullet$) to represent the colors of the vertices). Two Latin squares $L_1$ and $L_2$ of the same order are isotopic if and only if the graphs $G(L_1)$ and $G(L_2)$ are isomorphic (see Theorem 6 in \cite{McKay2007}).
\begin{figure}
\caption{Graph related to a Latin square of order $2$.}
\label{Fig_LS}
\end{figure}
\subsection{Computational Algebraic Geometry}
Let $\mathbb{K}[X]$ be a multivariate polynomial ring over a field $\mathbb{K}$. The {\em algebraic set} defined by an ideal $I$ of $\mathbb{K}[X]$ is the set $\mathcal{V}(I)$ of common zeros of all the polynomials in $I$. If this set is finite, then the ideal $I$ is {\em zero-dimensional}. This is {\em radical} if every polynomial $f\in \mathbb{K}[X]$ belongs to $I$ whenever there exists a natural number $m$ such that $f^m\in I$. The largest monomial of a polynomial in $I$ with respect to a given monomial term ordering is its {\em leading monomial}. The ideal generated by all the leading monomials of $I$ is its {\em initial ideal}. A {\em standard monomial} of $I$ is any monomial that is not contained in its initial ideal. Regardless of the monomial term ordering, if the ideal $I$ is zero-dimensional and radical, then the number of standard monomials in $I$ coincides with the Krull dimension of the quotient ring $\mathbb{K}[X]/I$ and with the number of points of the algebraic set $\mathcal{V}(I)$. This is computed from the reduced Gr\"obner basis of the ideal. Specifically, a {\em Gr\"obner basis} of the ideal $I$ is any subset $G$ of polynomials in $I$ whose leading monomials generate its initial ideal. This is {\em reduced} if all its polynomials are monic and no monomial of a polynomial in $G$ is generated by the leading monomials of the rest of polynomials in the basis. There exists only one reduced Gr\"obner basis, which can always be computed from Buchberger's algorithm \cite{Buchberger2006}. The computation that is required to this end is extremely sensitive to the number of variables.
\begin{thm}[\cite{Gao2009}, Proposition 4.1.1]\label{Gao} Let $\mathbb{F}_q$ be a finite field, with $q$ a prime power. The time that Buchberger's algorithm requires to compute the reduced Gr\"obner basis of an ideal $\langle\, p_1,\ldots, p_m,p_1^q-p_1,\ldots,p^q_m-p_m\,\rangle$ defined over a polynomial ring $\mathbb{F}_q[x_1,\ldots,x_n]$, where $p_1,\ldots,p_m$ are polynomials given in sparse form and have longest length $l$, is $q^{O(n)}+O(m^2l)$. Here, sparsity refers to the number of monomials. \end{thm}
Gr\"obner bases can be used to determine the isomorphisms and isotopisms between two $n$-dimensional algebras $A$ and $A'$ over a finite field $\mathbb{F}_q$, with $q$ a prime power, respective basis $\{e_1,\ldots,e_n\}$ and $\{e'_1,\ldots,e'_n\}$, and respective structure constants $c_{ij}^k$ and ${c'}_{ij}^k$. To this end, let us define the sets of variables $\mathfrak{F}_n=\{\mathfrak{f}_{ij}\mid\, i,j\leq n\}$, $\mathfrak{G}_n=\{\mathfrak{g}_{ij}\mid\, i,j\leq n\}$ and $\mathfrak{H}_n=\{\mathfrak{h}_{ij}\mid\, i,j\leq n\}$. These variables play the respective role of the entries in the regular matrices related to a possible isotopism $(f,g,h)$ between the algebras $A$ and $A'$. Here, $\alpha(e_i)=\sum_{j=1}^n \alpha_{ij}e'_j$, for each $\alpha\in\{f,g,h\}$. From the coefficients of each basis vector $e_m$ in the expression $f(e_i)g(e_j)=h(e_ie_j)$, we have that $$\sum_{k,l=1}^n \mathfrak{f}_{ik}\mathfrak{g}_{jl}{c'}_{kl}^m = \sum_{s=1}^n c_{ij}^s\mathfrak{h}_{sm}, \text{ for all } i,j,m\leq n.$$
\begin{thm}\label{thm_CAG_Isom} The next two assertions hold. \begin{enumerate}[a)] \item The isotopism group between the algebras $A$ and $A'$ is identified with the algebraic set of the ideal $I^{\mathrm{Isot}}_{A,A'}$ of $\mathbb{F}_q[\mathfrak{F}_n\cup\mathfrak{G}_n\cup\mathfrak{H}_n]$, which is defined as
{\small $$\langle\, \sum_{k,l=1}^n \mathfrak{f}_{ik}\mathfrak{g}_{jl}{c'}_{kl}^m - \sum_{s=1}^n c_{ij}^s\mathfrak{h}_{sm}\mid\, i,j,m\leq n\,\rangle + \langle\,\det(M)^{q-1}-1\mid\, M\in\{F,G,H\}\,\rangle,$$}
\noindent where $F$, $G$ and $H$ denote, respectively, the matrices of entries in $\mathfrak{F}_n$, $\mathfrak{G}_n$ and $\mathfrak{H}_n$. Besides, $|\mathcal{V}(I^{\mathrm{Isot}}_{A,A'})|= \mathrm{dim}_{\mathbb{F}_q} (\mathbb{F}_q[\mathfrak{F}_n\cup\mathfrak{G}_n\cup\mathfrak{H}_n]/ I^{\mathrm{Isot}}_{A,A'})$. \item The isomorphism group between the algebras $A$ and $A'$ is identified with the algebraic set of the ideal $I^{\mathrm{Isom}}_{A,A'}$ of $\mathbb{F}_q[\mathfrak{F}_n]$, which is defined as $$\langle\, \sum_{k,l=1}^n \mathfrak{f}_{ik}\mathfrak{f}_{jl}{c'}_{kl}^m - \sum_{s=1}^n c_{ij}^s\mathfrak{f}_{sm}\mid\, i,j,m\leq n \,\rangle + \langle\,\det(F)^{q-1}-1\,\rangle,$$ where $F$ denotes the matrix of entries in $\mathfrak{F}_n$. Besides,
$|\mathcal{V}(I^{\mathrm{Isom}}_{A,A'})|= \mathrm{dim}_{\mathbb{F}_q}(\mathbb{F}_q[\mathfrak{F}_n]/ I^{\mathrm{Isom}}_{A,A'})$. \end{enumerate} \end{thm}
\begin{proof} Let us prove the second assertion, the reasoning for assertion (a) being analogous. The generators of the ideal $I^{\mathrm{Isom}}_{A,A'}$ force each zero $(f_{11},\ldots,$ $f_{nn})$ of its algebraic set to constitute the entries of the regular matrix of an isomorphism $f$ between the algebras $A$ and $A'$. The result follows from the fact that this ideal is zero-dimensional and radical. In particular, the ideal $I^{\mathrm{Isom}}_{A,A'}$ is zero-dimensional because its algebraic set is a finite subset of $\mathbb{F}_q^{n^2}$. Besides, from Proposition 2.7 of \cite{Cox1998}, the ideal $I^{\mathrm{Isom}}_{A,A'}$ is also radical, because, for each $i,j\leq n$, the unique monic generator of $I^{\mathrm{Isom}}_{A,A'}\cap \mathbb{F}_q[\mathfrak{f}_{ij}]$ is the polynomial $(\mathfrak{f}_{ij})^q-\mathfrak{f}_{ij}$, which is intrinsically included in each ideal of $\mathbb{F}_q[\mathfrak{F}_n]$ and is square-free. \end{proof}
\begin{corollary}\label{coro_CAG_Isom} The times that Buchberger's algorithm requires to compute the reduced Gr\"obner bases of the ideals $I^{\mathrm{Isot}}_{A,A'}$ and $I^{\mathrm{Isom}}_{A,A'}$ in Theorem \ref{thm_CAG_Isom} are, respectively, $q^{O(3n^2)}+O(n^6n!)$ and $q^{O(n^2)}+O(n^6n!)$. \end{corollary}
\begin{proof} We prove the result for the second ideal, the reasoning for the first one being analogous. The result follows straightforwardly from Theorem \ref{Gao} once we observe that all the generators of the ideal in Theorem \ref{thm_CAG_Isom} are sparse in $\mathbb{F}_q[\mathfrak{F}_n]$. More specifically, the number of variables is $n^2$, the number of generators of the ideal under consideration that are not of the form $(\mathfrak{f}_{ij})^q-\mathfrak{f}_{ij}$ is $n^3+1$ and the maximal length of these generators is $n!$. \end{proof}
Theorem \ref{thm_CAG_Isom} has been implemented as a procedure called {\em isoAlg} in the open computer algebra system for polynomial computations {\sc Singular} \cite{Decker2016}. This has been included in the library {\em GraphAlg.lib}, which is available online at {\texttt{http://personales.us.es/raufalgan/LS/GraphAlg.lib}}. Let us illustrate the use of this procedure with an example related to the distribution of the set $\mathcal{P}_2(\mathbb{F}_2)$ of two-dimensional partial quasigroup rings over the finite field $\mathbb{F}_2$ into isotopism and isomorphism classes. All the computations that are exposed throughout this paper are implemented in a system with an {\em Intel Core i7-2600, with a 3.4 GHz processor and 16 GB of RAM}.
\begin{example}\label{ejemplo_PQ2} Let us consider the pair of partial quasigroup rings in $\mathcal{P}_2(\mathbb{F}_2)$ that are respectively related to the partial Latin squares
$$\begin{array}{|c|c|} \hline 1 & 2\\ \hline 2 & \ \\ \hline \end{array} \hspace{0.25cm} \text { and } \hspace{0.25cm} \begin{array}{|c|c|} \hline 1 & 2\\ \hline 2 & 1 \\ \hline \end{array}$$ These two partial Latin squares are not isotopic because isotopisms preserve the number of filled cells. Nevertheless, their related partial quasigroup rings over $\mathbb{F}_2$, with respective bases $\{e_1,e_2\}$ and $\{e'_1,e'_2\}$, and which are respectively described by the products $$\begin{cases}e_1e_1=e_1,\\ e_1e_2=e_2=e_2e_1. \end{cases} \hspace{0.5cm} \text{ and } \hspace{0.5cm} \begin{cases}e'_1e'_1=e'_1=e'_2e'_2,\\ e'_1e'_2=e'_2=e'_2e'_1. \end{cases}$$ are isotopic. Specifically, by implementing the procedure {\em isoAlg}, our system computes in $0$ seconds the existence of four isotopisms between these two partial quasigroup rings. One of these isotopisms is, for instance, the isomorphism $f$ such that $f(e_1)=e'_1$ and $f(e_2)=e'_1+e'_2$. The procedure {\em isoAlg} also ensures that $f$ is the unique possible isomorphism.
$\lhd$ \end{example}
In practice, in those cases in which the run time required for the computations involved in Theorem \ref{thm_CAG_Isom} becomes excessive, it is advisable to eliminate the generators of the corresponding ideal that refer to the determinants of the matrices $F$, $G$ and $H$. This reduces the time complexity in Corollary \ref{coro_CAG_Isom} to $q^{O(3n^2)} + O(n^8)$ and $q^{O(n^2)} + O(n^8)$, respectively, and gives enough information to analyze a case study on which to base the possible isomorphisms and isotopisms between two given algebras, whatever the base field is. The next example illustrates this fact by focusing on the possible isotopisms that exist over any field between the two partial quasigroup rings that appear in Example \ref{ejemplo_PQ2}.
\begin{example}\label{ejemplo_PQ2a} The implementation of the procedure {\em isoAlg} enables us to ensure that, whatever the base field is, the reduced Gr\"obner basis of the ideal $I^{\mathrm{Isot}}_{A,A'}$ in Theorem \ref{thm_CAG_Isom} related to the isotopism group between the two partial quasigroup rings of Example \ref{ejemplo_PQ2} yields the conditions $2\mathfrak{h}_{22}^3=0$ and $\mathfrak{h}_{21}^2+\mathfrak{h}_{22}^2=0$. If the characteristic of the base field is not two, then $\mathfrak{h}_{21}=\mathfrak{h}_{22}=0$. This forces $H$ to be singular and hence, these two partial quasigroup rings are not isotopic. Otherwise, it is straightforwardly verified that the linear transformation $f$ that is indicated in Example \ref{ejemplo_PQ2} constitutes an isomorphism between both rings for every base field of characteristic two.
$\lhd$ \end{example}
\section{Description of faithful functors between algebras and graphs}
Based on the proposal of McKay et al. \cite{McKay2007} for Latin squares, we now describe a pair of graphs that are uniquely related to a finite-dimensional algebra $A$ over a finite field $\mathbb{K}$. Firstly, we define the vertex-colored graph $G_1(A)$ with four maximal monochromatic subsets $R_{A}=\{r_u\mid\, u\in A\setminus \mathrm{Ann}_{A^-}(A)\}$, $C_{A}=\{c_u\mid\, u\in A\setminus \mathrm{Ann}_{A^+}(A)\}$, $S_{A}=\{s_u\mid\, u\in A^2\setminus \{0\}\}$ and $T_{A}=\{t_{u,v}\mid\, u,v\in A, uv\neq 0\}$, and edges $\{r_ut_{u,v}, c_vt_{u,v}, s_{uv}t_{u,v}\mid u,v\in A, uv\neq 0\}$. From this graph we also define the vertex-colored graph $G_2(A)$ by adding the edges $\{r_uc_u\mid\, u\in A\setminus \mathrm{Ann}_A(A)\}\, \cup \{c_us_u\mid\, u \in A^2\setminus \mathrm{Ann}_{A^+}(A)\} \, \cup \{r_us_u\mid\, u \in A^2\setminus \mathrm{Ann}_{A^-}(A)\}$. As an illustrative example, Figure \ref{Fig_1} shows the two graphs that are related to any $n$-dimensional algebra over the finite field $\mathbb{F}_2$, with basis $\{e_1,\ldots,e_n\}$, that is described as $e_1e_2=e_2e_1=e_1$.
\begin{figure}
\caption{Graphs related to the algebra $e_1e_2=e_2e_1=e_1$ over $\mathbb{F}_2$.}
\label{Fig_1}
\end{figure}
\begin{lemm}\label{lemm_graph0} The next assertions hold. \begin{enumerate}[a)] \item If the algebra $A$ is abelian, then $G_1(A)$ and $G_2(A)$ have no vertices. \item The graph $G_1(A)$ does not contain triangles. \item In both graphs $G_1(A)$ and $G_2(A)$, \begin{itemize} \item The number of vertices is
{\small \[|A\setminus \mathrm{Ann}_{A^-}(A)|+|A\setminus \mathrm{Ann}_{A^+}(A)|+
|A^2|+|\{(u,v)\in A\times A\mid\, uv\neq 0\}| - 1. \]} \item The degree of the vertex $t_{u,v}$ is \[d(t_{u,v})=3, \text{ for all } u,v\in A \text{ such that } uv\neq 0.\] \end{itemize} \item In the graph $G_1(A)$, \begin{itemize}
\item $d(r_u)=|A\setminus \mathrm{Ann}_{A^+}(\{u\})|$, for all $u\not\in \mathrm{Ann}_{A^-}(A)$.
\item $d(c_u)=|A\setminus \mathrm{Ann}_{A^-}(\{u\})|$, for all $u\not\in \mathrm{Ann}_{A^+}(A)$.
\item $d(s_u)=\sum_{v\in A}|\mathrm{ad}^{-1}_v(u)|$, for all $u\in A^2\setminus\{0\}$, where $\mathrm{ad}_v:A\rightarrow A^2$ is the {\em adjoint action} of $v$ in $A$ such that $\mathrm{ad}_v(w)=vw$, for all $w\in A$. \end{itemize} \item Let $\mathbf{1}$ denote the characteristic function. Then, in the graph $G_2(A)$, \begin{itemize}
\item $d(r_u)=|A\setminus \mathrm{Ann}_{A^+}(\{u\})|+ \mathbf{1}_{A\setminus\mathrm{Ann}_{A^+}(A)}(u) + \mathbf{1}_{A^2}(u)$, for all $u\not\in \mathrm{Ann}_{A^-}(A)$.
\item $d(c_u)=|A\setminus \mathrm{Ann}_{A^-}(\{u\})|+ \mathbf{1}_{A\setminus\mathrm{Ann}_{A^-}(A)}(u) + \mathbf{1}_{A^2}(u)$, for all $u\not\in \mathrm{Ann}_{A^+}(A)$.
\item $d(s_u)=\mathbf{1}_{A\setminus\mathrm{Ann}_{A^-}(A)}(u)+ \mathbf{1}_{A\setminus\mathrm{Ann}_{A^+}(A)}(u)+\sum_{v\in A}|\mathrm{ad}^{-1}_v(u)|$, for all $u\in A^2\setminus\{0\}$. \end{itemize} \end{enumerate} \end{lemm}
\begin{proof} The result follows straightforwardly from the definition of the graphs $G_1(A)$ and $G_2(A)$. \end{proof}
\begin{prp}\label{prop_graph0_a} The next assertions hold. \begin{enumerate}[a)] \item The number of edges of the graph $G_1(A)$ is
$\sum_{u\not\in\mathrm{Ann}_{A^-}(A)} |A\setminus \mathrm{Ann}_{A^+}(\{u\})|$ $+\sum_{u\not\in\mathrm{Ann}_{A^+}(A)}|A\setminus \mathrm{Ann}_{A^-}(\{u\})|+\sum_{u\in A^2\setminus\{0\}}\sum_{v\in A}|\mathrm{ad}_v^{-1}(u)|$.
\item The number of edges of the graph $G_2(A)$ coincides with those of $G_1(A)$ plus $|A\setminus\mathrm{Ann}_A(A)| + |A^2\setminus \mathrm{Ann}_{A^-}(A)| + |A^2\setminus\mathrm{Ann}_{A^+}(A)|$. \end{enumerate} \end{prp}
\begin{proof} The result follows from the first theorem of Graph Theory \cite{Harary1969}, which enables us to ensure that the number of edges of a graph is half the sum of the degrees of its vertices. Now, for each pair of vectors $u,v\in A$ such that $uv\neq 0$, the vertex $t_{u,v}\in T_A$ is the only vertex in $T_A$ that is adjacent to the vertices $r_u\in R_A$, $c_v\in C_A$ and $s_{uv}\in S_A$. Indeed, they constitute the three vertices related to the degree of $t_{u,v}$ that is indicated in assertion (c) of Lemma \ref{lemm_graph0}. As a consequence, the sum of the degrees of all the vertices in $T_A$ coincides with $\sum_{u\in R_A}d(r_u) + \sum_{u\in C_A}d(c_u) + \sum_{u\in S_A}d(s_u)$. The result then follows from assertions (d) and (e) in Lemma \ref{lemm_graph0}. \end{proof}
\begin{thm}\label{theo_graph0} Let $A$ and $A'$ be two finite-dimensional algebras over a finite field $\mathbb{K}$. Then, \begin{enumerate}[a)] \item If both algebras are isotopic, then their corresponding graphs $G_1(A)$ and $G_1(A')$ are isomorphic. Reciprocally, if the graphs $G_1(A)$ and $G_1(A')$ are isomorphic, then there exist three bijective maps $f$, $g$ and $h$ between $A$ and $A'$ such that $f(u)g(v)=h(uv)$. \item If both algebras are isomorphic, then their corresponding graphs $G_2(A)$ and $G_2(A')$ are also isomorphic. Reciprocally, if the graphs $G_2(A)$ and $G_2(A')$ are isomorphic, then there exists a multiplicative bijective map between the algebras $A$ and $A'$, that is, a bijective map $f:A\rightarrow A'$ so that $f(u)f(v)=f(uv)$, for all $u,v\in A$. \end{enumerate} \end{thm}
\begin{proof} Let $(f,g,h)$ be an isotopism between the algebras $A$ and $A'$. We define the map $\alpha$ between $G_1(A)$ and $G_1(A')$ such that $$\begin{cases}\alpha(r_u)=r_{f(u)}, \text{ for all } u\in A\setminus \mathrm{Ann}_{A^-}(A),\\ \alpha(c_u)=c_{g(u)}, \text{ for all } u\in A\setminus \mathrm{Ann}_{A^+}(A),\\ \alpha(s_u)=s_{h(u)}, \text{ for all } u\in A^2\setminus \{0\},\\ \alpha(t_{u,v})=t_{f(u),g(v)}, \text{ for all } u,v\in A \text { such that } uv\neq 0.\end{cases}$$
The description of $G_1(A)$ and $G_1(A')$, together with Lemmas \ref{lemm_annihilator} and \ref{lemm_M1}, and the regularity of $f$, $g$ and $h$, implies that $\alpha$ is an isomorphism between these two graphs. The same map $\alpha$ constitutes an isomorphism between the graphs $G_2(A)$ and $G_2(A')$ in the case $f=g=h$, that is, if the algebras $A$ and $A'$ are isomorphic. Reciprocally, let $\alpha$ be an isomorphism between the graphs $G_1(A)$ and $G_1(A')$. Collinearity implies that this isomorphism is uniquely determined by its restriction to $R_A\cup C_A\cup S_A$. Specifically, the image of each vertex $t_{u,v}\in T_A$ by means of $\alpha$ is uniquely determined by the corresponding images of $r_u$, $c_v$ and $s_{uv}$. Let $\beta$ and $\beta'$ be the respective bases of the algebras $A$ and $A'$ and let $\pi: A\rightarrow A'$ be the natural map that preserves the components of each vector with respect to the mentioned bases. That is, $\pi((u_1,\ldots,u_n)_{\beta})=(u_1,\ldots,u_n)_{\beta'}$, for all $u_1,\ldots,u_n\in\mathbb{K}$. Let us define three maps $f$, $g$ and $h$ from $A$ to $A'$ such that $$f(u)=\begin{cases}\pi(u), \text{ for all } u\in \mathrm{Ann}_{A^-}(A),\\ v, \text{ otherwise, where } v\in A \text{ is such that } \alpha(r_u)=r_v. \end{cases}$$ $$g(u)=\begin{cases}\pi(u), \text{ for all } u\in \mathrm{Ann}_{A^+}(A),\\ v, \text{ otherwise, where } v\in A \text{ is such that } \alpha(c_u)=c_v. \end{cases}$$ $$h(u)=\begin{cases}\pi(u), \text{ for all } u\in (A\setminus A^2)\cup \{0\},\\ v, \text{ otherwise, where } v\in A \text{ is such that } \alpha(s_u)=s_v. \end{cases}$$
From Lemmas \ref{lemm_annihilator} and \ref{lemm_M1}, these three maps are bijective. Let $u,v\in A$. If $u\in \mathrm{Ann}_{A^-}(A)$ or $v\in \mathrm{Ann}_{A^+}(A)$, then there does not exist the vertex $t_{u,v}$ in the graph $G_1(A)$. Since $\alpha$ preserves collinearity, there does not exist the vertex $t_{f(u),g(v)}$ in the graph $G_1(A')$, which means that $f(u)\in \mathrm{Ann}_{{A'}^-}(A')$ or $g(v)\in \mathrm{Ann}_{{A'}^+}(A')$. In any case, we have that $f(u)g(v)=0=h(uv)$. Finally, if $u\not\in\mathrm{Ann}_{A^-}(A)$ and $v\not \in \mathrm{Ann}_{A^+}(A)$, then the vertex $t_{u,v}$ connects the vertices $r_u$, $c_v$ and $s_{uv}$ in the graph $G_1(A)$. Now, the isomorphism $\alpha$ maps this vertex $t_{u,v}$ in $G_1(A)$ to a vertex $t_{u',v'}$ in $G_1(A')$ that is connected to the vertices $r_{u'}$, $c_{v'}$ and $s_{u'v'}$. Again, since $\alpha$ preserves collinearity, we have that $f(u)=u'$, $g(v)=v'$ and, finally, $h(uv)=f(u)g(v)$.
In the case that $\alpha$ is an isomorphism between the graphs $G_2(A)$ and $G_2(A')$, it is enough to consider $f=g=h$ in the previous description. This is well-defined because of the new edges that are added to the graphs $G_1(A)$ and $G_1(A')$ in order to define, respectively, the graphs $G_2(A)$ and $G_2(A')$. These edges ensure the multiplicative character of the bijective map $f$, that is, $f(u)f(v)=f(uv)$, for all $u,v\in A$. \end{proof}
Theorem \ref{theo_graph0} enables us to ensure that graph invariants reduce the cost of computation that is required to distribute a set of finite-dimensional algebras over finite fields into isotopism and isomorphism classes. It is only necessary to compute those reduced Gr\"obner bases in Theorem \ref{thm_CAG_Isom} that are related to a pair of algebras whose associated graphs have equal invariants. The complexity to compute these invariants is always much less than that related to the computation of a reduced Gr\"obner basis. Thus, for instance, the complexity to compute the number of vertices, edges and triangles of the graphs related to any $n$-dimensional algebra over the finite field $\mathbb{F}_q$ is $q^{O(2n)}$. This corresponds to the computation of the adjacency matrices of both graphs by means of all the possible products among the $q^n$ distinct vectors of any such algebra. This enables us in particular to implement the formulas exposed in Lemma \ref{lemm_graph0} and Proposition \ref{prop_graph0_a}. Besides, recall that the trace of the third power of the adjacency matrix of a graph equals six times the number of triangles of such a graph. All this computation has been implemented in the procedure {\em isoGraph}, which has been included in the mentioned library {\em GraphAlg.lib}. In order to illustrate the efficiency of these invariants, we focus on the set $\mathcal{L}_n(\mathbb{F}_q)$ of $n$-dimensional Lie algebras over the finite field $\mathbb{F}_q$, with $q$ a prime power. Recall that a {\em Lie algebra} is an anti-commutative algebra $A$ that satisfies the {\em Jacobi identity} $u(vw)+v(wu)+w(uv)=0$, for all $u,v,w\in A$. 
For $n=3$, it is known \cite{Falcon2016a, Graaf2005, Strade2007} that there are $32$ distinct Lie algebras in $\mathcal{L}_3(\mathbb{F}_2)$, which are distributed into four isotopism classes and six isomorphism classes, and $123$ Lie algebras in $\mathcal{L}_3(\mathbb{F}_3)$, which are distributed into four isotopism classes and seven isomorphism classes. Table \ref{Table_0} shows the run time and memory usage that our computer system requires to determine the mentioned classification depending on whether graph invariants are considered ({\em Graph}) or not ({\em Alg}). Further, Tables \ref{Table_1} and \ref{Table_1a} show, respectively, the invariants of the graphs $G_1$ and $G_2$ related to the isomorphism classes of $\mathcal{L}_3(\mathbb{F}_q)$, for $q\in\{2,3\}$. The components of the $4$-tuples that are indicated in the corresponding columns of vertices in both tables refer, respectively, to the number of vertices in $R_A$, $C_A$, $S_A$ and $T_A$.
\begin{table}[ht] \begin{center} \resizebox{\textwidth}{!}{
\begin{tabular}{c|cc|cc|cc}
\ & \multicolumn{2}{c|}{Graph} & \multicolumn{4}{c}{Alg}\\
\ & & & \multicolumn{2}{c|}{Isomorphisms} & \multicolumn{2}{c}{Isotopisms}\\ $q$ & Run time & Memory usage & Run time & Memory usage & Run time & Memory usage\\ \hline 2 & 1 s & 0 Mb & 1 s & 0 Mb & 34 s & 384 Mb\\ 3 & 47 s & 3 Mb & 4 s & 6 Mb & \multicolumn{2}{c}{Run out of memory}\\ \end{tabular}} \caption{Cost of computation to distribute $\mathcal{L}_3(\mathbb{F}_q)$ into isotopism and isomorphism classes.} \label{Table_0} \end{center} \end{table}
\begin{table}[ht] \resizebox{\textwidth}{!}{
\begin{tabular}{c|cc|cc}
\ & \multicolumn{2}{c|}{$\mathbb{F}_2$} & \multicolumn{2}{c}{$\mathbb{F}_3$} \\ $ A$ & Vertices & Edges & Vertices & Edges \\ \hline Abelian & (0,0,0,0) & 0 & (0,0,0,0) & 0\\ $e_1e_2=e_3$ & (6,6,1,24) & 72 & (24,24,2,432) & 1296\\ $e_1e_2=e_1$ & (6,6,1,24) & 72 & (24,24,2,432) & 1296\\ $e_1e_2=e_3, e_1e_3=-e_2$ & - & - & (26,26,8,576) & 1728 \\ $e_1e_2=e_3, e_1e_3=e_2$ & (7,7,3,36) & 108 & (26,26,8,576) & 1728\\ $e_1e_2=e_2, e_1e_3=e_3$ & (7,7,3,36) & 108 & (26,26,8,576) & 1728\\ $e_1e_2=e_2, e_1e_3=-e_3, e_2e_3=-e_1$ & (7,7,7,42) & 126 & - & -\\ $e_1e_2=e_2, e_1e_3=-e_3, e_2e_3=2e_1$ & - & - & (26,26,26,624) & 1872\\ \end{tabular}} \caption{Invariants of the graph $G_1$ related to $\mathcal{L}_3(\mathbb{F}_q)$, for $q\in\{2,3\}$.} \label{Table_1} \end{table}
\begin{table}[ht] \resizebox{\textwidth}{!}{
\begin{tabular}{c|ccc|ccc}
\ & \multicolumn{3}{c|}{$\mathbb{F}_2$} & \multicolumn{3}{c}{$\mathbb{F}_3$} \\ $A$ & Vertices & Edges & Triangles & Vertices & Edges & Triangles\\ \hline Abelian & (0,0,0,0) & 0 & 0 & (0,0,0,0) & 0 & 0\\ $e_1e_2=e_3$ & (6,6,1,24) & 78 & 0 & (24,24,2,432) & 1320 & 0\\ $e_1e_2=e_1$ & (6,6,1,24) & 80 & 9 & (24,24,2,432) & 1324 & 38\\ $e_1e_2=e_3, e_1e_3=-e_2$ & - & - & - & (26,26,8,576) & 1770 & 8\\ $e_1e_2=e_3, e_1e_3=e_2$ & (7,7,3,36) & 121 & 11 & (26,26,8,576) & 1770 & 80\\ $e_1e_2=e_2, e_1e_3=e_3$ & (7,7,3,36) & 121 & 27 & (26,26,8,576) & 1770 & 152\\ $e_1e_2=e_2, e_1e_3=-e_3, e_2e_3=-e_1$ & (7,7,7,42) & 147 & 19 & - & - & -\\ $e_1e_2=e_2, e_1e_3=-e_3, e_2e_3=2e_1$ & - & - & - & (26,26,26,624) & 1950 & 74\\ \end{tabular}} \caption{Invariants of the graph $G_2$ related to $\mathcal{L}_3(\mathbb{F}_q)$, for $q\in\{2,3\}$.} \label{Table_1a} \end{table}
\section{Graphs and partial quasigroup rings}
Let us finish with an illustrative example that focuses on those graphs $G_1$ and $G_2$ related to the set $\mathcal{P}_n(\mathbb{K})$ of $n$-dimensional partial quasigroup rings over a finite field $\mathbb{K}$ that are based on the known distribution of partial Latin squares of order $n\leq 3$ into isotopism classes. In this regard, Table \ref{Table_PQ} shows several graph invariants that are related to the isotopism classes of $\mathcal{P}_2(\mathbb{F}_q)$, for $q\in\{2,3\}$. Partial Latin squares are written row after row in a single line, with empty cells represented by zeros.
\begin{table}[ht] \begin{center} \resizebox{\textwidth}{!}{
\begin{tabular}{c|c|c|cc|c|c|cc}
\ & \multicolumn{4}{c|}{$\mathbb{F}_2$} & \multicolumn{4}{c}{$\mathbb{F}_3$}\\
\ & $G_1\, \& \, G_2$ & $G_1$ & \multicolumn{2}{c|}{$G_2$} & $G_1\, \& \, G_2$ & $G_1$ & \multicolumn{2}{c}{$G_2$}\\ Partial Latin square & Vertices & Edges & Edges & Triangles & Vertices & Edges & Edges & Triangles\\ \hline 00 00 & (0,0,0,0) & 0 & 0 & 0 & (0,0,0,0) & 0 & 0 & 0\\ 10 00 & (2,2,1,4) & 12 & 16 & 7 & (6,6,2,36) & 108 & 118 & 20\\ 10 01 & (3,3,1,6) & 18 & 23 & 7 & (8,8,2,48) & 144 & 156 & 22\\ 10 02 & (3,3,3,7) & 21 & 30 & 16 & (8,8,8,56) & 168 & 192 & 48\\ 10 20 & (3,2,3,6) & 18 & 25 & 12 & (8,6,8,48) & 144 & 164 & 42\\ 12 00 & (2,3,3,6) & 18 & 25 & 12 & (6,8,8,48) & 144 & 164 & 42\\ 12 20 & (3,3,3,8) & 24 & 33 & 13 & (8,8,8,60) & 180 & 204 & 38\\ 12 21 & (3,3,3,8) & 24 & 33 & 13 & (8,8,8,56) & 168 & 192 & 48\\ \end{tabular}} \caption{Invariants of the graphs $G_1$ and $G_2$ related to $\mathcal{P}_2(\mathbb{F}_q)$, for $q\in\{2,3\}$.} \label{Table_PQ} \end{center} \end{table}
\begin{thm}\label{theo_PQ2a} The set $\mathcal{P}_2(\mathbb{K})$ is distributed into seven isotopism classes, whatever the base field is. \end{thm}
\begin{proof} A computational case study based on a similar reasoning to that exposed in Example \ref{ejemplo_PQ2a} enables us to ensure the result. If the characteristic of the base field is distinct from two, then the seven isotopism classes under consideration are those related to the next partial Latin squares of order $2$
$$\begin{array}{|c|c|} \hline \ & \ \\ \hline \ & \ \\ \hline \end{array} \hspace{0.5cm}
\begin{array}{|c|c|} \hline 1 & \ \\ \hline \ & \ \\ \hline \end{array} \hspace{0.5cm}
\begin{array}{|c|c|} \hline 1 & \ \\ \hline \ & 1 \\ \hline \end{array} \hspace{0.5cm}
\begin{array}{|c|c|} \hline 1 & \ \\ \hline 2 & \ \\ \hline \end{array} \hspace{0.5cm}
\begin{array}{|c|c|} \hline 1 & 2 \\ \hline \ & \ \\ \hline \end{array} \hspace{0.5cm}
\begin{array}{|c|c|} \hline 1 & 2 \\ \hline 2 & \ \\ \hline \end{array}\hspace{0.5cm}
\begin{array}{|c|c|} \hline 1 & 2 \\ \hline 2 & 1 \\ \hline \end{array}$$
Otherwise, if the characteristic of the base field is two, then the isotopism classes related to the last two partial Latin squares coincide. In this case, the next partial Latin square corresponds to the seventh isotopism class
$$\begin{array}{|c|c|} \hline 1 & \ \\ \hline \ & 2 \\ \hline \end{array}$$
If the characteristic of the base field is distinct from two, the partial quasigroup ring related to this partial Latin square is isotopic to that related to the unique Latin square of the previous list. \end{proof}
It is known \cite{Falcon2013} that there are $2$, $8$ and $81$ distinct isotopism classes of partial Latin squares of respective orders $1$ to $3$. In order to determine those distinct isotopism classes that give rise to isotopic partial quasigroup rings over the finite field $\mathbb{F}_2$, we have implemented the procedure {\em isoAlg} in our previously mentioned computer system. With a total run time of $761$ seconds, we have obtained that there exist $2$, $7$ and $72$ distinct isotopism classes of partial quasigroup rings of respective dimensions $1$ to $3$. Particularly, the existence of two classes for the one-dimensional case agrees with that exposed in Subsection 2.3. Besides, the seven isotopism classes for the two-dimensional case agrees with Theorem \ref{theo_PQ2a}. For the three-dimensional case, the next nine pairs of non-isotopic partial Latin squares give rise to isotopic partial quasigroup rings $${\scriptsize\begin{array}{ccccc}
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & \ & \ \\ \hline
\ & \ & \ \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & \ \\ \hline \ & \ & \ \\ \hline \end{array} & , &
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & \ & \ \\ \hline
\ & \ & 1 \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & \ \\ \hline
\ & \ & 1 \\ \hline \end{array} & , & \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & \ & \ \\ \hline
\ & \ & 3 \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & \ \\ \hline \ & \ & 3 \\ \hline \end{array}\\ \\
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline \ & 1 & \ \\ \hline 3 & \ & \ \\ \hline \end{array} \text { and }
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & \ \\ \hline 3 & \ & \ \\ \hline \end{array} &,&\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline \ & 1 & 3 \\ \hline \ & \ & \ \\ \hline \end{array} \text { and }
\begin{array}{|c|c|c|} \hline 1 & 2 & 3 \\ \hline 2 & 1 & \ \\ \hline \ & \ & \ \\ \hline \end{array} &,&
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline
\ & 1 & \ \\ \hline 3 & \ & 2 \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & \ \\ \hline 3 & \ & 1 \\ \hline \end{array}\\ \\
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & \ & 3 \\ \hline
\ & \ & 1 \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & 3 \\ \hline \ & \ & 1 \\ \hline \end{array} &,&
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline
\ & 1 & 3 \\ \hline 3 & \ & \ \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline 2 & 1 & 3 \\ \hline 3 & \ & \ \\ \hline \end{array}&,&
\begin{array}{|c|c|c|} \hline 1 & 2 & \ \\ \hline
\ & 1 & 3 \\ \hline 3 & \ & 2 \\ \hline \end{array} \text { and } \begin{array}{|c|c|c|} \hline 1 & 2 & 3 \\ \hline 2 & 1 & \ \\ \hline 3 & \ & 1 \\ \hline \end{array} \end{array}}$$
The run time of 761 seconds that is required to determine the mentioned distribution of partial quasigroup rings reduces to only 30 seconds in the same computer system if the invariants that we have just exposed in Table \ref{Table_PQ} and those exposed in Table \ref{Table_PQ3} are previously computed. The new run time indeed includes the extra 9 seconds of computation that are required to compute such invariants.
\begin{table}[ht] \resizebox{\textwidth}{!}{
\begin{tabular}{ccc|ccc|ccc} Partial Latin square & Vertices & Edges & Partial Latin square & Vertices & Edges & Partial Latin square & Vertices & Edges\\ \hline 100 000 000 & (4,4,1,16) & 48 & 100 010 002 & (7,7,3,34) & 120 & 031 302 & (7,7,7,42) & 126 \\ 120 000 000 & (4,6,3,24) & 72 & 120 001 002 & (7,7,3,36) & 108 & 120 210 301 & (7,7,7,42) & 126 \\ 123 000 000 & (4,7,7,28) & 84 & 120 200 002 & (7,7,3,36) & 108 & 120 213 001 & (7,7,7,42) & 126 \\ 100 200 000 & (6,4,3,24) & 72 & 120 200 001 & (7,7,3,38) & 114 & 120 213 300 & (7,7,7,42) & 126 \\ 100 010 000 & (6,6,1,24) & 72 & 120 210 001 & (7,7,3,38) & 114 & 120 001 312 & (7,7,7,43) & 129 \\ 100 020 000 & (6,6,3,28) & 84 & 120 201 010 & (7,7,3,40) & 120 & 120 201 302 & (7,7,7,43) & 129 \\ 120 200 000 & (6,6,3,32) & 96 & 120 201 012 & (7,7,3,40) & 120 & 120 231 300 & (7,7,7,43) & 129 \\ 120 210 000 & (6,6,3,32) & 96 & 100 020 003 & (7,7,7,37) & 111 & 123 231 312 & (7,7,7,43) & 129 \\ 120 000 300 & (6,6,6,32) & 96 & 120 002 003 & (7,7,7,38) & 114 & 120 003 312 & (7,7,7,44) & 132 \\ 120 000 310 & (6,6,6,36) & 108 & 120 002 300 & (7,7,7,38) & 114 & 120 013 301 & (7,7,7,44) & 132 \\ 120 001 000 & (6,7,3,32) & 96 & 120 003 300 & (7,7,7,38) & 114 & 120 013 302 & (7,7,7,44) & 132 \\ 120 012 000 & (6,7,3,36) & 108 & 120 001 300 & (7,7,7,39) & 117 & 120 200 312 & (7,7,7,44) & 132 \\ 120 003 000 & (6,7,7,34) & 102 & 120 200 003 & (7,7,7,40) & 120 & 120 203 301 & (7,7,7,44) & 132 \\ 120 000 302 & (6,7,7,36) & 108 & 120 200 302 & (7,7,7,40) & 120 & 123 210 301 & (7,7,7,44) & 132 \\ 123 200 000 & (6,7,7,36) & 108 & 120 210 003 & (7,7,7,40) & 120 & 123 031 310 & (7,7,7,45) & 135 \\ 120 013 000 & (6,7,7,38) & 114 & 123 010 001 & (7,7,7,40) & 120 & 123 200 312 & (7,7,7,45) & 135 \\ 123 210 000 & (6,7,7,38) & 114 & 123 200 300 & (7,7,7,40) & 120 & 123 230 310 & (7,7,7,45) & 135 \\ 123 230 000 & (6,7,7,40) & 120 & 120 001 302 & (7,7,7,41) & 123 & 123 012 230 & (7,7,7,46) & 138 \\ 123 231 000 & (6,7,7,40) & 120 & 120 001 310 & 
(7,7,7,41) & 123 & 123 210 031 & (7,7,7,46) & 138 \\ 100 200 300 & (7,4,7,28) & 84 & 120 201 300 & (7,7,7,41) & 123 & 123 201 312 & (7,7,7,46) & 138 \\ 100 200 010 & (7,6,3,32) & 96 & 123 200 010 & (7,7,7,41) & 123 \\ 120 200 010 & (7,6,3,36) & 108 & 120 003 310 & (7,7,7,42) & 126 \\ 100 200 030 & (7,6,7,34) & 102 & 120 010 301 & (7,7,7,42) & 126 \\ 120 030 300 & (7,6,7,36) & 108 & 120 010 302 & (7,7,7,42) & 126 \\ 120 200 300 & (7,6,7,36) & 108 & 120 012 300 & (7,7,7,42) & 126 \\ 120 010 300 & (7,6,7,38) & 114 & 120 013 300 & (7,7,7,42) & 126 \\ 120 210 300 & (7,6,7,38) & 114 & 120 200 013 & (7,7,7,42) & 126 \\ 120 230 300 & (7,6,7,40) & 120 & 120 203 001 & (7,7,7,42) & 126 \\ 120 230 310 & (7,6,7,40) & 120 & 120 203 300 & (7,7,7,42) & 126 \\ 100 010 001 & (7,7,1,28) & 84 & 123 010 300 & (7,7,7,42) & 126 \\ \end{tabular}} \caption{Invariants of the graph $G_1$ related to non-abelian partial algebras in $\mathcal{P}_3(\mathbb{F}_2)$.} \label{Table_PQ3} \end{table}
\section{Conclusion and further studies}
We have described in this paper a pair of graphs that enable us to define faithful functors between finite-dimensional algebras over finite fields and these graphs. The computation of related graph invariants plays a remarkable role in the distribution of distinct families of algebras into isotopism and isomorphism classes. Some preliminary results have been exposed in this regard, particularly on the distribution of partial quasigroup rings over finite fields. Based on the known classification of partial Latin squares into isotopism classes, further work is required to determine completely this distribution.
\end{document} |
\begin{document}
\title[Loose Hamiltonian cycles forced by $(k-2)$-degree]{Loose Hamiltonian cycles forced by large $(k-2)$-degree \\ -- sharp version --}
\author[J.~de~O.~Bastos]{Josefran de Oliveira Bastos} \author[G.~O.~Mota]{Guilherme Oliveira Mota} \address{Instituto de Matem\'atica e Estat\'{\i}stica, Universidade de
S\~ao Paulo, S\~ao Paulo, Brazil}
\email{\{josefran|mota\}@ime.usp.br}
\author[M.~Schacht]{Mathias Schacht} \author[J.~Schnitzer]{Jakob Schnitzer} \author[F.~Schulenburg]{Fabian Schulenburg} \address{Fachbereich Mathematik, Universit\"at Hamburg, Hamburg, Germany} \email{[email protected]}
\email{\{jakob.schnitzer|fabian.schulenburg\}@uni-hamburg.de}
\thanks{The first author was supported by CAPES\@. The second author was supported by FAPESP (Proc. 2013/11431-2 and 2013/20733-2) and CNPq (Proc. 477203/2012-4 and {456792/2014-7}). The cooperation was supported by a joint CAPES/DAAD PROBRAL (Proc. 430/15).}
\begin{abstract} We prove for all $k\geq 4$ and $1\leq\ifmmode\ell\else\polishlcross\fi<k/2$ the sharp minimum $(k-2)$-degree bound for a $k$-uniform hypergraph~$\cch$ on~$n$ vertices to contain a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle if $k-\ifmmode\ell\else\polishlcross\fi$ divides~$n$ and~$n$ is sufficiently large. This extends a result of Han and Zhao for $3$-uniform hypergraphs. \end{abstract}
\keywords{hypergraphs, Hamiltonian cycles, degree conditions} \subjclass[2010]{05C65 (primary), 05C45 (secondary)}
\maketitle
\section{Introduction}
Given $k\geq 2$, a $k$-uniform hypergraph $\cch$ is a pair $(V,E)$ with vertex set $V$ and edge set $E\subseteq V^{(k)}$, where $V^{(k)}$ denotes the set of all $k$-element subsets of $V$. Given a $k$-uniform hypergraph $\cch=(V,E)$ and a set $S \in V^{(s)}$, we denote by $d(S)$ the number of edges in $E$ containing~$S$ and we denote by $N(S)$ the set of all $(k-s)$-element sets $T\in V^{(k-s)}$ such that $T\dcup S\in E$, so $d(S)=|N(S)|$. The \emph{minimum $s$-degree} of $\cch$ is denoted by $\delta_s(\cch)$ and it is defined as the minimum of $d(S)$ over all sets $S\in V^{(s)}$. The \emph{size} of a hypergraph is the number of its edges.
We say that a $k$-uniform hypergraph $\ccc$ is an \emph{$\ifmmode\ell\else\polishlcross\fi$-cycle} if there exists a cyclic ordering of its vertices such that every edge of $\ccc$ is composed of $k$ consecutive vertices, two (vertex-wise) consecutive edges share exactly $\ifmmode\ell\else\polishlcross\fi$ vertices, and every vertex is contained in an edge. Moreover, if the ordering is not cyclic, then $\ccc$ is an \emph{$\ifmmode\ell\else\polishlcross\fi$-path} and we say that the first and last~$\ifmmode\ell\else\polishlcross\fi$ vertices are the ends of the path. The problem of finding minimum degree conditions that ensure the existence of Hamiltonian cycles, i.e.\ cycles that contain all vertices of a given hypergraph, has been extensively studied over the last years (see, e.g., the surveys~\cites{RRsurv,Zhao-survey}). Katona and Kierstead~\cite{KaKi99} started the study of this problem, posing a conjecture that was confirmed by R\"odl, Ruci\'nski, and Szemer\'edi~\cites{RoRuSz06,RoRuSz08}, who proved the following result: For every $k\geq 3$, if $\cch$ is a $k$-uniform $n$-vertex hypergraph with $\delta_{k-1}(\cch)\geq {(1/2+o(1))}n$, then $\cch$ contains a Hamiltonian $(k-1)$-cycle. K\"uhn and Osthus proved that $3$-uniform hypergraphs~$\cch$ with $\delta_2(\cch)\geq {(1/4 +o(1))}n$ contain a Hamiltonian $1$-cycle~\cite{KuOs06}, and H\`an and Schacht~\cite{HaSc10} (see also~\cite{KeKuMyOs11}) generalized this result to arbitrary $k$ and $\ifmmode\ell\else\polishlcross\fi$-cycles with $1\leq \ifmmode\ell\else\polishlcross\fi <k/2$. In~\cite{KuMyOs10}, K\"uhn, Mycroft, and Osthus generalized this result to $1\leq \ifmmode\ell\else\polishlcross\fi<k$, settling the problem of the existence of Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycles in $k$-uniform hypergraphs with large minimum $(k-1)$-degree. 
In Theorem~\ref{theorem:asymp} below (see~\cites{BuHaSc13,BaMoScScSc16+}) we have minimum $(k-2)$-degree conditions that ensure the existence of Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycles for $1\leq \ifmmode\ell\else\polishlcross\fi<k/2$.
\begin{theorem}\label{theorem:asymp}
For all integers $k\geq 3$ and $1\leq \ifmmode\ell\else\polishlcross\fi<k/2$ and every $\gamma>0$ there exists an $n_0$ such that every $k$-uniform hypergraph $\cch=(V,E)$ on $|V|=n\geq n_0$ vertices with $n\in(k-\ifmmode\ell\else\polishlcross\fi)\bbn$ and \begin{equation*}
\delta_{k-2}(\cch)\geq\left(\frac{4(k-\ifmmode\ell\else\polishlcross\fi)-1}{4{(k-\ifmmode\ell\else\polishlcross\fi)}^2}+\gamma\right)\binom{n}{2} \end{equation*} contains a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle. \qed \end{theorem}
The minimum degree condition in Theorem~\ref{theorem:asymp} is asymptotically optimal as the following well-known example confirms. The construction of the example varies slightly depending on whether~$n$ is an odd or an even multiple of~$k-\ifmmode\ell\else\polishlcross\fi$. We first consider the case that $n = (2m + 1)(k-\ifmmode\ell\else\polishlcross\fi)$ for some integer~$m$. Let $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)=(V,E)$ be a $k$-uniform hypergraph on $n$ vertices such that an edge belongs to $E$ if and only if it contains at least one vertex from $A \subset V$, where $|A|=\left\lfloor \frac{n}{2(k-\ifmmode\ell\else\polishlcross\fi)} \right\rfloor$. It is easy to see that $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)$ contains no Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle, as it would have to contain $\frac{n}{k-\ifmmode\ell\else\polishlcross\fi}$ edges and each vertex in~$A$ is contained in at most two of them. Indeed any maximal $\ifmmode\ell\else\polishlcross\fi$-cycle includes all but $k-\ifmmode\ell\else\polishlcross\fi$ vertices and adding any additional edge to the hypergraph would imply a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle. Let us now consider the case that $n= 2m(k-\ifmmode\ell\else\polishlcross\fi)$ for some integer~$m$. Similarly, let $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)=(V,E)$ be a $k$-uniform hypergraph on $n$ vertices that contains all edges incident to $A \subset V$, where $|A|=\frac{n}{2(k-\ifmmode\ell\else\polishlcross\fi)}-1$. Additionally, fix some $\ell+1$ vertices of $B = V\smallsetminus A$ and let $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)$ contain all edges on $B$ that contain all of these vertices, i.e., an $(\ifmmode\ell\else\polishlcross\fi+1)$-star. 
Again, of the $\frac{n}{k-\ifmmode\ell\else\polishlcross\fi}$ edges that a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle would have to contain, at most $\frac{n}{k-\ifmmode\ell\else\polishlcross\fi} - 2$ can be incident to $A$. So two edges would have to be completely contained in $B$ and be disjoint or intersect in exactly $\ifmmode\ell\else\polishlcross\fi$ vertices, which is impossible since the induced subhypergraph on $B$ only contains an $(\ifmmode\ell\else\polishlcross\fi+1)$-star. Note that for the minimum $(k-2)$-degree the $(\ifmmode\ell\else\polishlcross\fi+1)$-star on $B$ is only relevant if $\ifmmode\ell\else\polishlcross\fi=1$, in which case this star increases the minimum $(k-2)$-degree by one.
In~\cite{HaZh15b}, Han and Zhao proved the exact version of Theorem~\ref{theorem:asymp} when $k=3$, i.e., they obtained a sharp bound for $\delta_{1}(\cch)$. We extend this result to $k$-uniform hypergraphs.
\begin{theorem}[Main Result]\label{theorem:main}
For all integers $k\geq 4$ and $1\leq \ifmmode\ell\else\polishlcross\fi<k/2$ there exists $n_0$ such that every $k$-uniform hypergraph $\cch=(V,E)$ on $|V|=n\geq n_0$ vertices with $n\in(k-\ifmmode\ell\else\polishlcross\fi)\bbn$ and \begin{equation}\label{eq:sharp_minimum_degree}
\delta_{k-2}(\cch)
>
\delta_{k-2}(\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)) \end{equation} contains a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle. In particular, if \begin{equation*}
\delta_{k-2}(\cch)
\geq
\frac{4(k-\ifmmode\ell\else\polishlcross\fi)-1}{4{(k-\ifmmode\ell\else\polishlcross\fi)}^2} \binom{n}{2}, \end{equation*} then $\cch$ contains a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle. \end{theorem}
The following notion of extremality is motivated by the hypergraph $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)$. A $k$-uniform hypergraph $\cch=(V,E)$ is called \emph{$(\ifmmode\ell\else\polishlcross\fi,\xi)$-extremal} if there exists a partition $V=A\dcup B$ such that $|A|=\left\lceil \frac{n}{2(k-\ell)} - 1 \right\rceil$, $|B|=\left\lfloor \frac{2(k-\ifmmode\ell\else\polishlcross\fi)-1}{2(k-\ifmmode\ell\else\polishlcross\fi)}n + 1 \right\rfloor$ and $e(B)=|E\cap B^{(k)}|\leq \xi \binom{n}{k}$. We say that $A\dcup B$ is an \emph{$(\ifmmode\ell\else\polishlcross\fi,\xi)$-extremal partition} of $V$. Theorem~\ref{theorem:main} follows easily from the next two results, the so-called \emph{extremal case} (see Theorem~\ref{theorem:extremal} below) and the \emph{non-extremal case} (see Theorem~\ref{theorem:non-extremal}).
\begin{theorem}[Non-extremal Case]\label{theorem:non-extremal} For any $0<\xi<1$ and all integers $k\geq 4$ and $1\leq \ifmmode\ell\else\polishlcross\fi<k/2$, there exists $\gamma>0$ such that the following holds for sufficiently large $n$. Suppose $\cch$ is a $k$-uniform hypergraph on $n$ vertices with $n\in(k-\ifmmode\ell\else\polishlcross\fi)\bbn$ such that $\cch$ is not $(\ifmmode\ell\else\polishlcross\fi,\xi)$-extremal and \begin{equation*}
\delta_{k-2}(\cch)\geq\left(\frac{4(k-\ifmmode\ell\else\polishlcross\fi)-1}{4{(k-\ifmmode\ell\else\polishlcross\fi)}^2}-\gamma\right)\binom{n}{2}. \end{equation*} Then $\cch$ contains a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle. \qed \end{theorem} The non-extremal case was the main result of~\cite{BaMoScScSc16+}. \begin{theorem}[Extremal Case]\label{theorem:extremal} For any integers $k\geq 3$ and $1\leq \ifmmode\ell\else\polishlcross\fi<k/2$, there exists $\xi>0$ such that the following holds for sufficiently large $n$. Suppose $\cch$ is a $k$-uniform hypergraph on $n$ vertices with $n\in(k-\ifmmode\ell\else\polishlcross\fi)\bbn$ such that $\cch$ is $(\ifmmode\ell\else\polishlcross\fi,\xi)$-extremal and \begin{equation*}
\delta_{k-2}(\cch)
>
\delta_{k-2}(\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)). \end{equation*} Then $\cch$ contains a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle. \end{theorem}
In Section~\ref{sec:overview} we give an overview of the proof of Theorem~\ref{theorem:extremal} and state Lemma~\ref{lem:mainlemma}, the main result required for the proof. In Section~\ref{sec:mainproof} we first prove some auxiliary lemmas and then we prove Lemma~\ref{lem:mainlemma}.
\section{Overview}\label{sec:overview}
Let $\cch=(V,E)$ be a $k$-uniform hypergraph and let $X,Y\subset V$ be disjoint subsets. Given a vertex set $L\subset V$ we denote by $d(L,X^{(i)} Y^{(j)})$ the number of edges of the form $L \cup I \cup J$, where $I \in X^{(i)}$, $J \in Y^{(j)}$, and $|L| + i + j = k$. We allow for $Y^{(j)}$ to be omitted when $j$ is zero and write $d(v,X^{(i)} Y^{(j)})$ for $d(\{ v \},X^{(i)} Y^{(j)})$.
The proof of Theorem~\ref{theorem:extremal} follows ideas from~\cite{HaZh15}, where a corresponding result with a $(k-1)$-degree condition is proved. Let $\cch=(V,E)$ be an extremal hypergraph satisfying~\eqref{eq:sharp_minimum_degree}. We first construct an $\ifmmode\ell\else\polishlcross\fi$-path~$\ccq$ in $\cch$ (see~Lemma~\ref{lem:mainlemma} below) with ends $L_0$ and $L_1$ such that there is a partition $A_{\ast}\dcup B_{\ast}$ of $(V\smallsetminus \ccq) \cup L_0 \cup L_1$ composed only of ``typical'' vertices (see~\ref{it:2-mainlemma} and~\ref{it:3-mainlemma} below). The set $A_{\ast}\cup B_{\ast}$ is suitable for an application of Lemma~\ref{lem:3.10} below, which ensures the existence of an $\ifmmode\ell\else\polishlcross\fi$-path $\ccq'$ on $A_{\ast}\cup B_{\ast}$ with $L_0$ and $L_1$ as ends. Note that the existence of a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-cycle in $\cch$ is guaranteed by $\ccq$ and $\ccq'$. So, in order to prove Theorem~\ref{theorem:extremal}, we only need to prove the following lemma.
\begin{lemma}[Main Lemma]\label{lem:mainlemma}
For any $\varrho > 0$ and all integers $k\geq 3$ and $1\leq \ifmmode\ell\else\polishlcross\fi <k/2$, there exists a positive~$\xi$ such that the following holds for sufficiently large~$n\in (k-\ifmmode\ell\else\polishlcross\fi)\bbn$.
Suppose that~$\cch=(V,E)$ is an $(\ifmmode\ell\else\polishlcross\fi,\xi)$-extremal $k$-uniform hypergraph on~$n$ vertices and
\[
\delta_{k-2}(\cch)
>
\delta_{k-2}(\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)).
\]
Then there exists a non-empty $\ifmmode\ell\else\polishlcross\fi$-path $\ccq$ in $\cch$ with ends $L_0$ and $L_1$ and a partition $A_{\ast}\dcup B_{\ast}=(V\smallsetminus \ccq) \cup L_0 \cup L_1$ where $L_0, L_1\subset B_{\ast}$ such that the following hold:
\begin{enumerate}[label=\rmlabel]
\item\label{it:1-mainlemma} $|B_{\ast}|=(2k-2\ifmmode\ell\else\polishlcross\fi-1)|A_{\ast}|+\ifmmode\ell\else\polishlcross\fi$,
\item\label{it:2-mainlemma} $d(v,B_{\ast}^{(k-1)})\geq (1-\varrho) \binom{|B_{\ast}|}{k-1}$ for any vertex $v\in A_{\ast}$,
\item\label{it:3-mainlemma} $d(v,A_{\ast}^{(1)}B_{\ast}^{(k-2)})\geq (1-\varrho) |A_{\ast}| \binom{|B_{\ast}|}{k-2}$ for any vertex $v\in B_{\ast}$,
\item\label{it:4-mainlemma} $d(L_0,A_{\ast}^{(1)} B_{\ast}^{(k - \ifmmode\ell\else\polishlcross\fi - 1)}), d(L_1,A_{\ast}^{(1)}B_{\ast}^{(k - \ifmmode\ell\else\polishlcross\fi - 1)}) \geq (1-\varrho)|A_{\ast}|\binom{|B_{\ast}|}{k - \ifmmode\ell\else\polishlcross\fi - 1}$.
\end{enumerate} \end{lemma}
The next result, which we will use to conclude the proof of Theorem~\ref{theorem:extremal}, was obtained by Han and Zhao (see~\cite{HaZh15}*{Lemma~3.10}).
\begin{lemma}\label{lem:3.10}
For any integers $k\geq 3$ and $1\leq \ifmmode\ell\else\polishlcross\fi < k/2$ there exists $\varrho > 0$ such that the following holds.
If $\cch$ is a sufficiently large $k$-uniform hypergraph with a partition $V(\cch)=A_{\ast}\dcup B_{\ast}$ and there exist two disjoint $\ifmmode\ell\else\polishlcross\fi$-sets $L_0,L_1\subset B_{\ast}$ such that~\ref{it:1-mainlemma}--\ref{it:4-mainlemma} hold, then $\cch$ contains a Hamiltonian $\ifmmode\ell\else\polishlcross\fi$-path $\ccq'$ with $L_0$ and $L_1$ as ends.
\qed \end{lemma}
\section{Proof of the Main Lemma}\label{sec:mainproof}
We will start this section by describing the setup for the proof, which will be fixed for the rest of the paper. Then we will prove some auxiliary lemmas and finally prove Lemma~\ref{lem:mainlemma}. Let $\varrho > 0$ and integers $k\geq 3$ and $1\leq \ifmmode\ell\else\polishlcross\fi<k/2$ be given. Fix constants \[
\frac{1}{k}, \frac{1}{\ifmmode\ell\else\polishlcross\fi}, \varrho \gg \delta \gg \varepsilon \gg \eps' \gg \vartheta \gg \xi. \] Let $n\in (k-\ifmmode\ell\else\polishlcross\fi)\bbn$ be sufficiently large and let $\cch$ be an $(\ifmmode\ell\else\polishlcross\fi,\xi)$-extremal $k$-uniform hypergraph on $n$ vertices that satisfies the $(k-2)$-degree condition \begin{equation*}
\delta_{k-2}(\cch)
>
\delta_{k-2}(\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)). \end{equation*} Let $A \dcup B=V(\cch)$ be a minimal extremal partition of~$V(\cch)$, i.e.\ a partition satisfying \begin{equation}
\label{eq:partition-sizes}
a = |A| = \left\lceil \frac{n}{2(k-\ifmmode\ell\else\polishlcross\fi)} \right\rceil - 1, \quad b = |B| = n - a, \qand e(B) \leq \xi \binom{n}{k}, \end{equation} which minimises $e(B)$. Recall that the extremal example $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)$ implies \begin{equation}
\label{eq:minimumDegree}
\delta_{k-2}(\cch)
>
\binom{a}{2} + a(b - k + 2). \end{equation} Since $e(B)\leq \xi\binom{n}{k}$, we expect most vertices $v\in B$ to have low degree $d(v,B^{(k-1)})$ into~$B$. Also, most $v\in A$ must have high degree $d(v,B^{(k-1)})$ into $B$ such that the degree condition for $(k-2)$-sets in~$B$ can be satisfied. Thus, we define the sets $A_{\eps}$ and $B_{\eps}$ to consist of vertices of high respectively low degree into~$B$ by \begin{align*}
A_{\eps} &= \left\lbrace v\in V\colon d(v,B^{(k-1)})\geq (1-\varepsilon)\binom{|B|}{k-1}\right\rbrace,\\
B_{\eps} &=\left\lbrace v\in V\colon d(v,B^{(k-1)})\leq \varepsilon\binom{|B|}{k-1}\right\rbrace, \end{align*}
and set $V_{\eps}=V\smallsetminus (A_{\eps} \cup B_{\eps} )$. We will write $a_{\eps} = |A_{\eps}|$, $b_{\eps} = |B_{\eps}|$, and $v_{\eps} = |V_{\eps}|$. It follows from these definitions that \begin{equation}\label{eq:Aeps-Beps-inclusion}
\text{if } A\cap B_{\eps} \neq \varnothing,
\quad \text{then} \quad
B \subset B_{\eps} ,
\quad \text{while otherwise} \quad
A \subset A_{\eps}. \end{equation} For the first inclusion, consider a vertex $v \in A \cap B_{\eps}$ and a vertex $w \in B \smallsetminus B_{\eps}$. Exchanging~$v$ and~$w$ would create a minimal partition with fewer edges in $e(B)$, a contradiction to the minimality of the extremal partition. The other inclusion is similarly implied by the minimality.
Actually, as we shall show below, the sets $A_{\eps}$ and $B_{\eps}$ are not too different from $A$ and $B$ respectively: \begin{equation}\label{eq:Aeps-Beps-sizes}
|A\smallsetminus A_{\eps} |, |B\smallsetminus B_{\eps} |, |A_{\eps} \smallsetminus A|, |B_{\eps} \smallsetminus B|\leq \vartheta b \qand |V_{\eps}|\leq 2\vartheta b. \end{equation} Note that by the minimum $(k-2)$-degree \[
\binom{a}{2}\binom{b}{k-2}+a\binom{b}{k-1}(k-1)
<
\binom{b}{k-2}\delta_{k-2}(\cch)
\leq
\sum_{S\in B^{(k-2)}} d(S). \]
Every vertex $v \in A\smallsetminus A_{\eps}$ satisfies $d(v, B^{(k-1)}) < (1 - \varepsilon)\binom{b}{k-1}$, so we have \begin{align*}
\sum_{S\in B^{(k-2)}} d(S)
\leq&
\binom{a}{2}\binom{b}{k-2}+a\binom{b}{k-1}(k-1) \\
&+ e(B)\binom{k}{2} - |A \smallsetminus A_{\eps}| \varepsilon \binom{b}{k-1}(k-1). \end{align*}
Consequently $|A\smallsetminus A_{\eps} |\leq \vartheta b$, as $e(B) < \xi \binom{n}{k}$ and $\xi \ll \vartheta, \varepsilon$.
Moreover, $|B\smallsetminus B_{\eps} | \leq \vartheta b$ holds as a high number of vertices in $B \smallsetminus B_{\eps} $ would contradict $e(B) < \xi \binom{b}{k}$. The other three inequalities~\eqref{eq:Aeps-Beps-sizes} follow from the already shown ones, for example for $|A_{\eps} \smallsetminus A| < \vartheta b$ observe that \[
A_{\eps} \smallsetminus A = A_{\eps} \cap B \subset B \smallsetminus B_{\eps}. \] Although the vertices in $B_{\eps}$ were defined by their low degree into $B$, they also have low degree into the set $B_{\eps}$ itself; for any $v \in B_{\eps}$ we get \begin{align*}
d(v, B_{\eps}^{(k - 1)})
&\leq
d(v, B^{(k - 1)}) + |B_{\eps}\smallsetminus B| \binom{|B_{\eps}| - 1}{k - 2}\\
&\leq
\varepsilon \binom{b}{k -1 } + \vartheta b |B_{\eps}|^{k -1}\\
&<
2 \varepsilon \binom{|B_{\eps}|}{k - 1}. \end{align*}
Since we are interested in $\ifmmode\ell\else\polishlcross\fi$-paths, the degree of $\ifmmode\ell\else\polishlcross\fi$-tuples in $B_{\eps}$ will be of interest, which motivates the following definition. An $\ifmmode\ell\else\polishlcross\fi$-set $L\subset B_{\eps}$ is called $\varepsilon$-\emph{typical} if \[
d(L,B^{(k - \ifmmode\ell\else\polishlcross\fi)})\leq \varepsilon\binom{|B|}{k-\ifmmode\ell\else\polishlcross\fi}. \] If $L$ is not $\varepsilon$-typical, then it is called $\varepsilon$-\emph{atypical}. Indeed, most $\ifmmode\ell\else\polishlcross\fi$-sets in $B_{\eps}$ are $\varepsilon$-typical; denote by $x$ the number of $\varepsilon$-atypical sets in~$B_{\eps}$. We have \begin{equation}\label{eq:num-typical-sets}
\frac{x\cdot\varepsilon \binom{|B|}{k-\ifmmode\ell\else\polishlcross\fi}}{\binom{k}{\ifmmode\ell\else\polishlcross\fi}} \leq e(B \cup B_{\eps}) \leq \xi \binom{n}{k} + \vartheta {|B|}^k,
\quad \text{implying} \quad
x\leq \eps' \binom{|B_{\eps}|}{\ifmmode\ell\else\polishlcross\fi}. \end{equation}
\begin{lemma}\label{lem:typical-degree}
The following holds for any $B_{\eps}^{(m)}$-set $M$ if $m \leq k-2$.
\[
d(M,A_{\eps}^{(1)} B_{\eps}^{(k - m - 1)}) + \frac{k-m}{2} d(M, B_{\eps}^{(k - m)})
\geq
\left(1-\delta\right)|A_{\eps}|\binom{|B_{\eps}| - m}{k-m-1}.
\]
In particular, the following holds for any $\varepsilon$-typical $B^{(\ifmmode\ell\else\polishlcross\fi)}$-set $L$.
\[
d(L,A_{\eps}^{(1)} B_{\eps}^{(k - \ifmmode\ell\else\polishlcross\fi - 1)})\geq (1-2\delta)|A_{\eps}|\binom{|B_{\eps}|-\ifmmode\ell\else\polishlcross\fi}{k-\ifmmode\ell\else\polishlcross\fi-1}.
\] \end{lemma}
In the proof of the main lemma we will connect two $\varepsilon$-typical sets only using vertices that are unused so far. Even more, we want to connect two $\varepsilon$-typical sets using exactly one vertex from $A$. The following corollary of Lemma~\ref{lem:typical-degree} allows us to do this.
\begin{corollary}\label{corr:connect-extend-typical}
Let $L$ and $L'$ be two disjoint $\varepsilon$-typical sets in $B_{\eps}$ and $U\subset V$ with~$|U| \leq \varepsilon n$.
Then the following holds.
\begin{enumerate}[label=\alabel]
\item\label{it:connect-typical} There exists an $\ifmmode\ell\else\polishlcross\fi$-path disjoint from $U$ of size two with ends $L$ and $L'$ that contains exactly one vertex from $ A_{\eps}$.
\item\label{it:extend-typical} There exist $a \in A_{\eps} \smallsetminus U$ and a $(k - \ifmmode\ell\else\polishlcross\fi - 1)$-set $C \subset B_{\eps} \smallsetminus U$ such that $L \cup a \cup C$ is an edge in $\cch$ and every $\ifmmode\ell\else\polishlcross\fi$-subset of $C$ is $\varepsilon$-typical.
\end{enumerate} \end{corollary} \begin{proof}[Proof of Corollary~\ref{corr:connect-extend-typical}]
For~\ref{it:connect-typical}, the second part of Lemma~\ref{lem:typical-degree} for $L$ and $L'$ implies that they both extend to an edge with at least $(1 - 2\delta)|A_{\eps}|\binom{|B_{\eps}| - \ifmmode\ell\else\polishlcross\fi}{k - \ifmmode\ell\else\polishlcross\fi - 1}$ sets in $A_{\eps}^{(1)}B_{\eps}^{(k - \ifmmode\ell\else\polishlcross\fi - 1)}$.
Only few of those intersect $U$ and by an averaging argument we obtain two sets $C, C' \in A_{\eps}^{(1)}B_{\eps}^{(k - \ifmmode\ell\else\polishlcross\fi - 1)}$ such that $|C \cap C'| = \ifmmode\ell\else\polishlcross\fi$ and $L \cup C$ as well as $L' \cup C'$ are edges in $\cch$, which yields the required $\ifmmode\ell\else\polishlcross\fi$-path.
In view of~\eqref{eq:num-typical-sets},~\ref{it:extend-typical} is a trivial consequence of the second part of Lemma~\ref{lem:typical-degree}. \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:typical-degree}]
Let $m \leq k-2$ and let $M \in B_{\eps}^{(m)}$ be an $m$-set.
We will make use of the following sum over all $(k-2)$-sets $D \subset B_{\eps}$ that contain $M$.
\begin{equation}\label{eq:sumdeg1}
\begin{split}
\sum_{\substack{M \subset D \subset B_{\eps}\\|D| = k - 2}} d(D)
=
\sum_{\substack{M \subset D \subset B_{\eps} \\ |D| = k - 2}} \Big(& d(D, A_{\eps}^{(1)} B_{\eps}^{(1)}) + d(D, {(A_{\eps} \cup V_{\eps})}^{(2)}) \\
& \qquad + d(D,B_{\eps}^{(2)}) + d(D, V_{\eps}^{(1)} B_{\eps}^{(1)})\Big)
\end{split}
\end{equation}
Note that we can relate the sums $\sum d(D, A_{\eps}^{(1)}B_{\eps}^{(1)})$ and $\sum d(D,B_{\eps}^{(2)})$ in~\eqref{eq:sumdeg1} to the terms in question as follows.
\begin{equation}\label{eq:sumdeg2}
\begin{split}
d(M, A_{\eps}^{(1)} B_{\eps}^{(k - m - 1)})
&=
\frac{1}{k - m - 1}\sum_{\substack{M \subset D \subset B_{\eps}\\|D| = k - 2}} d(D, A_{\eps}^{(1)} B_{\eps}^{(1)}),
\\
d(M, B_{\eps}^{(k - m)})
&=
\frac{1}{\binom{k-m}{2}}\sum_{\substack{M \subset D \subset B_{\eps}\\|D| = k - 2}} d(D, B_{\eps}^{(2)}).
\end{split}
\end{equation}
We will bound some of the terms on the right-hand side of~\eqref{eq:sumdeg1}.
It directly follows from~\eqref{eq:Aeps-Beps-sizes} that $d(D, {(A_{\eps}\cup V_{\eps})}^{(2)})\leq \binom{a+3\vartheta b}{2}$; moreover, $d(D, V_{\eps}^{(1)} B_{\eps}^{(1)}) \leq 2 \vartheta bb_{\eps}$.
Using the minimum $(k-2)$-degree condition~\eqref{eq:minimumDegree} we obtain
\begin{equation*}
\sum_{\substack{M \subset D \subset B_{\eps}\\|D| = k - 2}} d(D)
>
\binom{b_{\eps} - m}{k - m - 2}\left(\binom{a}{2} + a(b - k + 2)\right).
\end{equation*}
Combining these estimates with~\eqref{eq:sumdeg1}~and~\eqref{eq:sumdeg2} yields
\begin{align*}
& d(M, A_{\eps}^{(1)} B_{\eps}^{(k - m - 1)})
+ \frac{k-m}{2} d(M, B_{\eps}^{(k - m)})
\\
& \quad \geq
\frac{1}{k - m - 1}\binom{b_{\eps} - m}{k - m - 2}
\left(\binom{a}{2} + a(b-k+2) - \binom{a+3\vartheta b}{2} - 2\vartheta bb_{\eps} \right)
\\
& \quad \geq
\left(1-\delta\right)a_{\eps}\binom{b_{\eps} - m}{k - m - 1}.
\end{align*}
For the second part of the lemma, note that the definition of $\varepsilon$-typicality and $\varepsilon \ll \delta$ imply that $\frac{k-\ifmmode\ell\else\polishlcross\fi}{2} d(L, B_{\eps}^{(k - \ifmmode\ell\else\polishlcross\fi)})$ is smaller than $\delta a_{\eps} \binom{b_{\eps} - \ifmmode\ell\else\polishlcross\fi}{k - \ifmmode\ell\else\polishlcross\fi - 1}$ for any $\varepsilon$-typical $\ifmmode\ell\else\polishlcross\fi$-set $L$, which concludes the proof. \end{proof}
For Lemma~\ref{lem:mainlemma}, we want to construct an $\ifmmode\ell\else\polishlcross\fi$-path $\ccq$, such that $V_{\eps} \subset V(\ccq)$ and the remaining sets $A_{\eps}\smallsetminus \ccq$ and $B_{\eps}\smallsetminus \ccq$ have the right relative proportion of vertices, i.e., their sizes are in a ratio of one to $(2k - 2\ifmmode\ell\else\polishlcross\fi - 1)$. If $|A \cap B_{\eps}| > 0$, then $B \subset B_{\eps}$ (see~\eqref{eq:Aeps-Beps-inclusion}) and so $\ccq$ should cover $V_{\eps}$ and contain the right number of vertices from $B_{\eps}$. For this, we have to find suitable edges inside $B_{\eps}$, which the following lemma ensures.
\begin{lemma}\label{lem:2q-path}
Suppose that $q = |A \cap B_{\eps}| > 0$.
Then there exist $2q + 2$ disjoint paths of size three, each of which contains exactly one vertex from $A_{\eps}$ and has two $\varepsilon$-typical sets as its ends. \end{lemma} \begin{proof}
We say that an $(\ifmmode\ell\else\polishlcross\fi - 1)$-set $M \subset B_{\eps}$ is $\emph{good}$ if it is a subset of at least $(1 - \sqrt{\eps' })b_{\eps}$ $\varepsilon$-typical sets, otherwise we say that the set is \emph{bad}.
We will first show that there are $2q+2$ edges in $B_{\eps}$, each containing one $\varepsilon$-typical and one \emph{good} $(\ifmmode\ell\else\polishlcross\fi - 1)$-set.
Then we will connect pairs of these edges to $\ifmmode\ell\else\polishlcross\fi$-paths of size three.
Suppose that $q = |A \cap B_{\eps}| > 0$.
So $B \subset B_{\eps}$ by~\eqref{eq:Aeps-Beps-inclusion} and consequently $|B_{\eps}| = |B| + q$ and $q \leq \vartheta |B|$.
It is not hard to see from~\eqref{eq:num-typical-sets} that at most a $\sqrt{\eps'}$ fraction of the $(\ifmmode\ell\else\polishlcross\fi - 1)$-sets in~$B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi - 1)}$ are bad.
Hence, at least
\[
\left( 1 - \binom{k - 2}{\ifmmode\ell\else\polishlcross\fi}\eps' - \binom{k - 2}{\ifmmode\ell\else\polishlcross\fi - 1}\sqrt{\eps'} \right) \binom{b}{k-2}
\]
$(k -2)$-sets in $B_{\eps}$ contain no $\varepsilon$-atypical or bad subset.
Let $\ccb \subset B_{\eps}^{(k)}$ be the set of edges inside $B_{\eps}$ that contain such a $(k-2)$-set.
For all $M \in B_{\eps}^{(k - 2)}$, by the minimum degree condition, we have $d(M, B_{\eps}^{(2)}) \geq q(b-k+2) + \binom{q}{2}$ and, with the above, we have
\begin{align*}
|\ccb| & \geq \left(1 - \binom{k - 2}{\ifmmode\ell\else\polishlcross\fi}\eps' - \binom{k - 2}{\ifmmode\ell\else\polishlcross\fi - 1}\sqrt{\eps' }\right)\binom{b}{k-2}\frac{q(b-k+2)}{\binom{k}{2}} \nonumber \\
& = \left(1 - \binom{k - 2}{\ifmmode\ell\else\polishlcross\fi}\eps' - \binom{k - 2}{\ifmmode\ell\else\polishlcross\fi - 1}\sqrt{\eps' }\right)\binom{b}{k-1}\frac{2q}{k} \geq \frac{q}{k} \binom{b}{k-1}.
\end{align*}
On the other hand, for any $v \in B_{\eps}$ we have $d(v, B_{\eps}^{(k - 1)}) < 2 \varepsilon \binom{b_{\eps}}{k - 1}$ which implies that any edge in $\ccb$ intersects at most $2k \varepsilon \binom{b_{\eps}}{k - 1}$ other edges in $\ccb$.
So, in view of $\varepsilon \ll \frac{1}{k}$ we may pick a set $\ccb'$ of $2q+2$ disjoint edges in $\ccb$.
We will connect each of the edges in $\ccb'$ to an $\varepsilon$-typical set.
Assume we have picked the first $i-1$ desired $\ifmmode\ell\else\polishlcross\fi$-paths, say $\ccp_1, \dots, \ccp_{i-1}$, and denote by $U$ the set of vertices contained in one of the paths or one of the edges in $\ccb'$.
For the rest of this proof, when we pick vertices and edges, they shall always be disjoint from $U$ and everything chosen before.
Let $e$ be an edge in $\ccb'$ we have not considered yet and pick an arbitrary $\varepsilon$-typical set $L' \subset B_{\eps} \smallsetminus U$.
We will first handle the cases that $2\ifmmode\ell\else\polishlcross\fi + 1 < k$ or that $\ifmmode\ell\else\polishlcross\fi=1$, $k=3$.
In the first case, a $(k-2)$-set that contains no $\varepsilon$-atypical set already contains two disjoint $\varepsilon$-typical sets.
In the second case, an $\ifmmode\ell\else\polishlcross\fi$-set $\{v\}$ is $\varepsilon$-typical for any vertex $v$ in $B_{\eps}$ by the definition of $\varepsilon$-typicality.
Hence in both cases $e$ contains two disjoint $\varepsilon$-typical sets, say $L_0$ and $L_1$.
We can use Corollary~\ref{corr:connect-extend-typical}\,\ref{it:connect-typical}, as $|U| \leq 6kq$, to connect $L_1$ to $L'$ and obtain an $\ifmmode\ell\else\polishlcross\fi$-path $\ccp_i$ of size three that contains one vertex in $A_{\eps}$ and has $\varepsilon$-typical ends $L_0$ and $L'$.
So now assume that $2\ifmmode\ell\else\polishlcross\fi + 1 = k$ and $k > 3$, in particular $k - 2 = 2\ifmmode\ell\else\polishlcross\fi - 1$ and we may split the $(k-2)$-set considered in the definition of $\ccb$ into an $\varepsilon$-typical $\ifmmode\ell\else\polishlcross\fi$-set $L$ and a good $(\ifmmode\ell\else\polishlcross\fi-1)$-set $G$.
Moreover, let $w \in e \smallsetminus (L \cup G)$ be one of the remaining two vertices and set~$N = G \cup \{w\}$.
First assume that $d(N, A_{\eps}^{(1)} B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi)}) \geq \frac{\delta}{3} a_{\eps}\binom{b_{\eps}}{\ifmmode\ell\else\polishlcross\fi}$.
As $\vartheta \ll \delta$, at most $\frac{\delta}{3} a_{\eps} \binom{b}{\ifmmode\ell\else\polishlcross\fi}$ sets in $A_{\eps}^{(1)} B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi)}$ intersect $U$.
So it follows from Lemma~\ref{lem:typical-degree} that there exist $A_{\eps}^{(1)} B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi)}$-sets $C$, $C'$ such that $N \cup C$ and $L' \cup C'$ are edges, $|C \cap C'| = \ifmmode\ell\else\polishlcross\fi$ and~$|C \cap C' \cap A_{\eps}|=1$.
Now assume that $d(N, A_{\eps}^{(1)} B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi)}) < \frac{\delta}{3} a_{\eps}\binom{b_{\eps}}{\ifmmode\ell\else\polishlcross\fi}$.
As the good set $G$ forms an $\varepsilon$-typical set with most vertices in $B_{\eps}$, there exists $v \in B_{\eps}\smallsetminus U$ such that
\begin{equation*}
d(N \cup \{v\}, A_{\eps}^{(1)} B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi - 1)}) < \delta a_{\eps}\binom{b_{\eps}}{\ifmmode\ell\else\polishlcross\fi - 1}
\end{equation*}
and $G \cup \{ v \}$ is an $\varepsilon$-typical set.
Lemma~\ref{lem:typical-degree} implies that
\begin{align*}
d(N \cup \{v\}, B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi)})
&\geq
\frac{2}{\ifmmode\ell\else\polishlcross\fi} \left((1-\delta) a_{\eps} \binom{b_{\eps}-(\ell+1)}{\ifmmode\ell\else\polishlcross\fi - 1} - \delta a_{\eps}\binom{b_{\eps}}{\ifmmode\ell\else\polishlcross\fi - 1}\right)\\
&\geq
\frac{2}{\ifmmode\ell\else\polishlcross\fi} \left(\frac{1}{2} - 2\delta\right) a_{\eps} \binom{b_{\eps}}{\ifmmode\ell\else\polishlcross\fi - 1}\\
&\geq
\delta \binom{b_{\eps}}{\ifmmode\ell\else\polishlcross\fi}.
\end{align*}
So there exists an $\varepsilon$-typical $\ifmmode\ell\else\polishlcross\fi$-set $L^* \subset (B_{\eps} \smallsetminus U)$ such that $N \cup L^* \cup \{ v \}$ is an edge in $\cch$.
Use Lemma~\ref{corr:connect-extend-typical}\,\ref{it:connect-typical} to connect $L^*$ to $L'$ and obtain an $\ifmmode\ell\else\polishlcross\fi$-path $\ccp_i$ of size three that contains one vertex in $A_{\eps}$ and has $\varepsilon$-typical ends $G \cup \{ v \}$ and $L'$. \end{proof}
If the hypergraph we consider is very close to the extremal example then Lemma~\ref{lem:2q-path} does not apply and we will need the following lemma.
\begin{lemma}\label{lem:one-or-two-edges}
Suppose that $B = B_{\eps}$.
If $n$ is an odd multiple of $k-\ell$ then there exists a single edge on $B_{\eps}$ containing two $\varepsilon$-typical $\ell$-sets.
If $n$ is an even multiple of $k-\ell$ then there either exist two disjoint edges on $B_{\eps}$ each containing two $\varepsilon$-typical $\ell$-sets or an $\ell$-path of size two with $\varepsilon$-typical ends. \end{lemma} \begin{proof}
For the proof of this lemma all vertices and edges we consider will always be completely contained in $B_{\eps}$.
First assume that there exists an $\varepsilon$-atypical $\ell$-set $L$.
Recall that this means that $d(L,B^{(k - \ifmmode\ell\else\polishlcross\fi)}) > \varepsilon\binom{|B|}{k-\ifmmode\ell\else\polishlcross\fi}$ so in view of~\eqref{eq:num-typical-sets} and $\eps' \ll \varepsilon$ we can find two disjoint $(k-\ifmmode\ell\else\polishlcross\fi)$-sets extending it to an edge, each containing an $\varepsilon$-typical set, which would prove the lemma.
So we may assume that all $\ell$-sets in $B_{\eps}^{(\ifmmode\ell\else\polishlcross\fi)}$ are $\varepsilon$-typical.
We infer from the minimum degree condition that $B_{\eps}$ contains a single edge, which proves the lemma in the case that $n$ is an odd multiple of $k-\ifmmode\ell\else\polishlcross\fi$ and for the rest of the proof we assume that $n$ is an even multiple of $k-\ifmmode\ell\else\polishlcross\fi$.
Assume for a moment that $\ifmmode\ell\else\polishlcross\fi = 1$.
Recall that in this case any $(k-2)$-set in $B$ in the extremal hypergraph $\ccx_{k,\ifmmode\ell\else\polishlcross\fi}(n)$ is contained in one edge.
Consequently, the minimum degree condition implies that any $(k-2)$-set in $B_{\eps}$ extends to at least two edges on $B_{\eps}$.
Fix some edge $e$ in $B_{\eps}$; any other edge on $B_{\eps}$ has to intersect $e$ in at least two vertices or the lemma would hold.
Consider any pair of disjoint $(k-2)$-sets $K$ and $M$ in $B_{\eps} \smallsetminus e$ to see that of the four edges they extend to, there is a pair which is either disjoint or intersects in one vertex, proving the lemma for the case $\ifmmode\ell\else\polishlcross\fi=1$.
Now assume that $\ifmmode\ell\else\polishlcross\fi > 1$.
In this case the minimum degree condition implies that any $(k-2)$-set in $B_{\eps}$ extends to at least one edge on $B_{\eps}$.
Again, fix some edge $e$ in $B_{\eps}$; any other edge on $B_{\eps}$ has to intersect $e$ in at least one vertex or the lemma would hold.
Applying the minimum degree condition to all $(k-2)$-sets disjoint from $e$ implies that one vertex $v \in e$ is contained in at least $\frac{1}{2k^2} \binom{|B_{\eps}|}{k-2}$ edges.
We now consider the $(k-1)$-uniform link hypergraph of $v$ on $B_{\eps}$.
Since any two edges intersecting in $\ifmmode\ell\else\polishlcross\fi-1$ vertices would finish the proof of the lemma, we may assume that there is no such pair of edges.
However, a result of Frankl and Füredi~\cite[Theorem 2.2]{FranklFuredi} guarantees that this $(k-1)$-uniform hypergraph without an intersection of size $\ifmmode\ell\else\polishlcross\fi-1$ contains at most $\binom{|B_{\eps}|}{k-\ifmmode\ell\else\polishlcross\fi-1}$ edges, a contradiction. \end{proof}
The following lemma will allow us to handle the vertices in $V_{\eps}$.
\begin{lemma}\label{lemma:pathV0}
Let $U \subset B_{\eps}$ with $|U| \leq 4k\vartheta$.
There exists a family $\ccp_1, \ldots, \ccp_{v_{\eps}}$ of disjoint $\ifmmode\ell\else\polishlcross\fi$-paths of size two, each of which is disjoint from $U$ such that for all $i \in [v_{\eps}]$
\[
|V(\ccp_i) \cap V_{\eps}| = 1 \qand |V(\ccp_i) \cap B_{\eps}| = 2k - \ifmmode\ell\else\polishlcross\fi - 1,
\]
and both ends of $\ccp_i$ are $\varepsilon$-typical sets. \end{lemma} \begin{proof}
Let $V_{\eps} = \{ v_1, \dots, v_{v_{\eps}} \}$.
We will iteratively pick the paths.
Assume we have already chosen $\ifmmode\ell\else\polishlcross\fi$-paths $\ccp_1, \dots, \ccp_{i-1}$ containing the vertices $v_1, \dots, v_{i-1}$ and satisfying the lemma.
Let $U'$ be the set of all vertices in $U$ or in one of those $\ifmmode\ell\else\polishlcross\fi$-paths.
From $v_i \notin B_{\eps}$ we get
\[
d(v_i, B_{\eps}^{(k-1)}) \geq d(v_i, B) - |B \smallsetminus B_{\eps}| \cdot \binom{|B|}{k-2} \geq \frac{\varepsilon}{2} \binom{b_{\eps}}{k-1}.
\]
From~\eqref{eq:num-typical-sets} we get that at most $k^\ifmmode\ell\else\polishlcross\fi \eps' \binom{b_{\eps}}{k-1}$ sets in $B_{\eps}^{(k-1)}$ contain at least one $\varepsilon$-atypical $\ifmmode\ell\else\polishlcross\fi$-set.
Also, less than~$\frac{\varepsilon}{8} \binom{b_{\eps}}{k-1}$ sets in $B_{\eps}^{(k-1)}$ contain one of the vertices of $U'$.
In total, at least $\frac{\varepsilon}{4} \binom{b_{\eps}}{k-1}$ of the $B_{\eps}^{(k-1)}$-sets form an edge with $v_i$.
So we may pick two edges $e$ and $f$ in $V_{\eps}^{(1)} B_{\eps}^{(k-1)}$ that contain the vertex $v_i$ and intersect in $\ifmmode\ell\else\polishlcross\fi$ vertices.
In particular, these edges form an $\ifmmode\ell\else\polishlcross\fi$-path of size two as required by the lemma. \end{proof}
We can now proceed with the proof of Lemma~\ref{lem:mainlemma}. Recall that we want to prove the existence of an $\ifmmode\ell\else\polishlcross\fi$-path $\ccq$ in $\cch$ with ends $L_0$ and $L_1$ and a partition \[
A_{\ast}\dcup B_{\ast}=(V\smallsetminus \ccq)\dcup L_0\dcup L_1 \]
satisfying properties~\ref{it:1-mainlemma}--\ref{it:4-mainlemma} of Lemma~\ref{lem:mainlemma}. Set $q = |A \cap B_{\eps}|$. We will split the construction of the $\ifmmode\ell\else\polishlcross\fi$-path $\ccq$ into two cases, depending on whether $q=0$ or not.
First, suppose that $q > 0$. In the following, we denote by $U$ the set of vertices of all edges and $\ifmmode\ell\else\polishlcross\fi$-paths chosen so far. Note that we will always have $|U| \leq 20 k \vartheta n$ and hence we will be in a position to apply Corollary~\ref{corr:connect-extend-typical}. We use Lemma~\ref{lem:2q-path} to obtain paths $\ccq_1, \ldots, \ccq_{2q+2}$ and then we apply Lemma~\ref{lemma:pathV0} to obtain $\ifmmode\ell\else\polishlcross\fi$-paths $\ccp_1, \ldots, \ccp_{v_{\eps}}$. Every path $\ccq_i$, for $i \in [2q+2]$, contains $3k - 2\ifmmode\ell\else\polishlcross\fi - 1$ vertices from $B_{\eps}$ and one from $A_{\eps}$, while every $\ccp_j$, for $j \in [v_{\eps}]$, contains $2k - \ifmmode\ell\else\polishlcross\fi - 1$ from $B_{\eps}$ and one from $V_{\eps}$.
As the ends of all these paths are $\varepsilon$-typical, we apply Corollary~\ref{corr:connect-extend-typical}\,\ref{it:connect-typical} repeatedly to connect them to one $\ifmmode\ell\else\polishlcross\fi$-path $\ccp$. In each of the $v_{\eps} + 2q + 1$ steps of connecting two $\ifmmode\ell\else\polishlcross\fi$-paths, we used one vertex from $A_{\eps}$ and $2k - 3\ifmmode\ell\else\polishlcross\fi - 1$ vertices from $B_{\eps}$. Overall, we have that \[
|V(\ccp) \cap A_{\eps}| = v_{\eps} + 4q + 3, \] as well as \[
|V(\ccp) \cap B_{\eps}| = (4k - 4\ifmmode\ell\else\polishlcross\fi - 2)v_{\eps} + (5k - 5\ifmmode\ell\else\polishlcross\fi - 2)(2q+2) - (2k - 3\ifmmode\ell\else\polishlcross\fi - 1). \]
Furthermore $|V(\ccp)| \leq 10 k \vartheta b$.
Using the identities $a_{\eps} + b_{\eps} + v_{\eps} = n$ and $a_{\eps} + q + v_{\eps} = a$, we will now establish property~\ref{it:1-mainlemma} of Lemma~\ref{lem:mainlemma}. Set $s(\ccp) = (2k - 2\ifmmode\ell\else\polishlcross\fi - 1)|A_{\eps}\smallsetminus V(\ccp)| - |B_{\eps}\smallsetminus V(\ccp)| - 2\ifmmode\ell\else\polishlcross\fi$, so \begin{align*}
s(\ccp) & = (2k - 2\ifmmode\ell\else\polishlcross\fi - 1)|A_{\eps} \smallsetminus V(\ccp)| - |B_{\eps} \smallsetminus V(\ccp)| - 2\ifmmode\ell\else\polishlcross\fi\\
& = (2k - 2\ifmmode\ell\else\polishlcross\fi - 1)(a_{\eps} - (v_{\eps} + 4q + 3)) - b_{\eps} \\
& \phantom{{} = {}} {} + (4k - 4\ifmmode\ell\else\polishlcross\fi -2)v_{\eps} + (5k - 5\ifmmode\ell\else\polishlcross\fi - 2)(2q+2) - (2k - 3\ifmmode\ell\else\polishlcross\fi - 1) - 2\ifmmode\ell\else\polishlcross\fi \\
& = (2k - 2\ifmmode\ell\else\polishlcross\fi - 1)a_{\eps} - b_{\eps} + (2k - 2\ifmmode\ell\else\polishlcross\fi - 1)v_{\eps} + 2(k - \ifmmode\ell\else\polishlcross\fi)q + 2k - 3\ifmmode\ell\else\polishlcross\fi \\
& = 2(k - \ifmmode\ell\else\polishlcross\fi)(a_{\eps} + v_{\eps} + q + 1) - n - \ifmmode\ell\else\polishlcross\fi \\
& = 2(k - \ifmmode\ell\else\polishlcross\fi)(a + 1) - n - \ifmmode\ell\else\polishlcross\fi. \end{align*} If $n/(k - \ifmmode\ell\else\polishlcross\fi)$ is even, $s(\ccp) = -\ifmmode\ell\else\polishlcross\fi$ (see~\eqref{eq:partition-sizes}) and we set $\ccq = \ccp$. Otherwise $s(\ccp) = k - 2\ifmmode\ell\else\polishlcross\fi$ and we use Corollary~\ref{corr:connect-extend-typical}\,\ref{it:extend-typical} to append one edge to $\ccp$ to obtain $\ccq$. It is easy to see that one application of Corollary~\ref{corr:connect-extend-typical}\,\ref{it:extend-typical} decreases $s(\ccp)$ by $k - \ifmmode\ell\else\polishlcross\fi$. Setting $A_{\ast} = A_{\eps}\smallsetminus V(\ccq)$ and $B_{\ast} = (B_{\eps} \smallsetminus V(\ccq)) \cup L_0 \cup L_1$ we get from $s(\ccq) = -\ifmmode\ell\else\polishlcross\fi$ that $A_{\ast}$ and $B_{\ast}$ satisfy~\ref{it:1-mainlemma}.
Now, suppose that $q = 0$. Apply Lemma~\ref{lemma:pathV0} to obtain $\ifmmode\ell\else\polishlcross\fi$-paths $\ccp_1, \ldots, \ccp_{v_{\eps}}$. If $B=B_{\eps}$, apply Lemma~\ref{lem:one-or-two-edges} to obtain one or two more $\ifmmode\ell\else\polishlcross\fi$-paths contained in $B_{\eps}$. We apply Corollary~\ref{corr:connect-extend-typical}\,\ref{it:connect-typical} repeatedly to connect them to one $\ifmmode\ell\else\polishlcross\fi$-path $\ccp$.
Since $q = 0$, we have that $B_{\eps} \subset B$ and $a_{\eps} + v_{\eps} = |V\smallsetminus B_{\eps}| = a + |B\smallsetminus B_{\eps}|$. We can assume without loss of generality that $V_{\eps} \neq \varnothing$, otherwise just take $V_{\eps} = \{ v \}$ for an arbitrary $v \in V (\cch)$. If $B=B_{\eps}$ let $x$ be $2(k-\ifmmode\ell\else\polishlcross\fi)$ or $k-\ifmmode\ell\else\polishlcross\fi$ depending on whether $n$ is an odd or even multiple of $k-\ifmmode\ell\else\polishlcross\fi$; otherwise let $x=0$. With similar calculations as before and the same definition of $s(\ccp)$ we get that \[
s(\ccp) = 2(k-\ifmmode\ell\else\polishlcross\fi)a + x + 2(k-\ifmmode\ell\else\polishlcross\fi)|B \smallsetminus B_{\eps}| - n - \ifmmode\ell\else\polishlcross\fi \equiv -\ifmmode\ell\else\polishlcross\fi \mod(k - \ifmmode\ell\else\polishlcross\fi). \] Extend the $\ifmmode\ell\else\polishlcross\fi$-path $\ccp$ to an $\ifmmode\ell\else\polishlcross\fi$-path $\ccq$ by adding $\frac{s(\ccp) + \ifmmode\ell\else\polishlcross\fi}{k - \ifmmode\ell\else\polishlcross\fi}$ edges using Corollary~\ref{corr:connect-extend-typical}\,\ref{it:extend-typical}. Thus $s(\ccq) = -\ifmmode\ell\else\polishlcross\fi$, and we get~\ref{it:1-mainlemma} as in the previous case.
In both cases, we will now use the properties of the constructed $\ifmmode\ell\else\polishlcross\fi$-path $\ccq$ to show~\ref{it:2-mainlemma}-\ref{it:4-mainlemma}. We will use that $v(\ccq) \leq 20 k \vartheta b$, which follows from the construction. Since $A_{\ast} \subset A_{\eps}$, for all $v \in A_{\ast}$ we have $d(v, B^{(k-1)}) \geq (1 - \varepsilon)\binom{|B|}{k-1}$. Thus \[
d(v, B_{\ast}^{(k-1)}) \geq d(v, B^{(k-1)}) - |B_{\ast}\smallsetminus B|\binom{|B_{\ast}| - 1}{k-2} \geq (1 - 2\varepsilon)\binom{|B_{\ast}|}{k-1}, \] which shows~\ref{it:2-mainlemma}.
For~\ref{it:3-mainlemma}, Lemma~\ref{lem:typical-degree} yields for all vertices $v \in B_{\ast} \subset B_{\eps}$ that \[
d(v,A_{\eps}^{(1)} B_{\eps}^{(k-2)}) + \frac{k-1}{2} d(v, B_{\eps}^{(k-1)})
\geq
\left(1-\delta\right)|A_{\eps}|\binom{|B_{\eps}| - 1}{k-2}. \]
The second term on the left can be bounded from above by $2k\varepsilon\binom{b_{\eps}}{k-1}$. So, as $\delta, \varepsilon \ll \varrho$ and $a_{\eps} - |A_{\ast}| \ll \varrho |A_{\ast}| $ as well as $b_{\eps} - |B_{\ast}| \ll \varrho |B_{\ast}|$, we can conclude~\ref{it:3-mainlemma}.
By Lemma~\ref{lem:typical-degree}, we know that \[
d(L_{0}, A_{\eps}^{(1)}B_{\eps}^{(k - 1)}),d(L_{1}, A_{\eps}^{(1)}B_{\eps}^{(k - 1)}) \geq (1 - \delta) a_{\eps}\binom{b_{\eps} - \ifmmode\ell\else\polishlcross\fi}{k - \ifmmode\ell\else\polishlcross\fi - 1}. \]
As $\delta \ll \varrho$ and $a_{\eps} - |A_{\ast}| \ll \varrho |A_{\ast}| $ as well as $b_{\eps} - |B_{\ast}| \ll \varrho |B_{\ast}|$, we can conclude~\ref{it:4-mainlemma}.
\begin{bibdiv} \begin{biblist}
\bib{BaMoScScSc16+}{article}{ author={Bastos, {J. de O.}},
author={Mota, G. O.},
author={Schacht, M.},
author={Schnitzer, J.},
author={Schulenburg, F.},
title={Loose Hamiltonian cycles forced by large (k-2)-degree -- approximate version},
journal={SIAM J. Discrete Math.},
volume= {31},
number={4},
year={2017},
pages={ 2328--2347},
doi={10.1137/16M1065732}, }
\bib{BuHaSc13}{article}{ author={Bu{\ss}, Enno},
author={H{\`a}n, Hi\d{\^e}p},
author={Schacht, Mathias},
title={Minimum vertex degree conditions for loose Hamilton cycles in
3-uniform hypergraphs},
journal={J. Combin. Theory Ser. B},
volume={103},
date={2013},
number={6},
pages={658--678},
issn={0095-8956},
review={\MR{3127586}},
doi={10.1016/j.jctb.2013.07.004}, }
\bib{FranklFuredi}{article}{
author={Frankl, Peter},
author={F\"uredi, Zolt\'an},
title={Forbidding just one intersection},
journal={J. Combin. Theory Ser. A},
volume={39},
date={1985},
number={2},
pages={160--176},
issn={0097-3165},
review={\MR{793269}},
doi={10.1016/0097-3165(85)90035-4}, }
\bib{HaSc10}{article}{
author={H{\`a}n, Hi\d{\^e}p},
author={Schacht, Mathias},
title={Dirac-type results for loose Hamilton cycles in uniform
hypergraphs},
journal={J. Combin. Theory Ser. B},
volume={100},
date={2010},
number={3},
pages={332--346},
issn={0095-8956},
review={\MR{2595675}},
doi={10.1016/j.jctb.2009.10.002}, }
\bib{HaZh15}{article}{ author={Han, Jie},
author={Zhao, Yi},
title={Minimum codegree threshold for Hamilton $\ell$-cycles in $k$-uniform hypergraphs},
journal={J. Combin. Theory Ser. A},
volume={132},
date={2015},
pages={194--223},
issn={0097-3165},
review={\MR{3311344}},
doi={10.1016/j.jcta.2015.01.004}, }
\bib{HaZh15b}{article}{
author={Han, Jie},
author={Zhao, Yi},
title={Minimum vertex degree threshold for loose Hamilton cycles in
3-uniform hypergraphs},
journal={J. Combin. Theory Ser. B},
volume={114},
date={2015},
pages={70--96},
issn={0095-8956},
review={\MR{3354291}},
doi={10.1016/j.jctb.2015.03.007}, }
\bib{KaKi99}{article}{
author={Katona, Gyula Y.},
author={Kierstead, H. A.},
title={Hamiltonian chains in hypergraphs},
journal={J. Graph Theory},
volume={30},
date={1999},
number={3},
pages={205--212},
issn={0364-9024},
review={\MR{1671170}},
doi={10.1002/(SICI)1097-0118(199903)30:3<205::AID-JGT5>3.3.CO;2-F}, }
\bib{KeKuMyOs11}{article}{
author={Keevash, Peter},
author={K{\"u}hn, Daniela},
author={Mycroft, Richard},
author={Osthus, Deryk},
title={Loose Hamilton cycles in hypergraphs},
journal={Discrete Math.},
volume={311},
date={2011},
number={7},
pages={544--559},
issn={0012-365X},
review={\MR{2765622}},
doi={10.1016/j.disc.2010.11.013}, }
\bib{KuMyOs10}{article}{
author={K{\"u}hn, Daniela},
author={Mycroft, Richard},
author={Osthus, Deryk},
title={Hamilton $\ell$-cycles in uniform hypergraphs},
journal={J. Combin. Theory Ser. A},
volume={117},
date={2010},
number={7},
pages={910--927},
issn={0097-3165},
review={\MR{2652102}},
doi={10.1016/j.jcta.2010.02.010}, }
\bib{KuOs06}{article}{
author={K{\"u}hn, Daniela},
author={Osthus, Deryk},
title={Loose Hamilton cycles in 3-uniform hypergraphs of high minimum
degree},
journal={J. Combin. Theory Ser. B},
volume={96},
date={2006},
number={6},
pages={767--821},
issn={0095-8956},
review={\MR{2274077}},
doi={10.1016/j.jctb.2006.02.004}, }
\bib{RRsurv}{article}{
author={R{\"o}dl, Vojtech},
author={Ruci{\'n}ski, Andrzej},
title={Dirac-type questions for hypergraphs---a survey (or more problems
for Endre to solve)},
conference={
title={An irregular mind},
},
book={
series={Bolyai Soc. Math. Stud.},
volume={21},
publisher={J\'anos Bolyai Math. Soc., Budapest},
},
date={2010},
pages={561--590},
review={\MR{2815614}},
doi={10.1007/978-3-642-14444-8\_16}, }
\bib{RoRuSz06}{article}{
author={R{\"o}dl, Vojt{\v{e}}ch},
author={Ruci{\'n}ski, Andrzej},
author={Szemer{\'e}di, Endre},
title={A Dirac-type theorem for 3-uniform hypergraphs},
journal={Combin. Probab. Comput.},
volume={15},
date={2006},
number={1-2},
pages={229--251},
issn={0963-5483},
review={\MR{2195584}},
doi={10.1017/S0963548305007042}, }
\bib{RoRuSz08}{article}{
author={R{\"o}dl, Vojt{\v{e}}ch},
author={Ruci{\'n}ski, Andrzej},
author={Szemer{\'e}di, Endre},
title={An approximate Dirac-type theorem for $k$-uniform hypergraphs},
journal={Combinatorica},
volume={28},
date={2008},
number={2},
pages={229--260},
issn={0209-9683},
review={\MR{2399020}},
doi={10.1007/s00493-008-2295-z}, }
\bib{Zhao-survey}{article}{
author={Zhao, Yi},
title={Recent advances on Dirac-type problems for hypergraphs},
conference={
title={Recent trends in combinatorics},
},
book={
series={IMA Vol. Math. Appl.},
volume={159},
publisher={Springer},
},
date={2016},
pages={145--165},
review={\MR{3526407}},
doi={10.1007/978-3-319-24298-9\_6}, }
\end{biblist} \end{bibdiv}
\end{document} |
\begin{document}
\title{Characterization of Extreme Copulas}
\begin{abstract}
In this paper our aim is to characterize the set of extreme points of the set of all $n$-dimensional copulas $(n>1)$. We have shown that a copula must induce a singular measure with respect to Lebesgue measure in order to be an extreme point in the set of $n$-dimensional copulas. We also have discovered some sufficient conditions for a copula to be an extreme copula. We have presented a construction of a small subset of $n$-dimensional extreme copulas such that any $n$-dimensional copula is a limit point of that subset with respect to weak convergence. The applications of such a theory are widespread, finding use in many facets of current mathematical research, such as distribution theory, survival analysis, reliability theory and optimization. To illustrate the point further, examples of how such extremal representations can help in optimization have also been included.
\end{abstract}
\section{Introduction}
\hspace*{3mm} Copula models are popular in high dimensional statistical applications due to their ability to describe the dependence structure among random variables; see, e.g. \hyperlink{ref9}{[9]}, \hyperlink{ref10}{[10]}. In a situation where we need to study the influence of dependence structure on a statistical problem with given marginals of some random vector, it would be helpful if we do not have to study dependence structure over the class of all copulas --- only a small class of copulas would do the job. In \hyperlink{ref6}{[6]}, \hyperlink{ref7}{[7]}
a special case has been considered. For a bi-variate random vector $(X,Y)$ with continuous and strictly increasing marginal distribution functions they have found a copula under which the probability of the event $\left\{X=Y\right\}$ is maximal. However, our work is applicable in more general scenarios. Using the Krein--Milman theorem (see e.g. \hyperlink{ref2}{[2]}) we can see that study of dependence structure over only the convex hull of the extreme copulas is enough. This motivates us to study and characterize the set of extreme copulas. It is also interesting from a mathematical point of view. In this paper we aim to provide some properties such that any copula satisfying these properties will be an extreme copula. We also have shown that the probability induced by any extreme copula has to be singular with respect to Lebesgue measure. This shows how small the class of extreme copulas is. Finally, we have shown a particularly strong result that says we do not need to consider even the convex hull of the extreme points, but only a small subset of these extreme points in order to study the influence of dependence structure.
\section{Preliminaries}
\hspace*{3mm} First, we recall the definition of a copula; see, e.g., \hyperlink{ref5}{[5]}.\\
\begin{defn}
An \emph{$n$-dimensional copula} is an $n$-dimensional distribution function concentrated on $\left[0,1\right]^n$ whose univariate marginals are uniformly distributed on $\left[0,1\right]$.
\end{defn}
\hspace*{3mm} Clearly the set of all copulas is a convex set and it is closed (with respect to $ d_{\infty}$ metric), uniformly bounded by 1 and every copula is 1-Lipschitz continuous. Hence by Arzel\`{a}-Ascoli theorem (see, e.g. \hyperlink{ref3}{[3]}) the set of all copulas is compact. Again, as it is convex and compact by Krein-Milman theorem (see, e.g. \hyperlink{ref4}{[4]}) the set of all copulas is the closure (with respect to $d_{\infty}$ metric) of the convex hull of its extreme points. So we will study the set of extreme points.
\section{Notations}
\hspace*{3mm} For a Borel-measurable function $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ denote the graph of $f$ as
\[
\mathcal{G}_f=\left\{ \left(x,f(x)\right) \mid x\in \left[0,1\right] \right\}
\]
Write $f$ as $(g_1,g_2,\ldots,g_{n-1})$. We denote \[\mathcal{G}_f^{(i)}=\left\{(g_1(x),\ldots,g_{i-1}(x),x,g_i(x),\ldots,g_{n-1}(x) ) \mid x\in \left[0,1\right] \right\}\]
We denote the projection on $i$th co-ordinate by $\pi_i$ i.e. $\pi_i:\left[0,1\right]^n\mapsto\left[0,1\right]$ such that $\pi_i\left((x_1,x_2,\ldots,x_n)\right)=x_i$. Now for any probability $P$ on $\left[0,1\right]^n$ we denote $P\circ\pi_i^{-1}$, the $i$th marginal of $P$ by $P_i$. Also, we denote the Lebesgue measure by $\lambda$.\\
Define the $n$-dimensional square $S_{\utilde{a},\varepsilon}$ as $\prod_{i=1}^{n}\left[a_i,a_i+\varepsilon\right]$.
Now denote $R_{\utilde{a},\utilde{b}}$ as the $n$-dimensional rectangle formed by the two points $\utilde{a}$ and $\utilde{b}$ i.e.
${R_{\utilde{a},\utilde{b}}}=\{\utilde{x}\in \mathbb{R}^n \mid \utilde{a}\leq \utilde{x}\leq\utilde{b} \}$.\\
We denote $u_j(t)=(x_1,x_2,\ldots,x_n)$ such that $x_j=t$ and $x_k=1$ for all $k\neq j$ and $v_j(t)=(y_1,y_2,\ldots,y_n)$ such that $y_j=t$ and $y_k=0$ for all $k\neq j$.
\section{Sufficient Conditions}
\hspace*{3mm} In this section we provide certain sufficient conditions for a copula to be an extreme copula. \\
\begin{thm}
\hypertarget{theo.4.1}{}
Let $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ be a Borel measurable function and $\mu$ be a probability measure on $\left(\left[0,1\right],\mathcal{B}(\left[0,1\right])\right)$. Then there exists a unique $n$-dimensional probability supported on $\mathcal{G}_f$ whose first marginal is $\mu$.
\end{thm}
\begin{proof}
At first we will show that there exists an $n$-dimensional probability supported on $\mathcal{G}_f$ whose first marginal is $\mu$. It will be enough to get such an $n$-dimensional probability $P$ on $\{S_1\times S_2 \mid S_1 \in \mathcal{B}(\left[0,1\right]), S_2 \in \mathcal{B}(\left[0,1\right]^{n-1}) \}$ as it is a semi-field for $\mathcal{B}\left(\left[0,1\right]^{n}\right)$. Define P on this semi-field as
\[P(S_1\times S_2)=\mu(S_1\cap f^{-1}(S_2))\]
for all $S_1 \in \mathcal{B}(\left[0,1\right])$ and $S_2 \in \mathcal{B}(\left[0,1\right]^{n-1}) $.
Clearly it is a probability and by Caratheodory Extension Theorem $P$ can be defined on $\mathcal{B}\left(\left[0,1\right]^{n}\right)$. Observe that $P$ is supported on $\mathcal{G}_f$ and its first marginal is $\mu$. \\
\hspace*{3mm} Now it remains to show that $P$ is the unique probability with the required property. Let $\bar{P}$ be another $n$-dimensional probability supported on $\mathcal{G}_f$ whose first marginal is $\mu$. Then for all $S_1 \in \mathcal{B}(\left[0,1\right])$ and $S_2 \in \mathcal{B}(\left[0,1\right]^{n-1} ) $ we have\\
\begin{equation*}
\begin{split}
\bar{P}(S_1\times S_2) &= \bar{P}\left((S_1\times S_2)\cap\mathcal{G}_f \right)\\
&= \bar{P}\left( \left\{(x,f(x)) \mid x \in S_1, f(x) \in S_2 \right\} \right)\\
&= \bar{P}\left( \left\{(x,f(x)) \mid x \in S_1\cap f^{-1}(S_2) \right\} \right)\\
&= \bar{P}\left( \left\{(x,y) \mid x \in S_1\cap f^{-1}(S_2), y\in \left[0,1\right]^{n-1} \right\} \right)\\
&= \mu(S_1\cap f^{-1}(S_2))\\
&= P(S_1\times S_2)
\end{split}
\end{equation*}
As $\{S_1\times S_2 \mid S_1 \in \mathcal{B}(\left[0,1\right]), S_2 \in \mathcal{B}(\left[0,1\right]^{n-1}) \}$ is a semi-field for $\mathcal{B}\left(\left[0,1\right]^{n}\right)$, by Caratheodory extension theorem $\bar{P}=P$. Hence $P$ is the unique probability supported on $\mathcal{G}_f$ whose first marginal is $\mu$.
\end{proof}
\begin{cor}
For any Borel measurable function $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ any copula supported on $\mathcal{G}_f$ is an extreme copula.
\end{cor}
\begin{proof}
Let $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ be a Borel measurable function. Suppose there exists a copula $C$ supported on $\mathcal{G}_f$. As the first marginal of every copula is $\mathbb{U}\left[0,1\right]$, by using \hyperlink{theo.4.1}{Theorem 4.1} we can conclude that it is the unique copula supported on $\mathcal{G}_f$. If there exist any two copulas $C_1$ and $C_2$ such that $C$ can be written as a convex combination of them, then both $C_1$ and $C_2$ need to give zero measure on ${\mathcal{G}_f}^c$. Hence $C_1=C_2=C$, which implies $C$ is an extreme copula.
\end{proof}
\hspace*{3mm} For an extreme copula $C$ and for any permutation $\sigma$ on $\left\{1,2,\ldots,n\right\}$ the copula $C_\sigma$ defined as $C_\sigma(x_1,x_2,\ldots,x_n)=C(x_{\sigma(1)},x_{\sigma(2)},\ldots,x_{\sigma(n)})$ is also an extreme copula. Clearly there exists a copula supported on $\mathcal{G}_f^{(i)}$ iff there exists a copula supported on $\mathcal{G}_f^{(j)}$
for any $i,j \in \left\{1,2,\ldots,n\right\}$.\\
\begin{cor}
There are uncountably many extreme copulas on $\left[0,1\right]^n$.
\end{cor}
\begin{proof}
Fix $t\in (0,1)$. Let $L_1$ be the line joining $\utilde{0}$ and $(t,1,1,\ldots,1)$ and $L_2$ be the line joining $(t,1,1,\ldots,1)$ and $(1,0,0,\ldots,0)$. See the figure for $n=2$.\\
\begin{center}
\scalebox{.45}{ \includegraphics{cor03}}\\
\end{center}
Let $\mathbb{U}(L_i)$ denote the uniform distribution on $L_i$. Then c.d.f. of $t\mathbb{U}(L_1)+(1-t)\mathbb{U}(L_2)$ is a copula on $L_1\cup L_2$ and hence by previous corollary it is an extreme copula. Thus for every distinct $t\in(0,1)$ we get a distinct extreme copula. And hence there are uncountably many extreme copulas.
\end{proof}
\hspace*{3mm}We can see the above theorem talks about a small class of copulas. For example it can not say whether the following 2-dimensional curve can be a support of an extreme copula or not.
\begin{center}
\scalebox{.35}{\includegraphics{thm1_n}}
\end{center}
Therefore we need to have a more general theorem.\\
\begin{thm}
Let $D$ be a support of a copula such that there exist $B_1, B_2,\ldots, B_n\in \mathcal{B}(\left[0,1\right]) $ and Borel measurable functions $f_i : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ for $i\in \{1,2,\ldots,n\}$ such that $\bigcup_{i=1}^n\pi_i^{-1}(B_i) \supseteq D $ and $\pi_i^{-1}(B_i)\cap D=\pi_i^{-1}(B_i)\cap \mathcal{G}_{f_i}^{(i)}$. Then the copula will be an extreme copula. In particular it will be the unique copula supported on $D$.
\end{thm}
\begin{proof}
We only need to show that there exists at most one copula supported on $D$. Let $C$, $\bar{C}$ be two copulas supported on $D$. Let $P$, $\bar{P}$ be the corresponding induced probabilities. Observe for any $S_i\in \mathcal{B} \left( \left[0,1\right]^n \right) $, $S_i\subseteq\pi_i^{-1}(B_i)$
\begin{equation*}
\begin{split}
P(S_i)= P(S_i\cap D)=P\left(S_i\cap \mathcal{G}_{f_i}^{(i)} \right)
&= P\left(\pi_i^{-1}\left( \pi_i \left(S_i\cap \mathcal{G}_{f_i}^{(i)} \right) \right) \right)\\
&= \lambda \left( \pi_i \left(S_i\cap \mathcal{G}_{f_i}^{(i)}\right) \right)
\end{split}
\end{equation*}
Similarly
\begin{equation*}
\begin{split}
\bar{P}(S_i)= \bar{P}(S_i\cap D)=\bar{P}\left(S_i\cap \mathcal{G}_{f_i}^{(i)} \right)
&= \bar{P}\left(\pi_i^{-1}\left( \pi_i \left(S_i\cap \mathcal{G}_{f_i}^{(i)} \right) \right) \right)\\
&= \lambda \left( \pi_i \left(S_i\cap \mathcal{G}_{f_i}^{(i)} \right) \right)
\end{split}
\end{equation*}
Therefore for all $i$, ${P}(S_i)=\bar{P}(S_i)$ whenever $S_i\in \mathcal{B} \left( \left[0,1\right]^n \right) $ and $S_i\subseteq\pi_i^{-1}(B_i)$. Now for any $A\in \mathcal{B} \left( \left[0,1\right]^n \right) $ there exist Borel sets $A_0,A_1,\ldots,A_n$ disjoint such that $A=\bigcup_{i=0}^n A_i$ where $A_0\subseteq D^C$ and for $i \geq 1$, $A_i\subseteq\pi_i^{-1}(B_i)$. So we have
\[\bar{P}(A)=\sum\limits_{i=0}^n \bar{P}(A_i)=\sum\limits_{i=0}^n {P}(A_i) = P(A), \] which implies $P=\bar{P}$ and hence $C=\bar{C}$. So, we have proved that there exists at most one copula supported on $D$ and hence it is an extreme copula.
\end{proof}
\hspace*{3mm} This theorem overcomes some of the drawbacks of the previous theorem. For example it can say that if there exists a copula supported on the following 2-dimensional curve then it has to be an extreme copula.\\
\begin{center}
\includegraphics[scale=.5]{gen_thm_n}
\end{center}
Now we will give some examples of extreme copulas.\\
\begin{exmp}
Let $0=t_1<t_2<\cdots<t_m<t_{m+1}=1$.
Let $L_i$ be any interior diagonal of the $n$-dimensional rectangle $R_{v_1(t_i),u_1(t_{i+1})}$. See the figure for $n=2$.\\
\begin{center}
\scalebox{.5}{\includegraphics{ex1}}
\end{center}
Then c.d.f. of the probability measure $P=\sum_{i=1}^{m}(t_{i+1}-t_i)\mathbb{U}(L_i)$ is an extreme copula. To see this let
\[
f(x)=\sum\limits_{i=1}^m L_i(x)\cdot 1_{\left\{x\in \left[ t_i,t_{i+1} \right) \right\}}
\]
Observe
\[
\left( \bigcup\limits_{i=1}^m L_i \right)\Delta\mathcal{G}_f \subseteq \left\{ \left( t_{i+1}, L_i\left(t_{i+1}\right) \right) \mid i \in \{1,\ldots,m\} \right\}
\]
But $P\left(\left\{ \left( t_{i+1}, L_i\left(t_{i+1}\right) \right) \mid i \in \{1,\ldots,m\} \right\}\right)=0$. So $P$ is supported on $\mathcal{G}_f$, $f$ measurable.
Observe $P_1=\sum_{i=1}^{m} (t_{i+1}-t_i)\mathbb{U}\left[t_i,t_{i+1}\right]=\mathbb{U}\left[0,1\right]$ and for $j\neq 1$ we have $P_j=\sum_{i=1}^{m} (t_{i+1}-t_i)\mathbb{U}\left[0,1\right]=\mathbb{U}\left[0,1\right]$. Therefore c.d.f. of $P$ is a copula and hence is an extreme copula.\\
\end{exmp}
\begin{exmp}
Let $\{t_m\}$ be any non-decreasing sequence of real numbers in $\left[0,1\right]$ with $t_1=0$ and $t_0=\lim_{m\rightarrow\infty} t_m$. Let $L_i$ be any interior diagonal of the $n$-dimensional rectangle $R_{v_1(t_i),u_1(t_{i+1})}$ and $L_0$ be any interior diagonal of the $n$-dimensional rectangle $R_{v_1(t_0),\utilde{1}}$. Then by similar approach one can get that the c.d.f. of the probability measure $P=(1-t_0)\mathbb{U}(L_0)+\sum_{i=1}^{\infty}(t_{i+1}-t_i)\mathbb{U}(L_i)$ is an extreme copula.\\
\end{exmp}
\begin{exmp}[\textbf{Permutation copula of order} $\mathbf{m}$]
Draw grids of size $\frac{1}{m}$ in $\left[0,1\right]^n$. Let $\sigma_k$ for $k=2,3,\ldots,n$ be permutations on $\{0,1,\ldots,m-1\}$. Define $\sigma_1$ as the identity permutation.\\
Let $L_i$ be an interior diagonal of $S_{(\frac{\sigma_1(i)}{m},\frac{\sigma_2(i)}{m},\ldots,\frac{\sigma_n(i)}{m}),\frac{1}{m} }$ for all $i=0,1,\ldots,m-1$. See the figure for $n=2$.\\
\begin{center}
\scalebox{.5}{\includegraphics{ex4}}
\end{center}
Let us define a probability $P$ as
\[
P=\sum_{i=0}^{m-1} \frac{1}{m}\cdot\mathbb{U}(L_i)
\]
Observe for all $k$, the $k$th marginal of $S_{(\frac{\sigma_1(i)}{m},\frac{\sigma_2(i)}{m},\ldots,\frac{\sigma_n(i)}{m}),\frac{1}{m} }$ with respect to $P$ is\linebreak $\frac{1}{m}\cdot\mathbb{U}\left(\left[\frac{\sigma_k(i)}{m},\frac{\sigma_{k}(i)+1}{m} \right]\right)$. Therefore for all $k$
\[
P_k= \sum_{i=0}^{m-1} \frac{1}{m}\cdot\mathbb{U}\left(\left[\frac{\sigma_k(i)}{m},\frac{\sigma_{k}(i)+1}{m} \right]\right)=\mathbb{U}\left(\left[0,1\right]\right)
\]
Hence c.d.f. of $P$ is a copula. Such a copula will be called permutation copula. Let
\[
f(x)=\sum\limits_{i=0}^{m-1} L_i(x)\cdot 1_{\left\{x\in \left[ \frac{i}{m},\frac{i+1}{m} \right) \right\}}
\]
Clearly $f$ is measurable. Observe
\[
P\left[ \left( \bigcup\limits_{i=0}^{m-1} L_i \right)\Delta\mathcal{G}_f \right]\leq P\left[ \left\{ \left. \left( \frac{i+1}{m}, L_i\left(\frac{i+1}{m}\right) \right)\,\,\,\right| \,\,\, i \in \{0,1,\ldots,m-1\} \right\} \right]=0
\]
Therefore $P$ is supported on $\mathcal{G}_f$ and hence c.d.f. of $P$ is an extreme copula.\\
\end{exmp}
\begin{exmp}
Let $C$ be a copula and $P$ be the corresponding induced probability. For any $\utilde{\alpha}\in \mathbb{R}^n$ and any $A\subseteq \left[0,1\right]^n$ define
\[
A+\utilde{\alpha}=\left\{ \left( \{x_1+\alpha_1\},\{x_2+\alpha_2\},\ldots,\{x_n+\alpha_n\} \right) \mid \utilde{x}\in A \right\}
\]
where $\{.\}$ denotes the fractional part. See the figure for $n=2$.\\
\begin{center}
\includegraphics[scale=1]{ex_n4}
\end{center} \vspace*{.5cm}
Define $P_{\utilde{\alpha}}$ as $P_{\utilde{\alpha}}(B)=P(B+\utilde{\alpha})$ for all $B\in \mathcal{B}\left(\left[0,1\right]^n\right)$. Clearly $C_{\utilde{\alpha}}$, the c.d.f. of $P_{\utilde{\alpha}}$, is a copula. If $C$ is not an extreme copula, i.e., there exist copulas $C_1\neq C_2$ with $C_1,C_2\neq C$ such that $C=\frac{1}{2}\left(C_1+ C_2\right)$, then $C_{\utilde{\alpha}}=\frac{1}{2}\left((C_1)_{\utilde{\alpha}}+ (C_2)_{\utilde{\alpha}}\right)$, hence $C_{\utilde{\alpha}}$ is not an extreme copula. Again, as $C=\left(C_{\utilde{\alpha}}\right)_{-\utilde{\alpha}}$, in a similar vein we can say that if $C_{\utilde{\alpha}}$ is not an extreme copula then $C$ is not an extreme copula. Hence $C$ is an extreme copula iff $C_{\utilde{\alpha}}$ is an extreme copula.\\
\end{exmp}
\begin{exmp}
Let $C$ be a copula and $P$ be the induced probability. Fix a co-ordinate $i$ and two disjoint intervals $\left[a,a+\delta\right],\,\left[b,b+\delta\right]\subseteq\left[0,1\right]$. Define a transformation $T$ as
\[
T(\utilde{x})=\left\{
\begin{array}{ll}
\left(x_1,\ldots,x_{i-1},x_i-a+b,x_{i+1},\ldots,x_n\right)& \mbox{, if } \utilde{x}\in \pi_i^{-1}\left(\left[a,a+\delta\right] \right)\\
\left(x_1,\ldots,x_{i-1},x_i-b+a,x_{i+1},\ldots,x_n\right)& \mbox{, if } \utilde{x}\in \pi_i^{-1}\left(\left[b,b+\delta\right] \right)\\
\utilde{x}&\mbox{, otherwise}
\end{array}
\right.
\]
\vspace*{.5cm}
\begin{center}
\includegraphics[scale=1]{ex_n5}
\end{center}
Let $P'$ be the probability induced by $T$. As $T=T^{-1}$ we can say $P'=P\circ T$. Let $C'$ be the c.d.f. of $P'$. Clearly $C'$ is a copula and it can be easily shown that $C$ is an extreme copula iff $C'$ is an extreme copula.\\
\end{exmp}
\hspace*{3mm}Starting from the copula supported on any interior diagonal of $\left[0,1\right]^n$, if one applies the transformations mentioned in the above examples, one will always get a copula supported on the graph of a Borel measurable function. Now we are going to show that if a copula is supported on the graph of a Borel measurable function then the function has to be measure preserving in each co-ordinate with respect to Lebesgue measure.
Conversely for any Borel measurable function from $\left[0,1\right]$ to $\left[0,1\right]^{n-1}$, there exists a copula supported on its graph if it is measure preserving in each co-ordinate with respect to Lebesgue measure.\\
\begin{thm}
For any Borel measurable function $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$, $\exists$ a copula supported on $\mathcal{G}_f$ iff $f$ is measure preserving in each co-ordinate with respect to $\lambda$. In that case the copula will be an extreme copula.
\end{thm}
\begin{proof}
Let $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ be a Borel measurable function. Suppose there is a copula $C$ supported on $\mathcal{G}_f$. Let $P$ be the probability induced by $C$. Write $f$ as $(f_2,f_3,\ldots,f_n)$. Then for all $i\in\{2,3,\ldots,n\}$ and for all $B\in\mathcal{B}(\left[0,1\right])$
\[
\begin{split}
\lambda\left[f_i^{-1}(B)\right] &= P\left[\pi_1^{-1}\left(f_i^{-1}(B)\right)\right]\\
&= P\left[\pi_1^{-1}\left(f_i^{-1}(B)\right)\cap \mathcal{G}_f \right]\\
&= P\left[ \left\{ (x,f(x)) \mid x\in f_i^{-1}(B) \right\} \right]\\
&= P\left[ \left\{ (x,f(x)) \mid f_i(x)\in B \right\} \right]\\
&= P\left[\pi_i^{-1}(B)\cap \mathcal{G}_f \right]\\
&= P\left[ \pi_i^{-1}(B) \right]\\
&= \lambda\left[B\right]
\end{split}
\]
Hence $f$ is measure preserving in each co-ordinate with respect to $\lambda$.\\
\begin{center}
\includegraphics[scale=.4]{theo_4_3_n}
\end{center}
\hspace*{3mm}Now suppose $f : \left[0,1\right]\mapsto\left[0,1\right]^{n-1}$ is a Borel measurable function which is measure preserving in each co-ordinate with respect to $\lambda$. Define a probability $P$ as \[P(B) = \lambda\left[\pi_1(B\cap\mathcal{G}_f)\right]\] for all $B\in\mathcal{B}\left(\left[0,1\right]^n\right)$. Clearly $P$ is supported on $\mathcal{G}_f$. Let $P_i$ be its $i$th marginal. Observe
\[
\begin{split}
P_1 (B)= P\left[\pi_1^{-1}(B)\right] &=P\left[\pi_1^{-1}(B)\cap \mathcal{G}_f \right]\\
&= \lambda \left[ \pi_1\left( \pi_1^{-1}(B)\cap \mathcal{G}_f \right) \right]\\
&= \lambda \left[B\right]
\end{split}
\]
For all $i\neq 1$
\[
\begin{split}
P_i (B)= P\left[\pi_i^{-1}(B)\right] &=P\left[\pi_i^{-1}(B)\cap \mathcal{G}_f \right]\\
&= P\left[\pi_1^{-1}\left(f_i^{-1}(B)\right)\cap \mathcal{G}_f \right]\\
&= P\left[\pi_1^{-1}\left(f_i^{-1}(B)\right)\right]\\
&= P_1\left[f_i^{-1}(B)\right]\\
&= \lambda\left[f_i^{-1}(B)\right]\\
&= \lambda \left[B\right]
\end{split}
\]
Therefore c.d.f. of $P$ is a copula.\\
\hspace*{3mm}Clearly as the copula is supported on a graph of a Borel measurable function it is an extreme copula.
\end{proof}
\section{Necessary Conditions}
\hspace*{3mm}A trivial necessary condition for a set $D\subseteq \left[0,1\right]^n$ to be the support of a copula is that for any $B\in \mathcal{B} \left( \left[0,1\right]^n \right)$ satisfying $B\cap D= \pi_i^{-1}\left(\pi_i(B)\right)\cap D=\pi_j^{-1}\left(\pi_j(B)\right)\cap D $ we must have $\lambda\left(\pi_i(B)\right)=\lambda\left(\pi_j(B)\right)$.
Here using Lebesgue decomposition theorem (see, e.g. \hyperlink{ref1}{[1]}) we are going to show that probability induced by any extreme copula has to be singular with respect to Lebesgue measure. To prove this we need to prove the following lemma.\\
\begin{lem}
\hypertarget{lemma.5.1}{}
Let $F$ be a copula and $P_F$ be the induced probability. Let the Lebesgue decomposition be \[ P_F (A)=\int_A f\,d\lambda^n\, +\mu(A)\]
Where $f$ is non-negative, $A\in \mathcal{B}\left(\left[0,1\right]^n\right)$ and $\mu\perp\lambda^n$. Suppose there exists a closed square $S_{\utilde{a},\varepsilon}=\prod_{i=1}^{n}\left[a_i,a_i+\varepsilon\right]$ $ \subseteq \left[0,1\right]^n$ such that
\[\lambda^n\left[f^{-1}\left( \{0\} \right)\cap S_{\utilde{a},\varepsilon} \right] <\frac{1}{4}\cdot\lambda^n\left[ S_{\utilde{a},\varepsilon} \right] =\frac{\varepsilon^n}{4} \]
Then $F$ can not be an extreme copula.
\end{lem}
\begin{proof}
We will get a $g: \left[0,\frac{\varepsilon}{2}\right]\times\left[0,\frac{\varepsilon}{2}\right] \times \left[0,\varepsilon\right]^{n-2}\mapsto\mathbb{R}$ which is not 0 almost everywhere such that $h_1$ and $h_2$ defined below are non-negative. Define for $i=1,2$
\[
h_i(\utilde{x})=\left\{ \begin{array}{l}
f(\utilde{x}) \mbox{\hspace*{1.7cm} if } \utilde{x}\in S_{\utilde{a},\varepsilon}^C\\[.35cm]
f(\utilde{x}) +(-1)^ig(\utilde{x}-\utilde{a})\\
\mbox{\hspace*{2.5cm} if } \utilde{x}\in \left[a_1,a_1+\frac{\varepsilon}{2}\right]\times\left[a_2,a_2+\frac{\varepsilon}{2}\right]\times\prod\limits_{i=3}^n\left[a_i,a_i+\varepsilon\right]\\[.35cm]
f(\utilde{x}) +(-1)^{i+1}g(\utilde{x}-(a_1+\frac{\varepsilon}{2},a_2,a_3,\ldots,a_n)) \\
\mbox{\hspace*{2.5cm} if } \utilde{x}\in \left[a_1+\frac{\varepsilon}{2},a_1+\varepsilon\right]\times\left[a_2,a_2+\frac{\varepsilon}{2}\right]\times\prod\limits_{i=3}^n\left[a_i,a_i+\varepsilon\right]\\[.35cm]
f(\utilde{x}) +(-1)^{i+1}g(\utilde{x}-(a_1,a_2+\frac{\varepsilon}{2},a_3,\ldots,a_n))\\
\mbox{\hspace*{2.5cm} if } \utilde{x}\in \left[a_1,a_1+\frac{\varepsilon}{2}\right]\times\left[a_2+\frac{\varepsilon}{2},a_2+\varepsilon\right]\times\prod\limits_{i=3}^n\left[a_i,a_i+\varepsilon\right]\\[.35cm]
f(\utilde{x}) +(-1)^{i}g(\utilde{x}-(a_1+\frac{\varepsilon}{2},a_2+\frac{\varepsilon}{2},a_3,\ldots,a_n))\\
\mbox{\hspace*{2.5cm} if } \utilde{x}\in \left[a_1+\frac{\varepsilon}{2},a_1+\varepsilon\right]\times\left[a_2+\frac{\varepsilon}{2},a_2+\varepsilon\right]\times\prod\limits_{i=3}^n\left[a_i,a_i+\varepsilon\right]
\end{array}
\right.
\]
Then we will define the copulas $H_1$ and $H_2$ such that
\[P_{H_1}(A)=\int_A h_1\,d\lambda^n\, +\mu(A) \mbox{\hspace{2cm}} P_{H_2}(A)=\int_A h_2\,d\lambda^n\, +\mu(A)\]
where $P_{H_1}$ and $P_{H_2}$ are probabilities induced by $H_1$ and $H_2$ respectively. Clearly $H_1$ and $H_2$ are copulas. As $f=\frac{1}{2}(h_1+h_2)$, we have $P_F=\frac{1}{2}(P_{H_1}+P_{H_2})$. Therefore $F=\frac{1}{2}(H_1+H_2)$. As $g$ is not 0 almost everywhere, $F\neq {H_1}$ and $F\neq {H_2}$. Hence $F$ will not be
an extreme copula. So it is enough to get $g$ which is not 0 almost everywhere such that both $h_1$ and $h_2$ are non-negative. For this we will define functions $f_1,\,f_2,\,f_3,\,f_4$ on $\left[0,\frac{\varepsilon}{2}\right]\times\left[0,\frac{\varepsilon}{2}\right] \times \left[0,\varepsilon\right]^{n-2}$ such that
\[
\begin{split}
f_1(\utilde{x})&= f(\utilde{x}+\utilde{a})\\
f_2(\utilde{x})&= f\left(\utilde{x}+\left(a_1+\frac{\varepsilon}{2},a_2,a_3,\ldots,a_n\right)\right)\\
f_3(\utilde{x})&= f\left(\utilde{x}+\left(a_1,a_2+\frac{\varepsilon}{2},a_3,\ldots,a_n\right)\right)\\
f_4(\utilde{x})&= f\left(\utilde{x}+\left(a_1+\frac{\varepsilon}{2},a_2+\frac{\varepsilon}{2},a_3,\ldots,a_n\right)\right)
\end{split}
\]
Take $g$ to be $\min\{f_1,\,f_2,\,f_3,\,f_4\}$. This ensures that both $h_1$ and $h_2$ are non-negative almost everywhere. Now we need to show $g$ is not 0 almost everywhere. Observe $g^{-1}\left(\{0\}\right)=\bigcup_{i=1}^4f_i^{-1}\left(\{0\}\right)$.
Therefore
\[
\begin{split}
\lambda^n\left(g^{-1}\left(\{0\}\right)\right)
&= \lambda^n \left(\bigcup_{i=1}^4f_i^{-1}\left(\{0\}\right)\right)\\[.25cm]
&\leq \lambda^n\left(f^{-1}\left(\{0\}\right)\cap S_{\utilde{a},\varepsilon} \right)\\[.25cm]
&< \frac{1}{4}\cdot\lambda^n\left( S_{\utilde{a},\varepsilon} \right) \mbox{\hspace{1cm}(by assumption)}\\[.25cm]
&= \frac{\varepsilon^n}{4}\\[.25cm]
&= \lambda^n\left(\left[0,\frac{\varepsilon}{2}\right]\times\left[0,\frac{\varepsilon}{2}\right] \times \left[0,\varepsilon\right]^{n-2}\right)\\[.25cm]
\end{split}
\]
Therefore $g$ is not $0$ almost everywhere. Hence $F$ can not be an extreme copula.
\end{proof}
\hspace{.25cm}
\begin{thm}
Probability induced by any extreme copula has to be singular with respect to Lebesgue measure.
\end{thm}
\begin{proof}
Let $C$ be a copula and $P$ be the probability induced by $C$. Suppose $P$ is not singular with respect to $\lambda^n$. Then there exist a non-negative Borel measurable function $f$ which is not 0 almost everywhere and a measure $\mu\perp\lambda^n$ such that the Lebesgue decomposition of $P$ is given by:
\[
P(A)=\int_A f\,d\lambda^n\, +\mu(A)
\]
So, there exist $a,b$ with $0<a<b$ such that
\[
\alpha=\lambda^n\left(f^{-1}\left([a,b]\right)\right)>0
\]
Fix $\varepsilon\in (0,\alpha)$.
By Lusin's theorem, there exists a compact set $K$ with $\lambda^n(K)>1-\varepsilon$ such that $f$ restricted to $K$ is continuous. Therefore the set
$ f^{-1}\left([a,b]\right)\cap K $ is closed. Denote it by $K_0$. So, we have $\lambda^n\left(K_0\right) \geq\alpha-\varepsilon>0$ \\
Now draw grids of size $\frac{1}{m}$. Let $S_{\utilde{c_1},\frac{1}{m}},\, S_{\utilde{c_2},\frac{1}{m}},\,\ldots,\, S_{\utilde{c_r},\frac{1}{m}}$ be the minimum possible closed squares in the grid required to cover $K_0$.\\
Define $A_\xi=\left\{ \utilde{x} \mid d( \utilde{x},A )\leq \xi \right\}$ where $d$ is the Euclidean distance. Then it can be easily shown that ${K_0}_{\frac{\sqrt{n}}{m}}\supseteq \bigcup_{p=1}^r S_{\utilde{c_p},\frac{1}{m}}$. Observe as $m\rightarrow \infty$, ${K_0}_{\frac{\sqrt{n}}{m}}\downarrow K_0$ as $K_0$ is closed. Therefore
\[\lim\limits_{m\rightarrow\infty}\lambda^n\left({K_0}_{\frac{\sqrt{n}}{m}}\cap f^{-1}\left\{0\right\}\right)=\lambda^n\left( K_0\cap f^{-1}\left\{0\right\}\right)\Rightarrow \lim\limits_{m\rightarrow\infty}\lambda^n\left(\bigcup_{p=1}^r S_{\utilde{c_p},\frac{1}{m}}\cap f^{-1}\left\{0\right\}\right)=0
\]
So we can choose $m$ large enough such that
\[
\begin{split}
\lambda^n\left[f^{-1}\left( \{0\} \right)\cap \left( \bigcup_{p=1}^r S_{\utilde{c_p},\frac{1}{m}} \right) \right]
&< \frac{\alpha-\varepsilon}{4}\\[.25cm]
&\leq \frac{1}{4}\cdot \lambda^n \left(K_0\right)\\[.25cm]
&\leq \frac{1}{4}\cdot \lambda^n\left( \bigcup_{p=1}^r S_{\utilde{c_p},\frac{1}{m}} \right)
\end{split}
\]
So, $\exists\, q\in\left\{1,2,\ldots,r\right\}$ such that
\[
\lambda^n\left[f^{-1}\left( \{0\} \right)\cap S_{\utilde{c_q},\frac{1}{m}} \right] <\frac{1}{4}\cdot\lambda^n\left[ S_{\utilde{c_q},\frac{1}{m}} \right]
\]
And hence by \hyperlink{lemma.5.1}{Lemma 5.1}, $C$ can not be an extreme copula.
\end{proof}
\hspace*{3mm} In the following 3-dimensional figure denote $L_1$ to be the line joining $ (0,0,\frac{1}{2}) $ and $ (\frac{1}{2},\frac{1}{2},1) $, $L_2$ to be the line joining $ (\frac{1}{2},\frac{1}{2},\frac{1}{2}) $ and $ (1,1,1) $, $L_3$ to be the line joining $ (0,\frac{1}{2},0) $ and $ (\frac{1}{2},1,\frac{1}{2}) $ and $L_4$ to be the line joining $ (\frac{1}{2},0,0) $ and $ (1,\frac{1}{2},\frac{1}{2}) $. Define a probability $P$ as $P=\sum_{i=1}^{4}\mathbb{U}(L_i)$. It can be easily shown that c.d.f. of $P$ is an extreme copula. Clearly it satisfies the necessary condition but there does not exist any $i\in\{1,2,3\}$, $B\in \mathcal{B}(\left[0,1\right]) $ and Borel measurable functions $f : \left[0,1\right]\mapsto\left[0,1\right]^2$ such that $\pi_i^{-1}(B)\cap(\cup_{i=1}^4 L_i)=\pi_i^{-1}(B)\cap \mathcal{G}_f^{(i)}$. Hence it does not satisfy the sufficient condition.
\begin{center}
\includegraphics[scale=.8]{nes_suff}
\end{center}
\hspace*{3mm}As it has been shown above, the necessary condition and sufficient condition that we have postulated do not necessarily hold together and hence cannot be melded to give a complete characterization of extreme copulas. The initiative to find such a result that combines both might be undertaken as a future research problem.
\section{A construction of dense set using extreme copulas}
\hspace*{3mm}We are going to show that any copula can be uniformly approximated by extreme copulas. In fact, any copula can be uniformly approximated by permutation copulas. For this we will need the following definitions.\\
\begin{defn}
An $n$-dimensional matrix of order $m_1\times m_2\times\cdots\times m_n$ is a real-valued function on $\{0,1,\ldots,m_1-1\}\times\{0,1,\ldots,m_2-1\}\times\cdots\times\{0,1,\ldots,m_n-1\}$.
\end{defn}
\begin{defn}
An $n$-dimensional $m\times m\times\cdots\times m$ matrix $M$ is called stochastic in the $k$th co-ordinate if for all $i_k\in\{0,1,\ldots,m-1\}$ \[\mathop{\sum\limits_{(r_1,r_2,\ldots,r_n)}}_{r_k=i_k} M(r_1,r_2,\ldots,r_n) =\frac{1}{m} \]
\end{defn}
\hspace*{3mm}Let us denote by $\mathcal{M}_m^n$ the set of all $n$-dimensional $m\times m\times\cdots\times m$ matrices with non-negative entries which are stochastic in every co-ordinate.\\
\begin{thm}
Any copula can be uniformly approximated by permutation copulas.
\end{thm}
\begin{proof}
Let $C$ be any copula and $P$ be the probability induced by $C$. Draw grids of size $\frac{1}{m}$ in $\left[0,1\right]^n$. Define a $n$-dimensional $m\times m\times\cdots\times m$ matrix $M$ as
\[ M(i_1,i_2,\ldots,i_n) = P\left(S_{\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right),\frac{1}{m}}\right) \]
Clearly $M\in\mathcal{M}_m^n $. Define a metric $\rho$ on $\mathcal{M}_m^n$ as for all $M_1,M_2\in\mathcal{M}_m^n$
\[\rho\left(M_1,M_2 \right)= \sup_{\left(i_1,i_2,\ldots,i_n\right)}
\abs{ M_1\left(i_1,i_2,\ldots,i_n\right)- M_2\left(i_1,i_2,\ldots,i_n\right)} \]
\hspace*{3mm}Now get $N\in\mathcal{M}_m^n $ with all rational entries such that $\rho\left(N,M\right)<\varepsilon=m^{-(n+1)}$. We are going to construct a permutation copula $\bar{C}_m$ with induced probability $\bar{P}_m$ such that \\
\[ \bar{P}_m\left(S_{\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right),\frac{1}{m}}\right) = N(i_1,i_2,\ldots,i_n) \]\\
If we construct it then we will have \\
\[
\begin{split}
&\abs{C\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right) - \bar{C}_m\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right) }\\[.25cm]
&\leq \sum\limits_{(r_1,r_2,\ldots,r_n)} \abs{ P\left(S_{\left(\frac{r_1}{m},\frac{r_2}{m},\ldots,\frac{r_n}{m}\right),\frac{1}{m}}\right) - \bar{P}_m\left(S_{\left(\frac{r_1}{m},\frac{r_2}{m},\ldots,\frac{r_n}{m}\right),\frac{1}{m}}\right) }\\[.25cm]
&= \sum\limits_{(r_1,r_2,\ldots,r_n)} \abs{ M(r_1,r_2,\ldots,r_n) - N (r_1,r_2,\ldots,r_n) }\\[.25cm]
&< m^n\varepsilon\\[.25cm]
&= \frac{1}{m}
\end{split}
\]\\
and therefore for all $\utilde{t}\in \left[0,1\right]^n$\\
\[
\begin{split}
& \abs{C(\utilde{t})-\bar{C}_m(\utilde{t})}\\[.25cm]
&\leq \,\abs{ C(\utilde{t}) - C\left(\frac{\lfloor mt_1 \rfloor}{m},\ldots,\frac{\lfloor mt_n \rfloor}{m} \right) }+
\abs{ \bar{C}_m(\utilde{t}) - \bar{C}_m\left(\frac{\lfloor mt_1 \rfloor}{m},\ldots,\frac{\lfloor mt_n \rfloor}{m} \right) }\\[.25cm]
& \mbox{\hspace{2.6cm}}+ \abs{ {C}\left(\frac{\lfloor mt_1 \rfloor}{m},\ldots,\frac{\lfloor mt_n \rfloor}{m} \right) - \bar{C}_m\left(\frac{\lfloor mt_1 \rfloor}{m},\ldots,\frac{\lfloor mt_n \rfloor}{m} \right) }\\[.25cm]
&\leq \frac{n}{m}+\frac{n}{m}+\frac{1}{m}\\[.25cm]
&= \frac{2n+1}{m}
\end{split}
\]\\
This will imply $\bar{C}_m$ converges to $C$ uniformly as $m\rightarrow\infty$. Hence $C$ can be uniformly approximated by permutation copulas. So, the only thing remains is to construct such a permutation copula $\bar{C}_m$.\\
\hspace*{3mm}To do this, for all $(i_1,i_2,\ldots,i_n)$ we want to have an $n$-dimensional cube which is a subset of $S_{\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right),\frac{1}{m}}$ and the length of whose edges equals $N(i_1,i_2,\ldots,i_n)$, such that the co-ordinate projections of those cubes are disjoint. Then if we have a permutation copula which gives probability $N(i_1,i_2,\ldots,i_n)$ to the cube that is a subset of $S_{\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right),\frac{1}{m}}$, we are done. See the figure for $n=2$.\\
\begin{center}
\scalebox{.5}{\includegraphics{dense}}
\end{center}
\hspace{.25cm}
Observe for all $k$
\[ \lambda\left(\left[ \frac{j}{m},\frac{j+1}{m} \right) \right) = \frac{1}{m}= \mathop{\sum\limits_{(r_1,r_2,\ldots,r_n)}}_{r_k=j} N(r_1,r_2,\ldots,r_n) \]
So we can divide $\left[ \frac{j}{m},\frac{j+1}{m} \right)$ into $m^{n-1}$ disjoint intervals as
\[\left[ \frac{j}{m},\frac{j+1}{m} \right) = \mathop{\bigcup\limits_{(r_1,r_2,\ldots,r_n)}}_{r_k=j} I^{(k)}_{(r_1,r_2,\ldots,r_n)} \]
such that \[\lambda\left(I^{(k)}_{(r_1,r_2,\ldots,r_n)} \right) =N(r_1,r_2,\ldots,r_n) \] Observe \[S_{\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right),\frac{1}{m}}\supseteq \prod_{k=1}^n I^{(k)}_{(i_1,i_2,\ldots,i_n)} \]
Let $L_{(i_1,i_2,\ldots,i_n)}$ be any one of the interior diagonals of $\overline{\left(\prod_{k=1}^{n}I^{(k)}_{(i_1,i_2,\ldots,i_n)}\right)} $. Define $\bar{P}_m$ as
\[ \bar{P}_m=\sum_{(i_1,i_2,\ldots,i_n)} N(i_1,i_2,\ldots,i_n)\cdot \mathbb{U}\left( L_{(i_1,i_2,\ldots,i_n)} \right) \]
Clearly
\[ \bar{P}_m\left(S_{\left(\frac{i_1}{m},\frac{i_2}{m},\ldots,\frac{i_n}{m}\right),\frac{1}{m}}\right) = N(i_1,i_2,\ldots,i_n) \]
and $\bar{C}_m$ is a copula. As the $N(i_1,i_2,\ldots,i_n)$ are rational, $\exists q\in \mathbb{N}$ such that for all $k$ and for all $(i_1,i_2,\ldots,i_n) \in \{0,1,\ldots,m-1\}^n$, $\lambda\left(I^{(k)}_{(i_1,i_2,\ldots,i_n)} \right)$ is an integer multiple of $\frac{1}{q}$. Hence $\bar{C}_m$ is a permutation copula of order $q$.
\end{proof}
\hspace*{3mm}So we obtain that permutation copulas are dense in the space of copulas. This is an example of a convex set containing more than one point whose extreme points are dense in that set. As a corollary we obtain that the set of extreme copulas is not closed under the $L_{\infty}$ norm.
\section{Application}
\hspace*{3mm} In a situation where we need to study the influence of the dependence structure on a statistical problem with given marginals of some random vector, we consider an optimization problem over the Fr\'{e}chet class $\mathcal{F}(F_1, F_2,\ldots,F_n)$ of all joint distributions with the marginals $F_1, F_2,\ldots,F_n$ (see, e.g. \hyperlink{ref8}{[8]}). For a given bounded continuous function $g:\mathbb{R}^n\mapsto\mathbb{R}$, an optimization problem over the Fr\'{e}chet class $\mathcal{F}(F_1, F_2,\ldots,F_n)$ looks like
\[ m(g):= \sup\left\{\int g\,dF : F\in \mathcal{F}(F_1, F_2,\ldots,F_n) \right\} \]
Clearly we have
\[
\begin{split}
m(g)&= \sup\left\{\mathbb{E}\left( g(\utilde{X}) \right) : \utilde{X}\sim F\in \mathcal{F}(F_1, F_2,\ldots,F_n) \right\}\\
&= \sup\left\{\mathbb{E}\left( g\circ (F_1, F_2,\ldots,F_n)^{-1}(\utilde{U}) \right) : \utilde{U}\sim C \mbox{, where $C$ is a copula} \right\}\\
&= \lim\limits_{k\rightarrow\infty} \max\left\{\mathbb{E}\left( g\circ (F_1, F_2,\ldots,F_n)^{-1}(\utilde{U}) \right) : \utilde{U}\sim C_k \mbox{, a permutation copula of order $k$} \right\}
\end{split}
\]
where $(F_1, F_2,\ldots,F_n)^{-1}(\utilde{U}) = (F_1^{-1}(U_1), F_2^{-1}(U_2), \ldots,F_n^{-1}(U_n))$.\\
\hspace*{3mm} In special cases in which we need to maximize probability of the event $\left\{X=Y\right\}$ (i.e., $g=1_{\left\{x=y\right\}}$) where $X,\,Y$ are random variables with distribution functions $F_X,\, F_Y$, we can construct a function $g_{\varepsilon}$ as
\[ g_{\varepsilon}(x,y)=\left\{ \begin{array}{ll}
1& \mbox{, if $x=y$}\\
1-\frac{|x-y|}{\varepsilon}&\mbox{, if $|x-y|<\varepsilon$}\\
0& \mbox{, otherwise}
\end{array} \right. \]
If $P_F$ is the probability induced by the joint distribution function $F$ we have
\[
\begin{split}
m(g)&=\sup\left\{P_F(X=Y) : F\in \mathcal{F}(F_X, F_Y) \right\}\\
&= \lim\limits_{\varepsilon\rightarrow 0}\lim\limits_{k\rightarrow\infty} \max\left\{\mathbb{E}\left( g_{\varepsilon}\circ (F_X, F_Y)^{-1}(\utilde{U}) \right) : \utilde{U}\sim C_k \mbox{, a permutation copula of order $k$} \right\}
\end{split}
\]
\hspace*{3mm} We can use these results to simulate on a computer the approximate value of $m(g)$ to solve the optimization problem.
\begin{description}
\item[[\hypertarget{ref1}{1}]] \textsc{R. B. Ash}: Probability and Measure Theory, 2nd edn. Harcourt Academic Press, 2000.
\item[[\hypertarget{ref2}{2}]] \textsc{Dunford, Schwartz}: Linear Operators, Part I: General Theory, Wiley, 1958.
\item[[\hypertarget{ref3}{3}]] \textsc{George F. Simmons}: Introduction to Topology and Modern Analysis, Robert E Krieger Publishing Company, 1983.
\item[[\hypertarget{ref4}{4}]] \textsc{Barry Simon}: Convexity: an Analytic Viewpoint (Cambridge Tracts in Mathematics 187), Cambridge University Press, 2011.
\item[[\hypertarget{ref5}{5}]] \textsc{Fabrizio Durante, Carlo Sempi}: Principles of Copula Theory. CRC Press, 2015.
\item[[\hypertarget{ref6}{6}]] \textsc{Fabrizio Durante, J. Fern\'{a}ndez S\'{a}nchez, W. Trutschnig}: On the Singular Components of a Copula, J. Appl. Prob. 52, 1175-1182, 2015.
\item[[\hypertarget{ref7}{7}]] \textsc{J.-F. Mai, M. Scherer}: Simulating from the copula that generates the maximal probability for a
joint default under given (inhomogeneous) marginals. In Topics in Statistical Simulation, eds V. B. Melas et al.,
Springer, New York, pp. 333-341, 2014.
\item[[\hypertarget{ref8}{8}]] \textsc{Giovanni Puccetti, Ruodu Wang}: Extremal Dependence Concepts. Statistical Science, vol.30, No.4, 485-517, 2015.
\item[[\hypertarget{ref9}{9}]] \textsc{P. Jaworski, F. Durante, W. K. H\"{a}rdle}: Copulae in Mathematical and Quantitative Finance (Lecture Notes in Statistics 213), Springer, 2013.
\item[[\hypertarget{ref10}{10}]] \textsc{P. Jaworski, F. Durante, W. K. H\"{a}rdle, T. Rychlik}: Copula Theory and Its Applications (Lecture Notes in Statistics 198), Springer, 2010.
\end{description}
\end{document} |
\begin{document}
\title{Dynamic Schnyder Woods}
\author{Sujoy Bhore\inst{1}\orcidID{0000-0003-0104-1659} \and Prosenjit Bose \inst{2}\orcidID{0000-0002-8906-0573} \and Pilar Cano\inst{3}\orcidID{0000-0002-4318-5282} \and Jean Cardinal\inst{3}\orcidID{0000-0002-2312-0967} \and John Iacono\inst{3,4}\orcidID{0000-0001-8885-8172}}
\authorrunning{S. Bhore, P. Bose, P. Cano, J. Cardinal, and J. Iacono.}
\institute{Indian Institute of Science Education and Research Bhopal, India. \email{[email protected]}\and Carleton University, Canada. \email{[email protected]} \and Université libre de Bruxelles, Belgium. \email{\{pilar.cano, jean.cardinal\}@ulb.be}\\ \and New York University, USA. \email{[email protected]}}
\maketitle
\begin{abstract} A \emph{realizer}, commonly known as Schnyder woods, of a triangulation is a partition of its interior edges into three oriented rooted trees. A \emph{flip} in a realizer is a local operation that transforms one realizer into another. Two types of flips in a realizer have been introduced: \emph{colored flips} and \emph{cycle flips}. A corresponding \emph{flip graph} is defined for each of these two types of flips. The vertex sets are the realizers, and two realizers are adjacent if they can be transformed into each other by one flip. In this paper we study the relation between these two types of flips and their corresponding flip graphs.
We show that a cycle flip can be obtained from linearly many colored flips. We also prove an upper bound of $O(n^2)$ on the diameter of the flip graph of realizers defined by colored flips. In addition, a data structure is given to dynamically maintain a realizer over a sequence of colored flips which supports queries, including getting a node's barycentric coordinates, in $O(\log n)$ time per flip or query.
\keywords{Schnyder woods \and Realizers \and Flips \and Dynamic maintenance.}
\end{abstract}
\section{Introduction} Schnyder in his seminal work proved that every planar graph with $n\ge 3$ vertices has a plane straight-line drawing in an $(n-2)\times (n-2)$ grid~\cite{schnyder1989planar, schnyder1990embedding}. This result was achieved in two parts: First, it was shown that every maximal planar graph admits a decomposition of its interior edges into three trees, called \emph{realizer}; Then, by using the realizer, a straight line embedding can be achieved. The Schnyder tree partitions, commonly referred to as \emph{Schnyder woods}, are an important concept in the area of graph drawing and order dimension theory; see~\cite{bonichon2007convex, felsner2005posets, felsner2004lattice, felsner2011order, felsner2014order}. Schnyder woods have been widely used to obtain combinatorial and algorithmic results for a wide range of problems from various domains, e.g., geometric spanners~\cite{bonichon2010plane}, optimal encoding and uniform sampling of planar maps~\cite{poulalhon2006optimal}, compact data structures~\cite{aleardi2018array}, grid drawing~\cite{bonichon2007convex, gonccalves2014toroidal, schnyder1990embedding}, etc. Moreover, the connection between Schnyder woods and orthogonal surfaces have been explored over the years; see~\cite{bonichon2010connections, gonccalves2014toroidal, felsner2008schnyder, felsner2008orthogonal}. Recently, Castelli Aleardi~\cite{aleardi2019balanced} considered \emph{balanced Schnyder woods}, in which the number of incoming edges of each color at each vertex is balanced, and provided linear time heuristics. Realizers are useful in designing algorithms for problems in graph drawing, graph encoding and others; see~\cite{chuang1998compact, kant1996drawing, de1990draw}. Brehm~\cite{brehm20003} and Mendez~\cite{ossona1994orientations} investigated the suitable operations that transform a realizer of a graph to another realizer of the same graph (see also~\cite{wagner1936bemerkungen, bonichon2002wagner}).
A \emph{flip} in a realizer is a local operation that transforms one realizer into another. Two types of flips in a realizer have been introduced: \emph{colored flip} and \emph{cycle flip} (see Section~\ref{sec:flips}).
A corresponding \emph{flip graph} is defined for each of these two types of flips, the vertex sets of which are the realizers, and two realizers are adjacent if they can be transformed into each other by one flip.
\paragraph{\textbf{Our Contribution.}}
We describe Schnyder woods and related constructions in Section~\ref{sec:sw}. In Section~\ref{sec:flips} we show that if an edge $e$ admits a diagonal flip in a triangulation $\mathtt{T}$, then there exists a realizer $R$ of $\mathtt{T}$ where the oriented edge $e$ admits a colored flip in $R$ (Section~\ref{subsec:flippable}). Later, we show that a cycle flip can be obtained from linearly many colored flips (Section~\ref{subsec:cycleflip}). Using these two results, we prove an upper bound of $O(n^2)$ on the diameter of the flip graph of realizers defined by colored flips (Section~\ref{subsec:upperbound}). Finally, in Section~\ref{sec:DynamicSec} we present a data structure to dynamically maintain a realizer under colored flips while supporting queries to a corresponding straight line embedding over a sequence of colored flips in $O(\log n)$ time per update or query.
\section{Schnyder Woods}\label{sec:sw}
In this section we define a realizer and two other structures that are a bijection with realizers.
\begin{figure}
\caption{(a) Example of the counter-clockwise order of the edges entering and leaving the vertex (\emph{left}) and a realizer (\emph{right}). (b) A 3-orientation. (c) Each colored region with its respectively colored path represents a region $R_i(u)$.}
\label{fig:contact-representation}
\end{figure}
A \emph{triangulation} is a maximal planar graph (all faces are triangles), with a fixed outer face. \begin{definition}\label{def:realizer} A \emph{realizer}, of a triangulation $\mathtt{T}$ is a partition of its interior edges into three sets $T_0, T_1$ and $T_2$ of directed edges such that for each interior vertex $u$ the following holds: \begin{compactenum} \item Vertex $u$ has out-degree exactly one in each of $T_0, T_1$ and $T_2$ in counter-clockwise order. \item All incoming edges of $T_i$ adjacent to $u$ occur between the outgoing edge of $T_j$ and $T_k$ for distinct $i,j,k \mod 3$. See Fig.~\ref{fig:contact-representation}(a). \end{compactenum} \end{definition}
Each tree $T_i$ of a realizer has as root $r_i$, one of the vertices in its outer face, and each vertex in the outer face is a sink in the directed graph defined by the given realizer. Note that a realizer can be represented as a $3$-coloring of its interior edges. See Fig.~\ref{fig:contact-representation}(a). Schnyder defined realizers of triangulations in~\cite{schnyder1989planar,schnyder1990embedding} and proved that any triangulation with $n\geq 3$ vertices has a realizer.
\begin{definition} A \emph{3-orientation} of a triangulation $\mathtt{T}=(V\cup\{r_0,r_1,r_2\}, E)$ is an orientation of the edges of $\mathtt{T}$ such that each vertex has out-degree 3 except three special vertices $r_0,r_1,r_2$ that are sinks and define the outer face of $\mathtt{T}$. See Fig.~\ref{fig:contact-representation}(b). \end{definition}
In~\cite{de2001topological} de Fraysseix and de Mendez showed that any triangulation $\mathtt{T}$ admits a $3$-orientation of its interior edges and that the realizers of a triangulation $\mathtt{T}$ form a bijection with $3$-orientations of $\mathtt{T}$.
\begin{definition} A \emph{barycentric representation}\footnote{Note that this is called \emph{weak} barycentric representation in~\cite{schnyder1990embedding}.} of a triangulation $\mathtt{T}$ is an injective function $u \in V(\mathtt{T})\mapsto (u_0,u_1,u_2) \in \mathbb{R}^3$ that satisfies the conditions: \begin{compactenum} \item $u_0+u_1+u_2=1$ for all vertices $u \in V(\mathtt{T})$. \item For each edge $uv$ and each vertex $w \notin \{u,v\}$, there is some $i \mod 3$ such that $(u_i, u_{i+1}) \prec (w_i, w_{i+1})$ and $(v_i, v_{i+1}) \prec (w_i, w_{i+1})$, where $\prec$ represents the lexicographic order. \end{compactenum} \end{definition}
For each interior vertex $u$ of $\mathtt{T}$ we denote by $P_i(u)$ the path in $T_i$ from $u$ to its root $r_i$ with $i \mod 3$. For each interior vertex $u$ its paths $P_0(u), P_1(u)$ and $P_2(u)$ divide the triangulation into three disjoint regions $R_0(u), R_1(u)$ and $R_2(u)$ where $R_i(u)$ denotes the region defined by the vertices in path $P_{i+1}(u)\setminus\{u\}$ and the interior vertices enclosed by paths $P_{i-1}(u)$ and $P_{i+1}(u)$. See Fig.~\ref{fig:contact-representation}(c). The following lemma about these regions was shown in~\cite{schnyder1990embedding}.
\begin{lemma}[Schnyder~\cite{schnyder1990embedding}]\label{lemma:schnyder-regions} For every different pair of interior vertices $u$ and $v$ of a triangulation it holds that if $v \in R_i(u)\cup P_{i-1}(u)$, then $R_i(v) \subset R_i(u)$. \end{lemma}
Let $|R_i(u)|$ and $|P_i(u)|$ denote the number of vertices in $R_i(u)$ and $P_i(u)$, respectively. Let $f:V(\mathtt{T})\mapsto \mathbb{R}^3$ be the function defined as follows. For each interior vertex $u$ in $\mathtt{T}$, $f(u)=\frac{1}{n-1}(|R_0(u)|, |R_1(u)|,$ $|R_2(u)|)$, and for each root $r_i \in T_i$, $f(r_i)$ has its $i$th coordinate equal to $n-1$, its $(i+1)$th coordinate equal to $1$ and its $(i+2)$th coordinate equal to $0$. Schnyder~\cite{schnyder1990embedding} showed that $f$ defines barycentric coordinates of the vertices of $\mathtt{T}$. Thus, every triangulation admits a barycentric representation that is in correspondence with a realizer.
\section{Flips}\label{sec:flips}
\begin{figure}
\caption{Illustration of a diagonal flip, colored flip, cycle flip and face flip.}
\label{fig:flips}
\end{figure}
In this section we study the relationship between a \emph{diagonal flip} in triangulations with $n\geq4$ vertices, a \emph{colored flip} in a realizer and a \emph{cycle flip} of a realizer of a triangulation. See Fig.~\ref{fig:flips}.
A \emph{diagonal flip} in a triangulation $\mathtt{T}$ is the operation that exchanges the diagonal $u_1u_3$ of a quadrilateral $u_1u_2u_3u_4$ in $\mathtt{T}$ by the diagonal $u_2u_4$. See Fig.~\ref{fig:flips}(a). The \emph{flip graph of triangulations} ${\mathcal{T}}_n$ of $n$ vertices is defined as the graph with vertex set defined by all distinct triangulations on $n$ vertices and two vertices of $\mathcal{T}_n$ are adjacent if their corresponding triangulations can be transformed into each other by exactly one diagonal flip. We say that the diagonal $u_1u_3$ of a quadrilateral $u_1u_2u_3u_4$ in $\mathtt{T}$ is \emph{flippable} if the edge $u_2u_4$ is not in $\mathtt{T}$. See Fig.~\ref{fig:flips}(b).
Wagner~\cite{wagner1936bemerkungen} showed that the flip graph ${\mathcal{T}}_n$ is connected: Any triangulation of $n$ vertices can be transformed into another by a finite sequence of diagonal flips. Given that a realizer is an orientation of the edges of a triangulation, it is natural to ask whether these flips can be extended to realizers. In other words, whether there exist local functions that transform one realizer into another. Bonichon et al.~\cite{bonichon2002wagner} defined flips in realizers that map to diagonal flips in the underlying triangulation.
We refer to edge $\Vec{uv}$ of a realizer of a triangulation $\mathtt{T}$ as the oriented edge $uv$ of $\mathtt{T}$.
\begin{definition}\label{def:bonichon-flip} A \emph{colored flip} in a realizer of edge $\Vec{u_1u_3}$ with respect to $\Vec{u_2u_1}$ of the quadrilateral $u_1u_2u_3u_4$ is the operation that exchanges the edges $\Vec{u_1u_3}$ and $\Vec{u_2u_1}$ in tree $T_i$ and $T_j$, by the edges $\Vec{u_1u_2}$ and $\Vec{u_2u_4}$, respectively. There are two types of colored flips denoted $f_1^i$ or $f_2^i$ given in Fig.~\ref{fig:flips}(c).
An edge is \emph{colored flippable} if applying a colored flip to it yields again a realizer. \end{definition}
Note that there might be edges that are flippable in $\mathtt{T}$ that are not colored flippable as shown in Fig.~\ref{fig:flips}(d). However, Bonichon et al.~\cite{bonichon2002wagner} showed that the colored flip graph, denoted $\mathcal{R}_n$, is connected. Their proof relies on the fact that the flip graph restricted to colored flips of the type $f_1^i$ defines a bounded poset as does the one restricted to their inverse $f_2^i$.
For simplicity, we refer to a directed cycle in a directed graph simply as a \emph{cycle}. Brehm~\cite{brehm20003} defines cycle flips between $3$-orientations of a given triangulation, with a corresponding definition for realizers.
\begin{definition}\label{def:cycle-flip} A \emph{cycle flip} in a realizer $R$ is the operation that reverses the orientation of a cycle $\mathcal{C}$ such that if $\mathcal{C}$ is counter-clockwise oriented (resp. clockwise oriented), then: \begin{compactenum} \item the color of each edge in $\mathcal{C}$ is exchanged by the color succeeding (resp. preceding) its original color, \item for each edge inside $\mathcal{C}$ the new color is set to be the color preceding (resp. succeeding) its original color, and \item the color of all other edges is unchanged. See Fig.~\ref{fig:flips}(e). \end{compactenum} A \emph{face flip} of a realizer $R$ is a cycle flip of a cycle of length $3$ defined by the edges of a face. See Fig.~\ref{fig:flips}(f). \end{definition}
Brehm~\cite{brehm20003} showed that given a 4-connected triangulation $\mathtt{T}$, the flip graph of face flips $\mathcal{R}(\mathtt{T})$ of the realizers of $\mathtt{T}$ is connected. The proof is obtained by showing that the structure of flipping counter-clockwise faces into clockwise defines a poset, similar to the proof of colored flips.
In this section, we provide a new proof of the connectivity of $\mathcal{R}_n$ using the relation between colored flips and cycle flips. In addition, we prove an upper bound on the diameter of $\mathcal{R}_n$. In order to show that $\mathcal{R}_n$ is connected we divide this section as follows. In Subsection~\ref{subsec:flippable} we show that for any flippable edge $uv$ in a triangulation $\mathtt{T}$ there exists a realizer $R$ of $\mathtt{T}$ that admits a colored flip in edge $uv$. In Subsection~\ref{subsec:cycleflip} we show that any cycle flip in a realizer $R$ can be obtained by a sequence of a linear number of colored flips. In Subsection~\ref{subsec:upperbound}, we conclude, using the results from the two previous subsections, that two realizers $R$ and $R'$ can be transformed into each other by $O(n^2)$ colored flips.
\subsection{Diagonal flips and colored flips}\label{subsec:flippable}
In this subsection we show that there exists a realizer $R$ of a triangulation $\mathtt{T}$ for each flippable edge $e$ in $\mathtt{T}$ in which the oriented edge $e$ in $R$ is colored flippable.
\begin{lemma}\label{lemma:find-cycle} Let $\mathtt{T}$ be a triangulation and $R$ be a realizer of $\mathtt{T}$. Let $uv$ be a flippable edge in $\mathtt{T}$ where $uv$ is the diagonal of the quadrilateral $uwvz$. If $\Vec{uv}$ is not colored flippable in $R$, then there exists a cycle $\mathcal{C}$ in $R$ that passes through either $\Vec{uw}$ or $\Vec{uz}$ in $R$ but avoids $\Vec{uv}$, that can be found in $O(n)$ time. \end{lemma}
\begin{proof} Note that if either the edge $\Vec{wu}$ or $\Vec{zu}$ is in $R$, then $\Vec{uv}$ is colored flippable in $R$ and the result holds. Thus, both $\Vec{uw}$ and $\Vec{uz}$ are in $R$. See Fig.~\ref{fig:paths}(a). Let $i \mod 3$ be the label of $\Vec{uv}$ in $R$, hence the labels of $\Vec{uw}$ and $\Vec{uz}$ are $i+1$ and $i-1$ modulo $3$, respectively.
\begin{figure}
\caption{An illustration of Theorem~\ref{thm:flippable}: (a) Vertex $z$ is in $P_{i+1}(w)$. (b) Vertex $z$ is not in $P_{i+1}(w)$. (c) The heavier cycle $\mathcal{C}$ when $(P_{i+1}(w)\setminus\{w\})\cap C \neq \emptyset$. (d) The heavier cycle $ \mathcal{C} $ when $(P_{i+1}(w)\setminus\{w\})\cap C = \emptyset$.}
\label{fig:paths}
\end{figure}
Since $uv$ is flippable, it follows that at least one of $w$ or $z$ is an interior vertex of $\mathtt{T}$. Assume without loss of generality that this vertex is $w$. Consider the paths $P_{i+1}(w)$ and $P_{i+1}(u)$. Note that both paths are of length at least 1, since both $u$ and $w$ are interior vertices of $\mathtt{T}$. Since $w \in P_{i-1}(u)$, by Lemma~\ref{lemma:schnyder-regions} the path $P_{i+1}(w)$ lies in $R_i(u)$. For the same reason, if $z$ is an interior vertex of $\mathtt{T}$, then the path $P_{i-1}(z)$ lies in $R_i(u)$ as well. See Figs.~\ref{fig:paths}(a) and~\ref{fig:paths}(b). Let $C$ be a closed region defined by the following boundary: If $P_{i+1}(w) \cap P_{i+1}(u) = z$, then $\partial C=w\cup P_{i+1}(w)\cup zuw$. See Fig.~\ref{fig:paths}(a). Otherwise, $\partial C= w \cup P_{i+1}(w)\cup(P_{i+1}(w)\cap P_{i-1}(z))\cup P_{i-1}(z)^{-1} \cup zuw$. See Fig.~\ref{fig:paths}(b).
Note that by the definition of a realizer, each vertex in $\partial C \setminus\{u,w,z\}$ has its outgoing edge of $T_{i}$ in $C$. Similarly, by the definition of a realizer, each vertex in $(P_{i+1}(w)\cap\partial C)\setminus\{w\}$ and $( P_{i-1}(z)\cap\partial C)\setminus\{z\}$ has its outgoing edge of $T_{i-1}$ and $T_{i+1}$ outside $C$, respectively. See Fig.~\ref{fig:paths}(b). In addition, by the definition of a realizer, we have that an incoming edge from $T_i$ of an interior vertex $x$ in $\mathtt{T}$ lies between its outgoing edges from $T_{i+1}$ and $T_{i-1}$. Thus, if a vertex in $\partial C\setminus\{u\}$ has an incoming edge from $T_{i}$, such edge is not in $C$. Therefore, for each vertex $x \in \partial C\setminus\{u,z,w\}$ its path $P_{i}(x)$ passes through $u$.
Since $uv$ is flippable, it follows that $\partial C \setminus\{u,z,w\} \neq \emptyset$. Let $\mathcal{C}$ be a cycle in $R$ as follows: If $(P_{i+1}(w)\setminus\{w\}) \cap \partial C \neq \emptyset$, then let $\mathcal{C}= uwc\cup P_{i}(c)u$, which is a cycle in $R$ with $c$ the first vertex after $w$ in $P_{i+1}(w)$. See Fig.~\ref{fig:paths}(c). Otherwise, let $\mathcal{C}= uzc'\cup P_{i}(c')u$, which is a cycle in $R$ with $c'$ the first vertex after $z$ in $P_{i-1}(z)$. See Fig.~\ref{fig:paths}(d). \qed \end{proof}
Brehm~\cite{brehm20003} showed that applying a cycle flip to a realizer $R$ of a triangulation $\mathtt{T}$ transforms $R$ into another realizer $R'$ of $\mathtt{T}$. Thus, Lemma~\ref{lemma:find-cycle} implies the following.
\begin{theorem}\label{thm:flippable} Let $e$ be a flippable edge in a triangulation $\mathtt{T}$. Then, there exists a realizer $R$ of $\mathtt{T}$ where the oriented edge $\Vec{e}$ in $R$ is colored flippable. \end{theorem}
\begin{proof} Consider an arbitrary realizer $R$ of $\mathtt{T}$ and let $\Vec{e}=\Vec{uv}$ be the orientation of $e$ in $R$. Let $uwvz$ be the quadrilateral in $\mathtt{T}$ with diagonal $uv$ given in counter-clockwise order. If $\Vec{uv}$ admits a colored flip in $R$, then the statement holds. If $\Vec{uv}$ is not colored flippable, then from Lemma~\ref{lemma:find-cycle} there exists a cycle $\mathcal{C}$ that passes through either $\Vec{uw}$ or $\Vec{uz}$ in $R$ and does not pass through $\Vec{uv}$.
Let $R'$ be the oriented graph obtained when flipping $\mathcal{C}$. Hence, either $\Vec{wu}$ or $\Vec{zu}$ is in $R'$. In addition, edge $\Vec{uv}$ is in $R'$, since it does not lie in the interior of $\mathcal{C}$ in $R$. By Brehm~\cite{brehm20003} the directed graph $R'$ is a realizer of $\mathtt{T}$ different from $R$. Therefore, $\Vec{uv}$ is colored flippable in $R'$. \qed \end{proof}
\subsection{Cycle flips and colored flips}\label{subsec:cycleflip}
In this subsection we will show that any cycle flip of a realizer can be obtained by a sequence of $O(n)$ colored flips.
We say that a triangle $\triangle$ (not necessarily a face) in $\mathtt{T}$ is \emph{three-colored} in $R$ if its edges have pairwise different colors. From Definition~\ref{def:realizer} we observe the following.
\begin{observation}\label{obs:three-colored} A triangle $\triangle$ is three-colored in $R$ if and only if $\triangle$ is a cycle in $R$. \end{observation}
Now, we show that a face flip results from two colored flips.
\begin{lemma}\label{lemma:2-consecutive-flips} Let $F$ be an oriented face in a realizer $R$. Then, $F$ can be face flipped by two consecutive colored flips. \end{lemma}
\begin{proof} Consider the oriented face $F=u_1u_2u_3$ in the realizer $R$. Let $R'$ be the realizer obtained when $F$ is face flipped by $F'=u_3u_2u_1$. Without loss of generality assume that $F$ is counter-clockwise oriented and that edge $u_1u_2$ has label $i \mod 3$ in $R$. Since $F$ is a three-colored triangle, all $u_1,u_2$ and $u_3$ are interior vertices. Hence, $u_1u_2$ is the diagonal of a quadrilateral $u_1u_4u_2u_3$, see Fig.~\ref{fig:lemma-2-flips}(a). Note that edge $u_1u_2$ is flippable in $\mathtt{T}$: otherwise, either $u_2$ or $u_1$ is a vertex of degree $3$ in $\mathtt{T}$ but with less than three outgoing edges, which contradicts that $R$ is a realizer. Since $F$ is counter-clockwise oriented, $R$ admits an $f_{1}^i$ colored flip in the edge $u_1u_2$ with respect to $u_3u_1$. Let $R^{2}$ be the resulting realizer when applying such flip. Thus, the orientation of $\Vec{u_3u_1}$ changes to $\Vec{u_1u_3}$ and is re-labelled by $i$. In addition, the edge $\Vec{u_1u_2}$ is exchanged by $\Vec{u_3u_4}$ with label $i-1$. Now, note that $R^2$ admits an $f_1^{i-1}$ colored flip in the edge $\Vec{u_3u_4}$ with respect to $\Vec{u_2u_3}$. Consider the resulting realizer $R^{3}$ when applying such colored flip in $R^2$. Then, the edge $\Vec{u_2u_3}$ changes its orientation to $\Vec{u_3u_2}$ and its label to $i-1$. In addition, the edge $\Vec{u_3u_4}$ is exchanged by $\Vec{u_2u_1}$ with label $i+1$. Note that the face $u_1u_2u_3$ is now clockwise oriented in $R^{3}$ and the label of each edge of $F$ in $R$ is labelled by its successor in $R^{3}$. Moreover, since no other edge has been changed, it follows that $F$ was face flipped in $R^3$. Therefore, $R^3=R'$.
The reverse follows from the fact that $f_2$ colored flips are the inverse of $f_1$ colored flips. \qed \end{proof}
\begin{figure}
\caption{(a) Illustration of Lemma~\ref{lemma:2-consecutive-flips}. (b) Illustration of Lemma~\ref{lemma:re-orienting-ST}.}
\label{fig:lemma-2-flips}
\end{figure}
The following lemma will be important for the next results.
\begin{lemma}[Brehm~\cite{brehm20003}]\label{lemma:Brehm} Let $\mathtt{T}$ be a triangulation and let $\mathcal{C}$ be a counter-clockwise cycle (resp. clockwise cycle) of length at least $4$ in a realizer $R$ of $\mathtt{T}$ that contains no separating triangles. Then, $\mathcal{C}$ can be cycle flipped by a sequence of face flips of its interior faces where each face is flipped exactly once and it is oriented counter-clockwise (resp. clockwise) before it is flipped. \end{lemma}
Using Lemmas~\ref{lemma:2-consecutive-flips} and~\ref{lemma:Brehm} we obtain the next result.
\begin{corollary} \label{corollary:2faces-flip} Let $\mathtt{T}$ be a triangulation and let $\mathcal{C}$ be a cycle of length $\geq 4$ in a realizer $R$ of $\mathtt{T}$ with $m$ interior faces that contains no separating triangles. Then, $\mathcal{C}$ can be cycle flipped by a sequence of $2m$ colored flips. \end{corollary}
Note that from Obs.~\ref{obs:three-colored} and the definition of a realizer, it follows that the interior edges adjacent to the vertices of a cycle that is a separating triangle in $R$ are incoming edges. Using this fact we observe the following.
\begin{observation}\label{obs:flippable-ST} Let $\mathcal{C}=u_1u_2u_3$ be a counter-clockwise (resp. clockwise) cycle that is a separating triangle in a realizer $R$. Consider its interior face $cu_ju_{j+1}$ for any $j \mod 3$. Then, $R$ admits an $f_1^i$ (resp. $f_2^i$) colored flip in the edges $cu_j$ and $u_ju_{j+1}$. \end{observation}
\begin{lemma}\label{lemma:re-orienting-ST} Let $\mathcal{C}$ be a cycle that is a separating triangle in a realizer $R$ with $m$ interior faces and no interior separating triangles. Then, $\mathcal{C}$ can be cycle flipped by a sequence of $2m$ colored flips. \end{lemma}
\begin{proof} Let $\mathcal{C} = u_1u_2u_3$ be a counter-clockwise (resp. clockwise) cycle and consider its interior face $u_1cu_2$ and $\Vec{u_1u_2}$ with label $i \mod 3$. From Obs.~\ref{obs:flippable-ST} we can apply a colored flip to $u_1u_2$ with respect to $\Vec{cu_1}$ from a quadrilateral $u_1cu_2d$ in $\mathtt{T}$. Let $R'$ be the resulting realizer when applying such colored flip. See Fig.~\ref{fig:lemma-2-flips}(b). Note that the edges $\Vec{u_1c}, \Vec{cu_2}, \Vec{u_2u_3}$ and $\Vec{u_3u_1}$ define a counter-clockwise cycle (resp. clockwise cycle) in $R'$ with $m-1$ interior faces. Let $\mathcal{C}'$ be such cycle. Let $R''$ be the resulting realizer when applying a cycle flip to $\mathcal{C}'$. From Corollary~\ref{corollary:2faces-flip} it follows that $R''$ is obtained from $R'$ by a sequence of $2(m-1)$ colored flips.
Note that all interior edges of $\mathcal{C}'$ and edge $\vec{cu_1}$ are labelled in $R''$ by their preceding labels (resp. succeeding labels) in $R$. Also, edges $u_2u_3, u_3u_1$ and $cu_2$ have opposite orientation in $R''$ and their labels in $R''$ are the succeeding (resp. preceding) labels from $R$. Finally, $R''$ admits a colored flip in $\Vec{cd}$ with respect to edge $\Vec{u_2c}$. Let $R^3$ be the resulting realizer when applying such colored flip. Note that $\Vec{cu_2}$ had the same label $i$ in $R$ as $\Vec{u_1u_2}$. See Fig.~\ref{fig:lemma-2-flips}(b). Hence, $R^3$ corresponds to a cycle flip of $\mathcal{C}$ in $R$. Therefore, $\mathcal{C}$ can be cycle flipped by a sequence of $2m-2+2=2m$ colored flips. \qed \end{proof}
We say that a separating triangle in a cycle $\mathcal{C}$ is \emph{maximal} if it is not contained in another separating triangle contained in $\mathcal{C}$. Next, we show that any cycle flip is a sequence of $2m$ colored flips.
\begin{lemma}\label{lemma:cycles} Let $\mathcal{C}$ be a cycle with $m$ interior faces in a realizer $R$. Then, $\mathcal{C}$ can be cycle flipped by a sequence of $2m$ colored flips. \end{lemma}
\begin{proof} If $\mathcal{C}$ does not contain a separating triangle, then from Corollary~\ref{corollary:2faces-flip} and Lemma~\ref{lemma:re-orienting-ST} the statement holds. Now, let us assume that $\mathcal{C}$ contains $t\geq 1$ separating triangles.
Let us show that $\mathcal{C}$ can be cycle flipped by a sequence of $2m$ colored flips. We proceed by induction on $t$.
First, if $\mathcal{C}$ has length at least $4$, we denote $R' = R$ and $\mathcal{C}' =\mathcal{C}$ a cycle in $R'$ and $m'=m$. If $\mathcal{C}=u_1u_2u_3$ is of length $3$, i.e., is a separating triangle, we define $R'$, $\mathcal{C}'$ and $m'$ as follows. Consider its interior face $cu_1u_2$ and label $i$ of $\vec{u_1u_2}$. From Obs.~\ref{obs:flippable-ST} we can apply a colored flip to $u_1u_2$ from a quadrilateral $u_1cu_2d$ in $\mathtt{T}$ such that $\vec{cu_1}$ is re-oriented to $\vec{u_1c}$ with label $i$ and $\vec{u_1u_2}$ is exchanged by $\vec{cd}$ with the same label as $\vec{cu_1}$ in $R$. Let $R'$ be the resulting realizer when applying such colored flip to $R$. Note that the edges $\vec{u_1c}, \vec{cu_2}, \vec{u_2u_3}$ and $\vec{u_3u_1}$ define a cycle in $R'$ that is oriented as $\mathcal{C}$. Let $\mathcal{C}'$ be such cycle. Note that $\mathcal{C}'$ contains $m'=m-1$ interior faces and at most $t$ separating triangles.
[Base case] Assume $t=1$. Consider the interior separating triangle $\triangle$ of $\mathcal{C}'$ and let $m''$ be the number of interior faces in $\triangle$. Let $\mathcal{C}''$ be the resulting cycle when removing the interior vertices in $\triangle$. From Lemma~\ref{lemma:Brehm}, $\mathcal{C}''$ can be cycle flipped from $R'$ by a sequence of $m' - m''+1$ face flips. Note that when applying the colored flips for face flipping each face $F$ in $\mathcal{C}''$, the rest of the edges are unchanged. In addition, since each face is face flipped exactly once in the same direction as $\mathcal{C}'$, it follows that replacing the face flip defined by $\triangle$ in $\mathcal{C}''$ by a cycle flip of $\triangle$, the interior edges in $\triangle$ have the corresponding new labelling. Thus, $\mathcal{C}'$ can be cycle flipped by a sequence of $m'-m''$ face flips and a cycle flip of $\triangle$. From Lemmas~\ref{lemma:2-consecutive-flips} and~\ref{lemma:re-orienting-ST} we obtain that $\mathcal{C}'$ can be cycle flipped by a sequence of $2m'$ colored flips.
If $\mathcal{C}$ is of length at least $4$, the statement holds. Otherwise, note that since $\mathcal{C}'$ had the same orientation as $\mathcal{C}$, then when applying the cycle flip to $\mathcal{C}'$ we obtained $cu_2u_3u_1$ in the resulting realizer $R''$, where all of its interior edges are interior edges in $\mathcal{C}$ and were re-labelled as desired. Similarly, the orientation and labels of $u_1u_3$ and $u_2u_3$ were changed as desired. In addition, edge $cu_1$ was re-oriented twice (applying the colored flip to $u_1u_2$) with the corresponding labelling. It remains to re-orient edge $u_2c$ once and flip $cd$. In fact, $R''$ admits a colored flip in edges $\Vec{cd}$ and $\Vec{u_2c}$. Let $R^3$ be the resulting realizer when applying the colored flip to $\Vec{cd}$. Note that $R^3$ contains the cycle $u_3u_2u_1$ and edge $\Vec{cu_2}$ with the desired labelling as for a cycle flip. Thus, $\mathcal{C}$ can be cycle flipped by a sequence of $2+2m'=2m$ colored flips.
[Inductive Hypothesis] $\mathcal{C}$ can be cycle flipped by a sequence of $2m$ colored flips if it contains $t-1\geq 1$ separating triangles.
[Inductive step] Assume $\mathcal{C}'$ contains $t$ separating triangles. Let $\triangle_1, \ldots, \triangle_k$ be the maximal separating triangles in $\mathcal{C}'$ with $m_1, \ldots, m_k$ interior faces, respectively. Let $\mathcal{C}''$ be the resulting cycle when removing the interior vertices of each $\triangle_{1}, \ldots, \triangle_k$. Again, $\mathcal{C}''$ is a cycle of length at least $4$ with no separating triangles. By Lemma~\ref{lemma:Brehm}, $\mathcal{C}''$ can be cycle flipped by a sequence of $m'-(\sum_{j=1}^k m_j)+k$ face flips. Analogously as in the base case, we have that each face flip of a triangle $\triangle_j$ in $\mathcal{C}''$ corresponds to a cycle flip of $\triangle_j$ in $\mathcal{C}'$. Since each $\triangle_j$ contains at most $t-1$ separating triangles, by the inductive hypothesis, we obtain that $\mathcal{C}'$ can be cycle flipped by a sequence of $2(m'-\sum_{j=1}^km_j)+2\sum_{j=1}^km_j = 2m'$ colored flips.
Analogously as in the base case, if $\mathcal{C}$ is of length at least $4$, the statement holds. Otherwise, since $\mathcal{C}'$ had the same orientation as $\mathcal{C}$, then when applying the cycle flip to $\mathcal{C}'$ we obtained $cu_2u_3u_1$ in the resulting realizer $R''$ and $R''$ admits a colored flip in edges $\Vec{cd}$ and $\Vec{u_2c}$. Let $R^3$ be the resulting realizer when applying the colored flip to $\Vec{cd}$. Note that $R^3$ contains the cycle $u_3u_2u_1$ and edge $\Vec{cu_2}$ with the desired labelling as for a cycle flip. Thus, $\mathcal{C}$ can be cycle flipped by a sequence of $2+2m'=2m$ colored flips. \qed \end{proof}
\subsection{A bound on the diameter of $\mathcal{R}_n$}\label{subsec:upperbound}
Komuro~\cite{komuro1997diagonal} proved that the diameter of $\mathcal{T}_n$ is $O(n)$ while the edges of the outer face are fixed\footnote{The best known bound is in~\cite{cardinal2018arc} but their procedure might change the outer face.}. Using this and the previous results we obtain the desired theorem.
\begin{theorem}\label{thm:connectedness} A given realizer of a triangulation of $n$ vertices is at colored flip distance $O(n^2)$ to any other realizer in $\mathcal{R}_n$. \end{theorem}
\begin{proof} Let $R$ and $R'$ be two different realizers in $\mathcal{R}_n$ and consider their underlying triangulations $\mathtt{T}$ and $\mathtt{T}'$, respectively. Let $\mathtt{T}''$ be the triangulation with vertices $r_0$ and $r_1$ on its outer face adjacent to all the vertices. $\mathtt{T}''$ has a unique realizer~\cite{brehm20003}. From Komuro~\cite{komuro1997diagonal}, $\mathtt{T}$ and $\mathtt{T}'$ can be transformed into $\mathtt{T}''$ by $O(n)$ diagonal flips. Hence, from Theorem~\ref{thm:flippable} it follows that $R$ and $R'$ can be transformed into each other by a sequence of $O(n)$ colored flips and a cycle flip in between such flips. Since there can be cycles with $O(n)$ interior faces, from Lemma~\ref{lemma:cycles} it follows that $R$ and $R'$ can be transformed into each other by $O(n^2)$ colored flips. \qed \end{proof}
\section{Dynamic maintenance}\label{sec:DynamicSec}
In this section we study the problem of maintaining a realizer over a sequence of colored flips.
Let $T_i(u)$ denote the subtree of $T_i$ rooted at vertex $u$. Let $R$ be a realizer and let $R'$ be the resulting realizer when applying an $f_1^i$ (resp. $f_2^i$) colored flip to $\Vec{uv}$ with respect to edge $\Vec{wu}$ in quadrilateral $uwvz$. Define $c(u)=|R_{i-1}(w)|-|R_{i-1}(u)|+1$ (resp. $c(u)= |R_{i+1}(u)|-|R_{i+1}(w)|+1$) and define $c(w)$ as follows: If $\Vec{uz} \in R$, then $c(w)=-1$ (resp. $c(w)=0$). Otherwise, $c(w)=|R_{i}(u)|-|R_i(z)|$ (resp. $c(w)=|R_i(z)|-|R_i(u)|-1$).
Consider the labels $i$ and $j$ of $\Vec{uv}$ and $\Vec{wu}$, respectively. We observe that the only changes made when applying a colored flip to $\Vec{uv}$ are to the paths passing through edges $\Vec{uv}$ and $\Vec{uw}$. These paths are exactly the paths $P_i(x)$ for all $x \in T_i(u)$ and $P_{j}(y)$ for all $y \in T_{j}(w)$. Thus, the only vertices changing their regions are the ones in $T_i(u)$ and $T_{j}(w)$. Moreover, the $i$-th region of any $x \in T_i(u)$ and the $j$-th region of any $y \in T_j(w)$ remain unchanged. Thus, the regions $R_{i-1}(x)$ and $R_{i+1}(x)$ exchange elements for all $x \in T_i(u)$. The same applies to elements in $T_j(w)$. More precisely, we obtain the following lemma.
\begin{figure}
\caption{(a) The filled area corresponds to $R'_{i+1}(u)=R_{i+1}(w)\cup\{w\}$ and the tilled one corresponds to $R_{i-1}(u)$. (b) The filled area corresponds to $R'_{i}(w)=R_i(w)\cup R_i(z)$ and the tilled area corresponds to $R_i(w)$.}
\label{fig:difference}
\end{figure}
\begin{lemma}\label{lemma:difference} Let $R$ be a realizer and let $R'$ be the resulting realizer when applying a colored flip to $\Vec{uv}$ with respect to edge $\Vec{wu}$ in quadrilateral $uwvz$ with labels $i$ and $j$, respectively. Let $c(u)$ and $c(w)$ be defined as above. Then, \begin{compactenum}
\item For all $x$ in $T_i(u)$, $|R'_{i-1}(x)|-|R_{i-1}(x)|=c(u)$ and $|R'_i(x)|=|R_i(x)|$.
\item For all $y$ in $T_{j}(w)$, $|R'_{j-1}(y)|-|R_{j-1}(y)|=c(w)$ and $|R'_{j}(y)|=|R_{j}(y)|$.
\item The regions of any vertex in $V(R)\setminus (V(T_i(u)\cup T_{j}(w)))$ remain the same. \end{compactenum} \end{lemma}
\begin{proof} Note that the only changes made when applying a colored flip to $\Vec{uv}$ are to the paths passing through edges $\Vec{uv}$ and $\Vec{uw}$. These paths are exactly the paths $P_i(x)$ for all $x \in T_i(u)$ and $P_{j}(y)$ for all $y \in T_{j}(w)$. Thus, the regions of any vertex in $V(R)\setminus (V(T_i(u)\cup T_{j}(w)))$ remain the same.
From Lemma~\ref{lemma:schnyder-regions} we note that each $x \in T_{i}(u)$ is in $R_i(u)$. Since the paths in $R_i(u)$ are unchanged in $R'$, it follows that $R'_i(x)=R_i(x)$ for all $x \in T_i(u)$. Similarly, we show that $R'_{j}(y)=R_{j}(y)$ for all $y \in T_{j}(w)$.
Now, we assume $j=i-1$. Let us show that $|R'_{i-1}(x)|=|R_{i-1}(x)|+c(u)$. Since $u \in P_i(x)$ for each $x \in T_i(u)\setminus\{u\}$, $u \in R_{i-1}(x)$. In addition, since $x \in R_i(u)$, $(R_{i-1}(x)\setminus R_{i-1}(u)) \subset R_i(u)$ which remains unchanged. Hence, $|R'_{i-1}(x)|=|R_{i-1}(x)|-|R_{i-1}(u)|+|R'_{i-1}(u)|$.
On the other hand, note that $P_{i-1}(u)\subset P_{i-1}(w)$. Hence, $R_{i+1}(u) \subset R_{i+1}(w)$. Note that $R'_{i+1}(u)$ is given by the region between paths $P_{i-1}(u)$ and $uw\cup P_i(w)$, which is exactly $R_{i+1}(w)\cup\{w\}$. See Fig.~\ref{fig:difference}(a). Hence, $|R'_{i+1}(u)|-|R_{i+1}(u)|=|(R_{i+1}(w)\cup\{w\})\setminus R_{i+1}(u)| = - c(u)$.
Therefore, $|R'_{i-1}(x)|= |R_{i-1}(x)| + c(u)$ for all $x \in T_i(u)$.
Finally, let us show that $|R'_{i+1}(y)|=|R_{i+1}(y)|+c(w)$ for all $y \in T_{i+1}(w)$. Since $w \in P_{i-1}(y)$ for each $y \in T_{i-1}(w)\setminus\{w\}$, $R_{i+1}(w) \subseteq R_{i+1}(y)$. In addition, since $y \in R_{i-1}(w)$, $(R_{i+1}(y)\cap R_{i-1}(w)) \subset R_{i+1}(y)$ which remains unchanged. Hence, $|R'_{i+1}(y)|=|R_{i+1}(y)|-|R'_{i+1}(w)|+|R_{i+1}(w)|$.
On the other hand, if $\Vec{uz} \in R$: then $P_{i-1}(w) = (wvz)\cup P_{i-1}(z)$. Since $v$ is the only new vertex in the interior of $R'_{i+1}(w)$ and $|P'_{i+1}(w)|-|P_{i+1}(w)|=-1$, it follows that $|R'_{i+1}(w)|-|R_{i+1}(w)|=-1$.
Now, consider the case $\Vec{zu} \in R$: then $P_{i+1}(u) \subset P_{i+1}(z)$. Hence, $R_i(u) \subseteq R_i(z)$. In addition, since $\Vec{wu} \in R$, we have that $P_{i-1}(u)\subset P_{i-1}(w)$ and $R_i(u) \subseteq R_i(w)$. Thus, $R_i(z)\cap R_i(w)=R_i(u)$. Therefore, $|R'_{i+1}(w)|-|R_{i+1}(w)|= - |R_i(z)\setminus R_i(w)|=|R_i(u)|-|R_i(z)|=c(w)$. See Fig.~\ref{fig:difference}(b). Therefore, $|R'_{i+1}(y)|=|R_{i+1}(y)|+c(w)$ for every $y \in T_{i-1}(w)$.
The case when $j=i+1$ is symmetric. \qed \end{proof}
A \emph{link/cut tree} is a data structure proposed by Sleator and Tarjan~\cite{sleator1983data} that maintains a forest of vertex-disjoint rooted trees with costs on its vertices under two dynamic operations: $\textsc{link}$ and $\textsc{cut}$ (see table below). The link/cut tree supports the operations 1--6 from the table below in worst case $O(\log n)$ time.
\subsubsection{The data structure.} Consider a data structure of a realizer $R$ as a set of three link/cut trees defined by each tree $T_0, T_1, T_2$. In each vertex $u$ we store its parent $\textsc{parent}_i(u)$ in $T_i$ for each $i \mod 3$, its initial barycentric coordinates $\incoordinates{u}$ and two costs: $\textsc{d-cost}_i(u)$ and $\textsc{r-cost}_i(u)$, where $\textsc{d-cost}_i$ refers to the distance of $u$ to the root $r_i$ of $T_i$ and $\textsc{r-cost}_i$ refers to the difference between the initial ($i-1$)th coordinate and the current ($i-1$)th coordinate of $u$. Precisely, $\textsc{r-cost}_i(u)$ is the amount that has to be added to the initial ($i-1$)th coordinate and subtracted from the initial ($i+1$)th coordinate. The initial $\textsc{r-cost}$ is $0$. We define extra functions for our data structure in lines 7--11 from the table below. Using this data structure we obtain the next theorem. \\
\begin{tabularx}{0.96\textwidth} {|c |c |>{\raggedright\arraybackslash}X| }
\hline
1 & \textsc{link} ($u, v$) & Add edge $uv$. \\
\hline
2 & \textsc{parent}($u$) & Return parent of vertex $u$. \\ \hline
3 & \textsc{cut}($u$) & Delete edge $v$\textsc{parent}($v$). \\ \hline 4 & \textsc{cost}($u$) & Return current cost in $u$ \\ \hline 5 & \textsc{t-updatecost}($u, x$) & Add $x$ to the cost of all vertices in subtree $T(u)$. \\ \hline 6 & \textsc{leastcommon}($u,v$) & Return least common ancestor in $T$ of $u$ and $v$.\\ \hline 7 & \lab{$u,v$} & Return label of edge $uv$. \\ \hline 8 & \orientation{$u, v$} & Return orientation of edge $uv$. \\ \hline 9 & \incoordinates{$u$} & Return initial barycentric coordinates of vertex $u$. \\ \hline
10 & \coordinates{$u$} & Return current barycentric coordinates of $u$. \\ \hline 11 & \flip{$u, v,w,z$} & Apply colored flip to edge $\Vec{uv}$ with respect to $\vec{wu}$. \\ \hline \end{tabularx}
\
\begin{theorem}\label{thm:dynamic-realizer} A realizer of a triangulation can be maintained in $O(\log n)$ per \textsc{flip}. Furthermore, queries \textsc{orientation}, \textsc{label}, \textsc{coordinates}, $\textsc{leastcommon}_i$ and $\textsc{d-cost}_i$ can be obtained in $O(\log n)$ amortized time. \end{theorem}
\begin{proof} Consider the procedures defined below.
From~\cite{sleator1983data} $\textsc{parent}$ and \textsc{in-coordinates} take $O(1)$ time and $\textsc{t-updatecost}_i$ takes worst-case $O(\log n)$ time. Since $|R_{i+1}(u)|=n-1-|R_i(u)|-|R_{i-1}(u)|$, it follows that $\coordinates{u}$ is correct. Since \textsc{in-coordinates} takes constant time and $\textsc{r-cost}$ was called exactly three times, it follows that \textsc{coordinates} can be obtained in $O(\log n)$ time. Since the functions \textsc{orientation} and \textsc{label} are calling $\textsc{parent}$ at most 6 times, \textsc{orientation} and \textsc{label} can be obtained in $O(1)$ time. Since $\textsc{d-cost}_i$ and $\textsc{r-cost}_i$ behave as $\textsc{cost}$ from a link/cut tree, both can be obtained in $O(\log n)$. It remains to analyse \textsc{flip}. Note that in line 18 the function removes edges $\Vec{uv}$ and $\Vec{wu}$. In line 19 the new edges $\Vec{uw}$ and $\Vec{wz}$ are added. Thus, \textsc{flip} does the desired colored flip. Line 20 changes the $\textsc{r-cost}$ and $\textsc{d-cost}$ for each vertex in the subtree $T_i(u)$ by $c(u)$ and $d(u)$, respectively. Similarly, in the subtree $T_{j}(w)$ by $c(w)$ and $d(w)$, respectively. From Lemma~\ref{lemma:difference}, the updated $\textsc{r-cost}_i$ is correct. Therefore, \textsc{flip} is correct. Finally, we call exactly 3 times the function \textsc{coordinates}, twice each function $\textsc{cut}, \textsc{link}$ and $\textsc{t-updatecost}$. Each of these functions has amortized cost $O(\log n)$. Hence, \textsc{flip} has amortized cost $O(\log n)$. \qed \end{proof}
\noindent\fbox{\scalebox{0.75}{\begin{minipage}{\textwidth} \begin{algorithmic}[1]
\Procedure{Label}{$u, v$}
\Comment{Returns the label of edge $uv$.}
\For{Each $i \mod 3$}
\If{$\textsc{parent}_i(u) = v$}
\State return $i$
\Else
\If{$\textsc{parent}_i(v) = u$}
\State \Return $i$
\EndIf
\EndIf
\EndFor
\EndProcedure \end{algorithmic} \end{minipage}}}
\noindent\fbox{\scalebox{0.75}{\begin{minipage}{\textwidth} \begin{algorithmic}[1]
\Procedure{orientation}{$u, v$}
\Comment{Returns orientation of edge $uv$.}
\State Let $b= \textsc{False}$
\For{each $i \mod 3$}
\If{$\textsc{parent}_i(u) = v$}
\State let $b= \textsc{True}$
\EndIf
\EndFor
\If{$b= \textsc{True}$}
\State \Return $\Vec{uv}$
\Else
\State \Return $\Vec{vu}$
\EndIf
\EndProcedure \end{algorithmic} \end{minipage}} }
\noindent\fbox{\scalebox{0.75}{\begin{minipage}{\textwidth} \begin{algorithmic}[1]
\Procedure{Coordinates}{$u$}
\Comment{Returns a vector with the barycentric coordinates of $u$ in current realizer $R$.}
\State let $(u_0, u_1, u_2)=\incoordinates{u}$.
\For{each $i \mod 3$}
\State let $c_i = \textsc{cost}_i(u)$.
\EndFor
\For{each $j \mod 3$}
\State $u'_j = u_j + \frac{c_{j+1}-c_{j-1}}{n-1}$.
\EndFor
\State \Return $(u'_0, u'_1, u'_2)$
\EndProcedure \end{algorithmic} \end{minipage}} }
\noindent\fbox{\scalebox{0.8}{\begin{minipage}{\textwidth} \begin{algorithmic}[1]
\Procedure{Flip}{$u, v, w, z$}
\Comment{Creates a flip while updates the cost in the subtrees that are changed.}
\State Let $i=\lab{u,v}, j=\lab{u,w}$.
\State let $d(u)=\textsc{d-cost}_i(w)-\textsc{d-cost}_i(u)+1$ and $d(w)=\textsc{d-cost}_j(z)-\textsc{d-cost}_j(w)+1$
\State $(u_0, u_1, u_2)= \coordinates{u}$,
\State $(w_0, w_1, w_2)=\coordinates{w}$,
\State $(z_0, z_1, z_2)=\coordinates{z}$.
\If{$j=i-1 \mod 3$}
\State let $c(u) = (n-1)(w_{i-1}-u_{i-1})$.
\If{$\orientation{u,z} =\Vec{uz}$}
\State let $c(w)= 0$
\Else \State let $c(w)=(n-1)(z_i-u_i)$
\EndIf
\Else \State let $c(u) = (n-1)(u_{i-1}-w_{i-1})+1$
\If{$\orientation{u,z}=\Vec{uz}$}
\State let $c(w)=-1$
\Else \State let $c(w)=(n-1)(u_i-z_i)$
\EndIf
\EndIf
\State $\textsc{cut}_i(u), \textsc{cut}_j(w)$
\State $\textsc{link}_i(u, w), \textsc{link}_j(w, z)$
\State $\textsc{t-updatecost}_i(u, c(u), d(u)), \textsc{t-updatecost}_j(w, c(w), d(w))$.
\EndProcedure \end{algorithmic} \end{minipage}} }
\appendix
\end{document}
\begin{document}
\begin{frontmatter}
\title{Boundary Lipschitz Regularity and the Hopf Lemma on Reifenberg Domains for Fully Nonlinear Elliptic Equations \tnoteref{t1}}
\author[rvt]{Yuanyuan Lian} \ead{[email protected]; [email protected]} \author[rvt]{Wenxiu Xu\corref{cor1}} \ead{[email protected]} \author[rvt]{Kai Zhang} \ead{zhang\[email protected]; [email protected]} \tnotetext[t1]{This research is supported by the National Natural Science Foundation of China (Grant No. 11701454) and the Natural Science Basic Research Plan in Shaanxi Province of China (Program No. 2018JQ1039).}
\cortext[cor1]{Corresponding author}
\address[rvt]{Department of Applied Mathematics, Northwestern Polytechnical University, Xi'an, Shaanxi, 710129, PR China}
\begin{abstract} In this paper, we prove the boundary Lipschitz regularity and the Hopf Lemma by a unified method on Reifenberg domains for fully nonlinear elliptic equations. Precisely, if the domain $\Omega$ satisfies the exterior Reifenberg $C^{1,\mathrm{Dini}}$ condition at $x_0\in \partial \Omega$ (see \Cref{d-re}), the solution is Lipschitz continuous at $x_0$; if $\Omega$ satisfies the interior Reifenberg $C^{1,\mathrm{Dini}}$ condition at $x_0$ (see \Cref{d-H-re}), the Hopf lemma holds at $x_0$. Our paper extends the results under the usual $C^{1,\mathrm{Dini}}$ condition. \end{abstract}
\begin{keyword} Boundary regularity \sep Lipschitz continuity \sep Hopf lemma \sep Fully nonlinear elliptic equation \sep Reifenberg domain
\end{keyword}
\end{frontmatter}
\section{Introduction}\label{S1}
In this paper, we intend to obtain the pointwise boundary Lipschitz regularity and prove the Hopf Lemma for the viscosity solutions of the following fully nonlinear elliptic equations \begin{equation}\label{r-e} \left\{\begin{aligned} &u\in S(\lambda,\Lambda,f)&& ~~\mbox{in}~~\Omega;\\ &u=g&& ~~\mbox{on}~~\partial \Omega, \end{aligned}\right. \end{equation} where $\Omega$ is a bounded domain and $S\left( {\lambda ,\Lambda ,f} \right)$ denotes the Pucci class with uniform constants $\lambda$ and $\Lambda$ (see \cite{MR1351007} for the definition and its basic properties).
It is well known that the exterior sphere condition and the interior sphere condition imply the boundary Lipschitz regularity and the Hopf lemma, respectively. In recent decades, the sphere condition has been extended to a more general geometrical condition, i.e., the $C^{1,\mathrm{Dini}}$ condition (see \Cref{r-2}). With the aid of the boundary Harnack inequality, Safonov \cite{Safonov2008} proved the boundary Lipschitz regularity under the exterior $C^{1,\mathrm{Dini}}$ condition, and the Hopf lemma under the interior $C^{1,\mathrm{Dini}}$ condition for classical solutions of linear elliptic equations in nondivergence form. Huang, Li and Wang \cite{MR3167627} also obtained the boundary Lipschitz regularity for linear elliptic equations under the exterior $C^{1,\mathrm{Dini}}$ condition. They used an auxiliary barrier function and the iteration technique, without using the boundary Harnack inequality. Lieberman \cite{MR779924} proved the Hopf lemma for linear elliptic equations under the interior $C^{1,\mathrm{Dini}}$ condition by applying the regularized distance. Recently, Lian and Zhang \cite{lian2018boundary} extended the above results to fully nonlinear elliptic equations by a unified method. Moreover, the proof is simple. In that paper, curved boundaries were regarded as the perturbation of a hyperplane. Then the desired regularity can be obtained by a perturbation argument.
In this paper, we prove the boundary Lipschitz regularity and the Hopf lemma for viscosity solutions of fully nonlinear elliptic equations under the Reifenberg $C^{1,\mathrm{Dini}}$ condition, which extends the $C^{1,\mathrm{Dini}}$ condition. We use an improved technique of \cite{lian2018boundary} to derive our results. It can be shown that the directions at different scales converge to a direction, say $e_n$. The main difficulty is that we cannot estimate the derivative of the solution along $e_n$ directly; instead, the difference between two adjacent scales has to be estimated. The Reifenberg $C^{1,\mathrm{Dini}}$ condition was introduced by Ma, Moreira and Wang \cite{MR1351008}, where the boundary $C^{1}$ regularity was obtained for fully nonlinear parabolic equations. Note that the Reifenberg ${C^{1,\mathrm{Dini}}}$ condition is more general than the ${C^{1,\mathrm{Dini}}}$ condition (see \Cref{d-re} and \Cref{d-H-re}).
Before the statement of our main results, we introduce some standard notations and definitions. Let $B_r(x_0)$ denote the open ball in $R^n$ with center $x_0$ and radius $r$. Set $B_r=B_r(0)$, $B_r^+=B_r\cap \left\{x|x_n>0\right\}$ and $T_r=B_r\cap \left\{x|x_n=0\right\}$. Denote by $Q_r(x_0)$ the open cube in $R^n$ with center $x_0$ and side-length $r$. Set $Q_r=Q_r(0)$ and $Q_r^+=Q_r\cap \left\{x|x_n>0\right\}$. In this paper, $\left\{e_1,...,e_n\right\}$ stands for the standard basis in $R^n$.
\begin{definition}\label{d-Dini} The function $\omega:[0,+\infty)\rightarrow [0,+\infty)$ is called a Dini function if $\omega$ is nondecreasing and satisfies the following Dini condition for some $r_0>0$ \begin{equation}\label{e-dini}
\int_{0}^{r_0}\frac{\omega(r)}{r}dr<\infty. \end{equation} \end{definition}
\begin{definition}\label{d-Dini-f} Let $\Omega \subset R^{n}$ be a bounded domain and $f$ be a function defined on $\bar{\Omega}$. We say that $f$ is Lipschitz at $x_0\in \bar{\Omega}$ or $f\in C^{0,1}(x_0)$ if there exists a constant $C$ such that \begin{equation*}
|f(x)-f(x_0)|\leq C|x-x_0|,~~\forall~x\in \bar{\Omega}. \end{equation*}
Then, define $[f]_{C^{0,1}(x_0)}=\inf C$ where $C$ satisfies the above equation, and $\|f\|_{C^{0,1}(x_0)}=\|f\|_{L^{\infty}(\Omega)}+[f]_{C^{0,1}(x_0)}$.
Similarly, we call that $f$ is $C^{1,\mathrm{Dini}}$ at $x_0$ or $f\in C^{1,\mathrm{Dini}}(x_0)$ if there exist a vector $l$ and a constant $C$ such that \begin{equation*}
|f(x)-f(x_0)-l\cdot (x-x_0)|\leq C|x-x_0|\omega(|x-x_0|),~~\forall~x\in \bar{\Omega}, \end{equation*}
where $\omega$ is a Dini function. Then we denote $l$ by $\nabla f(x_0)$. We define $[f]_{C^{1,\mathrm{Dini}}(x_0)}=\inf C$ where $C$ satisfies the above equation, and $\|f\|_{C^{1,\mathrm{Dini}}(x_0)}=\|f\|_{L^{\infty}(\Omega)}+|l|+[f]_{C^{1,\mathrm{Dini}}(x_0)}$. \end{definition}
Now, we give the definitions of the Reifenberg $C^{1,\mathrm{Dini}}$ condition. \begin{definition}[\textbf{Exterior Reifenberg $C^{1,\mathrm{Dini}}$ condition}]\label{d-re}
We say that $\Omega$ satisfies the exterior Reifenberg ${C^{1,\mathrm{Dini}}}$ condition at $0 \in \partial \Omega$ if there exist a positive constant $R$ and a Dini function $\omega_{\Omega}(r)$ such that
a) for any $0 < r < R,$ there exists a hyperplane ${\Gamma _r}$ and its unit normal vector $n_{\Gamma _r}$ such that \begin{equation}\label{e-re} {B_r} \cap \Omega \subset {B_r} \cap \left\{ {{x \cdot {n_{{\Gamma _r}}}} > - r\omega_{\Omega}(r)} \right\}. \end{equation}
b) $\left| n_{\Gamma _r}-n_{\Gamma _{\theta r}} \right| \leq {K(\theta) }\omega_{\Omega}(r)$ for each $ 0<\theta <1$ and $0<r<R$, where the nonnegative function $K$ depends only on $\theta$ and is bounded on $[\theta _0,1]$ for any $0<{\theta _0}<1$. \end{definition}
\begin{definition}[\textbf{Interior Reifenberg $C^{1,\mathrm{Dini}}$ condition}]\label{d-H-re}
We say that $\Omega$ satisfies the interior Reifenberg ${C^{1,\mathrm{Dini}}}$ condition at $0 \in \partial \Omega$ if there exist a positive constant $R$ and a Dini function $\omega_{\Omega}(r)$ such that
a) for any $0 < r < R,$ there exists a hyperplane ${\Gamma _r}$ and its unit normal vector $n_{\Gamma _r}$ such that \begin{equation}\label{e-re-2} {B_r} \cap \Omega^c\subset {B_r} \cap \left\{ {{x \cdot {n_{{\Gamma _r}}}} < r\omega_{\Omega}(r)} \right\} . \end{equation}
b) $\left| n_{\Gamma _r}-n_{\Gamma _{\theta r}} \right| \leq {K(\theta) }\omega_{\Omega}(r)$ for each $ 0<\theta <1$ and $0<r<R$. \end{definition}
\begin{remark}\label{r-1} If $\Omega$ satisfies both the exterior and interior Reifenberg ${C^{1,\mathrm{Dini}}}$ condition at $0$, we call that $\partial\Omega$ is Reifenberg ${C^{1,\mathrm{Dini}}}$ at $0$. Without loss of generality, we always assume that $K\geq 1$. \end{remark}
\begin{remark}\label{r-2} If $\Gamma_r$ and $n_{\Gamma_r}$ are the same for different $r$, we arrive at the definition of the usual ${C^{1,\mathrm{Dini}}}$ conditions (see \cite[Definition 1.2 and Definition 1.3]{lian2018boundary}). The Reifenberg ${C^{1,\mathrm{Dini}}}$ condition is more general than the usual ${C^{1,\mathrm{Dini}}}$ condition, which is shown clearly by the following example adopted from \cite{MR1351008}.
Let $\Omega = {B_1} \cap \left\{ {y > f\left( x \right)} \right\}\subset R^2$ where $f\left( x \right) ={x}/ {\ln \left| x \right|},x \in \left(-1/2,1/2 \right)$. Clearly, $\partial\Omega$ is not ${C^{1,\mathrm{Dini}}}$ at $0$ since $1/|\ln r|$ is not a Dini function. Now we show that it is Reifenberg ${C^{1,\mathrm{Dini}}}$ at $0$.
For any $0<r<1/2$, let $\Gamma_r$ be the line $\left\{(x,y)| y=x/\ln r\right\}$ and take $n_{\Gamma_r}=(-1/\ln r, 1)$. It is easy to see that there exists a unique $-r<x^*<0$ satisfying $f'\left( {{x^*}} \right) = {1}/{\ln r}$, and for any $(x,y)\in B_r\cap \Omega$,
\begin{equation*}
(x,y)\cdot n_{\Gamma_r}> (x^*,f(x^*))\cdot n_{\Gamma_r}= \frac{x^*}{\ln |x^*|} - \frac{x^*}{\ln r}. \end{equation*} Note that \begin{equation*}
f'(x^*)=\frac{1}{\ln |x^*|}-\frac{1}{\ln^2 |x^*|}=\frac{1}{\ln r}. \end{equation*} I.e., \begin{equation*}
\frac{x^*}{\ln r}=\frac{x^*}{\ln |x^*|}-\frac{x^*}{\ln^2 |x^*|}. \end{equation*} Hence, \begin{equation*}
(x,y)\cdot n_{\Gamma_r}> \frac{x^*}{\ln^2 |x^*|}\geq \frac{-r}{\ln^2r}. \end{equation*} In addition, \begin{equation*}
|n_{\Gamma_r}-n_{\Gamma_{\theta r}}| \leq \left| {\frac{1}{{\ln r}} - \frac{1}{{\ln \left( {\theta r} \right)}}} \right| \leq \left| {\frac{{\ln \theta }}{{\ln r\ln \left( {\theta r} \right)}}} \right| \leq \left| \frac{\ln \theta }{ \ln^2 r} \right|. \end{equation*}
Since $1/\ln^2r$ is a Dini function, $\partial\Omega$ satisfies the exterior Reifenberg ${C^{1,\mathrm{Dini}}}$ condition at $0$. Similarly, it can be verified that $\partial\Omega$ also satisfies the interior Reifenberg ${C^{1,\mathrm{Dini}}}$ condition at $0$. \end{remark}
Now, we state our main results. For the boundary Lipschitz regularity, we have \begin{theorem}\label{t-2} Suppose that $\Omega$ satisfies the exterior Reifenberg $C^{1,\mathrm{Dini}}$ condition at $0\in \partial \Omega$ for some Dini function $\omega_{\Omega}(r)$ and $R>0$. Let $u$ be a viscosity solution of \begin{equation} \left\{\begin{aligned} &u\in S(\lambda,\Lambda,f)&& ~~\mbox{in}~~\Omega;\\ &u=g&& ~~\mbox{on}~~\partial \Omega, \end{aligned}\right. \end{equation} where $g$ is $C^{1,\mathrm{Dini}}$ at $0$ with a Dini function $\omega_g$ and $f\in L^{n}(\Omega)$ satisfies \begin{equation}\label{e-dini-f}
\int_{0}^{R}\frac{\omega_f(r)}{r}dr:= \int_{0}^{R}\frac{\|f\|_{L^{n}(\Omega\cap B_r)}}{r\|f\|_{L^{n}(\Omega)}}dr<\infty. \end{equation}
Then $u$ is $C^{0,1}$ at $0$ and \begin{equation*}
|u(x)-u(0)|\leq C |x|\left(\|u\|_{L^{\infty }(\Omega)}+\|f\|_{L^{n}(\Omega)}+[g]_{C^{1,\mathrm{Dini}}(0)}\right), ~~\forall ~x\in \Omega\cap B_{R}, \end{equation*} where $C$ depends only on $n, \lambda, \Lambda, K,\omega_{\Omega},\omega_{f},\omega_{g}$ and $R$. \end{theorem}
For the Hopf lemma, we have \begin{theorem}[\textbf{Hopf lemma}]\label{t-H-2} Suppose that $\Omega$ satisfies the interior Reifenberg $C^{1,\mathrm{Dini}}$ condition at $0\in \partial \Omega$ for some Dini function $\omega_{\Omega}(r)$ and $R>0$. Let $u\in C(\bar{\Omega})$ satisfy \begin{equation} M^{-}(D^2u,\lambda,\Lambda)\leq 0 ~~\mbox{in}~~\Omega~~ (\mathrm{i.e.,}~~ u\in \bar{S}(\lambda,\Lambda,0)) \end{equation} with $u(0)=0$ and $u\geq 0$ in $\Omega$.
Then there exists a unit vector, say $e_n$, such that for any $l\in R^n$ with $|l|=1$ and $l\cdot {e_n}>0$, \begin{equation}\label{e-H-main}
u(tl)\geq cu\left(\frac{e_n}{2}\cdot R \right)t, ~~\forall~ 0<t<\delta, \end{equation} where $c>0$ and $\delta>0$ depend only on $n, \lambda, \Lambda,K,\omega_{\Omega},R$ and $l$. \end{theorem}
\section{Proofs of the main results} In this section, we give the detailed proofs of the main results.
Now, we clarify the idea briefly. Firstly, note that if $\Omega$ satisfies the Reifenberg ${C^{1,\mathrm{Dini}}}$ condition from the exterior (or the interior) at $0\in \partial\Omega$, the normal vectors in different scales converge. In addition, the difference of unit normal vectors in different scales is controlled by the Dini function. Next, we use solutions with flat boundaries (i.e., $v$ in the proofs) to approximate the solution $u$. Then the error between $u$ and $v$ (i.e., $w$ in the proofs) can be estimated by maximum principles. By an iteration argument, the boundary regularity for $u$ is obtained. For the boundary Lipschitz regularity, the right-hand function $f$, the boundary value $g$ and the curved boundary $\partial \Omega$ are regarded as perturbations of $0$, $0$ and a hyperplane (see the definition of $v$ in the proof), which is inspired directly by \cite{MR3780142}. For the Hopf lemma, since the solution is nonnegative and the equation has zero right-hand side, it is easier to prove.
First, we prove the following simple lemma. \begin{lemma}\label{t-1}
Suppose that $\left| n_{\Gamma _r}-n_{\Gamma _{\theta r}} \right| \leq {K(\theta) }\omega_{\Omega}(r)$ for each $0 < \theta < 1$ and $0<r<R$. Then there exists a unique unit vector $n_0$ such that \begin{equation} \mathop {\lim }\limits_{r \to 0 } {{n_{{\Gamma _r}}}} = {n_0} . \end{equation} \end{lemma} \proof For any $0<\eta<1$ and $l,m\in N^+$, we have \begin{equation*}
| {{n_{m+l}} - {n_{m}}}| \leq {K(\eta) }\sum_{i=m}^{m+l-1}\omega_{\Omega}(\eta^i) , \end{equation*} where $n_k$ denotes ${n_{{\Gamma _{{\eta ^k}}}}}$ for convenience. Since $\omega_{\Omega}\left( r \right)$ is a Dini function, there exists a unit vector $n_0$ satisfying \begin{equation*} \mathop {\lim }\limits_{k \to \infty } {{n_{k}} = n_0}. \end{equation*}
Now, for any $\varepsilon>0$, there exists $k_0\geq 0$ such that $|n_k-n_0|\leq \varepsilon/2$ for any $k\geq k_0$ and $\sup_{\theta\in[\eta,1]}K(\theta)\cdot \omega_{\Omega}(\eta^{k_0})\leq \varepsilon/2$. Then for any $0<r<R\eta^{k_0}$, there exists $k\geq k_0$ such that $R\eta^{k+1}\leq r<R\eta^{k}$. Then for some $\eta\leq \theta<1$, \begin{equation*}
|n_{\Gamma_r}-n_0|\leq |n_{\Gamma_r}-n_k+n_k-n_0|\leq K(\theta)\omega_{\Omega}(\eta^{k})+\varepsilon/2\leq \varepsilon. \end{equation*} Therefore, $\lim_{r \to 0}n_{\Gamma_r} = n_0$.~\qed ~\\
\begin{remark}\label{r-3} Without loss of generality, we always assume that $n_0=e_n$ throughout this paper. \end{remark}
Next, we introduce the following lemma, which concerns the boundary $C^{1,\alpha}$ regularity for solutions with flat boundaries. It was first proved by Krylov \cite{MR688919} and further simplified by Caffarelli (see \cite[Theorem 9.31]{MR1814364} and \cite[Theorem 4.28]{MR787227}). We will use the solutions in this lemma to approximate the solutions in \Cref{t-2} and \Cref{t-H-2}. \begin{lemma}\label{l-1} Let $u$ satisfy \begin{equation} \left\{\begin{aligned} &u\in S(\lambda,\Lambda,0)&& ~~\mbox{in}~~B_1^+;\\ &u=0&& ~~\mbox{on}~~T_1. \end{aligned}\right. \end{equation}
Then $u$ is $C^{1,\alpha}$ at $0$ and \begin{equation*}
|u(x)-u(0)-ax_n|\leq C |x|^{1+\alpha}\|u\|_{L^{\infty }(B_1^+)}, ~~\forall ~x\in B_{1/2}^+ \end{equation*} with \begin{equation*}
|a|\leq C\|u\|_{L^{\infty }(B_1^+)}, \end{equation*} where $\alpha$ and $C$ depend only on $n, \lambda$ and $\Lambda$. \end{lemma}
Now, we give the
\noindent\textbf{Proof of \Cref{t-2}.} Let $\omega(r)=\max \left\{\omega_{\Omega}(r),\omega_g(r),\omega_f(r)\right\}$. From the Dini condition, there exists $r_1>0$ such that for any $0<r\leq r_1$, \begin{equation}\label{e-H-dini-2}
\omega(r)\leq c_0 ~~\mbox{and}~~ \int_{0}^{r}\frac{\omega(s)}{s}ds\leq c_0, \end{equation} where $c_0\leq 1/4$ is a small constant to be specified later and depends only on $n,\lambda,\Lambda$ and $K$. By a proper scaling, we assume that $r_1=1$. Furthermore, we assume that $u(0)=g(0)=0$ and $\nabla g(0)=0$. Otherwise, we may consider $v:=u-g(0)-\nabla g(0)\cdot x$, which satisfies the same equation.
Let $M=\|u\|_{L^{\infty }(\Omega)}+\|f\|_{L^{n}(\Omega)}+[g]_{C^{1,\mathrm{Dini}}(0)}$ and $\Omega _{r}=\Omega \cap B_{r}$. To prove that $u$ is $C^{0,1}$ at $0$, we only need to prove the following:
There exist constants $0<\alpha_{0}, \eta < 1,\bar{C}$ (depending only on $n$, $\lambda$, $\Lambda$), $\hat{C}$ (depending only on $n,\lambda,\Lambda$ and $K$) and a nonnegative sequence $\{a_k\}$ ($k\geq -1$) such that for all $k\geq 0$ \begin{equation}\label{e1.16} \sup_{\Omega _{\eta^k}}(u-a_k{n_{k}} \cdot x)\leq \hat{C} M \eta ^{k}A_k \end{equation} and \begin{equation}\label{e1.17}
|a_k-a_{k-1}|\leq \bar{C}\hat{C}MA_k, \end{equation} where \begin{equation}\label{e-Ak} n_k=n_{\Gamma_{\eta^k}},A_0=c_0, A_k=\max(\omega(\eta^{k}),\eta^{\alpha_0} A_{k-1}) (k\geq 1). \end{equation}
Indeed, from \cref{e-Ak}, we have for any $k\geq 1$, \begin{equation*}
A_k\leq \omega(\eta^k)+\eta^{\alpha_0}A_{k-1}. \end{equation*} Hence, \begin{equation*}
\sum_{i=0}^{k} A_i\leq \sum_{i=1}^{k}\omega(\eta^i)+\eta^{\alpha_0}\sum_{i=0}^{k} A_i+c_0, \end{equation*} which indicates \begin{equation}\label{e.n1} \begin{aligned} \sum_{i=0}^{k} A_i\leq \frac{1}{1-\eta^{\alpha_0}}\left(\sum_{i=1}^{\infty}\omega(\eta^i)+c_0\right)\leq C \end{aligned} \end{equation} for some constant $C$ independent of $k$. That is, $\sum_{i=0}^{\infty} A_i$ converges. Thus, $a_k$ converges to some constant $a$.
Then for any $r>0$, there exists $k\geq 0$ such that $\eta^{k+1}<r\leq \eta^{k}$. From \cref{e1.16} and \cref{e1.17}, we have \begin{equation*} \sup_{\Omega _{r}}u\leq \sup_{\Omega _{\eta^k}}u\leq \hat{C} M \eta ^{k}A_k+CM\eta^k\leq CM\eta^k\leq CMr, \end{equation*} where $C$ depends only on $n,\lambda,\Lambda$ and $K$. In addition, \begin{equation*} \inf_{\Omega _{r}}u\geq -C M r \end{equation*} can be proved similarly. Therefore, \begin{equation*}
\|u\|_{L^{\infty}(\Omega_{r})}\leq CMr. \end{equation*} That is, $u$ is $C^{0, 1}$ at $0$.
Now, we prove \cref{e1.16} and \cref{e1.17} by induction. For $k=0$, by setting $a_{-1}=a_0=0$, they hold clearly provided \begin{equation}\label{e.21} \hat{C}c_0\geq 1. \end{equation} Suppose that they hold for $k$. We need to prove that they hold for $k+1$.
For convenience, we use the following notations. Let $r=\eta ^{k}$, $B_{{\Gamma _r}}^ + =B_r\cap \left\{x|x\cdot{n_{{\Gamma _r}}}>0\right\}$ and ${T_{{\Gamma _r}}} = {B_r} \cap \left\{ {x\left| {x \cdot {n_{{\Gamma _r}}} = 0} \right.} \right\}$ where ${{\Gamma _r}}$ denotes a hyperplane depending only on $r$ and ${n_{{\Gamma _r}}}$ is the unit normal vector of ${{\Gamma _r}}$. We may also denote $n_{\Gamma _{\eta^k}}(n_{\Gamma _r})$ by $n_k$.
Since $\partial\Omega$ satisfies the exterior Reifenberg ${C^{1,\mathrm{Dini}}}$ condition at $0$, there exist a hyperplane ${\Gamma _r}$ and its unit normal vector $n_{\Gamma _r}$ such that $B_r\cap\Omega\subset B_r\cap\{x\cdot {n_{\Gamma _r}}>-r\omega(r)\}$. Let $\tilde B_{{\Gamma _r}}^ + = B_{{\Gamma _r}}^ + -K(\eta) r\omega \left( r \right) n_{\Gamma _r},$ $\tilde T_{{\Gamma _r}}^ += T_{{\Gamma _r}}^ + - K(\eta)r\omega \left( r \right) n_{\Gamma _r}$ and $\tilde{\Omega }_{r}=\Omega \cap \tilde B_{{\Gamma _r}}^ + $. Take \begin{equation}\label{e.K} c_0K(\eta)<\frac{1}{4}. \end{equation} Then $\omega(r) \leq \omega(1)\leq c_0\leq 1/(4K(\eta))$ and $\Omega _{\eta r}\subset\tilde{\Omega }_{r}$.
Let $v$ solve \begin{equation*} \left\{\begin{aligned}
&M^{+}(D^2v,\lambda,\Lambda)=0 &&\mbox{in}~~\tilde{B}^{+}_{{\Gamma _r}}; \\
&v=0 &&\mbox{on}~~\tilde{T}_{{\Gamma _r}};\\
&v=\hat{C} M \eta ^{k}A_k &&\mbox{on}~~\partial \tilde{B}^{+}_{{\Gamma _r}}\backslash \tilde{T}_{{\Gamma _r}}. \end{aligned} \right. \end{equation*} Let $w=u-a_k{n_{k}} \cdot x-v$. Then $w$ satisfies (note that $v\geq 0$ in $\tilde{B}^{+}_{{\Gamma _r}}$) \begin{equation*}
\left\{
\begin{aligned}
&w\in \underline{S}(\lambda /n,\Lambda , f) &&\mbox{in}~~ \Omega \cap \tilde{B}^{+}_{{\Gamma _r}}; \\
&w\leq g-a_k {n_{k}} \cdot x &&\mbox{on}~~\partial \Omega \cap \tilde{B}^{+}_{{\Gamma _r}};\\
&w\leq 0 &&\mbox{on}~~\partial \tilde{B}^{+}_{{\Gamma _r}}\cap \bar{\Omega}.
\end{aligned}
\right. \end{equation*}
In the following arguments, we estimate $v$ and $w$ respectively. By the boundary $C^{1,\alpha}$ estimate for $v$ (see \Cref{l-1}) and the maximum principle, there exist $0<\alpha<1$ (depending only on $n,\lambda$ and $\Lambda$) and $\bar{a}\geq 0$ such that \begin{equation*} \begin{aligned}
\|v-\bar{a}({n_{k}}\cdot x+K(\eta)r\omega \left( r \right){n_{k}})\|_{L^{\infty }(\Omega _{\eta r})}&\leq C_1\frac{(\eta r)^{1+ \alpha}}{r^{1+ \alpha}}\|v\|_{L^{\infty }( \tilde{B}^{+}_{\Gamma_r})}\\
&\leq C_1\eta ^{\alpha-\alpha_0 }\cdot \hat{C}M\eta ^{(k+1)}\eta^{\alpha_0}A_k\\
&\leq C_1\eta ^{\alpha-\alpha_0 }\cdot \hat{C}M\eta ^{(k+1)}A_{k+1} \end{aligned} \end{equation*} and \begin{equation}\label{e.19} 0\leq\bar{a}\leq C_2\hat{C}MA_k, \end{equation} where $C_1$ and $C_2$ depend only on $n,\lambda$ and $\Lambda$. Take $\alpha_0=\alpha/2$ and then \begin{equation}\label{e.20} \begin{aligned}
\|v-\bar{a} n_{k}\cdot x\|_{L^{\infty }(\Omega _{\eta^{k+1}})}=&\|v-\bar{a} n_{k}\cdot x\|_{L^{\infty }(\Omega _{\eta r})}\\ \leq & C_1\eta ^{\alpha_0 }\cdot \hat{C}M\eta ^{(k+1)}A_{k+1}+\bar{a}K(\eta)r\omega(r)\\ \leq &\left( C_1\eta ^{\alpha_{0} }+\frac{C_2K(\eta)\omega(\eta^{k})}{\eta^{1+\alpha_{0}}}\right)\cdot \hat{C}M\eta ^{(k+1)}A_{k+1}\\ \leq &\left( C_1\eta ^{\alpha_{0} }+\frac{C_2K(\eta)c_0}{\eta^{1+\alpha_{0}}}\right)\cdot \hat{C}M\eta ^{(k+1)}A_{k+1}. \end{aligned} \end{equation}
For $w$, by the Alexandrov-Bakel'man-Pucci maximum principle, we have \begin{equation*}
\begin{aligned}
\sup_{\Omega_{\eta^{k+1}}}w\leq \sup_{\tilde{\Omega} _{r}}w& \leq\|g\|_{L^{\infty }(\partial \Omega \cap \tilde{B}^{+}_{\Gamma _{ r}})}+\sup_{\partial \Omega \cap \tilde{B}^{+}_{\Gamma _r}}(-a_k n_{k} \cdot x)+C_3r\|f\|_{L^n(\tilde{\Omega}_{r})}\\
&\leq Mr\omega_g(r) +\sum_{i=0}^{k}|a_i-a_{i-1}|\eta^k\omega(\eta^k)+C_3r\|f\|_{L^{n}(\Omega)}\omega_f(r)\\
&\leq M \eta^k \omega(\eta^k)+\bar{C}\hat{C}M\sum_{i=0}^{k}A_i\eta^k\omega( \eta^k)+C_3M\eta^k \omega(\eta^k),
\end{aligned} \end{equation*} where $C_3$ depends only on $n,\lambda$ and $\Lambda$.
From \cref{e.n1}, we have \begin{equation}\label{e.22} \begin{aligned}
\sum_{i=0}^{\infty} A_i&\leq \frac{1}{1-\eta^{\alpha_0}}\sum_{i=1}^{\infty}\omega(\eta^i)+c_0\\ &=\frac{1}{1-\eta^{\alpha_0}}\sum_{i=1}^{\infty}\frac{\omega(\eta^i) \left(\eta^{i-1}-\eta^i\right)}{\eta^{i-1}-\eta^i}+c_0\\ &=\frac{1}{\left(1-\eta^{\alpha_0}\right)\left(1-\eta\right)}\sum_{i=1}^{\infty} \frac{\omega(\eta^i)\left(\eta^{i-1}-\eta^i\right)}{\eta^{i-1}}+c_0\\ &\leq \frac{1}{\left(1-\eta^{\alpha_0}\right)\left(1-\eta\right)}\int_{0}^{1} \frac{\omega(r)dr}{r}+c_0\\ &\leq \frac{c_0}{\left(1-\eta^{\alpha_0}\right)\left(1-\eta\right)}+c_0\leq 3c_0, \end{aligned} \end{equation} provided \begin{equation}\label{e-w} \left(1-\eta^{\alpha_0}\right)\left(1-\eta\right)\geq 1/2. \end{equation} From the definition of $A_k$ again, \begin{equation*}
\omega(\eta^{k})\leq A_k\leq \frac{A_{k+1}}{\eta^{\alpha_0}}. \end{equation*} Hence, \begin{equation}\label{e1.22}
\begin{aligned} \sup_{\Omega_{k+1}}w &\leq \frac{1}{\eta^{1+\alpha_0}} M\eta^{k+1}A_{k+1}+\frac{3c_0\bar{C}}{\eta^{1+\alpha_0}}\hat{C}M\eta^{k+1}A_{k+1}+\frac{C_3 }{\eta^{1+\alpha_0}} M\eta^{k+1}A_{k+1}\\ &\leq \frac{C_3+1}{\eta^{1+\alpha_0}} M\eta^{k+1}A_{k+1}+\frac{3c_0\bar{C}}{\eta^{1+\alpha_0}}\hat{C}M\eta^{k+1}A_{k+1}\\ &\leq\left(\frac{C_3+1}{\hat{C}\eta^{1+\alpha_0}}+\frac{3c_0\bar{C}}{\eta^{1+\alpha_0}}\right) \hat{C}M\eta^{k+1}A_{k+1}. \end{aligned} \end{equation} Since \begin{equation*}
\begin{aligned}
\left| n_{k} - n_{k+1}\right| \leq {K(\eta) }\omega \left( {{\eta ^k}} \right), \end{aligned} \end{equation*} we have for $x\in B_{\eta^{k+1}}$, \begin{equation}\label{e-H-1.25}
\begin{aligned}
\left| {{a_{k + 1}}\left(n_k-n_{k+1} \right) \cdot x} \right| &\leq {a_{k + 1}}{K(\eta) }\omega ( \eta ^k){\eta ^{k + 1}}\\
&\leq \frac{{{a_{k + 1}}{K(\eta) }}}{{\hat CM{\eta ^{{\alpha _0}}}}}\hat C M{\eta ^{\left( {k + 1} \right)}}{A_{k + 1}}\\
&\leq \frac{3c_0 K(\eta)\bar{C}}{\eta^{\alpha_0}}\hat C M{\eta ^{\left( {k + 1} \right)}}{A_{k + 1}}. \end{aligned} \end{equation}
Let $\bar{C}=C_2/{\eta ^{{\alpha _0}}}$. Take $\eta $ small enough such that \cref{e-w} holds and \begin{equation*}
C_1\eta ^{\alpha_0 }\leq \frac{1}{6}. \end{equation*} Take $c_0$ small enough such that \begin{equation*} \frac{C_2K(\eta)c_0}{\eta^{1+\alpha_0}}\leq\frac{1}{6}, \frac{3c_0\bar{C}}{\eta^{1+\alpha_0}}\leq\frac{1}{6}~\mbox{and}~ \frac{3c_0 K(\eta)\bar{C}}{\eta^{\alpha_0}}\leq \frac{1}{3}. \end{equation*} Finally, take $\hat{C}$ large enough such that \cref{e.21} holds and \begin{equation*}
\frac{C_3+1}{\hat{C} \eta ^{1+\alpha_0}}\leq \frac{1}{6}. \end{equation*}
Let $a_{k+1}=a_k+\bar{a}$. Then combining \cref{e.20}, \cref{e1.22} and \cref{e-H-1.25}, we have for $x\in \Omega_{\eta^{k+1}}$, \[\begin{array}{l} \quad u - {a_{k + 1}}{n_{k + 1}} \cdot x = u - {a_{k + 1}}{n_{k}} \cdot x + {a_{k + 1}}{n_{k}} \cdot x - {a_{k + 1}}{n_{k + 1}} \cdot x\\
= u - \left( {{a_k} + \bar a} \right){n_{k}} \cdot x + {a_{k + 1}}\left( {{n_{k}} - {n_{k + 1}}} \right) \cdot x\\
= u - {a_k}{n_{k}} \cdot x - v + v - \bar a{n_k} \cdot x + {a_{k + 1}}\left( {{n_{k}} - {n_{k + 1}}} \right) \cdot x\\
\le \hat CM{\eta ^{\left( {k + 1} \right)}}{A_{k + 1}}. \end{array}\] By induction, the proof is completed. \qed~\\
The proof of the Hopf lemma is similar to that of the boundary Lipschitz regularity. Here, we focus on the curved boundary toward the interior of the domain. We need the following lemma, which can be easily proved by constructing a proper barrier. \begin{lemma}\label{le-H-1} Let $u\geq 0$ satisfy \begin{equation*}
\left\{\begin{aligned}
M^{-}(D^2u,\lambda,\Lambda)&=0~~\mbox{in}~~Q_1^+;\\
u&=0~~\mbox{on}~~T_1;\\
u&\geq 1~~\mbox{on}~~T_1+e_n. \end{aligned}\right. \end{equation*} Then \begin{equation*}\label{e-H-l} u(x)\geq c_1x_n ~~\mbox{in}~~B_{\delta_1}^+, \end{equation*} where $\delta_1>0$ and $c_1>0$ depend only on $n,\lambda$ and $\Lambda$. \end{lemma}
Now, we give the~\\ \noindent\textbf{Proof of \Cref{t-H-2}.} As before, from the Dini condition, there exists $0<r_1<R$ such that for any $0<r\leq r_1$, \begin{equation}\label{e-H-dini-4}
\omega(r)\leq c_0 ~~\mbox{and}~~ \int_{0}^{r}\frac{\omega(s)}{s}ds\leq c_0, \end{equation} where $c_0\leq 1/4$ is a small constant to be specified later and depends only on $n,\lambda,\Lambda$ and $K$. Moreover, since $n_{\Gamma_r}$ converges as $r\to 0$ (see \Cref{t-1}), we assume that $n_{\Gamma_r} \to e_n$ without loss of generality. Then there exists $0<r_2<R$ such that for any $0<r\leq r_2$, \begin{equation}\label{e-H-dini-3}
\left| {{n_{\Gamma_r}} - {e_n}} \right| \leq c_0. \end{equation} By a proper scaling, we assume that $\min\{r_1,r_2\}=1$ and $u(e_n/2)=1$. Let $\Omega _r^ + = \Omega \cap B_{{\Gamma _r}}^ +$. To prove \cref{e-H-main}, we only need to prove the following:
There exist constants $0<\alpha_{0}, \eta < 1$, $\bar{C}$ and $\tilde{a}>0$ (depending only on $n,\lambda$ and $\Lambda$), $\hat{C}$ (depending only on $n,\lambda,\Lambda$ and $K$) and a nonnegative sequence $\{a_k\}$ such that for $k\geq0$, \begin{equation}\label{e-H1.16} \inf_{\Omega _{{\eta ^{k + 1}}}^ + }(u-\tilde{a}{n_{k}}\cdot x+a_k{n_{k}}\cdot x)\geq -\hat{C} \eta ^{k}A_k, \end{equation} \begin{equation}\label{e-H1.17}
|a_k-a_{k-1}|\leq \bar{C}\hat{C}A_k \end{equation} and \begin{equation}\label{e-H-atilde} a_k\leq \frac{\tilde{a}}{2}, \end{equation} where $A_k$ and $n_k$ are defined as before.
Indeed, for any unit vector $l\in R^n$ with $l\cdot {e_n}=\tau>0$, there exists $k_0\geq1$ such that $\omega(\eta^{k})\leq \tau/4$ and $l\cdot n_k\geq \tau/2$ for any $k\geq k_0$. Then $tl\in \Omega$ for any $0<t<1$. Note that $A_k\to 0$ as $k\to \infty$ and then there exists $k_1\geq k_0$ such that \begin{equation*}
A_k\cdot \frac{\hat{C}}{ l\cdot{n_{k}}\eta^2}\leq \frac{\tilde{a}}{4}. \end{equation*} Take $\delta=\eta^{k_1}$. For $0<t<\delta$, there exists $k\geq k_1$ such that \begin{equation*}
\eta^{k+2}\leq t \leq \eta^{k+1}. \end{equation*} Then by \cref{e-H1.16}, \begin{equation*} \begin{aligned} u(tl)&\geq \tilde{a}({n_{k}}\cdot l)t-a_k({n_{k}}\cdot l)t-\hat{C}\eta^kA_k\\ &\geq \frac{\tilde{a}({n_{k}}\cdot l)t}{2}-\frac{\hat{C}A_kt}{\eta^2}\\ &\geq \frac{\tilde{a}({n_{k}}\cdot l)t}{4}\\ &\geq \frac{\tilde{a}\tau t}{4}. \end{aligned} \end{equation*} That is, \cref{e-H-main} holds.
Now, we prove \crefrange{e-H1.16}{e-H-atilde} by induction. Let $\tilde{Q}=Q^{+}_{1/2}+c_0{e_n},\tilde{T}=\partial \tilde{Q}\cap \left\{x_n=1/4+c_0\right\}$. That is, $\tilde{T}$ is the top boundary of $\tilde{Q}$. Note that $c_0\leq 1/4$. Thus, $\tilde{Q}\subset \Omega\cap B^+_{1}$.
By the Harnack inequality, \begin{equation*}
\inf_{\tilde{T}} u\geq \tilde{c} u({e_n}/2)=\tilde{c}, \end{equation*} where $\tilde{c}$ depends only on $n,\lambda$ and $\Lambda$. Let $\tilde{u}$ solve \begin{equation*}
\left\{\begin{aligned} &M^{-}(D^2\tilde{u},\lambda,\Lambda)= 0 &&\mbox{in}~~\tilde{Q}; \\ &\tilde{u}=\tilde{c}&&\mbox{on}~~\tilde{T};\\ &\tilde{u}=0 &&\mbox{on}~~\partial \tilde{Q}\backslash \tilde{T}.
\end{aligned}\right. \end{equation*} From \Cref{le-H-1}, there exist $\delta_1>0$ and $0<c_2<1/2$ such that \begin{equation*} u(x)\geq \tilde{u}(x) \geq c_2(x_n-c_0) ~~\mbox{in}~~B^+_{\delta_1}+c_0{e_n}. \end{equation*} Note that $u\geq 0$ and hence, \begin{equation*}
u(x)\geq c_2(x_n-c_0) ~~\mbox{in}~~\Omega\cap B_{\delta_1}. \end{equation*} Therefore, by noting \cref{e-H-dini-3}, \begin{equation*}
u(x)\geq c_2x\cdot n_0-2c_2c_0\geq c_2x\cdot n_0-c_0 ~~\mbox{in}~~\Omega\cap B_{\delta_1}. \end{equation*}
Set $\tilde{a}=c_2$ and $a_{-1}=a_0=0$. Take \begin{equation}\label{e-H-1} \eta\leq \delta_1. \end{equation} Then \crefrange{e-H1.16}{e-H-atilde} hold for $k=0$. Suppose that they hold for $k$. We need to prove that they hold for $k+1$.
Let $r=\eta ^{k+1},\tilde{B}^+=B^{+}_{\Gamma _r}-K(\eta)r\omega(r)n_{\Gamma_r}$ and $\tilde{T}=T_{\Gamma _r}-K(\eta)r\omega(r)n_{\Gamma_r}$. Then $B^{+}_{\Gamma _{\eta r}}\subset \tilde{B}^+$. Let $v$ solve \begin{equation*} \left\{\begin{aligned}
&M^{-}(D^2v,\lambda,\Lambda)=0 &&\mbox{in}~~\tilde{B}^+; \\
&v=0 &&\mbox{on}~~\tilde{T};\\
&v=-\hat{C}\eta ^{k}A_k &&\mbox{on}~~\partial \tilde{B}^{+}\backslash \tilde{T}. \end{aligned} \right. \end{equation*} Let $w=u-\tilde{a}{n_{k}}\cdot x+a_k{n_{k}}\cdot x-v$. Then $w$ satisfies (note that $u\geq 0$ and $v\leq 0$) \begin{equation*}
\left\{
\begin{aligned}
&M^{-}(D^2w,\lambda/n,\Lambda)\leq 0 &&\mbox{in}~~ \Omega\cap \tilde{B}^{+}; \\
&w\geq -\tilde{a}{n_{k}}\cdot x+a_k{n_{k}}\cdot x&&\mbox{on}~~\partial \Omega \cap \tilde{B}^{+};\\
&w\geq 0 &&\mbox{on}~~\partial \tilde{B}^{+}\cap \bar{\Omega}.
\end{aligned}
\right. \end{equation*}
In the following arguments, we estimate $v$ and $w$ respectively. By the boundary $C^{1,\alpha}$ estimate for $v$ (see \Cref{l-1}) and the maximum principle, there exist $0<\alpha<1$ (depending only on $n,\lambda$ and $\Lambda$) and $\bar{a}\geq 0$ such that (note that $A_k\leq A_{k+1}/\eta^{\alpha_0}$) \begin{equation*} \begin{aligned}
\|v&+\bar{a}{n_{k+1}}\cdot (x-K(\eta)r\omega(r)n_{k+1})\|_{L^{\infty }(B^+ _{\Gamma _{\eta r}})}\leq C_1 \eta ^{1+ \alpha}\|v\|_{L^{\infty }(\tilde{B}^{+})}\\
&\leq C_1\eta ^{\alpha}\cdot \hat{C}\eta ^{(k+1)}A_k
\leq C_1\eta ^{\alpha-\alpha_0 }\cdot \hat{C}\eta ^{(k+1)}A_{k+1} \end{aligned} \end{equation*} and \begin{equation}\label{e.H-19} \bar{a}\leq C_2\hat{C}A_k/\eta, \end{equation} where $C_1$ and $C_2$ depend only on $n,\lambda$ and $\Lambda$. Take $\alpha_0=\alpha/2$. Then \begin{equation}\label{e.H-20} \begin{aligned}
\|v+\bar{a}{n_{k+1}}\cdot x\|_{L^{\infty }(\Omega^+_{k+2})}&\leq\|v+\bar{a}{n_{k+1}}\cdot x\|_{L^{\infty }(B^+_{\Gamma _{\eta r}})}\\ &\leq \left(C_1\eta ^{\alpha_0 }+\frac{C_2A_kK(\eta)}{\eta}\right)\cdot \hat{C}\eta ^{(k+1)}A_{k+1}\\ &\leq \left(C_1\eta ^{\alpha_0 }+\frac{C_2c_0K(\eta)}{\eta}\right)\cdot \hat{C}\eta ^{(k+1)}A_{k+1} \end{aligned} \end{equation}
For $w$, by the maximum principle, we have \begin{equation*}
\begin{aligned} \inf_{\Omega^+_{k+2}}w&\geq \inf_{\Omega\cap\tilde{B}^+}w \geq \inf_{\partial \Omega \cap \tilde{B}^{+}}\left(-\tilde{a}{n_{k}}\cdot x+a_k n_{k}\cdot x\right)\\
&\geq \inf_{\partial \Omega \cap \tilde{B}^{+}}\left(-\tilde{a}|n_{k}-n_{k+1}|\cdot|x|-\tilde{a}(n_{k+1}\cdot x)\right) \\ &\geq -\tilde{a}K(\eta)\eta^{k + 1}\omega(\eta^k)-\tilde{a}{\eta ^{k + 1}}\omega(\eta ^{k + 1}).
\end{aligned} \end{equation*} As before, from the definition of $A_k$ (see \cref{e-Ak}), \begin{equation*}
\omega(\eta^{k+1})\leq A_{k+1}~\mbox{and}~\omega(\eta^{k})\leq A_{k}\leq \frac{A_{k+1}}{\eta^{\alpha_0}}. \end{equation*} Hence, \begin{equation}\label{e-H-1.21}
\begin{aligned} \inf_{\Omega^+_{k+2}}w &\geq -\frac{K(\eta)+1}{\hat{C}\eta^{\alpha_0}}\cdot \hat{C}\eta^{k+1}A_{k+1}. \end{aligned} \end{equation}
Since $\left| {{n_{k}} - {n_{k + 1}}} \right| \le {K(\eta) }\omega (\eta ^k)$, for $x\in B_{\eta^{k+2}}$, \begin{equation}\label{e-H-1.22}
\begin{aligned}
\left| {\tilde a\left( {{n_{k}} - {n_{k + 1}}} \right) \cdot x} \right|
\leq \tilde a{K(\eta) }\omega \left( {{\eta ^k}} \right)\eta^{k+2}
\leq \frac{K(\eta) }{\hat C}\hat C{\eta ^{\left( {k + 1} \right)}}{A_{k + 1}}. \end{aligned} \end{equation}
Take $\eta $ small enough such that \cref{e-H-1} holds, \begin{equation*}
C_1\eta ^{\alpha_0 }\leq 1/6 \end{equation*} and \begin{equation}\label{e-H-2} \left(1-\eta^{\alpha_0}\right)\left(1-\eta\right)\geq 1/2. \end{equation}
Let $\bar{C}=C_2/{{\eta ^{1 + {\alpha _0}}}}$. Take $\hat{C}$ large enough such that \begin{equation*} \frac{K(\eta)+1}{\hat C\eta ^{\alpha _0}} \leq \frac{1}{3}. \end{equation*} As before, by noting \cref{e.22} and \cref{e-H-2}, we have \begin{equation*}
\sum_{i=0}^{\infty} A_i\leq 3c_0. \end{equation*} Finally, take $c_0$ small enough such that \begin{equation*} 3c_0 \bar{C}\hat{C}\leq\frac{\tilde{a}}{2}~\mbox{and}~\frac{C_2c_0K(\eta)}{\eta} \leq \frac{1}{6}. \end{equation*}
Let $a_{k+1}=a_k+\bar{a}$. Then combining \crefrange{e.H-20}{e-H-1.22}, we have for $x\in \Omega^+_{k+2}$ \begin{equation*}
\begin{aligned} u &-\tilde a{n_{k+1}} \cdot x + {a_{k + 1}}{n_{k+1}} \cdot x\\ &=u-\tilde a n_{k} \cdot x + a_{k} n_{k} \cdot x-\tilde a(n_{k+1}-n_{k}) \cdot x + a_k(n_{k+1}-n_{k}) \cdot x+ \bar{a}n_{k+1} \cdot x\\ &= u - \tilde a{n_{k}} \cdot x + {a_k}{n_{k}} \cdot x - v + \left( {v + \bar{a}n_{k+1} \cdot x} \right) + (a_k-\tilde a)\left( {{n_{k}} - {n_{k+1}}} \right) \cdot x\\ &= w + \left( {v + \bar{a}n_{k+1} \cdot x} \right)+ (a_k-\tilde a)\left( {{n_{k}} - {n_{k+1}}} \right) \cdot x\\ &\geq - \hat C{\eta ^{\left( {k + 1} \right)}}{A_{k + 1}}
\end{aligned} \end{equation*} and \begin{equation*}
a_{k+1}\leq \sum_{i=0}^{k+1}|a_i-a_{i-1}|\leq\bar{C}\hat C\sum\limits_{i = 0}^\infty {{A_i}}\leq 3c_0 \bar{C}\hat{C}\leq \frac{\tilde{a}}{2}. \end{equation*} By induction, the proof is completed. \qed~\\
\end{document} |
\begin{document}
\begin{abstract}
We survey the development of the notion of Lazarsfeld--Mukai bundles together with various applications, from the classification of Mukai manifolds to Brill--Noether theory and syzygies of $K3$ sections. To see these techniques at work, we present a short proof of a result of M. Reid on the existence of elliptic pencils. \end{abstract} \maketitle
\section*{Introduction}
Lazarsfeld--Mukai bundles appeared naturally in connection with two completely different important problems in algebraic geometry from the 1980s. The first problem, solved by Lazarsfeld, was to find explicit examples of smooth curves which are generic in the sense of Brill--Noether--Petri \cite{LazarsfeldJDG}. The second problem was the classification of prime Fano manifolds of coindex 3 \cite{MukaiPNAS}. More recently, Lazarsfeld--Mukai bundles have found applications to syzygies and higher-rank Brill--Noether theory.
The common feature of all these research topics is the central role played by $K3$ surfaces and their hyperplane sections. For the Brill--Noether--Petri genericity, Lazarsfeld proves that a general curve in a linear system that generates the Picard group of a $K3$ surface satisfies this condition. For the classification of prime Fano manifolds of coindex 3, after having proved the existence of smooth fundamental divisors, one uses the geometry of a two-dimensional linear section which is a very general $K3$ surface.
The idea behind this definition is that the Brill--Noether theory of smooth curves~on a $K3$ surface, also called \textit{$K3$ sections}, is governed by higher-rank vector bundles on the surface. To be more precise, consider $S$ a $K3$ surface (considered always to be smooth, complex, projective), $C$ a smooth curve on $S$ of genus $\ge 2$, and $|A|$ a base-point-free pencil on $C$. If we attempt to lift the linear system
$|A|$ to the surface $S$, in~most cases, we will fail. For instance, $|A|$ cannot lift to a pencil on $S$ if $C$ generates
$\mathrm{Pic}(S)$ or if $S$ does not contain any elliptic curve at all. However, interpreting a general divisor in $|A|$ as a zero-dimensional subscheme of $S$, it is natural to try and find a rank-two bundle $E$ on $S$ and a global section of $E$ whose scheme of zeros coincides with the divisor in question. Varying the divisor, one~should exhibit in fact a two-dimensional space of global sections of $E$. The effective construction of $E$ is realized through elementary modifications, see Sect.\,\ref{section: Def}, and this is precisely a Lazarsfeld--Mukai bundle of rank two. The passage to higher ranks is natural, if we start with a complete, higher-dimensional, base-point-free linear system on $C$. At the end, we obtain vector bundles with unusually high number of global sections, which provide us with a rich geometric environment.
The structure of this chapter is as follows. In the first section, we recall the definition of Lazarsfeld--Mukai bundles and its first properties. We note equivalent conditions for a bundle to be Lazarsfeld--Mukai in Sect.\,\ref{subsection: Definition LM}, and we discuss simplicity in the rank-two case in Sect.\,\ref{subsection: Simple LM}. The relation with the Petri conjecture and the classification of Mukai manifolds, the original motivating problems for the~definition, are considered in Sects.\,\ref{subsection: BNP} and \ref{subsection: Mukai manifolds}, respectively. In Sect.\,\ref{section: Constancy} we treat the problem of constancy of invariants in a given linear system. For small gonalities, Saint-Donat and Reid proved that minimal pencils on $K3$ sections are induced from elliptic pencils on the $K3$ surface; we present a short proof using Lazarsfeld--Mukai bundles in Sect.\,\ref{subsection Gonality I}. Harris and Mumford conjectured that the gonality should always be constant. We discuss the evolution of this conjecture, from Donagi--Morrison's counterexample, Sect.\,\ref{subsection Gonality I}, to Green--Lazarsfeld's reformulation in terms of Clifford index, Sect.\,\ref{subsection: Cliff} and to Ciliberto--Pareschi's results on the subject, Sect.\,\ref{subsection: Gonality II}. The works around this problem emphasized the importance of parameter spaces of Lazarsfeld--Mukai bundles. We conclude the section with a discussion of dimension calculations of these spaces, Sect.\,\ref{subsection: Parameter}, which are applied afterwards to Green's conjecture. Sect.\,\ref{section: Green} is devoted to Koszul cohomology and notably to Green's conjecture for $K3$ sections. After recalling the definition and the motivations that led to the definition, we discuss the statement of Green's conjecture, and we sketch the proof for $K3$ sections. 
Voisin's approach using punctual Hilbert schemes, which is an essential ingredient, is examined in Sect.\,\ref{subsection: Hilbert}. Lazarsfeld--Mukai bundles are fundamental objects in this topic, and their role is outlined in Sect.\,\ref{subsection: Role of LM}. The final step in the solution of Green's conjecture for $K3$ sections is tackled in Sect.\,\ref{subsection: Green for K3}. We conclude this chapter with a short discussion on Farkas--Ortega's new applications of Lazarsfeld--Mukai bundles to Mercat's conjecture (which belongs to the rapidly developing higher-dimensional Brill--Noether theory), Sect.\,\ref{section: Higher BN}.
\noindent{\it Notation.} The additive and the multiplicative notation for divisors and line bundles will be mixed sometimes. If $E$ is a vector bundle on $X$ and $L\in \mbox{Pic}(X)$, we set $E(-L):=E\otimes L^*$; this notation will be used especially when $E$ is replaced by the canonical bundle $K_C$ of a curve $C$.
\section{Definition, Properties, the First Applications} \label{section: Def}
\subsection{Definition and First Properties} \label{subsection: Definition LM}
We fix $S$ a smooth, complex, projective $K3$ surface and $L$ a globally generated line bundle on $S$ with
$L^2=2g-2$. Let $C\in|L|$ be a smooth curve and $A$ be a base-point-free line bundle in $W^r_d(C)\setminus W^{r+1}_d(C)$. As mentioned in the Introduction, the definition of Lazarsfeld--Mukai bundles emerged from the attempt to lift the linear system
$A$ to the surface $S$. Since it is virtually impossible to lift it to another linear system, a higher-rank vector bundle is constructed such that $H^0(C,A)$ corresponds to an $(r+1)$-dimensional space of global sections. Hence $|A|$ lifts to a higher-rank analogue of a linear system.
The kernel of the evaluation of sections of $A$\vspace*{-3pt} \begin{equation} \label{eqn: F}
0\to F_{C,A}\to H^0(C,A)\otimes \mathcal{O}_S \buildrel{\mathrm{ev}}\over{\to} A\to 0\vspace*{-3pt} \end{equation} is a vector bundle of rank $(r+1)$.
\begin{defn}[Lazarsfeld \cite{LazarsfeldJDG}, Mukai \cite{MukaiPNAS}] The \textit{Lazarsfeld--Mukai bundle} $E_{C,A}$ associated to the pair $(C,A)$ is the dual of $F_{C,A}$. \end{defn}
By dualizing the sequence~(\ref{eqn: F}) we obtain the short exact sequence\vspace*{-3pt} \begin{equation} \label{eqn: E}
0\to H^0(C,A)^*\otimes \mathcal{O}_S \to E_{C,A}\to K_C(-A)\to 0,\vspace*{-3pt} \end{equation} and hence $E_{C,A}$ is obtained from the trivial bundle by modifying it along the curve $C$ and comes equipped with a natural $(r+1)$-dimensional space of global sections as planned.
We note here the first properties of $E_{C,A}$:
\begin{prop}[Lazarsfeld] \label{prop: E_{C,A}} The invariants of $E_{C,A}$ are the following: \begin{itemize}
\item[(1)] $\det (E_{C,A})= L$.
\item[(2)] $c_2(E_{C,A})=d$.
\item[(3)] $h^0 (S,E_{C,A})= h^0(C,A)+h^1(C,A)= 2r-d+1+g$.
\item[(4)] $h^1(S,E_{C,A})=h^2(S,E_{C,A})=0$.
\item[(5)] $\chi(S,E_{C,A}\otimes F_{C,A})=2(1-\rho(g,r,d))$.
where $\rho(g,r,d)=g-(r+1)(g-d+r)$.
\item[(6)] $E_{C,A}$ is globally generated off the base locus of $K_C(-A)$;
in particular, $E_{C,A}$ is globally generated if $K_C(-A)$
is globally generated. \end{itemize} \end{prop} It is natural to ask conversely if given $E$ a vector bundle on $S$ with $\mbox{rk}(E)=r+1$, $h^1(S,E)=h^2(S,E)=0$, and $\det (E)= L$, $E$ is the Lazarsfeld--Mukai bundle associated to a pair $(C,A)$. To this end, note that there is a rational map\vspace*{-3pt} \[
h_E : G(r+1,H^0(S,E)) \dashrightarrow |L|\vspace*{-3pt} \] defined in the following way. A general subspace $\varLambda\in G(r+1,H^0(S,E))$ is mapped to the degeneracy locus of the evaluation map: $ \mathrm{ev} _{\varLambda} : \varLambda \otimes \mathcal{O}_S \to E. $
If~the image $h_E(\varLambda)$ is a smooth curve $C\in |L|$, we set $\mbox{Coker}(\mathrm{ev}_{\varLambda}):=K_C(-A)$, where $A\in \mbox{Pic}(C)$ and $\mbox{deg}(A)=c_2(E)$, and observe that $E=E_{C,A}$. Indeed, since $h^1(S,E)=0$, $A$ is globally generated, and from $h^2(S,E)=0$ it follows that $\varLambda\cong H^0(C,A)^*$. The conclusion is that:
\begin{prop} \label{prop: caracterizare LM} A rank-$(r+1)$ vector bundle $E$ on $S$ is a Lazarsfeld--Mukai bundle if and only if $H^1(S,E)=H^2(S,E)=0$ and there exists an $(r+1)$-dimensional subspace of sections $\varLambda\subset H^0(S,E)$, such that the degeneracy locus of the morphism $\mathrm{ev}_{\varLambda}$ is a smooth curve. In particular, being a Lazarsfeld--Mukai vector bundle is an \textit{open condition}. \end{prop}
Note that there might be different pairs with the same Lazarsfeld--Mukai bundles, the difference being given by the corresponding spaces of global sections.
\subsection{Simple and Non-simple Lazarsfeld--Mukai Bundles} \label{subsection: Simple LM}
We keep the notation from the previous subsection. In the original situation, the bundles used by Lazarsfeld \cite{LazarsfeldJDG} and Mukai \cite{MukaiPNAS} are simple. The non-simple Lazarsfeld--Mukai bundles are, however, equally useful \cite{Aprodu-FarkasCOMP,Ciliberto-PareschiCRELLE}. For instance, Lazarsfeld's argument is partly based on an analysis of the non-simple bundles.
Proposition~\ref{prop: E_{C,A}} already shows that for $\rho(g,r,d)<0$ the associated Lazarsfeld--Mukai bundle cannot be simple. The necessity of making a distinction between simple and non-simple bundles for nonnegative $\rho$ will become more evident in the next sections.
In the rank-two case, one can give a precise description \cite{Donagi-MorrisonJDG} of non-simple Lazarsfeld--Mukai bundles, see also \cite{Ciliberto-PareschiCRELLE} Lemma 2.1:
\begin{lem}[Donagi--Morrison] \label{lemma: DM} Let $E_{C,A}$ be a non-simple Lazarsfeld--Mukai bundle. Then there exist line bundles $M,N\in \mathrm{Pic}(S)$ such that $h^0(S,M)$, $h^0(S,N)\ge 2$, $N$ is globally generated, and there exists a locally complete intersection subscheme $\xi$ of $S$, either of dimension zero or the empty set, such that $E_{C,A}$ is expressed as an extension \begin{equation} \label{eq: DM} 0\to M\to E_{C,A}\to N\otimes I_{\xi} \to 0. \end{equation} Moreover, if $h^0(S,M\otimes N^*)=0$, then $\xi=\emptyset$ and the extension splits. \end{lem}
One can prove furthermore that $h^1(S,N)=0$, \cite{Aprodu-FarkasCOMP} Remark 3.6.
We say that~(\ref{eq: DM}) is the \emph{Donagi--Morrison extension} associated to $E_{C, A}$. This notion makes perfect sense as this extension is uniquely determined by the vector bundle, if it is indecomposable \cite{Aprodu-FarkasCOMP}. Actually, a \textit{decomposable} Lazarsfeld--Mukai bundle $E$ cannot be expressed as an extension~(\ref{eq: DM}) with $\xi\ne\emptyset$, and hence a Donagi--Morrison extension is always unique, up to a permutation of factors in the decomposable case. Moreover, a Lazarsfeld--Mukai bundle is decomposable if and only if the corresponding Donagi--Morrison extension is trivial.
In the higher-rank case, we do not have such a precise description.\footnote{In fact, we do have a Harder--Narasimhan filtration, but we cannot control all the factors.} However, a similar sufficiently strong statement is still valid \cite{LazarsfeldJDG,LazarsfeldICTP,PareschiJAG}.
\begin{prop}[Lazarsfeld]
Notation as above. If $E_{C,A}$ is not simple, then
the linear system $|L|$ contains a reducible or a multiple curve. \end{prop}
In the rank-two case, this statement comes from the decomposition $L\cong M\otimes N$.
\subsection{The Petri Conjecture Without Degenerations} \label{subsection: BNP}
A smooth curve of genus $g$ is said to satisfy \textit{Petri's condition}, or to be \textit{Brill--Noether--Petri generic}, if the multiplication map (the Petri map) \[
\mu_{0,A}: H^0(C, A) \otimes H^0(C, K_C(-A))
\to H^0(C, K_C), \] is injective for any line bundle $A$ on $C$. One consequence of this condition is that all the Brill--Noether loci $W^r_d(C)$ have the expected dimension and are smooth away from $W^{r+1}_d(C)$; recall that the tangent space at the point $[A]$ to $W^r_d(C)$ is naturally isomorphic to the dual of $\mathrm{Coker}(\mu_{0,A})$ \cite{Arbarello-Cornalba-Griffiths-Harris}. The Petri conjecture, proved by degenerations by Gieseker, states that a general curve satisfies Petri's condition. Lazarsfeld \cite{LazarsfeldJDG} found a simpler and elegant proof without degenerations by analyzing curves on very general $K3$ surfaces.
Lazarsfeld's idea is to relate the Petri maps to the Lazarsfeld--Mukai bundles; this relation is valid in general and has many other applications. Suppose, as in the previous subsections, that $S$ is a $K3$ surface and $L$ is a globally generated line bundle on $S$. For the moment, we do not need to assume that $L$ generates the Picard group. E. Arbarello and M. Cornalba constructed a scheme $\mathcal{W}^r_d(|L|)$
parameterizing pairs $(C,A)$ with $C\in |L|$ smooth and $A\in W^r_d(C)$ and a morphism\vspace*{-3pt} \[
\pi_S:\mathcal{W}^r_d(|L|)\to |L|.\vspace*{-3pt} \]
Assume that $A\in W^r_d(C)\setminus W^{r+1}_d(C)$ is globally generated, and consider $M_A$ the vector bundle of rank $r$ on $C$ defined as the kernel of the evaluation map \begin{equation}\label{MA}
0\to M_A \to H^0(C,A)\otimes \mathcal{O}_C \buildrel{\mathrm{ev}}\over{\to} A\to 0. \end{equation} Twisting~(\ref{MA}) with $K_C\otimes A^*$, we obtain the following description of the kernel of the Petri map:\footnote{This ingenious procedure is an efficient replacement of the base-point-free pencil trick; ``it has killed the base-point-free pencil trick,'' to quote Enrico Arbarello.}\vspace*{4pt} \[
\mbox{Ker}(\mu_{0,A})= H^0(C, M_A\otimes K_C\otimes A^*). \]
There is another exact sequence on $C$\vspace*{-4pt} \[
0\to \mathcal{O}_C\to F_{C,A}|_C\otimes K_C\otimes A^*\to M_A\otimes K_C\otimes A^*\to 0,\vspace*{-4pt} \] and from the defining sequence of $E_{C,A}$ one obtains the exact sequence on $S$\vspace*{-4pt} \[
0\to H^0(C, A)^*\otimes F_{C,A}\to E_{C,A}\otimes F_{C,A}\to F_{C, A}|_C\otimes K_C\otimes A^*\to 0.\vspace*{-4pt} \]
From the vanishing of $h^0(C,F_{C,A})$ and of $h^1(C, F_{C, A})$, we obtain\vspace*{-4pt} \[
H^0(C,E_{C,A}\otimes F_{C,A})=H^0(C,F_{C,A}|_C\otimes K_C\otimes A^*).\vspace*{-4pt} \]
Suppose that $\mathcal{W}\subset \mathcal{W}^r_d(|L|)$ is a dominating component and $(C,A)\in\mathcal{W}$ is an element such that $A$ is globally generated and $h^0(C,A)=r+1$. A deformation-theoretic argument shows that if the Lazarsfeld--Mukai bundle $E_{C,A}$ is simple, then the coboundary map $H^0(C,M_A\otimes K_C\otimes A^*)\to H^1(C,\mathcal{O}_C)$ is zero \cite{PareschiJAG}, which eventually implies the injectivity of $\mu_{0,A}$.
By reduction to complete base-point-free bundles on the curve \cite{LazarsfeldJDG,PareschiJAG} this analysis yields:
\begin{thm}[Lazarsfeld] \label{thm: Lazarsfeld} Let $C$ be a smooth curve of genus $g\ge 2$ on a $K3$
surface~$S$, and assume that any divisor in the linear system $|C|$ is reduced and irreducible. Then a generic element in the linear system $|C|$ is Brill--Noether--Petri generic. \end{thm}
A particularly interesting case is when the Picard group of $S$ is generated by $L$ and $\rho(g,r,d)=0$. Obviously, the condition $\rho=0$ can be realized only for composite genera, as $g=(r+1)(g-d+r)$, for example, $r=1$ and $g$ even. Under these assumptions, there is a unique Lazarsfeld--Mukai bundle $E$ with $c_1(E)=L$ and $c_2(E)=d$, and different pairs $(C,A)$ correspond to different \hbox{$\varLambda\in G(r+1,$} $H^0(S,E))$; in other words the natural rational map $G(r+1,H^0(S,E))\dashrightarrow
\mathcal{W}^r_d(|L|)$ is dominating. Note that $E$ must be stable and globally generated.
\subsection{Mukai Manifolds of Picard Number One} \label{subsection: Mukai manifolds}
A Fano manifold $X$ of dimension $n\ge 3$ and index $n-2$ (i.e., of coindex 3) is called a \textit{Mukai manifold}.\footnote{Some authors consider that Mukai manifolds have dimension four or more.} In the classification, special attention is given to prime Fano manifolds: note that if $n\ge 7$, $X$ is automatically prime as shown by Wisniewski; see, for example, \cite{Iskovskih-Prokhorov}.
Assume that the Picard group of $X$ is generated by an ample line bundle $L$, and let the sectional genus $g$ be the integer $(L^n)/2+1$. Mukai and Gushel used vector bundle techniques to obtain a complete classification of these manifolds. A~first major obstacle is to prove that the fundamental linear system contains indeed a smooth element, aspect which is settled by Shokurov and Mella; see, for example, \cite{Iskovskih-Prokhorov}. Then the $(g+n-2)$-dimensional linear system
$|L|$ is base-point-free, and a general linear section with respect to the generator of the Picard group is a $K3$ surface. More precisely, if $\mathrm{Pic}(X) =\mathbb{Z}\cdot L$, then for $H_1,\cdots,H_{n-2}$ general elements in the fundamental linear system
$|L|$, $S:=H_1\cap\cdots\cap H_{n-2}$ is scheme-theoretically a $K3$ surface. Note that if $n\ge 4$ and $i\ge 3$, the intersection $H_1\cap\cdots\cap H_{n-i}$ is again a Fano manifold of coindex 3.
Mukai noticed that the fundamental linear system either is very ample, and the image of $X$ is projectively normal or is associated to a double covering of $\mathbb{P}^n$ ($g=2$) or of the hyper-quadric $Q^n\subset\mathbb{P}^{n+1}$ ($g=3$). The difficulty of the problem is thus to classify all the possible cases where
$|L|$ is normally generated, called \textit{of the first species}. Taking linear sections one reduces (not quite immediately) to the case $n=3$ \cite{Iskovskih-Prokhorov} p.110.
For simplicity, let us assume that $X$ is a prime Fano 3-fold of index 1. If
$g=4$ and $g=5$, $X$ is a complete intersection; hence the hard cases begin with genus $6$. A hyperplane section $S$ is a $K3$ surface, and, by a result of Moishezon, $\mathrm{Pic}(S)$ is generated by $L|_S$.
Let us denote by $\mathcal{F}_g$ the moduli space of polarized $K3$ surfaces of degree $2g-2$, by $\mathcal{P}_g$ the moduli space of pairs $(K3\ \mbox{surface},\mbox{curve})$ and $\mathcal{M}_g$ the moduli space of genus-$g$ curves. There are two nice facts in Mukai's proof involving these two moduli spaces. His first observation is that if there exists a prime Fano 3-fold $X$ of the first species of genus $g\ge 6$ and index $1$, the rational map $\phi_g:\mathcal{P}_g\dashrightarrow \mathcal{M}_g$ is \textit{not} generically finite \cite{MukaiLMS}. The second nice fact is that $\phi_g$ is generically finite if and only if $g=11$ or $g\ge 13$ \cite{MukaiLMS}.\footnote{In genus $11$, it is actually birational \cite{MukaiLNPAM}.} Hence, one is reduced to study the genera $6\le g\le 12$ with $g\ne 11$. At this point, Lazarsfeld--Mukai bundles are employed. By the discussion from Sect.\,\ref{subsection: BNP}, for any decomposition $g=(r+1)(g-d+r)$, with $r\ge 1$, $d\le g-1$, there exists a unique Lazarsfeld--Mukai bundle $E$ of rank $(r+1)$. It has already been noticed that the bundle $E$ is stable and globally generated. Moreover, the determinant map\vspace*{-4pt} \[
\mathrm{det}:\wedge^{r+1}H^0(S,E)\to H^0(S,L)\vspace*{-4pt} \] is surjective \cite{MukaiPNAS}, and hence it induces a linear embedding\vspace*{-4pt} \[
\mathbb{P}H^0(S,L)^*\hookrightarrow \mathbb{P}(\wedge^{r+1}H^0(S,E)^*).\vspace*{-4pt} \]
Following \cite{MukaiPNAS}, we have a commutative diagram \begin{center}
\mbox{\xymatrix{S \ar[r]^{\phi_E}\ar@{^(->}[d]^{\phi_{|L|}} & G \ar@{^(->}[d]^{\mathrm{Pluecker}}\\ \mathbb{P}H^0(L)^*\ar@{^(->}[r] & \mathbb{P}(\wedge^{r+1}H^0(E)^*)}} \end{center} where $G:=G(r+1,H^0(S,E)^*)$ and $\phi_E$ is given by $E$. This diagram shows that $S$ is embedded in a suitable linear section of the Grassmannian $G$. Moreover, this diagram extends over $X$: by a result of Fujita, $E$ extends to a stable vector bundle on $X$, and the diagram over $X$ is obtained for similar reasons. Hence $X$ is a linear section of a Grassmannian. By induction on the dimension, $X$ is contained in a \textit{maximal} Mukai manifold, which is also a linear section of the Grassmannian. A complete list of maximal Mukai manifolds is given in \cite{MukaiPNAS}. Notice that in genus $12$, the maximal Mukai manifolds are already threefolds.
\setcounter{thm}{0} \section{Constancy of Invariants of $K3$ Sections} \label{section: Constancy}
\subsection{Constancy of the Gonality. I} \label{subsection Gonality I}
In his analysis of linear systems on $K3$ surfaces Saint-Donat \cite{Saint-DonatAJM} shows that any smooth curve which is linearly equivalent to a hyperelliptic or trigonal curve is also hyperelliptic, respectively trigonal. The idea was to prove that the minimal pencils are induced by elliptic pencils defined on the surface. This result was sensibly extended by Reid
\cite{ReidJLM} who proved the following existence result:
\begin{thm}[Reid] Let $C$ be a smooth curve of genus $g$ on a $K3$ surface $S$ and $A$ be a complete, base-point-free $g^1_d$ on $C$. If\vspace*{-3pt} \[ \frac{d^2}{4}+d+2<g,\vspace*{-3pt} \] then $A$ is the restriction of an elliptic pencil on $S$. \end{thm}
It is a good occasion to present here, as a direct application of techniques involving Lazarsfeld--Mukai bundles, an alternate shorter proof of Reid's theorem.
\begin{proof} We use the notation of previous sections. By the hypothesis, the Lazarsfeld--Mukai bundle $E$ is not simple, and hence we have a unique Donagi--Morrison extension\vspace*{3pt} \[ 0\to M\to E\to N\otimes I_{\xi}\to 0,\vspace*{2pt} \] with $\xi$ of length $\ell$. Note that $M\cdot N=d-\ell\le d$. By the Hodge index theorem, we have $(M^2)\cdot(N^2)\le (M\cdot N)^2\le d^2$, whereas from $M+N=C$ we obtain $(M^2)=2(g-1-d)-(N^2)$, hence\vspace*{-4pt} \[ (N^2)\le\frac{d^2}{2(g-1-d)-(N^2)}.\vspace*{-4pt} \]
Therefore, the even integer $x:=(N^2)$ satisfies the following inequality $x^2-2x(g-1-d)+d^2\ge 0.$ The hypothesis shows that the above inequality fails for $x\ge 2$, and hence $N$ must be an elliptic pencil. \end{proof}
In conclusion, for small values, the gonality\footnote{The gonality $\mathrm{gon}(C)$ of a curve $C$ is the minimal degree of a morphism from $C$ to the projective~line.} is constant in the linear system. Motivated by these facts, Harris and Mumford conjectured that \textit{the gonality of \hbox{$K3$-sections} should always be constant} \cite{Harris-MumfordINVENTIONES}.
This conjecture is unfortunately wrong as stated: Donagi and Morrison \cite{Donagi-MorrisonJDG} gave the following counterexample:
\begin{ex} \label{ex: DM}
Let $S\to \mathbb{P}^2$ be a double cover branched
along a smooth sextic and $L$ be the pull-back of
$\mathcal{O}_{\mathbb{P}^2}(3)$. The curves in $|L|$
have all genus $10$. The general curve $C\in|L|$
is isomorphic to a smooth plane sextic, and hence
it is pentagonal. On the other hand, the pull-back of
a general smooth plane cubic $\varGamma$ is a double
cover of $\varGamma$, and thus it is tetragonal. \end{ex}
\subsection{Constancy of the Clifford Index} \label{subsection: Cliff}
Building on his work on Koszul cohomology and its relations with geometry, M. Green proposed a reformulation of the Harris--Mumford conjecture replacing the gonality by the Clifford index.
Recall that the \textit{Clifford index} of a nonempty linear system $|A|$ on a smooth curve $C$
is the codimension of the image of the natural addition map $|A|\times|K_C(-A)|\to |K_C|$. This definition is nontrivial only for relevant linear systems $|A|$, i.e., such that both $|A|$ and $|K_C(-A)|$ are at least one-dimensional; such an $A$ is said to \textit{contribute to the Clifford index}. The \textit{Clifford index of $C$} is the minimum of all the Clifford indices taken over the linear systems that contribute to the Clifford index and is denoted by $\mathrm{Cliff}(C)$. The Clifford index is related to the gonality by the following inequalities\vspace*{3pt} \[ \mathrm{gon}(C)-3\le \mathrm{Cliff}(C)\le \mathrm{gon}(C)-2,\vspace*{3pt} \] and curves with $\mathrm{gon}(C)-3 = \mathrm{Cliff}(C)$ are very rare: typical examples are plane curves and Eisenbud--Lange--Martens--Schreyer curves \cite{ELMS,KnutsenIJM}.\footnote{It is conjectured that the only other examples should be some half-canonical curves of even genus and maximal gonality \cite{ELMS}; however, this conjecture seems to be very difficult.}
From the Brill--Noether theory, we obtain the bound $\mathrm{Cliff}(C)\le\left[(g-1)/2\right]$ (and, likewise, $\mathrm{gon}(C)\le\left[(g+3)/2\right]$), and it is known that the equality is achieved for general curves. The Clifford index is in fact a measure of how special a curve is in the moduli space.
The precise statement obtained by Green and Lazarsfeld is the following \cite{Green-LazarsfeldINVENTIONES}:
\begin{thm}[Green--Lazarsfeld] \label{thm: GL Cliff}
Let $S$ be a $K3$ surface and $C\subset S$ be a smooth
irreducible curve of genus $g\ge 2$. Then $\mathrm{Cliff}(C')
=\mathrm{Cliff}(C)$ for every smooth curve $C'\in|C|$. Furthermore,
if $\mathrm{Cliff}(C)$ is strictly less than the generic value
$ \left[(g-1)/2\right]$, then there exists a line bundle
$M$ on $S$ whose restriction to any smooth curve $C'\in|C|$ computes
the Clifford index of~$C'$. \end{thm}
The proof strategy is based on a reduction method of the associated Lazarsfeld--Mukai bundles. The bundle $M$ is obtained from the properties of the reductions; we refer to \cite{Green-LazarsfeldINVENTIONES} for details.
From the Clifford index viewpoint, Donagi--Morrison's example is not different from the other cases. Indeed, all smooth curves in $|L|$ have Clifford index $2$. We shall see in the next subsection that Donagi--Morrison's example is truly an isolated exception for the constancy of the gonality.
\subsection{Constancy of the Gonality. II} \label{subsection: Gonality II}
As discussed above, the Green--Lazarsfeld proof of the constancy of the Clifford index was mainly based on the analysis of Lazarsfeld--Mukai bundles. It is natural to try and explain the peculiarity of Donagi--Morrison's example from this point of view. This was done in \cite{Ciliberto-PareschiCRELLE}. The surprising answer found by Ciliberto and Pareschi \cite{Ciliberto-PareschiCRELLE} (see also \cite{Donagi-MorrisonJDG}) is the following:
\begin{thm}[Ciliberto--Pareschi] \label{thm: Ciliberto-Pareschi}
Let $S$ be a $K3$ surface and $L$ be an ample line bundle on $S$. If the gonality of the smooth curves in $|L|$ is not constant, then $S$ and $L$ are as in Donagi--Morrison's example. \end{thm}
Theorem~\ref{thm: Ciliberto-Pareschi} was refined by Knutsen \cite{KnutsenIJM} who replaced ampleness by the more general condition that $L$ be globally generated. The extended setup covers also the case of exceptional curves, as introduced by Eisenbud, Lange, Martens, and Schreyer \cite{ELMS}.
The proof of Theorem~\ref{thm: Ciliberto-Pareschi}
consists of a thorough analysis of the loci $\mathcal{W}^1_d(|L|)$, where $d$ is the minimal gonality of smooth curves in $|L|$, through the associated Lazarsfeld--Mukai bundles. The authors identify Donagi--Morrison's example in the following way:
\begin{thm}[Ciliberto--Pareschi] \label{thm: Ciliberto-Pareschi2}
Let $S$ be a $K3$ surface and $L$ be an ample line bundle on $S$. If the gonality of smooth curves in $|L|$ is not constant and if there is a pair $(C,A)\in\mathcal{W}^1_d(|L|)$ such that $h^1(S,E_{C,A}\otimes F_{C,A})=0$, then $S$ and $L$ are as in Donagi--Morrison's example. \end{thm}
To conclude the proof of Theorem~\ref{thm: Ciliberto-Pareschi}, Ciliberto and Pareschi prove that non-constancy of the gonality implies the existence of a pair $(C,A)$ with $h^1(S,E_{C,A}\otimes F_{C,A})=0$; see \cite[Proposition~2.4]{Ciliberto-PareschiCRELLE}.
It is worth noticing that, in Example~\ref{ex: DM}, if $C$ is the inverse image of a plane cubic and $A$ is a $g^1_4$ (the pull-back of an involution), then $E_{C,A}$ is the pull-back of $\mathcal{O}_{\mathbb{P}^2}(1)\oplus\mathcal{O}_{\mathbb{P}^2}(2)$ \cite{Ciliberto-PareschiCRELLE}, and hence the vanishing of $h^1(S,E_{C,A}\otimes F_{C,A})$ is guaranteed in this case.
\subsection{Parameter Spaces of Lazarsfeld--Mukai Bundles and~Dimension of Brill--Noether Loci} \label{subsection: Parameter}
We have already seen that the Brill--Noether loci are smooth of expected dimension at pairs corresponding to simple Lazarsfeld--Mukai bundles. It is interesting to know what is the dimension of these loci at other points as well. Precisely, we look for a uniform bound on the dimension of Brill--Noether loci of general curves in a linear system.
A first step was made by Ciliberto and Pareschi \cite{Ciliberto-PareschiCRELLE} who proved, as a necessary step in Theorem~\ref{thm: Ciliberto-Pareschi}, that an ample curve of gonality strictly less than the generic value, general in its linear system, carries finitely many minimal pencils. This result was extended to other Brill--Noether loci \cite{Aprodu-FarkasCOMP}, proving a phenomenon of \textit{linear growth} with the degree; see below. Let us mention that, for the moment, the only results in this direction are known to hold for pencils \cite{Aprodu-FarkasCOMP} and nets \cite{Lelli-Chiesa}.
As before, we consider $S$ a $K3$ surface and $L$ a globally generated line bundle on $S$. In order to parameterize all pairs $(C, A)$ with non-simple Lazarsfeld--Mukai bundles, we need a global construction. We fix a nontrivial globally generated line bundle $N$ on $S$ with $H^0(L(-2N))\neq 0$ and an integer $\ell\ge 0$. We set $M:=L(-N)$ and $g:=1+L^2/2$. Define $\widetilde{\mathcal{P}}_{N,\ell}$ to be the family of \textit{vector bundles} of rank $2$ on
$S$ given by nontrivial extensions\vspace*{-3pt} \begin{equation} \label{eq: extension} 0\to M\to E\to N\otimes I_{\xi}\to 0,\vspace*{-3pt} \end{equation} where $\xi$ is a zero-dimensional locally complete intersection subscheme (or the empty set) of $S$ of length $\ell$, and set \[ \mathcal{P}_{N,\ell}:=\{[E]\in\widetilde{\mathcal{P}}_{N, \ell}:\ h^1(S,E)=h^2(S,E)=0\}. \] Equivalently (by Riemann--Roch), $[E]\in \mathcal{P}_{N, \ell}$ if and only if $h^0(S,E)=$\break $g-c_2(E)+3$ and $h^1(S,E)=0$. Note that any non-simple Lazarsfeld--Mukai bundle on $S$ with determinant $L$ belongs to some family $\mathcal{P}_{N,\ell}$, from Lemma~\ref{lemma: DM}. The family $\mathcal{P}_{N,\ell}$, which, a priori, might be the empty set, is an open Zariski subset of a projective bundle of the Hilbert scheme~$S^{[\ell]}$.
Assuming that $\mathcal{P}_{N,\ell}\ne \emptyset$, we consider the Grassmann bundle $\mathcal{G}_{N,\ell}$ over $\mathcal{P}_{N,\ell}$ classifying pairs $(E,\varLambda)$ with $[E]\in\mathcal{P}_{N,\ell}$ and $\varLambda\in \mathrm{G}(2,H^0(S,E))$. If $d:=c_2(E)$ we define the rational map $h_{N, \ell}: \mathcal{G}_{N, \ell}
\dashrightarrow \mathcal{W}^1_d(|L|)$, by setting $h_{N, \ell}(E, \varLambda):=(C_{\varLambda}, A_{\varLambda})$, where $A_{\varLambda}\in \mbox{Pic}^d(C_{\varLambda})$ is such that the following exact sequence on $S$ holds:\vspace*{4pt} \[ 0\to \varLambda\otimes \mathcal{O}_S\stackrel{\mathrm{ev}_{\varLambda}}\to E\to K_{C_{\varLambda}}\otimes A_{\varLambda}^*\to 0.\vspace*{3pt} \]
One computes $\dim\ \mathcal{G}_{N,\ell}=g+\ell+h^0(S,M\otimes N^*)$. If we assume furthermore that $\mathcal{P}_{N,\ell}$ contains a Lazarsfeld--Mukai vector bundle $E$ on $S$ with $c_2(E)=d$
and consider $\mathcal{W}\subset \mathcal{W}^1_d(|L|)$ the closure of the image of the rational map $h_{N,\ell}:\mathcal{G}_{N,\ell}\dashrightarrow \mathcal{W}^1_d(|L|)$, then we find $\dim\ \mathcal{W} =g+d-M\cdot N=g+\ell$.
On the other hand, if $C\in|L|$ has Clifford dimension one and $A$ is a globally generated line bundle on $C$ with $h^0(C,A)=2$ and $[E_{C,A}]\in\mathcal{P}_{N,\ell}$, then $M\cdot N\ge \mathrm{gon}(C)$.
These considerations on the indecomposable case, together with a simpler analysis of decomposable bundles, yield finally \cite{Aprodu-FarkasCOMP}:
\begin{thm} \label{thm: Green Cliffdim 1}
Let $S$ be a $K3$ surface and $L$ a globally generated line bundle on $S$, such that general curves in $|L|$ are of Clifford dimension one. Suppose that
$\rho(g,1,k)\leq 0$, where $L^2=2g-2$ and $k$ is the (constant) gonality of all smooth curves in $|L|$. Then for a general curve $C\in|L|$, we have \begin{equation} \label{lgc}
\mathrm{dim}\ W^1_{k+d}(C)=d\mbox{ for all } 0\le d\le g-2k+2. \end{equation} \end{thm}
The condition~(\ref{lgc}) is called the \textit{linear growth condition}. It is equivalent to \[ \mathrm{dim}\ W^1_{g-k+2}(C)=\rho(g,1,g-k+2)=g-2k+2. \]
Note that the condition that $C$ carry finitely many minimal pencils, which is a part of~(\ref{lgc}), appears explicitly in~\cite{Ciliberto-PareschiCRELLE}. It is directly related to the constancy of the gonality discussed before.
\setcounter{thm}{0} \section{Green's Conjecture for Curves on $K3$ Surfaces} \label{section: Green}
\subsection{Koszul Cohomology}\label{ch1:sec3.1}
Let $X$ be a (not necessarily smooth) complex, irreducible, projective variety and $L\in\mathrm{Pic}(X)$ globally generated. The Euler sequence on the projective space $\mathbb{P}(H^0(X,L)^*)$ pulls back to a short exact sequence of vector bundles on~$X$ \begin{equation} \label{eqn: Euler} 0 \to M_L\to H^0(X,L)\otimes \mathcal{O}_X\to L\to 0. \end{equation}
After taking exterior powers in the sequence~(\ref{eqn: Euler}), twisting with multiples of $L$ and going to global sections, we obtain an exact sequence for any nonnegative $p$ and $q$:\vspace*{-5pt} \begin{align} \label{eqn: WedgeEuler} 0\to H^0(\wedge^{p+1}M_L\otimes L^{q-1}) \to \wedge^{p+1}H^0(L)\otimes H^0(L^{q-1}) \stackrel{\delta}{\to} H^0(\wedge^pM_L\otimes L^q).\nonumber\\ \vspace*{-5pt} \end{align}
The finite-dimensional vector space $K_{p,q}(X,L):=\mathrm{Coker}(\delta)$ is called the \textit{Koszul cohomology space}\footnote{The indices $p$ and $q$ are usually forgotten when defining Koszul cohomology.} of $X$ with values in $L$ \cite{LazarsfeldICTP,GreenJDG,GreenICTP}. Observe that $K_{p,q}$ can be defined alternatively as: \[ K_{p,q}(X,L)=\mathrm{Ker}\left(H^1(\wedge^{p+1}M_L\otimes L^{q-1}) \to \wedge^{p+1}H^0(L)\otimes H^1(L^{q-1})\right),\vspace*{-3pt} \] a description which is particularly useful when $X$ is a curve.
Several versions are used in practice, for example, replace $H^0(L)$ in~(\ref{eqn: Euler}) by a subspace that generates $L$ or twist~(\ref{eqn: WedgeEuler}) by $\mathcal{F}\otimes L^{q-1}$ where $\mathcal{F}$ is a coherent sheaf. For our presentation, however, we do not need to discuss these natural generalizations.
Composing the maps\vspace*{-3pt} \[ \wedge^{p+1}H^0(L)\otimes H^0(L^{q-1}) \stackrel{\delta}{\to} H^0(\wedge^pM_L\otimes L^q) \hookrightarrow \wedge^pH^0(L)\otimes H^0(L^q)\vspace*{-3pt} \] we obtain, by iteration, a complex\vspace*{-3pt} \[ \wedge^{p+1}H^0(L)\otimes H^0(L^{q-1})\to \wedge^pH^0(L)\otimes H^0(L^q)\to \wedge^{p-1}H^0(L)\otimes H^0(L^{q+1})\vspace*{-3pt} \] whose cohomology at the middle is $K_{p,q}(X,L)$, and this is the definition given by Green \cite{GreenJDG}.
An important property of Koszul cohomology is upper-semicontinuity in flat families with constant cohomology; in particular, vanishing of Koszul cohomology is an open property in such families. For curves, constancy of $h^1$ is a consequence of flatness and of constancy of $h^0$, as shown by the Riemann--Roch theorem.
The original motivation for studying Koszul cohomology spaces was given by the relation with minimal resolutions over the polynomial ring. More precisely, if $L$ is very ample, then the Koszul cohomology computes the minimal resolution of the graded module\vspace*{4pt} \[ R(X,L):=\bigoplus_qH^0(X,L^q)\vspace*{3pt} \] over the polynomial ring \cite{GreenJDG,GreenICTP}; see also \cite{EisenbudBOOK,Aprodu-NagelULECT}, in the sense that any graded piece that appears in the minimal resolution is (non-canonically) isomorphic to a $K_{p,q}$. If the image of $X$ is projectively normal, this module coincides with the homogeneous coordinate ring of $X$. The projective normality of $X$ can also be read off Koszul cohomology, being characterized by the vanishing condition $K_{0,q}(X,L)=0$ for all $q\ge 2$. Furthermore, for a projectively normal $X$, the homogeneous ideal is generated by quadrics if and only if $K_{1,q}(X,L)=0$ for all $q\ge 2$.\footnote{The dimension of $K_{1,q}$ indicates the number of generators of degree $(q+1)$ in the homogeneous~ideal.} The phenomenon continues as follows: if $X$ is projectively normal and the homogeneous ideal is generated by quadrics, then the relations between the generators are linear if and only if $K_{2,q}(X,L)=0$ for all $q\ge 2$, whence the relation with syzygies ~\cite{GreenJDG}.
Another notable application of Koszul cohomology is the description of the Castelnuovo--Mumford regularity, which coincides with \cite{GreenJDG,Aprodu-NagelULECT} \[ \mathop{\mathrm{min}}_q\{K_{p,q}(X,L)=0,\mbox{ for all }p\}. \]
Perhaps the most striking property of Koszul cohomology, discovered by Green and Lazarsfeld \cite[Appendix]{GreenJDG}, is a consequence of a nonvanishing result:
\begin{thm}[Green--Lazarsfeld] \label{thm: GL nonvan}
Suppose $X$ is smooth and $L=L_1\otimes L_2$ with $r_i:=h^0(X,L_i)-1\ge 1$.
Then $K_{r_1+r_2-1,1}(X,L)\ne 0$. \end{thm}
Note that the spaces $K_{p,1}$ have the following particular attribute: if $K_{p,1}\ne 0$ for some $p\ge 1$ then $K_{p',1}\ne 0$ for all $1\le p'\le p$. This is obviously false for $K_{p,q}$ with $q\ge 2$.
Theorem~\ref{thm: GL nonvan} shows that the existence of nontrivial decompositions of $L$ reflects onto the existence of nontrivial Koszul classes in some space $K_{p,1}$. Its most important applications are for curves, in particular for canonical curves, a case which is discussed in the next subsection. In the higher-dimensional cases, for surfaces, for instance, the meaning of Theorem~\ref{thm: GL nonvan} becomes more transparent if it is accompanied by a restriction theorem which compares the Koszul cohomology of $X$ with the Koszul cohomology of the linear sections \cite{GreenJDG}:
\begin{thm}[Green] \label{thm: Lefschetz}
Suppose $X$ is smooth and $h^1(X,L^q)=0$ for all $q\ge 1$.
Then for any connected reduced divisor $Y\in|L|$,
the restriction map induces an isomorphism\vspace*{4pt}
\[
K_{p,q}(X,L)\stackrel{\sim}{\to}K_{p,q}(Y,L|_Y),\vspace*{3pt}
\]
for all $p$ and $q$. \end{thm}
The vanishing of $h^1(X,\mathcal{O}_X)$ suffices to prove that the restriction is an isomorphism between the spaces $K_{p,1}$ \cite{Aprodu-NagelULECT}.
In the next subsections, we shall apply Theorem~\ref{thm: Lefschetz} for $K3$ sections.
\begin{cor} \label{cor: Lefschetz K3} Let $C$ be a smooth connected curve on a $K3$ surface $S$. Then\vspace*{-4pt} \[ K_{p,q}(S,\mathcal{O}_S(C))\cong K_{p,q}(C,K_C)\vspace*{-4pt} \] for all $p$ and $q$. \end{cor}
One direct consequence is a duality theorem for Koszul cohomology of $K3$ surfaces.\footnote{Duality for Koszul cohomology of curves follows from Serre's duality. For higher-dimensional manifolds, some supplementary vanishing conditions are required \cite{GreenJDG,GreenICTP}.} It shows the symmetry of the table containing the dimensions of the spaces $K_{p,q}$, called \textit{the Betti table}.
\vspace*{-4pt} \subsection{Statement of Green's Conjecture}\label{ch1:sec3.2}
Let us particularize Theorem~\ref{thm: GL nonvan} for a canonical curve. Consider $C$ a smooth curve and choose a decomposition $K_C=A\otimes K_C(-A)$. Theorem~\ref{thm: GL nonvan} applies only if $h^0(C,A)\ge 2$ and $h^1(C,A)\ge 2$, i.e., if $A$ contributes to the Clifford index. The~quantity $r_1+r_2-1$ which appears in the statement equals $g-\mathrm{Cliff}(A)-2$, and hence, if $A$ \textit{computes} the Clifford index, we obtain the following:
\begin{thm}[Green--Lazarsfeld]
For any smooth curve $C$ of genus $g$ and Clifford index $c$,
we have $K_{g-c-2,1}(C,K_C)\ne 0$. \end{thm}
It is natural to determine whether or not this result is sharp, a question which is addressed in the statement of Green's conjecture:
\begin{con}[Green]
Let $C$ be a smooth curve of genus $g$ and Clifford index $c$. For all $p\ge g-c-1$,
we have $K_{p,1}(C,K_C) = 0$. \end{con}
For the moment, Green's conjecture remains a hard open problem. At the same time, strong evidence has been discovered. For instance, it is known to hold for general curves \cite{VoisinJEMS,VoisinCOMP}, for curves of odd genus and maximal Clifford index \cite{VoisinCOMP,Hirschowitz-RamananAENS}, for general curves of given gonality \cite{VoisinJEMS,TeixidorDUKE},\footnote{Voisin's and Teixidor's cases complete each other quite remarkably.} \cite{SchreyerLNM}, for curves with small Brill--Noether loci \cite{AproduMRL}, for plane curves \cite{LooseMANUSCRIPTA}, for curves on $K3$ surfaces \cite{VoisinJEMS,VoisinCOMP,Aprodu-FarkasCOMP},~etc.; see also \cite{Aprodu-NagelULECT} for a discussion.
We shall consider in the sequel the case of curves on $K3$ surfaces with emphasis on Voisin's approach to the problem and the role played by Lazarsfeld--Mukai bundles. It is interesting to notice that Green's conjecture for $K3$ sections can be formulated directly in the $K3$ setup, as a vanishing result on the moduli space $\mathcal{F}_g$ of polarized $K3$ surfaces. However, in the proof of this statement, as it usually happens in mathematics, we have to exit the $K3$ world, prove a more general result in the extended setup, and return to $K3$ surfaces. The steps we have to take, ordered logically and not chronologically, are the following. In the first, most elaborated step, one finds an example for odd genus \cite{VoisinCOMP,VoisinJEMS}. At this stage, we are placed in the moduli space $\mathcal{F}_{2k+1}$. Secondly, we exit the $K3$ world, land in $\mathcal{M}_{2k+1}$, and prove the equality of two divisors \cite{Hirschowitz-RamananAENS,VoisinJEMS}. The first step is used, and the identification of~the divisors extends to their closure over the component $\varDelta_0$ of the boundary \cite{AproduMRL}. In~the third step, we jump from a gonality stratum $\mathcal{M}^1_{g,d}$ in a moduli space $\mathcal{M}_g$ to the~boundary of another moduli space of stable curves $\overline{\mathcal{M}}_{2k+1}$, where $k=g-d+1$ \cite{AproduMRL}. The second step reflects into a vanishing result on an explicit open subset of $\mathcal{M}^1_{g,d}$. Finally one goes back to $K3$ surfaces and applies the latter vanishing result \cite{Aprodu-FarkasCOMP} on $\mathcal{F}_g$. In the steps concerned with $K3$ surfaces (first and last), the Lazarsfeld--Mukai bundles are central objects.
\subsection{Voisin's Approach} \label{subsection: Hilbert}
The proof of the generic Green conjecture was achieved by Voisin in two papers \cite{VoisinJEMS,VoisinCOMP}, using a completely different approach to Koszul cohomology via Hilbert scheme of points.
Let $X$ be a complex connected projective manifold and $L$ a line bundle on $X$. It is obvious that any global section $\sigma$ is uniquely determined by the collection $\{\sigma(x)\}_x$, where $\sigma(x)\in L|_x\cong\mathbb{C}$ and $x$ belongs to a nonempty open subset of $X$. One tries to find a similar fact for multisections in $\wedge^nH^0(X,L)$.
Let $\sigma_1\wedge\cdots\wedge\sigma_n$ be a decomposable element in $\wedge^nH^0(X,L)$ with $n\ge 1$. By analogy with the case $n=1$, we have to look at the restriction
$\sigma_1|_{\xi}\wedge\cdots\wedge\sigma_n|_{\xi}\in \wedge^nL|_{\xi}$ where $\xi$ is now a zero-dimensional subscheme, and it is clear that we need $n$ points for otherwise this restriction would be zero. Note that a zero-dimensional subscheme of length $n$ defines a point in the punctual Hilbert scheme $X^{[n]}$. For technical reasons, we shall restrict to curvilinear subschemes\footnote{A curvilinear subscheme is defined locally, in the classical topology, by $x_1=\cdots=x_{s-1}=x_s^k=0$; equivalently, it is locally embedded in a smooth curve.} which form a large open subset $X^{[n]}_c$ in a connected component of the Hilbert scheme.\footnote{The connectedness of $X^{[n]}_c$ follows from the observation that a curvilinear subscheme is a deformation of a reduced subscheme.} Varying $\xi\in X^{[n]}_c$, the collection
$\{\sigma_1|_{\xi}\wedge\cdots\wedge\sigma_n|_{\xi}\}_{\xi}$ represents a section in a line bundle described as follows. Put $\varXi_n\subset X^{[n]}_c\times X$ the incidence variety and denote by $q$ and $p$ the projections on the two factors; note that $q$ is finite of degree $n$. Then $L^{[n]}:=q_*p^*(L)$ is a vector bundle of rank $n$ on $X^{[n]}_c$, and the fibre at a point $\xi\in X^{[n]}_c$
is $L^{[n]}|_{\xi}\cong L|_{\xi}$. In conclusion, the collection $\{\sigma_1|_{\xi}\wedge\cdots\wedge\sigma_n|_{\xi}\}_{\xi}$ defines a section in the line bundle $\mathrm{det}(L^{[n]})$. The map we are looking for, $\wedge^nH^0(L)\to H^0(\mathrm{det}(L^{[n]}))$, is deduced from the evaluation map $\mathrm{ev}_n:H^0(L)\otimes \mathcal{O}_{X^{[n]}_c}\to L^{[n]}$, taking $\wedge^n\mathrm{ev}_n$ and applying $H^0$. It is remarkable that \cite{VoisinJEMS,VoisinCOMP,Ellingsrud-Goettsche-LehnJAG}: \begin{thm}[Voisin, Ellingsrud--G\"ottsche--Lehn]
The map\vspace*{-3pt}
\[
H^0(\wedge^n\mathrm{ev}_n):\wedge^nH^0(X,L)\to
H^0\left(X^{[n]}_c,\mathrm{det}(L^{[n]})\right)\vspace*{-3pt}
\]
is an isomorphism. \end{thm}
Since the exterior powers of $H^0(L)$ are building blocks for Koszul cohomology, it is natural to believe that the isomorphism above yields a relation between the Koszul cohomology and the Hilbert scheme. To this end, the Koszul differentials must be reinterpreted in the new context.
There is a natural birational morphism\footnote{We see one advantage of working on $X^{[n]}_c$: subtraction makes sense only for curvilinear subschemes.}\vspace*{-3pt} \[ \tau:\varXi_{n+1}\to X^{[n]}_c\times X,\ (\xi,x)\mapsto(\xi-x,x)\vspace*{-3pt} \] presenting $\varXi_{n+1}$ as the blowup of $X^{[n]}_c\times X$ along $\varXi_n$. If we denote by $D_{\tau}$ the exceptional locus, we obtain an inclusion \cite{VoisinJEMS} \[
q^*\mathrm{det}(L^{[n+1]})\cong\tau^*(\mathrm{det}(L^{[n]})\boxtimes
L)(-D_{\tau})\hookrightarrow \tau^*(\mathrm{det}(L^{[n]})\boxtimes
L) \] whence \[ H^0\left(X^{[n+1]}_c,\mathrm{det}(L^{[n+1]})\right)\hookrightarrow H^0(X^{[n]}_c\times X,\mathrm{det}(L^{[n]})\boxtimes L), \] identifying the left-hand member with the kernel of a Koszul differential \cite{VoisinJEMS}. A~version of this identification leads us to \cite{VoisinJEMS,VoisinCOMP}:
\begin{thm}[Voisin] \label{thm: Hilbert scheme}
For any integers $m$ and $n$, $K_{n,m}(X,L)$ is isomorphic to the
cokernel of the restriction map:
\[
H^0\left(X^{[n+1]}_c\times X,\mathrm{det}(L^{[n+1]})\boxtimes L^{m-1}\right)\to
H^0\left(\varXi_{n+1},\mathrm{det}(L^{[n+1]})\boxtimes L^{m-1}|_{\varXi_{n+1}}\right).
\] \end{thm}
The vanishing of Koszul cohomology is thus reduced to proving surjectivity of the restriction map above. In general, it is very hard to prove surjectivity directly, and one has to make a suitable base-change \cite{VoisinJEMS}.
\subsection{The Role of Lazarsfeld--Mukai Bundles in the Generic Green Conjecture and Consequences} \label{subsection: Role of LM}
In order to prove Green's conjecture for general curves, it suffices to exhibit one example of a curve of maximal Clifford index, which verifies the predicted vanishing. Afterwards, the vanishing of Koszul cohomology propagates by semicontinuity. Even so, finding one single example is a task of major difficulty. The curves used by Voisin in \cite{VoisinJEMS,VoisinCOMP} are $K3$ sections, and the setups change slightly, according to the parity of the genus. For even genus, we have \cite{VoisinJEMS}:
\begin{thm}[Voisin]
\label{thm: Voisin even} Suppose that $g=2k$. Consider $S$ a $K3$ surface with $\mathrm{Pic}(S)\cong \mathbb{Z}\cdot L$, $L^2=2g-2$, and
$C\in|L|$ a smooth curve. Then $K_{k,1}(C,K_C)=0$. \end{thm}
For odd genus, the result is \cite{VoisinCOMP}:
\begin{thm}[Voisin]
\label{thm: Voisin odd} Suppose that $g=2k+1$. Consider $S$ a $K3$ surface with $\mathrm{Pic}(S)\cong \mathbb{Z}\cdot L\oplus \mathbb{Z}\cdot \varGamma$, $L^2=2g-2$, $\varGamma$ a smooth rational curve with $L\cdot \varGamma =2$, and
$C\in|L|$ a smooth curve. Then $K_{k,1}(C,K_C)=0$. \end{thm}
Note that the generic value for the Clifford index in genus $g$ is $[(g-1)/2]$, and hence, in both cases, the prediction made by Green's conjecture for general curve $C$ is precisely $K_{k,1}(C,K_C)=0$.
There are several reasons for making these choices: the curves have maximal Clifford index, by Theorem~\ref{thm: GL Cliff} (and the Clifford dimension is one), the Lazarsfeld--Mukai bundles associated to minimal pencils are $L$-stable, the hyperplane section theorem applies, etc.
We outline here the role played by Lazarsfeld--Mukai bundles in Voisin's proof and, for simplicity, we restrict to the even-genus case. By the \hbox{hyperplane} section Theorem~\ref{thm: Lefschetz}, the required vanishing on the curve is equivalent to \hbox{$K_{k,1}(S,L)=0$.} From the description of Koszul cohomology in terms of Hilbert schemes, Theorem \ref{thm: Hilbert scheme}, adapting the notation from the previous subsection, one has to prove the surjectivity of the map \[ q^*: H^0\left(S_c^{[n+1]},\mathrm{det}(L^{[n+1]})\right)\to
H^0\left(\varXi_{n+1},q^*\mathrm{det}(L^{[n+1]})|_{\varXi_{n+1}}\right).
\] The surjectivity is proved after performing a suitable base-change.
We are in the case $\rho(g,1,k+1)=0$; hence there is a unique Lazarsfeld--Mukai bundle $E$ on $S$ associated to all $g^1_{k+1}$
on curves in $|L|$. The uniqueness yields an alternate description of $E$ as extension \[ 0\to \mathcal{O}_S\to E\to L\otimes I_{\xi}\to 0, \] where $\xi$ varies in $S_c^{[k+1]}$.
There exists a morphism $\mathbb{P} H^0(S,E)\rightarrow S^{[k+1]}$ that sends a global section $s\in H^0(S,E)$ to its zero set $Z(s)$. By restriction to an open subset $\mathbb{P}\subset\mathbb{P} H^0(S,E)$, we obtain a morphism $\mathbb{P}\rightarrow S_c^{[k+1]}$, inducing a commutative diagram \begin{center}
\mbox{\xymatrix{ \mathbb{P}^{\prime} = \mathbb{P}\times_{S_c^{[k+1]}}\varXi_{k+1}\ar[r]\ar[d]^{q^{\prime}} &\varXi_{k+1} \ar[d]^q \\ \mathbb{P} \ar[r] & S_c^{[k+1]}. }}
\end{center} Set-theoretically \[
\mathbb{P}^{\prime} = \{(Z(s),x)|s\in H^0(S,E),x\in Z(s)\}. \]
Unfortunately, this very natural base-change does not satisfy the necessary conditions that imply the surjectivity of $q^*$, \cite{VoisinJEMS}. Voisin modifies slightly this construction and replaces $\mathbb{P}$ with another variety related to $\mathbb{P}$ which parameterizes zero-cycles of the form $Z(s)-x+y$ with $[s]\in\mathbb{P}$, $x\in\mathrm{Supp}(Z(s))$ and $y\in S$. It turns out, after numerous elaborated calculations using the rich geometric framework provided by the Lazarsfeld--Mukai bundle, that the new base-change is suitable and the surjectivity of $q^*$ follows from vanishing results on the Grassmannian \cite{VoisinJEMS}.
In the odd-genus case, Voisin proves first Green's conjecture for smooth curves in $|L+\varGamma|$, which are easily seen to be of maximal Clifford index. The situation on $|L+\varGamma|$ is somewhat close to the setup of Theorem~\ref{thm: Voisin even}, and the proof is similar. The next hard part is to descend from the vanishing of $K_{k+1,1}(S,L\otimes\mathcal{O}_S(\varGamma))$
to the vanishing of $K_{k,1}(S,L)$. This step uses again intensively the unique Lazarsfeld--Mukai bundle associated to any $g^1_{k+2}$ on curves in $|L+\varGamma|$.
The odd-genus case is of maximal interest: combined with the Hirschowitz--Ramanan result \cite{Hirschowitz-RamananAENS}, Theorem~\ref{thm: Voisin odd} gives a solution to Green's conjecture for \textit{any} curve of odd genus and maximal Clifford index:
\begin{thm}[Hirschowitz--Ramanan, Voisin] \label{thm: HRV}
Let $C$ be a smooth curve of odd genus $2k+1\ge 5$
and Clifford index $k$. Then $K_{k,1}(C,K_C)=0$. \end{thm}
Note that Theorem~\ref{thm: HRV} implies the following statement:
\begin{cor} A smooth curve of odd genus and maximal Clifford index has Clifford dimension one. \end{cor}
The proof of Theorem~\ref{thm: HRV} relies on the comparison of two effective divisors on the moduli space of curves $\mathcal{M}_{2k+1}$, one given by the condition $\mathrm{gon}(C)\le k+1$, which is known to be a divisor from \cite{Harris-MumfordINVENTIONES}, and the second given by $K_{k,1}(C,K_C)\ne 0$. By duality $K_{k,1}(C,K_C)\cong K_{k-2,2}(C,K_C)$. Note that $K_{k-2,2}(C,K_C)$ is isomorphic~to\vspace*{-3pt} \[ \mathrm{Coker} \left(\wedge^kH^0(K_C)\otimes H^0(K_C)/\wedge^{k+1}H^0(K_C) \to H^0(\wedge^{k-1}M_{K_C}\otimes K_C^2)\right)\vspace*{-3pt} \] and the two members have the same dimension. The locus of curves with $K_{k,1}\ne 0$ can be described as the degeneracy locus of a morphism between vector bundles of the same rank, and hence it is a virtual divisor. Theorem~\ref{thm: Voisin odd} implies that this locus is not the whole space, and in conclusion it must be an effective divisor. \hbox{Theorem \ref{thm: GL nonvan}} already gives an inclusion between the supports of the two divisors in question, and the set-theoretic equality is obtained from a divisor class calculation~\cite{Hirschowitz-RamananAENS}.
\vspace*{-6pt} \subsection{Green's Conjecture for Curves on $K3$ Surfaces} \label{subsection: Green for K3}
We have already seen that general $K3$ sections have a mild behavior from the Brill--Noether theory viewpoint. In some sense, they behave like general curves in any gonality stratum of the moduli space of curves.
As in the previous subsections, fix a $K3$ surface $S$ and a globally generated line bundle $L$ with $L^2=2g-2$ on $S$, and denote by $k$ the gonality of a general smooth curve in the linear system $|L|$. Suppose that $\rho(g, 1, k)\le 0$ to exclude the case $g=2k-3$
(when $\rho(g, 1, k)=1$). If in addition the curves in $|L|$ have Clifford dimension one, Theorem~\ref{thm: Green Cliffdim 1} shows that\vspace*{-3pt} \[ \mathrm{dim}\ W^1_{g-k+2}(C)=\rho(g,1,g-k+2)=g-2k+2,\vspace*{-3pt} \] property which was called the \textit{linear growth condition}.
This property appears in connection with Green's conjecture~\cite{AproduMRL} for a much larger class of curves:
\begin{thm} \label{thm: Aprodu}
If $C$ is any smooth curve of genus $g\ge 6$ and gonality $3\le k<[g/2]+2$
with $\mathrm{dim}\ W^1_{g-k+2}(C)=\rho(g,1,g-k+2)$,
then $K_{g-k+1,1}(C,K_C)=0$. \end{thm}
One effect of Theorems~\ref{thm: Aprodu} and~\ref{thm: GL nonvan} is that an arbitrary curve that satisfies the linear growth condition is automatically of Clifford dimension one and verifies Green's conjecture.
Theorem~\ref{thm: Aprodu} is a consequence of Theorem~\ref{thm: HRV} extended over the boundary of the moduli space. Starting from a $k$-gonal smooth curve $[C]\in \mathcal{M}_g$, by identifying pairs of general points $\{x_i,y_i\}\subset C$ for $i=0, \dots, g-2k+2$ we produce a stable irreducible curve \[ \left[X:=C/(x_0\sim y_0, \ldots, x_{g-2k+2}\sim y_{g-2k+2})\right] \in \overline{\mathcal{M}}_{2(g-k+1)+1}, \] and the Koszul cohomology of $C$ and of $X$ are related by the inclusion $K_{p,1}(C,K_C)$ $\subset K_{p,1}(X,\omega_X)$ for all $p\ge 1$, \cite{VoisinJEMS}. If $C$ satisfies the linear growth condition then $X$ has maximal gonality\footnote{The gonality for a singular stable curve is defined in terms of admissible covers \cite{Harris-MumfordINVENTIONES}.} $\mathrm{gon}(X)=g-k+3$, i.e., $X$ lies outside the closure of the divisor $\mathcal{M}_{2(g-k+1)+1, g-k+2}^1$ consisting of curves with a pencil $g^1_{g-k+2}$. The class of the failure locus of Green's conjecture on $\overline{\mathcal{M}}_{2(g-k+1)+1}$ is a multiple of the divisor $\overline{\mathcal{M}}_{2(g-k+1)+1, g-k+2}^1$; hence Theorem~\ref{thm: HRV} extends to irreducible stable curves of genus $2(g-k+1)+1$ of maximal gonality $(g-k+3)$. In particular, $K_{g-k+1,1}(X,\omega_X)=0$, implying $K_{g-k+1,1}(C,K_C)=0$.
Coming back to the original situation, we conclude from Theorems~\ref{thm: Aprodu} and~\ref{thm: Green Cliffdim 1} and Corollary~\ref{cor: Lefschetz K3} that Green's conjecture holds for a $K3$ section $C$ having Clifford dimension one. If $\mbox{Cliff}(C)=\mathrm{gon}(C)-3$, either $C$ is a smooth plane curve or else there exist smooth curves $D, \varGamma \subset S$, with $\varGamma^2=-2, \varGamma\cdot D=1$ and $D^2\geq 2$, such that $C\equiv 2D+ \varGamma$ and $\mbox{Cliff}(C)=\mbox{Cliff}(\mathcal{O}_C(D))$ \cite{Ciliberto-PareschiCRELLE,KnutsenIJM}. The linear growth condition is no longer satisfied, and this case is treated differently, by degeneration to a reduced curve with two irreducible components \cite{Aprodu-FarkasCOMP}.
The outcome of this analysis of the Brill--Noether loci is the following \cite{VoisinJEMS,VoisinCOMP,Aprodu-FarkasCOMP}:
\begin{thm} \label{thm: Green on K3} Green's conjecture is valid for any smooth curve on a $K3$ surface. \end{thm}
Applying Theorem~\ref{thm: Green on K3}, Theorem~\ref{thm: Lefschetz}, and the duality, we obtain a full description of the situations when Koszul cohomology of a $K3$ surface is zero \cite{Aprodu-FarkasCOMP}:
\begin{thm} \label{thm: K3} Let $S$ be a $K3$ surface and $L$ a globally generated line bundle with $L^2=2g-2\ge 2$. The Koszul cohomology group $K_{p,q}(S,L)$ is nonzero if and only if one of the following cases occurs: \begin{itemize} \item[(1)] $q=0$ and $p=0$, or \item[(2)] $q=1$, $1\le p\le g-c-2$, or \item[(3)] $q=2$ and $c\le p\le g-1$, or \item[(4)] $q=3$ and $p=g-2$. \end{itemize} \end{thm}
The moral is that the shape of the Betti table, i.e., the distribution of zeros in the table, of a polarized $K3$ surface is completely determined by the geometry of hyperplane sections; this is one of the many situations where algebra and geometry are intricately related.
\section{Counterexamples to Mercat's Conjecture in Rank Two} \label{section: Higher BN}
Starting from Mukai's works, experts tried to generalize the classical Brill--Noether theory to higher-rank vector bundles on curves. Within these extended theories,\footnote{Higher-rank Brill--Noether theory is a major, rapidly growing research field, and it deserves a separate dedicated survey.} we note the attempt to find a proper generalization of the Clifford index. H. Lange and P. Newstead proposed the following definition. Let $E$ be a semistable vector bundle of rank $n$ of degree $d$ on a smooth curve $C$. Put \[
\gamma(E):=\mu(E)-2\frac{h^0(E)}{n}+2. \]
\begin{defn}[Lange--Newstead]
The \textit{Clifford index} of rank $n$ of $C$ is \[
\mathrm{Cliff}_n(C):=\mathrm{min}\{\gamma(E):\ \mu(E)\le g-1,\ h^0(E)\ge 2n\}. \] \end{defn}
From the definition, it is clear that $\mathrm{Cliff}_1(C)=\mathrm{Cliff}(C)$ and $\mathrm{Cliff}_n(C)\le\mathrm{Cliff}(C)$ for all $n$.\footnote{For any line bundle $A$, we have $\gamma(A^{\oplus n})=\mathrm{Cliff}(A)$.}
Mercat conjectured~\cite{MercatIJM} that $\mathrm{Cliff}_n(C)=\mathrm{Cliff}(C)$. In rank two, the conjecture is known to hold in a number of cases: for general curves of small gonality, i.e., corresponding to a general point in a gonality stratum $\mathcal{M}^1_{g,k}$ for small $k$ (Lange--Newstead), for plane curves (Lange--Newstead), for general curves of genus $\le 16$ (Farkas--Ortega), etc. However, even in rank two, the conjecture is false. It is remarkable that counterexamples are found for curves of maximal Clifford index~\cite{Farkas-OrtegaIJM}:
\begin{thm}[Farkas--Ortega]
Fix $p\ge 1$, $a\ge 2p+3$. Then there exists a smooth curve $C$ of genus $2a+1$ of maximal Clifford index lying on a smooth $K3$ surface $S$ with $\mathrm{Pic}(S)=\mathbb{Z}\cdot C\oplus \mathbb{Z}\cdot H$, $H^2=2p+2$, $C^2=2g-2$, $C\cdot H=2a+2p+1$, and there exists a stable rank-two vector bundle $E$ with $\mathrm{det}(E)=\mathcal{O}_S(H)$ and $h^0(E)=p+3$, $\gamma(E)=a-\frac{1}{2}<a=\mathrm{Cliff}(C)$, and hence Mercat's conjecture in rank two fails for~$C$. \end{thm}
The proof uses restriction of Lazarsfeld--Mukai bundles. However, it is interesting that the bundles are not restricted to the same curves to which they are associated. More precisely, the genus of $H$ is $2p+2$ and $H$ has maximal gonality $p+2$. Consider $A$ a minimal pencil on $H$, and take $E=E_{H,A}$ the associated Lazarsfeld--Mukai bundle. The restriction of $E$ to $C$ is stable and verifies all the required properties.
A particularly interesting case is $g=11$. In this case, as shown by Mukai \cite{MukaiLNPAM}, a general curve $C$ lies on a unique $K3$ surface $S$ such that $C$ generates $\mathrm{Pic}(S)$.\vadjust{\pagebreak} It~is~remarkable that the failure locus of Mercat's conjecture in rank two \textit{coincides} with the Noether-Lefschetz divisor \[ \mathcal{NL}^4_{11,13}:=\left\{[C]\in \mathcal{M}_{11}: \begin{array}{l} C\mbox{ lies on a } K3 \mbox{ surface } S, \ \mathrm{Pic}(S)\supset \mathbb{Z}\cdot C\oplus \mathbb{Z}\cdot H,\\ H\in \mathrm{Pic}(S) \mbox{ is nef},
H^2=6, \ C\cdot H=13, \ C^2=20 \end{array} \right\} \] inside the moduli space $\mathcal{M}_{11}$. We refer to \cite{Farkas-OrtegaIJM} for details.
\end{document} |
\begin{document}
\title{An alternative framework for quantifying coherence of quantum channels}
\author{Shi-Yun Kong$^1$} \author{Ya-Juan Wu$^2$}
\author{Qiao-Qiao Lv$^1$} \author{Zhi-Xi Wang$^1$}
\author{Shao-Ming Fei$^{1,3}$}
\affiliation{ {\footnotesize $^1$School of Mathematical Sciences, Capital Normal University, Beijing 100048, China}\\ {\footnotesize $^2$School of Mathematics, Zhengzhou University of Aeronautics, Zhengzhou 450046, China}\\ {\footnotesize $^3$Max-Planck-Institute for Mathematics in the Sciences, 04103 Leipzig, Germany} }
\begin{abstract} We present an alternative framework for quantifying the coherence of quantum channels, which consists of three conditions: faithfulness, nonincreasing under incoherent superchannels, and additivity. Based on the characteristics of the coherence of quantum channels and the additivity over independent channels, our framework provides an easier way to certify whether a function is a bona fide coherence measure of quantum channels. A detailed example is given to illustrate the effectiveness and advantages of our framework. \end{abstract}
\maketitle
\section{Introduction} As one of the characteristic features that marks the departure of quantum mechanics from the classical realm, the quantum coherence plays a central role in quantum optics \cite{glauber,scully,gyongyosi}, thermodynamics \cite{brandao,gour,narasimhachar,aberg2,lostaglio1,lostaglio2,gentaro,mayuhan,xushengzhi}, nanoscale physics \cite{plenio,rebentrost,licheming,huelga} and quantum measurements \cite{napoli,mateng,longguilu}. Quantum coherence is also the origin of many quantum phenomena such as entanglement and multiparticle interference. Recently the coherence of quantum states, concerning the quantifications \cite{baumgratz,girolami}, interconversion \cite{bromley} and applications \cite{luo,aberg,monras}, has been extensively investigated.
Similar to the resource theory of quantum entanglement, Baumgratz, Cramer and Plenio presented a rigorous framework (BCP framework) for quantifying the coherence of quantum states and introduced several measures of coherence including the relative entropy of coherence, the $l_1$-norm of coherence and fidelity~\cite{baumgratz}. The BCP framework is widely used in quantifying coherence. Inspired by the BCP framework, Yu \emph{et al.} put forward another equivalent framework~\cite{yuxiaodong}, which can be more conveniently used in some cases and is applicable to various physical contexts.
The quantum state transfer depends on quantum channels. The coherence of quantum channels characterizes the ability to optimize the coherence of all output states of the channels \cite{xueyuanhu,bendana,korzekawa,theurer,chandan}. Similar to the resource theory of coherence for quantum states, the resource theory of coherence for quantum channels has attracted much attention. With respect to the BCP framework for coherence of quantum states, Xu established a framework for quantifying the coherence of quantum channels \cite{xujianwei}.
In this paper, similar to the alternative framework for quantum states given in \cite{yuxiaodong} which simplifies the BCP framework in particular cases, we establish a new framework for quantifying the coherence of quantum channels, which improves the applications of the previous framework given in \cite{xujianwei}. Detailed examples are presented to illustrate the advantages of our framework.
\maketitle
\section{An alternative framework for coherence of quantum channels}
Let $H_A$ and $H_B$ be Hilbert spaces with dimensions ${\rm dim} H_A=|A|$ and ${\rm dim} H_B=|B|$, and $\{|j\rangle\}_j,~\{|k\rangle\}_k$ and $\{|\alpha\rangle\}_\alpha,~\{|\beta\rangle\}_\beta$ be the fixed orthonormal bases of $H_A$ and $H_B$, respectively. Denote $\mathcal{D}_A$ ($\mathcal{D}_B$) the set of all density operators on $H_A$ ($H_B$). Let $\mathcal{C}_{AB}$ be the set of all channels from $\mathcal{D}_A$ to $\mathcal{D}_B$.
A quantum channel $\phi\in \mathcal{C}_{AB}$ is a linear completely positive and trace preserving (CPTP) map with Kraus operators $\{K_n\}_n$ satisfying $\sum_n K_n^\dagger K_n=I$ such that $\phi(\rho)=\sum_n K_n\rho K_n^\dagger$. The corresponding Choi matrix with respect to the channel $\phi\in\mathcal{C}_{AB}$ has the following form, \begin{equation}\label{W1}
J_{\phi}=\sum_{jk}|j\rangle\langle k|\otimes\phi(|j\rangle\langle k|)=\sum_{jk,\alpha\beta}\phi_{jk\alpha\beta}|j\rangle\langle k|\otimes|\alpha\rangle\langle\beta|, \end{equation}
where $\phi_{jk\alpha\beta}=\langle\alpha|\phi(|j\rangle\langle k|)|\beta\rangle$ are complex numbers and $\sum_{\alpha}\phi_{jk\alpha\alpha}=\delta_{jk}$. If $C$ is a coherence measure of quantum states, then $C(\phi)=C(\frac{J_\phi}{|A|})$ is a corresponding coherence measure for quantum channels~\cite{xujianwei}, which quantifies the coherence of the channel by the coherence of the state $\frac{J_\phi}{|A|}$. The Choi matrix of a channel thus gives a relation between the coherence measures for quantum channels and those for quantum states.
Let $\mathcal{I}$ be the set of all incoherent states whose density matrices are diagonal in the given basis. An incoherent state $\rho\in H_A$ satisfies $\Delta_A(\rho)=\rho$ with $\Delta_A(\rho)=\sum_j\langle j|\rho|j\rangle|j\rangle\langle j|$ being a completely dephasing channel. $\phi\in\mathcal{C}_{AB}$ is called an incoherent quantum channel if $\Upsilon(\phi)=\phi$, where $\Upsilon(\phi)=\Delta_B \phi \Delta_A$, with $\Delta_A$ and $\Delta_B$ being resource destroying maps~\cite{liuziwen}. We denote by $\mathcal{IC}$ the set of all the incoherent channels. Let $\mathcal{SC}_{ABA'B'}$ be the set of superchannels, i.e., linear maps from $\mathcal{C}_{AB}$ to $\mathcal{C}_{A'B'}$. We define the Choi matrix of the superchannel $\Theta\in\mathcal{SC}_{ABA'B'}$ as $J_\Theta=\sum_{jk\alpha\beta}|j\alpha\rangle\langle k\beta|\otimes\Theta(|j\alpha\rangle\langle k\beta|)$. A superchannel $\Theta$ is called an incoherent superchannel ($\mathcal{ISC}$) if it admits a Kraus representation $\Theta=\{M_m\}_m$ such that for each $m$, $M_m=\sum_{j\alpha}M_{mj\alpha}|f(j\alpha)\rangle\langle j\alpha|$ with $f(j\alpha)=f(j,\alpha)\in \{(j',\alpha')|_{j'=1}^{|A'|},_{\alpha'=1}^{|B'|}\}$.
In \cite{xujianwei}, the author presented a framework for quantifying the coherence of quantum channels. A proper measure $C$ of the coherence for quantum channels must satisfy the following conditions: \begin{enumerate} \item[$\mathrm{(B1)}$] For any quantum channel $C(\phi)\geqslant0$, $C(\phi)=0$ if and only if $\phi\in\mathcal{IC}_{AB}$; \item[$\mathrm{(B2)}$] $C(\phi)\geqslant C[\Theta(\phi)]$ for any incoherent superchannel $\Theta$;
\item[$\mathrm{(B3)}$] $C(\phi)\geqslant\sum_m p_mC(\phi_m)$ for any incoherent superchannel $\Theta$, with $\{M_m\}_m$ an incoherent Kraus operator of $\Theta$, $p_m=\frac{\mathrm{tr}(M_m J_\phi M_m^\dagger)}{|A'|}$, and $J_{\phi_m}=|A'|\frac{M_m J_\phi M_m^\dagger}{\mathrm{tr}(M_m J_\phi M_m^\dagger)}$; \item[$\mathrm{(B4)}$] $C(\sum_m p_m \phi_m)\leqslant\sum_m p_m C(\phi_m)$ for any set of quantum channels $\{\phi_m\}$ and any probability distribution $\{p_m\}_m$. \end{enumerate} The items (B1)-(B4) give necessary conditions for a bona fide measure of coherence for quantum channels, from which the quantification of the coherence for quantum channels has been further investigated.
Nevertheless, similar to the case of coherence measures for quantum states, the last two conditions (B3) and (B4) are rather difficult to be verified for a given measure of coherence for quantum channels. In the following we present an alternative framework consisting of three conditions, which is equivalent to the above framework but can be easily applied. A function $C$ is a well defined coherence measure of quantum channels if it satisfies the following three conditions:
\begin{enumerate} \item[$\mathrm{(C1)}$] $\mathit{Faithfulness}$. $C(\phi)\geqslant0$ for any $\phi\in \mathcal{C}_{AB}$, and $C(\phi)=0$ if and only if $\phi\in\mathcal{IC}_{AB}$; \item[$\mathrm{(C2)}$] $\mathit{Nonincreasing\ under\ ISCs}.$ $C(\phi)\geqslant C[\Theta(\phi)]$ for any $\Theta\in\mathcal{ISC}_{ABA'B'}$;
\item[$\mathrm{(C3)}$] $\mathit{Additivity}$. $C(\Phi)=p_1C(\phi_1)+p_2C(\phi_2)$ for $p_1+p_2=1$, $\phi_1\in \mathcal{C}_{AB_1}$ and $\phi_2\in \mathcal{C}_{AB_2}$, where $\Phi(|j\rangle\langle k|)=p_1\phi_1(|j\rangle\langle k|)\oplus p_2\phi_2(|j\rangle\langle k|)$, $\Phi\in \mathcal{C}_{AB}$,\ and $|B|=|B_1|+|B_2|$. \end{enumerate}
In the following, we prove that the framework given by (B1)-(B4) is equivalent to the one given by (C1)-(C3). We first prove that (B1)-(B4) give rise to (C1)-(C3), namely, (B3)(B4) give rise to (C3) since (C1) and (C2) are the same as (B1) and (B2). Consider a CPTP map $\Theta_1\in\mathcal{ISC}_{ABAB}$, $\Theta_1(\cdot)=Q_1\cdot Q_1^\dagger+Q_2\cdot Q_2^\dagger$, where \begin{equation} \begin{aligned}
Q_1=&|0\rangle\la0|+\cdots+||B_1|-1\rangle\langle |B_1|-1|+||B_1|+|B_2|\rangle\langle |B_1|+|B_2||+\cdots+|2|B_1|+|B_2|-1\rangle\\
&\langle 2|B_1|+|B_2|-1|+\cdots+|(|A|-1)(|B_1|+|B_2|)\rangle\langle(|A|-1)(|B_1|+|B_2|)|+\cdots\\
&+|(|A|-1)(|B_1|+|B_2|)+|B_1|-1\rangle\langle(|A|-1)(|B_1|+|B_2|)+|B_1|-1| \end{aligned} \end{equation} and \begin{equation} \begin{aligned}
Q_2=&||B_1|\rangle\langle |B_1||+\cdots+||B_1|+|B_2|-1\rangle\langle |B_1|+|B_2|-1|+|2|B_1|+|B_2|\rangle\la2|B_1|+|B_2||+\cdots\\
&+|2(|B_1|+|B_2|)-1\rangle\la2(|B_1|+|B_2|)-1|+\cdots+||A||B_1|+(|A|-1)|B_2|\rangle\\
&\langle |A||B_1|+(|A|-1)|B_2||+\cdots+||A|(|B_1|+|B_2|)-1\rangle\langle |A|(|B_1|+|B_2|)-1|. \end{aligned} \end{equation} Note that $Q_1$ and $Q_2$ are just projectors onto $\mathcal{C}_{AB}$. Obviously, one sees that $Q_i\mathcal{IC}Q_i^\dagger\subset\mathcal{IC}$. The Choi matrix of $\Phi$ in (C3) is given by \begin{equation}\label{W3}
J_\Phi=\sum_{j,k}|j\rangle\langle k|\otimes[p_1\phi_1(|j\rangle\langle k|)\oplus p_2\phi_2(|j\rangle\langle k|)]. \end{equation} Then \begin{equation} \Theta_1(\Phi)=Q_1J_\Phi Q_1^\dagger+Q_2J_\Phi Q_2^\dagger=p_1J_{\tilde{\phi}_1}+p_2J_{\tilde{\phi}_2}, \end{equation}
where $p_1=\frac{\mathrm{tr}(Q_1J_\Phi Q_1^\dagger)}{|A|}$, $p_2=\frac{\mathrm{tr}(Q_2J_\Phi Q_2^\dagger)}{|A|}$ and $J_{\tilde{\phi}_1}=|A|\frac{Q_1J_\Phi Q_1^\dagger}{\mathrm{tr}(Q_1J_\Phi Q_1^\dagger)}$, $J_{\tilde{\phi}_2}=|A|\frac{Q_2J_\Phi Q_2^\dagger}{\mathrm{tr}(Q_2J_\Phi Q_2^\dagger)}$. From (B2) and (B3) we have \begin{equation}\label{W4} C(\Phi)\geqslant p_1C(\tilde{\phi_1})+p_2C(\tilde{\phi_2}), \end{equation}
where $\tilde{\phi}_1,\,\tilde{\phi}_2\in\mathcal{C}_{AB}$, $\tilde{\phi}_1(|j\rangle\langle k|)=\phi_1(|j\rangle\langle k|)\oplus{\bf0}(|j\rangle\langle k|)$ and $\tilde{\phi}_2(|j\rangle\langle k|)={\bf0}(|j\rangle\langle k|)\oplus\phi_2(|j\rangle\langle k|)$ with ${\bf0}$ a zero map. From (B4) we have \begin{equation}\label{W5} C(\Phi)\leqslant p_1C(\tilde{\phi_1})+p_2C(\tilde{\phi_2}). \end{equation} Combining \eqref{W4} with \eqref{W5}, we get \begin{equation}\label{W6} C(\Phi)=p_1C(\tilde{\phi_1})+p_2C(\tilde{\phi_2}). \end{equation}
To obtain (C3), we further need to certify that $C(\tilde{\phi_1})=C(\phi_1)$. Under an incoherent superchannel, any $\phi$ and $\overline{\Theta}(\phi)=\sum P_nJ_\phi P_n^\dagger$ can be transformed into each other, where the Kraus operators $\{P_n\}_n$ are permutation matrices. By (B2) we have $C(\overline{\Theta}(\phi))=C(\phi)$. Based on this fact, we define an incoherent superchannel $\overline{\Theta}_1\in\mathcal{ISC}_{ABAB}$ with Kraus operators $\{P_{nl}^{(1)}\}_{nl}$ as the permutation matrices, \begin{equation} P_{nl}^{(1)}(i,j)=\left\{ \begin{aligned} &1,~{\rm if}~(i, j)=(i_{nl}, j_{nl}) ~{\rm or} ~(i, j)=(j_{nl}, i_{nl}),\\ &1,~{\rm if}~i=j,\\ &0,~{\rm otherwise}, \end{aligned}\right. \end{equation}
where $i_{nl}=(n-1)(|B_1|+|B_2|)+l$, $j_{nl}=(n-1)|B_1|+l$, $i,j=1,\cdots,|A||B|$, $n=1,\cdots,|A|$ and $l=1,\cdots,|B_1|$. Then, \begin{equation}\overline{\Theta}_1(\tilde{\phi}_1)=\sum_{n,l}P_{nl}^{(1)}J_{\tilde{\phi}_1}P_{nl}^{(1)\dagger}=J_{\phi_1}\oplus O_{AB_2},\end{equation}
where $O_{AB_2}$ is a $|A||B_2|\times|A||B_2|$ null matrix. It is easily seen that \begin{equation}\label{W7} C(\tilde{\phi}_1)=C(\overline{\Theta}_1(\tilde{\phi}_1)). \end{equation}
Next, we need to prove $C(\overline{\Theta}_1(\tilde{\phi}_1))=C(\phi_1)$. For this, we define two incoherent superchannels: $\Theta_2\in\mathcal{ISC}_{AB_1AB}$ with Kraus operator $M_0$ satisfying $\langle j|M_0|k\rangle=\delta_{jk}$ and $\Theta_3\in\mathcal{ISC}_{ABAB_1}$ with Kraus operators $\{M_n\}_{n=0}^{\lceil\frac{|B_2|}{|B_1|}\rceil}$ satisfying $\langle j|M_n|k\rangle=\delta_{j,k-n|B_1|}$. Then we get, \begin{equation}~\Theta_2(\phi_1)=M_0J_{\phi_1}M_0^\dagger=J_{\phi_1}\oplus O_{AB_2}\end{equation} and \begin{equation} \Theta_3[(\overline{\Theta}_1(\tilde{\phi}_1)]
=\sum_{n=0}^{\lceil\frac{|B_2|}{|B_1|}\rceil}M_n(J_{\phi_1}\oplus O_{AB_2})M_n^\dagger=J_{\phi_1}. \end{equation} From (B2) we obtain \begin{equation}\label{6jia} C(\overline{\Theta}_1(\tilde{\phi}_1))=C(\phi_1). \end{equation} Combining \eqref{6jia} with \eqref{W6} and \eqref{W7}, we get the condition (C3).
We have shown that any $C$ satisfying (B1)-(B4) also satisfies (C1)-(C3). Next, we prove that any $C$ satisfying (C1)-(C3) must satisfy (B3) and (B4). First, we prove (B3), i.e., $C$ is convex. Define $\Phi_1\in\mathcal{C}_{AB'}$ as,
\begin{equation}\Phi_1(|j\rangle\langle k|)=\phi(|j\rangle\langle k|)\oplus {\bf 0}(|j\rangle\langle k|)\oplus\cdots\oplus {\bf 0}(|j\rangle\langle k|),\end{equation} where $\phi\in\mathcal{C}_{AB},\ H_{B'}=\underbrace{H_B\otimes H_B\otimes\cdots\otimes H_B}_{M}.$ From (C3), we have \begin{equation}\label{W8} C(\Phi_1)=C(\phi). \end{equation} Consider $\overline{\Theta}_2\in\mathcal{ISC}_{AB'AB'}$, with its Kraus operators $\{P_n^{(2)}\}_n$ being the permutation matrices, such that $\overline{\Theta}_2(\Phi_1)=\sum_n P_n^{(2)}J_{\Phi_1}P_n^{(2)\dagger}=J_\phi\oplus \underbrace{O_{AB}\oplus\cdots\oplus O_{AB}}_{M-1}$. Apply an incoherent superchannel $\Theta_4\in \mathcal{SC}_{ABA_1B'_{1}}$ with Kraus operators $\{U_m\otimes M_m\}_m$ such that \begin{equation} \Theta_4[\overline{\Theta}_2(\Phi_1)]=\sum_{m=0}^{M-1}(U_m\otimes M_m)(\sum_nP_n^{(2)}J_{\Phi_1}P_n^{(2)\dagger})(U_m\otimes M_m)^\dagger, \end{equation}
where $U_m=\sum_{k=0}^{M-1}|(k+m)~\mathrm{mod}~M\rangle\langle k|,~H_{B'_1}=\underbrace{H_{B_1}\otimes H_{B_1}\otimes\cdots\otimes H_{B_1}}_{M}$, and $\{M_m\}$ are incoherent Kraus operators of the superchannel in $\mathcal{ISC}_{ABA_1B_1}$. One can easily see that $(U_m\otimes M_m)\mathcal{IC}(U_m\otimes M_m)^\dagger \subset\mathcal{IC}$, \begin{equation}
\Theta_4[\overline{\Theta}_2(\Phi_1)]=\sum_{m=0}^{M-1}p_m |m\rangle\langle m|\otimes J_{\phi_m}, \end{equation}
where $p_m=\frac{\rm{tr}(M_mJ_\phi M_m^\dagger)}{|A_1|}$ and $J_{\phi_m}=|A_1|\frac{M_mJ_\phi M_m^\dagger}{\rm{tr}(M_mJ_\phi M_m^\dagger)}$.
Similarly, there exists $\overline{\Theta}_3\in\mathcal{ISC}_{A_1B_1'A_1B_1'}$, with its Kraus operators $\{P_n^{(3)}\}_n$ being the permutation matrices, such that $\overline{\Theta}_3[\Theta_4(\overline{\Theta}_2(\Phi_1))]=\sum P_n^{(3)}(\sum_{m=0}^{M-1}p_m |m\rangle\langle m|\otimes J_{\phi_m})P_n^{(3)\dagger}=\sum_{i,j=0}^{M-1}|i\rangle\langle j|\otimes[p_0\phi_0(|i\rangle\langle j|)\oplus\cdots\oplus p_{M-1}\phi_{M-1}(|i\rangle\langle j|)]$. $\overline{\Theta}_3[\Theta_4(\overline{\Theta}_2(\Phi_1))]$ matches to a channel $\Phi_2\in C_{A_1{B_1}'},~\Phi_2(|i\rangle\langle j|)=p_0\phi_0(|i\rangle\langle j|)\oplus\cdots\oplus p_{M-1}\phi_{M-1}(|i\rangle\langle j|)$. \\Following (C3), we have \begin{equation}\label{W9} C(\Phi_2)=\sum_{m=0}^{M-1}p_m C(\phi_m). \end{equation} By (C2), \eqref{W8} and \eqref{W9} we can have\begin{equation}C(\phi)\geqslant\sum_{m=0}^{M-1}p_m C(\phi_m), \end{equation} which proves that (B3) holds.
We now prove (B4). We first define an initial channel $\Phi_3\in C_{AB'}$ satisfying \begin{equation}\Phi_3(|j\rangle\langle k|)=\oplus_{m=0}^{M-1}p_m\phi_m(|j\rangle\langle k|),\end{equation} where $\phi_m\in\mathcal{C}_{AB},~\{p_m\}$ are the probability distribution of $\{\phi_m\}$ and $\sum_{m=0}^{M-1}p_m=1$. According to (C3), one has \begin{equation}\label{W10} C(\Phi_3)=\sum_{m=0}^{M-1}p_m C(\phi_m). \end{equation} Apply $\overline{\Theta}_4\in\mathcal{ISC}_{AB'AB'}$, with its Kraus operators being the permutation matrices $\{P_n^{(4)}\}$, such that $\overline{\Theta}_4(\Phi_3)=\sum_n P_n^{(4)} J_{\Phi_3} P_n^{(4)\dagger}=\oplus_{m=0}^{M-1}p_m J_{\phi_m}$. Let $\Theta_5\in\mathcal{ISC}_{AB'AB'}$ be an incoherent super channel such that \begin{equation}\begin{aligned}
\Theta_5\overline{\Theta}_4(\Phi_3)&=\sum_{m=0}^{M-1}(|0\rangle\langle m|\otimes I)(\sum_n P_n^{(4)} J_{\Phi_3} P_n^{(4)\dagger})(|0\rangle\langle m|\otimes I)^\dagger\\ &=\sum_{m=0}^{M-1}p_mJ_{\phi_m}\oplus O_{A_1B_1}\oplus\cdots\oplus O_{A_1B_1}.\end{aligned} \end{equation} Apply $\overline{\Theta}_5\in\mathcal{ISC}_{AB'AB'}$, with Kraus operators $\{P_n^{(5)}\}$ as permutation matrices, such that \begin{equation}\begin{aligned} \overline{\Theta}_5[\Theta_5\overline{\Theta}_4(\Phi_3)]&=\sum_n P_n^{(5)}(\sum_{m=0}^{M-1}p_mJ_{\phi_m}\oplus O_{A_1B_1}\oplus\cdots\oplus O_{A_1B_1})P_n^{(5)\dagger}\\
&=\sum_{j,k=0}^{|A|-1}|j\rangle\langle k|\otimes[\sum_{m=0}^{M-1}p_m\phi_m(|j\rangle\langle k|)\oplus{\bf 0}(|j\rangle\langle k|)\oplus\cdots\oplus{\bf0}(|j\rangle\langle k|)]. \end{aligned}\end{equation}
Thus, $\overline{\Theta}_5[\Theta_5\overline{\Theta}_4(\Phi_3)]$ corresponds to $\Phi_4\in\mathcal{C}_{AB'}$ with $\Phi_4(|j\rangle\langle k|)=\sum_{m=0}^{M-1}p_m\phi_m(|j\rangle\langle k|)\oplus{\bf0}(|j\rangle\langle k|)\oplus\cdots\oplus{\bf0}(|j\rangle\langle k|)$. From (C3) we have \begin{equation}\label{W11} C(\Phi_4)=C(\sum_{m=0}^{M-1}p_m\phi_m). \end{equation} Combining (C2) with \eqref{W10} and \eqref{W11}, we get \begin{equation} \sum_{m=0}^{M-1}p_mC(\phi_m)\geqslant C(\sum_{m=0}^{M-1}p_m\phi_m), \end{equation} namely, (B4) holds.
We usually get coherence measures for quantum channels from corresponding coherence measures for quantum states. For instance, the $l_1$-norm of coherence $C_{l_1}(\rho)=\sum_{i\ne j}|\rho_{ij}|$~\cite{baumgratz} and the relative entropy of coherence $C_{\rm rel.}(\rho)=S(\rho_{\rm diag})-S(\rho),$ where $S$ is the von Neumann entropy and $\rho_{\rm diag}$ denotes the state obtained from $\rho$ by deleting all off-diagonal elements~\cite{baumgratz}, are coherence measures for quantum states. On this basis, $C_{l_1}(\phi)=\sum_{i\ne j}|(\frac{J_\phi}{|A|})_{ij}|$ and $C_{\rm rel.}(\phi)=S(\phi_{\rm diag})-S(\phi)=S(\frac{J_{\phi_{\rm diag}}}{|A|})-S(\frac{J_\phi}{|A|})$ are both coherence measures for quantum channels~\cite{xujianwei}.
The above proof shows that our new framework is equivalent to the framework given by (B1)-(B4) for quantum channels. In determining whether a function $C$ can be used as a coherence measure for channels, in some cases, it is not easy to verify whether $C$ satisfies (B3). The condition (C3) in our framework provides a new way to solve the problem. We give an example to show the efficiency of our framework.
{\bf Example}
The trace distance measure of coherence defined by $C_{\rm tr}(\rho):=\min_{\delta \in \cal{I}} \|\rho-\delta\|_{\rm tr}=\min_{\delta \in \cal{I}}{\rm tr}|\rho-\delta|$ is not a well defined coherence measure for quantum states \cite{yuxiaodong}. Let us check whether $C_{\rm tr}(\phi)=C_{\rm tr}(\frac{J_\phi}{|A|})$ is a bona fide coherence measure for quantum channels or not. Here we define \begin{equation}
C_{\rm tr}(\phi):=\min_{{\tilde{\phi}}\in \cal{IC}}\|\phi-\tilde{\phi}\|_{\rm tr}, \end{equation}
where $\|\phi-\tilde{\phi}\|_{\rm tr}=\|\frac{J_\phi}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}\|_{\rm tr}={\rm tr}|\frac{J_\phi}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}|$ is the trace norm between $\phi$ and $\tilde{\phi}$ with $\phi,\,\tilde{\phi}\in \mathcal{C}_{AB}$. We need to verify that $C_{\rm tr}(\phi)$ satisfies either (B1)-(B4) or (C1)-(C3). It has been already proved in previous works \cite{baumgratz,bromley} that $C_{\rm tr}$ satisfies (B1), (B2) and (B4). However, the verification of (B3) is rather difficult. The inequality can only be fulfilled for qubit and $X$ quantum states \cite{shaolianhe,Rana}.
We use condition (C3) to verify the validity of $C_{\rm tr}(\phi)$. For the isometry channel $\phi_{\rm max}\in \mathcal{C}_{AB}$ \cite{xujianwei},
\begin{equation}\phi_{\rm max}(|j\rangle\langle k|)=\frac{1}{|B|}\sum_{\alpha, \beta=0}^{|B|-1}e^{i(\theta_{j\alpha}-\theta_{k\beta})}|\alpha\rangle\langle\beta|,\end{equation} we have
\begin{equation}C_{\rm tr}(\phi_{\rm max})=\min_{\tilde{\phi}\in\mathcal{IC}}\|\frac{J_{\phi_{\rm max}}}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}\|_{\rm tr},\end{equation} where
\begin{equation}\frac{J_{\phi_{\rm max}}}{|A|}=\frac{1}{|A||B|}\sum_{j,k=0}^{|A|-1}|j\rangle\langle k|\otimes(\sum_{\alpha,~\beta=0}^{|B|-1}e^{i(\theta_{j\alpha}-\theta_{k\beta})}|\alpha\rangle\langle\beta|)=|\psi\rangle\langle\psi|, \end{equation}
with $|\psi\rangle=\frac{1}{\sqrt{|A||B|}}\sum_{j=0}^{|A|-1}\sum_{\alpha=0}^{|B|-1}e^{i\theta_{j\alpha}}|j\alpha\rangle$.
Set $U_n=\sum_{k=0}^{|A||B|-1}e^{i(\theta_{(k+n)~\rm mod~|A||B|}-\theta_{k})}|(k+n)~{\rm mod}~|A||B|\rangle\langle k|$. Then we have $U_n|\psi\rangle=|\psi\rangle$. Since $\|A\|_{\rm tr}+\|B\|_{\rm tr}\geqslant\|A+B\|_{\rm tr}$ and $\|U_n|\psi\rangle\|_{\rm tr}=~\||\psi\rangle\|_{\rm tr}$ for the unitary operation $U_n$, we obtain \begin{equation} \begin{array}{rcl}
\|\frac{J_{\phi_{\rm max}}}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}\|_{\rm tr}&=&\frac{1}{|A||B|}\sum_{n=0}^{|A||B|-1}\|U_n(\frac{J_{\phi_{\rm max}}}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}){U_n}^\dagger\|_{\rm tr}\\[1mm]
&\geqslant&\frac{1}{|A||B|}\|\sum_{n=0}^{|A||B|-1}(U_n(\frac{J_{\phi_{\rm max}}}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}){U_n}^\dagger)\|_{\rm tr}. \end{array} \end{equation} As \begin{equation}
U_n\frac{J_{\phi_{\rm max}}}{|A|}{U_n}^\dagger=U_n|\psi\rangle\langle\psi|{U_n}^\dagger=\frac{J_{\phi_{\rm max}}}{|A|} \end{equation} and
\begin{equation}\sum_{n=0}^{|A||B|-1}U_n\frac{J_{\tilde{\phi}}}{|A|}{U_n}^\dagger=I_{|A||B|}, \end{equation} we have \begin{equation}
\|\frac{J_\phi}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}\|_{\rm tr}\geqslant\|\frac{J_\phi}{|A|}-\frac{1}{|A||B|}I_{|A||B|}\|_{\rm tr}. \end{equation} Therefore, \begin{equation}\label{12}
\min_{\tilde{\phi}\in\mathcal{IC}}\|\frac{J_{\phi_{\rm max}}}{|A|}-\frac{J_{\tilde{\phi}}}{|A|}\|_{\rm tr}=\|\frac{J_{\phi_{\rm max}}}{|A|}-\frac{1}{|A||B|}I_{|A||B|}\|_{\rm tr}=\frac{2(|A||B|-1)}{|A||B|}. \end{equation}
Next, we consider a specific channel $\phi\in \mathcal{C}_{AB}$, \begin{equation}
\phi(|j\rangle\langle k|)=\frac{1}{2}\phi_1(|j\rangle\langle k|)\oplus\frac{1}{2}\phi_2(|j\rangle\langle k|), \end{equation} where \begin{equation}
\phi_1(|j\rangle)=\frac{1}{\sqrt{2}}\sum_{\alpha=0}^{1}e^{i\theta_{j\alpha}}|\alpha\rangle,
~~~\phi_2(|j\rangle)=\frac{1}{\sqrt{3}}\sum_{\beta=0}^{2}e^{i\theta_{j\beta}}|\beta\rangle, \end{equation}
with $\phi_1\in\mathcal{C}_{AB_1}$ and $\phi_2\in\mathcal{C}_{AB_2}$ the isometry channels, $|A|=2,~|B_1|=2,~|B_2|=3$ and $|B|=5$. In particular, we take the incoherent channel $\phi_0\in C_{AB},~\phi_0(|j\rangle\langle k|)=\frac{1}{4}\delta_{jk}(|j\rangle\langle k|)\oplus \mathbf{0}(|j\rangle\langle k|)$. Then \begin{equation}
C_{\rm tr}(\phi)=\displaystyle\min_{\tilde{\phi}\in\mathcal{IC}}\|\frac{J_\phi}{2}-\frac{J_{\tilde{\phi}}}{2}\|_{\rm tr}\leqslant\|\frac{J_\phi}{2}-\frac{J_{\phi_0}}{2}\|_{\rm tr}. \end{equation}
From \eqref{12} we get $\frac{1}{2}C_{\rm tr}(\phi_1)+\frac{1}{2}C_{\rm tr}(\phi_2)=\frac{19}{12}$. However, $C_{\rm tr}(\phi)\leqslant\|\frac{J_\phi}{2}-\frac{J_{\phi_0}}{2}\|_{\rm tr}=1$. Obviously, $C_{\rm tr}(\phi)\ne\frac{1}{2}C_{\rm tr}(\phi_1)+\frac{1}{2}C_{\rm tr}(\phi_2)$. Therefore, the trace norm of coherence is not a well-defined coherence measure of quantum channels. In other words, it also violates (B3). Here, inspired by the definition of trace norm, one may propose a similar trace norm function ${C_{\rm tr}}'(\phi)=\displaystyle\min_{\lambda\geqslant0,
~\tilde{\phi}\in\mathcal{IC}}\|\phi-\lambda\tilde{\phi}\|_{\rm tr}$, which can be shown to be a legal coherent measure for quantum channels \cite{yuxiaodong,xujianwei}.
We have studied the coherence of quantum channels based on the Choi matrices corresponding to the quantum channels. Note that $p_1J_{\phi_1}\oplus p_2J_{\phi_2}$ is not necessarily a Choi matrix for arbitrary channels $\phi_1\in \mathcal{C}_{AB_1}$ and $\phi_2\in \mathcal{C}_{AB_2}$. From \eqref{W1} the Choi matrix corresponding to a channel $\phi\in \mathcal{C}_{AB}$ is an $|A||B|\times|A||B|$ positive semidefinite matrix where each $\phi(|j\rangle\langle k|)$ is a $|B|\times|B|$ block matrix and ${\rm tr}(\phi(|j\rangle\langle j|))=1$. Assume that there is a channel $\Phi\in \mathcal{C}_{AB}$ such that $J_{\Phi}=p_1J_{\phi_1}\oplus p_2J_{\phi_2}$ with $|B|=|B_1|+|B_2|$. With respect to the matrix $p_iJ_{\phi_i}$, each $p_i\phi_i(|j\rangle\langle k|)$ is a $|B_i|\times|B_i|$ block matrix and ${\rm tr}[p_i\phi_i(|j\rangle\langle j|)]=p_i$, $i=1,2$. One can see that the traces of the diagonal $|B|\times|B|$ blocks cannot always be 1 for arbitrary probabilities $p_1$ and $p_2$. In other words, $p_1J_{\phi_1}\oplus p_2J_{\phi_2}$ is not necessarily a Choi matrix, as it does not satisfy the structure of the Choi matrix corresponding to the channels.
\section{Conclusions}
We have presented an alternative framework to quantify the coherence of quantum channels. Our framework and the framework given by (B1)-(B4) for quantum channels are equivalent. We have used this framework to certify the validity of the trace norm coherence measure for quantum channels. Similar to the case for the coherence measure of quantum states \cite{yuxiaodong}, our framework has the similar unique superiorities and may significantly simplify the quantification for coherence of quantum channels. Our results may highlight further investigations on the resource theory of quantum channels.
\noindent{\bf Acknowledgments}\, \, This work is supported by NSFC (Grant Nos. 12075159 and 12171044), Beijing Natural Science Foundation (Z190005), Academy for Multidisciplinary Studies, Capital Normal University, the Academician Innovation Platform of Hainan Province, and Shenzhen Institute for Quantum Science and Engineering, Southern University of Science and Technology (No. SIQSE202001).
\end{document} |
\begin{document}
\title{Pre-fixed Threshold Real Time Selection Method in Free-space Quantum Key Distribution}
\author{Wenyuan Wang} \affiliation{Centre for Quantum Information and Quantum Control (CQIQC), Dept. of Electrical \& Computer Engineering and Dept. of Physics, University of Toronto, Toronto, Ontario, M5S 3G4, Canada}
\author{Feihu Xu}
\affiliation{Shanghai Branch, National Laboratory for Physical Sciences at Microscale, University of Science and Technology of China, Shanghai, 201315, China}
\author{Hoi-Kwong Lo} \affiliation{Centre for Quantum Information and Quantum Control (CQIQC), Dept. of Electrical \& Computer Engineering and Dept. of Physics, University of Toronto, Toronto, Ontario, M5S 3G4, Canada}
\begin{abstract}
Free-space quantum key distribution (QKD) allows two parties to share a random key with unconditional security, between ground stations, between mobile platforms, and even in satellite-ground quantum communications. Atmospheric turbulence causes fluctuations in transmittance, which further affect the quantum bit error rate (QBER) and the secure key rate. Previous post-selection methods to combat atmospheric turbulence require a threshold value determined after all quantum transmission. In contrast, here we propose a new method where we pre-determine the optimal threshold value even before quantum transmission. Therefore, the receiver can discard useless data immediately, thus greatly reducing data storage requirements and computing resources. Furthermore, our method can be applied to a variety of protocols, including, for example, not only single-photon BB84, but also asymptotic and finite-size decoy-state BB84, which can greatly increase its practicality.
\date{\today} \maketitle
\section{Introduction}
Quantum key distribution (QKD), first proposed by Bennett and Brassard\cite{bb84} in 1984, allows two parties to securely share a random secret key, which can be further combined with cryptographic protocols, such as one-time pad\cite{onetimepad}, to encode messages with unconditional security unachievable by classical cryptography.
There has been increasing interest in implementing QKD through free-space channels. A major attraction for free-space QKD is that, performed efficiently, it could potentially be applied to airborne or maritime quantum communications where participating parties are on mobile platforms. Furthermore, it could even enable applications for ground to satellite quantum communications, and eventually, global quantum communication network.
Free-space quantum communication has seen great advances over the past 25 years. The first demonstration of free-space QKD was published by Bennett et al. from IBM research in 1992 \cite{freespace_IBM} over 32cm of free-space channel, which was also the first successful demonstration of experimental QKD. Over the next two decades, numerous demonstrations for free-space QKD have been made. In 1998, Buttler and Hughes et al. \cite{freespace_Hughes} have performed QKD over 1km of free-space channel outdoors at nighttime. In 2005, Peng et al. \cite{freespace_2005_Peng} performed distribution of entangled photons over 13km. In 2007, two successful experimental ground-to-ground free-space QKD experiments based on BB84 and E91 protocol \cite{free2007,freespace_144km_E91} were implemented over a 144km link between the Canary Island of La Palma and Tenerife. Ling et al. \cite{freespace_urban_E91} performed another entanglement-based QKD Experiment with modified E91 protocol over 1.4km in urban area in 2008. In 2012, Yin et al. and Ma et al. \cite{freespace_2012_100km, freespace_2012_143km} respectively performed quantum teleportation over 100km and 143 km.
In recent years, free-space QKD has also seen much development over rapidly moving platforms, with an air-to-ground experiment in 2013 by Nauerth et al. \cite{freespace_plane} over a plane 20km from ground, a demonstration of QKD with devices on moving turntable and floating hot ballon over 20km and 40km by J-Y Wang et al. \cite{freespace_2013_satellite} in 2013, a very recent report on drone-to-drone QKD experiment in 2017 by D. Hill et al. \cite{freespace_drone}, and notably, satellite-based quantum communication experiments in 2017 \cite{freespace_satellite1, freespace_satellite2, freespace_satellite3}, including a QKD experiment from a quantum satellite to the ground over a 1200km channel. Meanwhile, there is a lot of interest in doing QKD in a maritime environment either over sea water\cite{freespace_maritime_data} or through sea water \cite{freespace_underwater}. A study on quantum communication performance over a turbulent free-space maritime channel using real atmospheric data can be found in Ref.\cite{freespace_maritime_data}.
A major characteristic of a free-space channel is its time-dependent transmittance. This is due to the temporal fluctuations of local refractive index in the free-space channel, i.e. \textit{atmospheric turbulence}, which causes scintillation and beam wandering \cite{freespacethesis}, and results in fluctuations in the channel transmittance, which in turn affect QKD performance. Therefore, addressing turbulence is a major challenge for QKD over free-space. This fluctuation due to turbulence can be modeled as a probability distribution, called the Probability Distribution of Transmission Coefficient (PDTC). Hence the real time transmittance $\eta$ is a random time-dependent variable that can be described by the PDTC.
\begin{table*}[t]
\caption{Comparison of transmittance post-selection methods in QKD through turbulence channel}
\begin{center}
\begin{tabular}{cccc}
Method & Threshold choice & Model of signals & Sampling of transmittance\\
\hline
ARTS\cite{probetest} & post-determined & single-photon & secondary probe laser\\
SNRF\cite{SNRF} & post-determined & single-photon & detector count (coincidence) rate \\
P-RTS & pre-determined & universal & universal
\end{tabular}
\end{center} \end{table*}
As free-space channels have time-varying transmittance due to turbulence, the QBER (and hence the secure key rate) for QKD changes with time. In previous literature discussing free-space QKD, such as \cite{free2007,Hughes}, the time variance of the channel is ignored, i.e. the secure key rate is calculated based on the time-average of channel transmittance. Having knowledge of the PDTC, however, Vallone et al. proposed a method named Adaptive Real-Time Selection (ARTS)\cite{probetest} that acquires information about real-time transmittance fluctuation due to turbulence, and makes use of this information to perform post-selection and improve the key rate of QKD.
However, the ARTS method needs to ``adaptively'' choose an optimal threshold by performing numerical optimization after collecting all the data. A similar proposal by Erven et al. \cite{SNRF} called ``signal-to-noise-ratio-filter (SNRF)'' also discusses the idea of using a threshold to post-select high-transmittance periods, but uses the quantum data itself rather than a secondary classical channel. However, it needs to numerically optimize the threshold after collecting all experiment data, too.
Here we ask the question, is scanning through all acquired data after experiment and finding such an ``adaptive'' threshold really necessary? The answer is in fact no. In this paper, we propose a new method called ``pre-fixed threshold real-time selection (P-RTS)'', and show the important observation that, for post-selection based on transmittance in a turbulent channel, the optimal post-selection threshold is independent of the channel, and can be directly calculated from experimental parameters of the devices beforehand---thus simplifying the implementation and enabling post-selection of signals in real time, which can also reduce the data storage requirements and computational resources in Bob's system. This is because, instead of having to wait until all data is collected to optimize the threshold, Bob can immediately discard the data that are obtained below the pre-fixed threshold and doesn't need to store all collected data. Moreover, he doesn't need to have a model for the PDTC of the channel, and no longer needs to run a numerical optimization to find the optimal threshold, thus we can additionally save software development effort and computing resource for Bob, too.
Furthermore, both ARTS and SNRF are limited to the single photon model only, while decoy-state must be used for QKD with practical weak coherent pulse (WCP) source. Here we also propose a universal framework for QKD through a channel with fluctuating transmittance, for not only single-photon BB84, but also practical decoy-state BB84 with WCP source, and decoy-state BB84 with finite-size effects (both of which we are the first to apply threshold post-selection to), thus greatly improving its usefulness in practice. We also propose a model to estimate the maximum improvement in key rate from using threshold post-selection, and show that with the P-RTS method we can achieve a key rate very close to the maximum performance with an optimal threshold.
A comparison of P-RTS with post-selection methods in previous literature can be seen in Table I. As shown here, P-RTS has the great advantage of being able to predict the optimal threshold independently of the channel. This means that one no longer needs to store all the data after experiment and optimize the threshold, but can perform real-time selection with a single threshold, regardless of the actual channel turbulence and loss condition. Moreover, our result is valid not only for BB84 with single photons, but for any general protocol that has a fluctuating transmittance. It is also not restricted to transmittance sampling with a secondary laser as in ARTS, but for instance can also use observed photon count rates in a given time interval as in SNRF.
Lastly, we have performed a computer simulation to show the actual advantage of using P-RTS in practical decoy-state BB84, with up to 170\% improvement in decoy-state BB84 key rate for certain scenarios, or 5.1dB increase in maximum tolerant loss at $R=10^{-7}$, under medium-level turbulence. We also include a numerical demonstration for applying P-RTS to BB84 with finite-size effects, which still shows significant increase in rate even when total number of signals is limited, e.g. maximum tolerant loss at $R=10^{-7}$ gains an increase of 1.4dB to 5.2dB, for $N=10^{11}-10^{13}$ under high turbulence.
The organization of the paper is listed as follows: in section 2 we first present a brief recapitulation of ARTS method, and proceed to propose a universal framework for QKD key rate in turbulent channel. We then propose P-RTS method, and discuss how and why an optimal threshold can be pre-fixed, and show an upper bound for the rate of P-RTS. We also present numerical results from simulations to show how P-RTS behaves compared to no post-selection. Lastly, we discuss P-RTS in decoy-state BB84, for the asymptotic case in Section III and for finite-size regime in Section IV, and show with simulation results that P-RTS works effectively for both of them, too.
\section{Methods} \subsection{The ARTS Method}
In Ref.\cite{probetheory}, Capraro et al.\ performed an experiment to study the impact of turbulence on a quantum laser transmitted through a 143~km channel on the Canary Islands, and proposed the idea of improving SNR with a threshold at the expense of number of signals. Subsequently, in Ref.\cite{probetest}, Vallone et al.\ from the same group performed an experiment of free-space single-photon B92 QKD through the same channel, and showed the effectiveness of using real-time transmittance information in a turbulent channel to improve secure key rate, by performing post-selection on signals with a threshold, hence naming the method adaptive real-time selection (ARTS).
This is realized by using a classical probe signal (a strong laser beam) alongside the quantum channel. In the quantum channel, the bits are polarization-encoded into quantum signals, which are detected by single-photon avalanche diodes (SPADs) that return click events. Meanwhile, the laser passing through the classical channel is detected by an avalanche photodetector that returns a voltage proportional to received light intensity, which is also proportional to the channel transmittance at that moment. An illustration of the setup can be seen in Fig. \ref{fig:ARTS}.
The key idea is that the transmittance of the classical channel will correspond to that of the quantum channel. Therefore, by reading voltage from the classical detector (defined as $V$), one can gain information of the periods of high transmittance. Combined with a threshold on the classical signal (defined as $V_T$), this information can be used to post-select only those quantum signals received by Bob during high transmittance periods (only when $V \geq V_T$), thus increasing the overall average transmittance, at the expense of a smaller number of signals due to post-selection.
This post-selection increases the signal-to-noise ratio among the selected signals, and hence reduces the QBER, which subsequently increases the key rate. However, post-selection also reduces the total number of signals. Therefore, this becomes an optimization problem, and the choice of threshold value becomes critical. By numerically choosing an optimal threshold that maximizes the rate, it is possible to acquire a secure key rate much higher than before applying post-selection. This, as defined in Ref.\cite{probetest}, is called the adaptive real time selection (ARTS) method.
\subsection{Universal Framework for QKD Key Rate in a Turbulent Channel}
In this section, we will expand upon this threshold post-selection idea from ARTS, and apply it to a general framework of post-selection upon transmittance. We will then discuss the effects of threshold post-selection based on transmittance on the secure key rate. Our following discussions will be based on the channel transmittance $\eta$ only, and they are not limited to the secondary-laser transmittance sampling as in ARTS, but can be applied to any sampling method of transmittance, including photon count rate such as in SNRF.
As mentioned in Section I, an important characteristic of a turbulent channel is the time-dependent transmittance, which follows a probability distribution called the PDTC. There have been multiple efforts to accurately characterize the PDTC, and a widely accepted model is the log-normal distribution\cite{distribution,laser} (a plot of which is shown in Fig. \ref{fig:PDTC} (a)): \begin{equation} p_{\eta_0,\sigma}(\eta)=\frac{1}{\sqrt{2\pi}\,\sigma\eta}\exp\left(-\frac{\left[\ln\left(\frac{\eta}{\eta_0}\right)+\frac{1}{2}\sigma^2\right]^2}{2\sigma^2}\right) \end{equation}
\begin{figure}
\caption{The ARTS setup by Vallone et al., where Alice and Bob are linked by a quantum channel and a classical channel. One can post-select quantum signals passing through the channel with high-transmittance, using a threshold on the corresponding classical channel signal}
\label{fig:ARTS}
\end{figure}
\noindent where p is the probability density, $\eta$ the transmittance, and $\eta_0$ and $\sigma$ the mean and variance. The distribution is solely determined by the two parameters $\eta_0$ and $\sigma$, which are inherent to the channel itself. $\eta_0$ is the expected atmospheric transmittance, with a typical value of $10^{-3}$ to $10^{-4}$ (corresponding to 30-40 dB of loss) for a 100km channel, while $\sigma$, typically taking a value between 0 and 1, is determined by the amount of turbulence - the larger the amount of turbulence, the larger the variance. The pair $(\eta_0,\sigma)$ hence contains all information of the PDTC.
Now, we make an important observation: For any given protocol implementation (say, single-photon BB84, or decoy-state BB84), if all experimental parameters in the system except $\eta$ are fixed - i.e. the device parameters including background and dark count rate, detector efficiency, laser intensities, and optical misalignment are all fixed - then the key rate solely depends upon the transmittance $\eta$, and can be written as a single-variable function of $\eta$, i.e. $R(\eta)$.
To estimate secure key rate of QKD through turbulent channel, the question therefore becomes studying how the function $R(\eta)$ changes, when $\eta$ is a random variable following a probability distribution that we know, the PDTC.
Here, we will propose two models for $R(\eta)$ under turbulence:
\begin{enumerate}
\item \textbf{Rate-wise integration model}, $R^{\text{Rate-wise}}$, which is the case where we integrate the rate over PDTC, thus making use of all information of the PDTC. This rate only depends on the rate function and the PDTC, and is independent of what actual threshold we choose.
\item \textbf{Simplified model}, $R^{\text{Simplified}}(\eta_T)$, which estimates the performance of decoy-state QKD with P-RTS, using post-selection with a threshold $\eta_T$ on channel transmittance. It is a function of the threshold $\eta_T$ that one uses. \end{enumerate}
Let us first start with the rate-wise integration model. We can begin by considering an ideal case, where we assume that we have complete knowledge of the channel transmittance $\eta$ when each single signal passes through the channel. Moreover, here we discuss the asymptotic case where there is an infinite number of signals sent. Then, it is possible to order all signals from low to high transmittance $\eta$ when they pass through the channel, and divide the signals into bins of $[\eta,\eta+\Delta \eta)$ (which ranges from 0 to 1), as shown in Fig. \ref{fig:PDTC} (b).
Therefore, within the bin $[\eta,\eta+\Delta \eta)$, we can assume that all signals pass through the channel with the same transmittance $\eta$, given that the bin is sufficiently small, i.e. $\Delta \eta \rightarrow 0$. That is, the signals in the same bin can be considered as in a "static channel", and enjoy the same rate formula $R(\eta)$ and security analysis as if $\eta$ is a static constant.
Then, we can calculate the number of secure key bits from each bin, according to their respective $\eta$, and add all bins together. In the limit of $\Delta \eta \rightarrow 0$, this is an integration of $R(\eta)$ over $\eta$, with $p_{\eta_0,\sigma}(\eta)$ being the weight (i.e. the proportion of signals in each bin). We call this model the \textit{"rate-wise integration model"}. Its rate $R^{\text{Rate-wise}}$ satisfies:
\begin{equation} R^{\text{Rate-wise}}=\int_{0}^{1}R(\eta)p_{\eta_0,\sigma}(\eta)d\eta \end{equation}
$R^{\text{Rate-wise}}$ makes use of all PDTC information from turbulence. Since all bins have either zero or positive rate, using a threshold $\eta_T$ in the rate-wise integration model will always result in either same or smaller rate. i.e.
\begin{equation} \begin{aligned} R^{\text{Rate-wise}}(0)&=\int_{0}^{1}R(\eta)p_{\eta_0,\sigma}(\eta)d\eta \\ &\geq \int_{\eta_T}^{1}R(\eta)p_{\eta_0,\sigma}(\eta)d\eta \\ & = R^{\text{Rate-wise}}(\eta_T) \end{aligned} \end{equation}
\noindent Hence, from here on if $\eta_T$ is not specified, by $R^{\text{Rate-wise}}$ we will always mean $R^{\text{Rate-wise}}(0)$, which is a constant-value "max possible performance" of key rate that is only dependent on the PDTC of the channel and the experimental device parameters.\\
\begin{figure}
\caption{(a) The Probability Distribution of Transmittance Coefficient (PDTC), where $\eta$ is the transmittance, taking a value between [0,1], while P is the probability density function of $\eta$. Here we are showing a plot generated from the log-normal model of the PDTC; (b) Dividing signals into bins according to their respective $\eta$, in the rate-wise integration model}
\label{fig:PDTC}
\end{figure}
Now, let us consider applying post-selection to free-space QKD. Using a similar model as in ARTS method (instead of using classical detector voltage V, here we will directly use $\eta$, which is proportional to V). We can set a threshold $\eta_T$ and perform post-selection: we select quantum signals received when transmittance $\eta\geq \eta_T$, and discard all signals received when $\eta<\eta_T$.
Unlike the ideal case of the rate-wise integration model, in reality we do not have infinite resolution from the classical detector, nor do we have an infinite number of signals. In practice, we are post-selecting signals with only two status: "pass" or "fail". To make use of this "pass/fail" information, here we propose a practical model that estimates the rate with only the mean transmittance of the post-selected signals. We name it the \textit{"simplified model"}. First, with no post-selection applied, the rate is:
\begin{equation} R^{\text{Simplified}}(0)=R(\eta_0) \end{equation}
\noindent which means that we simply use the mean value of transmittance $\eta_0$ in the channel for all calculations and assume a "static channel", using the same rate formula for a static channel, too. This is, in fact, what has been done in most literature for free-space QKD that don't consider fluctuations due to turbulence, such as in \cite{free2007,Hughes}.
Now, when a threshold is used and post-selection is performed, $R^{\text{Simplified}}$ is written as:
\begin{equation} R^{\text{Simplified}}(\eta_T)=\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta \times R(\langle \eta \rangle) \end{equation}
\noindent here we again treat all post-selected signals as having passed through a "static channel", and use the same rate expression for static case. But the difference is that we use the new mean transmittance among only the post-selected signals, denoted as $\langle \eta \rangle$, as the transmittance of the channel. $\langle \eta \rangle$ satisfies (using expected value formula for a truncated distribution):
\begin{equation} \langle \eta \rangle={{\int_{\eta_T}^{1}\eta p_{\eta_0,\sigma}(\eta)d\eta}\over{\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta}} \end{equation}
When we apply post-selection (like the case with ARTS), in the rate formula for $R^{\text{Simplified}}$, we take into account the loss of signals due to post-selection, and only a portion of $\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta$ remains. This portion is always no larger than 1, and strictly decreases with $\eta_T$. On the other hand, $\langle \eta \rangle$ is always increasing with $\eta_T$, because we are post-selecting only the signals with higher transmittance. So, just like for the single photon case discussed in Section II.A, we have an optimization problem, where the choice of $\eta_T$ is crucial to the rate we acquire. Using optimal threshold and applying post-selection, as we will later show in the numerical results in the next sections, can dramatically increase the rate over using no post-selection at all.
Therefore, using the simplified model, we can effectively treat the static channel QKD protocol as a "black-box". We enjoy the same rate formula and security analysis as a static channel, while the only difference is that we use a higher $\langle \eta \rangle$ after post-selection as the input, and multiply a reduced portion $\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta$ to the output.\\
Now, let us compare the performance of the two models. From an information theory perspective, the rate-wise integration model makes use of all possible information on fluctuating transmittance (i.e. the whole PDTC), while the simplified model discards all distribution information and only acknowledges "pass or fail", and keeps only the single mean transmittance after post-selection. Therefore, we expect that the rate-wise integration model, which makes use of the most information, would have a higher rate than the simplified model. We can write the relation as:
\begin{equation} R^{\text{Rate-wise}} \geq R^{\text{Simplified}} \end{equation}
This relation suggests that the rate-wise integration model is an upper bound for the simplified model key rate. This result can be shown rigorously by Jensen's Inequality (we include the detailed proof in Appendix B), under the condition that the rate function $R(\eta)$ is convex. Numerically, we show (in the next section) that the rates for single-photon BB84 and decoy-state BB84 are both convex. Therefore, the relation Eq. 7 always holds true.
The next question is, naturally, what is the optimal threshold to choose, such that $R^{\text{Simplified}}$ approaches the upper bound $R^{\text{Rate-wise}}$? Moreover, how closely can it approach the upper bound? We will discuss this optimal threshold in the next section, and show that it is only dependent upon $R(\eta)$ and independent of the PDTC.
\subsection{Optimal Threshold and Near-Tightness of Upper Bound}
In this section, we propose the "Pre-fixed threshold Real Time Selection" (P-RTS) method, and show that the optimal threshold is independent of the PDTC and can be pre-fixed based on experimental parameters only. We also show that with this pre-fixed threshold the simplified model can approach its upper bound very closely.
Here, to describe the key rate function, we have to bring it into the context of an actual protocol model. We will first discuss single-photon BB84, using the Shor-Preskill \cite{Preskill} rate:
\begin{equation} R=1-2h_2[QBER] \end{equation}
\noindent here to keep the consistency of notations with following discussions, we will use parameters from Table II (which is also used as the channel model for decoy-state discussion), where detector dark count/background count rate is $Y_0$, basis misalignment is $e_d$, and total system transmittance is $\eta_{sys}=\eta\eta_d$:
\begin{equation} R_{S-P}=(Y_0+\eta_{sys})\{1-2h_2[e(\eta_{sys})]\} \end{equation}
\noindent while the single-photon QBER is
\begin{equation} e(\eta_{sys})={{{1\over 2}Y_0+e_d \eta_{sys}}\over{Y_0+\eta_{sys}}} \end{equation}
\begin{figure}
\caption{Single-photon rate and PDTC vs Transmittance $\eta$. As can be seen, there is an $\eta_{critical}$ such that $R_{S-P}(\eta)=0$ for all $\eta \leq \eta_{critical}$. For this example, we have plotted the single-photon rate R, using the experimental parameters listed in Table II. We acquire an $\eta_{critical}=0.00020$ for the single-photon case. Note that $\eta_{critical}$ is only determined by the experimental parameters of our devices (e.g. dark count rate, misalignment, and the chosen intensities), and is independent of the actual PDTC. Linear interpolation of the asymptotic $\eta \gg Y_0$ case shows that the function is very close to linear. Here an instance of P, the PDTC function, is also plotted for comparison.}
\label{fig:critical}
\end{figure}
A point worth noting is that $R_{S-P}(\eta)$ has the unique property of having an $\eta_{critical}$ such that $R_{S-P}(\eta)=0$ for all $\eta<\eta_{critical}$, and $R_{S-P}(\eta) \geq 0$ for $\eta\geq \eta_{critical}$. This critical position can be expressed as:
\begin{equation} \eta_{critical}={Y_0 \over \eta_d}{{{1\over 2}-e_{critical}}\over{e_{critical}-e_d}} \end{equation}
\noindent where $e_{critical}$ is the threshold QBER satisfying \begin{equation} 1-2h_2(e_{critical})=0 \end{equation}
\noindent that returns zero rate. For Shor-Preskill rate, this threshold is $e_{critical}=11\%$. More details can be seen in Appendix D.
As shown in Fig.~\ref{fig:critical}, we plot the single-photon rate $R_{S-P}(\eta)$, where a sharp turning point $\eta_{critical}$ exists. Moreover, within the $[\eta_{critical},1]$ region, numerical results show that $R(\eta)$ is slightly convex but very close to linear. (For larger $\eta \gg Y_0$, the rate is completely linear. Using this approximation we can make an interpolation of the "linear" part of the rate. As shown in the plot, this linear interpolation is very close to the rate function itself.)
This can lead to a very interesting result: We showed in Section II.B that $R^{\text{Rate-wise}}$ predicts the maximum possible performance of QKD with threshold post-selection in a turbulence channel. If we choose the threshold $\eta_T=\eta_{critical}$ for the simplified model, we can apply Jensen's Inequality for the truncated $p(\eta)$ distribution within region $[\eta_{critical},1]$, and acquire \begin{equation}
R^{\text{Simplified}}(\eta_{critical}) \approx R^{\text{Rate-wise}}(\eta_{critical}) \end{equation}
\noindent given that $R_{S-P}(\eta)$ is very close to linear within the region (but still convex, so $R^{\text{Simplified}}$ is still slightly smaller), since Jensen's Inequality takes equal sign for a linear function. There is also no loss in $R^{\text{Rate-wise}}$ from truncating $[0,\eta_{critical})$, as $R(\eta)=0$ for all $\eta<\eta_{critical}$.
\begin{equation} R^{\text{Rate-wise}}(\eta_{critical}) = R^{\text{Rate-wise}}(0) \end{equation}
\noindent Therefore, $R^{\text{Simplified}}$ can approximately reach the upper bound with $\eta_T=\eta_{critical}$, and the upper bound given by $R^{\text{Rate-wise}}$ is near-tight, due to the near-linearity of $R(\eta)$. A more rigorous proof showing that the optimal threshold for the simplified model is indeed $\eta_{critical}$ can be found in Appendix C.
Also, although there is no explicit analytical expression for $\eta_{critical}$, we can show that it depends more heavily on the background/dark count rate (approximately proportional to $Y_0$, if $\eta \ll 1$, and $Y_0 \ll \eta$). Details can be seen in Appendix D.
This result for optimal threshold has two significant implications for using threshold post-selection and applying the simplified model:
\begin{itemize}
\item Since $R(\eta)$ is only a function of $\eta$, and not $(\eta_0, \sigma)$, this optimal threshold position $\eta_{critical}$ is only determined by the experimental parameters of the devices (e.g. detector efficiency, dark count rate, misalignment, and Alice's intensities - although here we make an assumption that the misalignment is independent of $\eta$), and thus $\eta_{critical}$ is \textit{\textbf{independent}} of the channel itself and its PDTC. This means that, regardless of the turbulence level, we can use the same threshold to get optimized performance - although the actual amount of performance improvement over not using post-selection \textit{will} be determined by the average loss and the amount of turbulence (i.e. the actual PDTC), as will also be shown in numerical results in the next section.
\item Given that we choose the optimal threshold and apply P-RTS, not only are we optimizing the rate for the simplified model, but we are also achieving the maximum possible performance for the turbulent channel, even if we make use of all information on transmittance fluctuations. This is because, at $\eta_{critical}$, the max value for $R^{\text{Simplified}}$ can almost reach the upper bound given by the rate-wise integration model - meaning that the upper bound is nearly tight. We will illustrate this point further with numerical results in the next section. \end{itemize}
The significant implication is that, as long as we know the experimental parameters, we can determine the optimal threshold in advance, without the need to know any information about the channel (such as to measure the turbulence level), and perform post-selection in real time using the fixed threshold.
Therefore, we show that it is possible to perform post-selection on the channel transmittance with a pre-fixed threshold - which we will call "Pre-fixed threshold Real Time Selection" (P-RTS). This is significantly more convenient than protocols which perform optimization of threshold after the experiment is done. It will substantially reduce the amount of data storage requirements in the experiment, since Bob doesn't need to store all data until after experiment for optimization of threshold, and will also save the computational resource since Bob no longer needs to perform optimization of threshold.
\subsection{Numerical Results}
In this section we put the above models into a simulation program for single-photon BB84 in a turbulent channel. We use the experimental parameters from Ref.\cite{free2007}, as listed in Table II. One note is that, though the dark count and stray light contribution is reported to be as high as 1700/s in the paper, because of the gated behavior of the detector and the post-selection, only the counts within a 5.9ns time window (in 100ns period between two pulses, for the 10MHz source used) will affect the result. Therefore, here we take dark count rate as $Y_0=1\times 10^{-5}$ in the simulations.
\begin{table*}[t]
\caption{Experimental parameters for free-space QKD over a 144~km channel in Ref.\cite{free2007}}
\begin{center}
\begin{tabular}{ccccc}
dark count rate $Y_0$ & pulse rate & detector efficiency $\eta_d$ & misalignment $e_d$ & error-correction efficiency $f$\\
\hline
$1\times 10^{-5}$ (per signal) & 10MHz & 25\% & 3\% & 1.22\\
\end{tabular}
\end{center} \end{table*}
\begin{figure}\label{fig:threshold}
\end{figure}
Here, we first take a turbulence level of $\sigma=0.9$, and compare the performance of the two models plus the static model (which is a simplified model with no post-selection, i.e. $R_{S-P}(\eta_0)$) at a fixed loss of 37dB. We plot the results in Fig.\ref{fig:threshold}. As shown in the figure, $R^{\text{Simplified}}(\eta_T)$ first increases with threshold $\eta_T$ (because of post-selecting high-transmittance signals) and then decreases when threshold is further increased (because the decrease in rate due to loss of signals starts to dominate).
Just as predicted in Section II.C, the simplified model can achieve a very similar performance as the upper bound given by the rate-wise integration model, when the optimal threshold is chosen. For this case, at the optimal threshold $\eta_T=0.00020$, which, as we predicted, is the same as $\eta_{critical}=0.00020$ in Fig.\ref{fig:critical}, we get $R^{\text{Simplified}}=1.18 \times 10^{-5}$, very close to the upper bound $R^{\text{Rate-wise}}=1.22 \times 10^{-5}$ (only by $3\%$ difference - which is due to the rate above $\eta_{critical}$ not perfectly linear), and with dramatic increase in key rate compared with the default static model (using mean transmittance) $R^{\text{Static}}=3.5 \times 10^{-6}$, demonstrating the significant performance gain from using P-RTS in turbulence channel.
Furthermore, we compare the rate-wise integration model $R^{\text{Rate-wise}}$, the optimized $R^{\text{Simplified}}(\eta_T)$ with $\eta_T=\eta_{critical}$, and the non-post-selected model (whose rate is equivalent to the static model, i.e. $R(\eta_0)$, as in Eq. 4), by generating the rate vs loss relation for different average loss in the channel. Results can be seen in Fig. \ref{fig:turbulence2}. We see that indeed the rate for the simplified model with fixed threshold is extremely close to its upper bound (as suggested in Eq. 13), the rate-wise integration model. Comparing with the static case, we see that the P-RTS method works best for high-loss regions, where post-selection can ``salvage'' some rate where the static case would fail entirely, hence ``getting something out of practically nothing''. Therefore, one of the major improvements we acquire from using P-RTS in free-space QKD is a dramatically increased maximum tolerant loss (which would mean a longer maximum distance).
\begin{figure}
\caption{Comparison of the rate-wise integration model, simplified model with optimal threshold, and no post-selection (static model) under $\sigma=0.9$, for the single-photon case. Parameters are from Table II. We can see that the simplified model, with optimized threshold, approaches the rate-wise integration model extremely closely, and both cases have significant improvement in key rate over static (no post-selection) model, especially in high-loss region.}
\label{fig:turbulence2}
\end{figure}
\section{Decoy-State BB84}
On the other hand, for decoy-state BB84 QKD, we follow decoy-state BB84 QKD theory from Ref. \cite{decoystate_Hwang,decoystate_LMC,decoystate_Wang}, and adopt the notations as in Lo, Ma, and Chen's Paper in 2005 \cite{decoystate_LMC}. Using the GLLP formula \cite{GLLP}, in the asymptotic limit of infinitely many data, we can calculate the secure key rate as:
\begin{equation}
\begin{aligned}
R_{GLLP} = q\{-f(E_\mu)Q_\mu h_2(E_\mu)+Q_1[1-h_2(e_1)]\}
\end{aligned} \end{equation}
\noindent where $h_2$ is the binary entropy function, $q={1\over 2}$ or $q\approx 1$ depending on whether efficient BB84 is used, and $f$ is the error-correction efficiency. $Q_\mu$ and $E_\mu$ are the observed Gain and QBER, while $Q_1$ and $e_1$ are the single-photon Gain and QBER contributions estimated using decoy-state. (For a more detailed recapitulation of decoy-state, see Appendix A.1. We have also discussed the channel model that we use for P-RTS in Appendix A.2).
\begin{figure}
\caption{Rate and PDTC vs Transmittance $\eta$ for (asymptotic) decoy-state BB84 with infinite data size. Intensities are $\mu=0.3$, $\nu=0.05$, and experimental parameters are from Table II. As can be seen, there is also an $\eta_{critical}=0.0012$ such that $R_{GLLP}(\eta)=0$ for all $\eta \leq \eta_{critical}$, just like for single photons.}
\label{fig:critical_decoy}
\end{figure}
Here, for free-space decoy-state QKD, we fix the signal and decoy-state intensities as $\mu=0.3$, $\nu=0.05$, and the vacuum state $\omega=0$, and use the vacuum+weak method to estimate the single-photon contribution, as in Ma et al.'s 2005 paper \cite{decoypractical} on practical decoy-state QKD.
Like for the single-photon case, again we generate the rate vs $\eta$ function. As can be observed in Fig. \ref{fig:critical_decoy}, the decoy-state rate function $R_{GLLP}(\eta)$ behaves similarly to the single-photon rate $R_{S-P}(\eta)$, with a critical transmittance $\eta_{critical}$ ($\eta_{critical}=0.0012$ for this parameter set) such that all $\eta$ below it return zero rate, and a nearly linear rate-transmittance relation for $\eta \geq \eta_{critical}$. Therefore, using the same proof from Section II, we can conclude that $\eta_{critical}$ is the optimal (and fixed) threshold for decoy-state BB84 with post-selection too.
\begin{figure}
\caption{Comparison of the optimized Simplified model vs no post-selection (static model) under different levels of turbulence, for (asymptotic) decoy-state BB84 with infinite data size. Here we use $\sigma=0.3, 0.6, 0.9$ and $\eta_T=0.0012$. Intensities are $\mu=0.3$, $\nu=0.05$, and experimental parameters are from Table II. We see that the improvement in rate from using P-RTS increases with the level of turbulence, and has a significant improvement over static model even under medium-level turbulence of $\sigma=0.6$.}
\label{fig:turbulence}
\end{figure}
Using the fixed threshold $\eta_T=\eta_{critical}$ to get the optimized rate $R^{\text{Simplified}}(\eta_{critical})$, we generate the rate vs loss relation for different levels of turbulence, as shown in Fig.\ref{fig:turbulence}. As can be seen, the P-RTS method works in the same way with decoy-state BB84. We can also see that the higher the turbulence level is, the larger the performance gain we are able to achieve from applying P-RTS. As described in Section II.C, the optimal threshold is only determined by the parameters of the equipment, but the actual optimal \textit{performance} is determined by the amount of turbulence present in the channel that we can utilize. As can be seen in the plot, even for a medium-level turbulence of $\sigma=0.6$: for the same loss=29dB, $R^{\text{Simplified}}=8.453 \times 10^{-6}$, a 170\% increase over $R^{\text{Static}}=3.119 \times 10^{-6}$ at loss=29dB. Also, for a minimum rate of $R=10^{-7}$, the simplified model has a maximum tolerant loss of 34.4dB, versus 29.5dB for the static model, a 5.1dB increase in tolerant loss.
\begin{figure}
\caption{Comparison of the optimized simplified model vs static model, for decoy-state BB84 with finite-size effects. We test different data sizes $N=10^{11},10^{12},10^{13}$, and the near-asymptotic case $N=10^{99}$. Here we use a high turbulence of $\sigma=0.9$. The experimental parameters also follow Table II, and intensities and probabilities used are $\mu=0.31$, $\nu=0.165$, $\omega=2\times 10^{-4}$, $p_{\mu}=0.5$, $p_{\nu}=0.36$, and the probability of sending X basis $q_x=0.75$. The dotted lines are the cases where no post-selection is applied, while the solid lines all have post-selection applied with $\eta_T=0.0012$. We can see from the figure that the improvement in rate from using P-RTS increases with the data size N, and still increases maximum tolerant loss by 3.5dB and 1.4dB when $N=10^{12}$ and $10^{11}$.}
\label{fig:finite}
\end{figure}
\section{Decoy-State BB84 with Finite-size Effects}
We now turn to the case with finite data size, and apply simplified model and P-RTS to decoy-state BB84 under finite-size effects. We also use simulations to numerically demonstrate the improvements in key rate for finite-size case. The protocol is based on C. Lim et al.'s finite-size decoy-state BB84 paper \cite{finitebb84}, and we have adopted the same channel model as in Ref. \cite{decoypractical}. Here we use the same experimental parameters (including dark count rate, detector efficiency and misalignment) as in Table II, same as the ones used in our previous asymptotic-case simulations. Also, we fix the signal and decoy intensities to $\mu=0.31$, $\nu=0.165$ (in addition to the vacuum intensity $\omega=2\times 10^{-4}$), the probabilities of sending them $p_{\mu}=0.5$, $p_{\nu}=0.36$, and the probability of sending X basis $q_x=0.75$. Unlike in Ref. \cite{finitebb84}, however, we do not scan through the decoy-state intensities and probabilities to perform optimization. Instead, since we only concentrate on high-loss region, we use fixed parameters that are already very close to optimal (while changing them with distance does not provide much improvement in performance). Using intensities that do not change with channel loss also avoids changing the expression for $R_{GLLP}(\eta)$ (which depends on intensities $\mu$, $\nu$), and ensures that $\eta_{critical}$ is independent of the actual loss of the channel.
As described for the simplified model, we can use the same ``black box'' idea, and simply replace $R_{GLLP}$ for asymptotic BB84 with the rate for finite-size BB84. However, one difference from the asymptotic case is that $N$, the number of signals sent by Alice, matters when calculating the rate, i.e. the rate becomes $R_{Finite-Size}(\eta,N)$ instead of $R_{GLLP}(\eta)$. Then, instead of using Eq. 5 for $R^{\text{Simplified}}$, we use
\begin{equation} \begin{aligned} R^{\text{Simplified}}&=\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta \\ &\times R_{Finite-Size}(\langle \eta \rangle, N\times \int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta) \end{aligned} \end{equation}
\noindent which means that the post-selection not only affects the overall rate due to the portion of lost signals, but also affects the rate for the \textit{selected} signals, since fewer signals than $N$ are used to actually perform the protocol, and higher statistical fluctuations will be present among the selected signals. This means that we need to be even more prudent with post-selection when treating finite-size BB84.
The numerical results are shown in Fig.\ref{fig:finite}. As can be seen, P-RTS has a similar effect on finite-size BB84 as on the asymptotic case: we gain a significant advantage in the high-loss region, and have an improved maximum tolerant loss, when a minimum acceptable rate is required. For instance, at $\sigma=0.9$ and for a minimum $R=10^{-7}$, the maximum loss increases by 1.4dB, 3.5dB, 5.2dB, and 6.2dB, respectively, for the cases with $N=10^{11}, 10^{12}, 10^{13}$, and the near-asymptotic case, while not much improvement can be gained from P-RTS with $N$ smaller than $10^{10}$. As shown, the improvement increases with the size of $N$ (which is understandable, since the smaller $N$ is, the more sensitive the rate will be to post-selection---because we are cutting off a portion from the already-insufficient number of signals and further aggravating the statistical fluctuations---while for the asymptotic case, for instance, the performance of the selected signals does not depend upon how big the selected portion of signals is, and the only negative effect post-selection has is the lost portion of signals). For a free-space QKD system with a 100MHz repetition rate, $N=10^{11}$ would require about 17 minutes of communication.
\section{Conclusion}
In this paper we have proposed a post-selection method with prefixed threshold for QKD through turbulent channel, and have also proposed a universal framework for determining the optimal threshold beforehand and predicting the maximum possible performance. By choosing the threshold in advance, we can perform post-selection in real time regardless of the channel condition. This real-time post-selection also provides an additional benefit of reducing the amount of data that is required to be stored in the detector system on Bob's side. We also performed simulations to show the method's effectiveness in not only single-photon BB84, but also practical decoy-state QKD in both the asymptotic case and the case with finite-size effects.
This method is especially effective for regions of high turbulence and high loss, and can even ``salvage something out of nothing'', when the secure key rate could have been zero without the P-RTS method. In order to sample the real-time transmittance condition, the P-RTS method can use only an additional classical channel for each quantum channel, which would be easily implemented (or may even be already implemented, as a beacon laser is often required for alignment in free-space QKD). Moreover, since our results only depend on post-selection of $\eta$, in essence our method is even possible without an additional classical channel, such as in Erven et al.'s SNRF setup \cite{SNRF} (which samples transmittance by observing the quantum signal count rate). The thresholding, on the other hand, is purely implemented in post-processing, and therefore does not require any additional resource; it could be readily deployed into existing infrastructure, gaining a ready increase in secure key rate performance over existing implementations for free-space QKD.
\begin{thebibliography}{1}
\bibitem{bb84} C Bennett, G Brassard {\em Quantum cryptography: Public key distribution and coin tossing.} International Conference on Computer System and Signal Processing, IEEE (1984)
\bibitem{onetimepad} F Miller {\em Telegraphic code to insure privacy and secrecy in the transmission of telegrams} CM Cornwell (1882)
\bibitem{freespace_IBM} CH Bennett, F Bessette, G Brassard, L Salvail, and J Smolin {\em Experimental quantum cryptography} Journal of cryptology 5.1 (1992): 3-28.
\bibitem{freespace_Hughes} WT Buttler et al. {\em Practical free-space quantum key distribution over 1 km}. Physical Review Letters, 1998, 81(15): 3283.
\bibitem{freespace_2005_Peng} C-Z Peng et al. {\em Experimental free-space distribution of entangled photon pairs over 13 km: towards satellite-based global quantum communication.} Physical Review Letters 94.15 (2005): 150501.
\bibitem{freespace_144km_E91} R Ursin et al. {\em Entanglement-based quantum communication over 144 km.} Nature physics 3.7 (2007): 481-486.
\bibitem{free2007} T Schmitt-Manderbach et al. {\em Experimental demonstration of free-space decoy-state quantum key distribution over 144 km} Physical Review Letters 98.1 (2007): 010504.
\bibitem{freespace_urban_E91} A Ling et al. {\em Experimental quantum key distribution based on a Bell test} Physical Review A 78.2 (2008): 020301.
\bibitem{freespace_2012_100km} J Yin et al. {\em Quantum teleportation and entanglement distribution over 100-kilometre free-space channels} Nature 488.7410 (2012): 185-188.
\bibitem{freespace_2012_143km} X-S Ma et al. {\em Quantum teleportation over 143 kilometres using active feed-forward} Nature 489.7415 (2012): 269-273.
\bibitem{freespace_plane} S Nauerth et al. {\em Air-to-ground quantum communication} Nature Photonics 7.5 (2013): 382-386.
\bibitem{freespace_2013_satellite} J-Y Wang et al. {\em Direct and full-scale experimental verifications towards ground-satellite quantum key distribution} Nature Photonics 7.5 (2013): 387-393.
\bibitem{freespace_drone} AD Hill, et al. {\em Drone-based Quantum Key Distribution} Urbana 51 (2017): 61801-3003.
\bibitem{freespace_satellite1} S-K Liao et al. {\em Satellite-to-ground quantum key distribution} Nature 549.7670 (2017): 43-47.
\bibitem{freespace_satellite2} J Yin et al. {\em Satellite-based entanglement distribution over 1200 kilometers} Science 356.6343 (2017): 1140-1144.
\bibitem{freespace_satellite3} J-G Ren et al. {\em Ground-to-satellite quantum teleportation} Nature 549.7670 (2017): 70-73.
\bibitem{freespace_maritime_data} J Gariano, M Neifeld, and I Djordjevic. {\em Engineering trade studies for a quantum key distribution system over a 30 km free-space maritime channel.} Applied Optics 56.3 (2017): 543-557.
\bibitem{freespace_underwater} L Ji et al. {\em Towards quantum communications in free-space seawater.} Optics Express 25.17 (2017): 19795-19806.
\bibitem{freespacethesis} J-P Bourgoin {\em Experimental and theoretical demonstration of the feasibility of global quantum cryptography using satellites}, University of Waterloo (2014)
\bibitem{Hughes} RJ Hughes, JE Nordholt, D Derkacs, and CG Peterson {\em Practical free-space quantum key distribution over 10 km in daylight and at night} New Journal of Physics, Vol. 4 (2002)
\bibitem{probetest} G Vallone et al {\em Adaptive real time selection for quantum key distribution in lossy and turbulent free-space channels} Phys. Rev. A 91, 042320 (2015)
\bibitem{SNRF} C Erven et al. {\em Studying free-space transmission statistics and improving free-space quantum key distribution in the turbulent atmosphere.} New Journal of Physics 14.12 (2012): 123018.
\bibitem{probetheory} I Capraro et al {\em Impact of turbulence in long range quantum and classical communications} Phys. Rev. Lett. 109, 200502 (2012)
\bibitem{distribution} P Milonni, JH Carter, CG Peterson, and RJ Hughes {\em Effects of propagation through atmospheric turbulence on photon statistics} J. Opt. B: Quantum Semiclass., Vol. 6, No. 8 (2004)
\bibitem{laser} LC Andrews, RL Phillips, and CY Hopen. {\em Laser beam scintillation with applications} Vol. 99. SPIE press (2001)
\bibitem{Preskill} P Shor, J Preskill {\em Simple Proof of Security of the BB84 Quantum Key Distribution Protocol} Phys. Rev. Lett. 85, 441 (2000)
\bibitem{decoystate_Hwang} W-Y Hwang {\em Quantum key distribution with high loss: toward global secure communication} Physical Review Letters 91.5 (2003): 057901.
\bibitem{decoystate_LMC} HK Lo, XF Ma, and K Chen {\em Decoy state quantum key distribution.} Physical review letters 94.23 (2005): 230504.
\bibitem{decoystate_Wang} X-B Wang {\em Beating the photon-number-splitting attack in practical quantum cryptography} Physical review letters 94.23 (2005): 230503.
\bibitem{GLLP} D Gottesman, HK Lo, N Lutkenhaus, and J Preskill {\em Security of quantum key distribution with imperfect devices} Quantum Info. and Comp., 4, No.5 (2004) 325-360
\bibitem{decoypractical} X Ma, B Qi, Y Zhao, and HK Lo {\em Practical decoy state for quantum key distribution} Phys. Rev. A 72, 012326 (2005)
\bibitem{finitebb84} C Lim, M Curty, N Walenta, F Xu, and H Zbinden {\em Concise security bounds for practical decoy-state quantum key distribution} Physical Review A 89.2 (2014): 022307.
\bibitem{rytov} M E Gracheva, and A S Gurvich {\em Strong fluctuations in the intensity of light propagated through the atmosphere close to the earth} Radiophysics and Quantum Electronics 8.4 (1965): 511-515.
\bibitem{modtran} A Berk et al. {\em MODTRAN6: a major upgrade of the MODTRAN radiative transfer code} SPIE Defense+ Security. International Society for Optics and Photonics (2014)
\end{thebibliography}
\appendix
\section{Decoy-State BB84 Rate Function}
\subsection{Standard Channel Model}
Here we present a brief recapitulation of the decoy-state BB84 model we used. We follow the notations as in Lo, Ma, and Chen's Paper in 2005 \cite{decoystate_LMC}.
Alice uses a WCP source at intensity $\mu$, which sends pulses with a Poissonian photon number distribution: $P_i={\mu^i \over i!}e^{-\mu}$. We will first consider using the standard channel model (as in the original paper Ref.\cite{decoystate_LMC}), where for each i-photon pulse $\ket{i}$, the transmittance, yield $Y_i$, gain $Q_i$, and QBER $e_i$ are:
\begin{equation}
\begin{aligned}
\eta_i&=1-(1-\eta)^i\\
Y_i&\approx Y_0+\eta_i=Y_0+1-(1-\eta)^i\\
Q_i&=Y_i{\mu^i \over i!}e^{-\mu}\\
e_i&={{e_0Y_0+e_d \eta _i}\over Y_i}\\
\end{aligned}
\end{equation}
\noindent where $Y_0$, $e_d$ are the dark count rate and misalignment, respectively, and $e_0={1\over 2}$. The overall Gain $Q_{\mu}$ and QBER ${E_{\mu}}$ for this intensity $\mu$ are:
\begin{equation}
\begin{aligned}
Q_{\mu}&=\sum_{i=0}^{\infty}Y_i{\mu^i \over i!} e^{-\mu}=\sum_{i=0}^{\infty}Y_iP_i \\
E_\mu &= {1 \over Q_\mu} \sum_{i=0}^{\infty}e_i Y_i{\mu^i \over i!} e^{-\mu}={1 \over Q_\mu} \sum_{i=0}^{\infty}e_iY_iP_i
\end{aligned}
\end{equation}
\noindent where $Q_{\mu}$ and $E_{\mu}$ are simulated here for rate estimation using known channel transmittance $\eta$, while in experiment they will be measured observables.
For this standard channel model, we assume that the photon number distribution after passing through the channel would still be Poissonian. Using the decoy-state technique to combine $Q_{\mu}$ and $E_{\mu}$ for different intensities, we can estimate the single-photon contributions $Q_1$ and $e_1$. The achievable secure key rate is at least
\begin{equation}
\begin{aligned}
R_{GLLP} = q\{-f(E_\mu)Q_\mu h_2(E_\mu)+Q_1[1-h_2(e_1)]\}
\end{aligned}
\end{equation}
\noindent as given by the GLLP formula\cite{GLLP}, where $h_2$ is the binary entropy function, $q={1\over 2}$ or $q\approx 1$ depending on whether efficient BB84 is used, and $f$ is the error-correction efficiency.
\subsection{Channel Model after Post-Selection}
However, one thing worth noting is that although the photon number distribution is Poissonian after the signals pass through the standard channel model, it is no longer necessarily so if we perform post-selection, in which case the photon number distribution might change, and thus the decoy-state key rate form in Eq. A3 (which depends on a Poissonian distribution model) might no longer be adequate.
To show that this will not be a concern for us, we will explicitly discuss how the post-selection from P-RTS will affect the yield for each photon number. From Eq. A1, before post-selection, the yield for pulses with a given photon number $i$ is
\begin{equation}
\begin{aligned}
Y_i(\eta)=Y_0+1-(1-\eta)^i
\end{aligned}
\end{equation}
For simplified model, among the post-selected signals, we have replaced $\eta$ in Eq. A4 with
\begin{equation}
\begin{aligned}
\langle \eta \rangle={{\int_{\eta_T}^{1}\eta p_{\eta_0,\sigma}(\eta)d\eta}\over{\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta}}
\end{aligned}
\end{equation}
\noindent thus the yield for i-photon pulse is assumed to be:
\begin{equation}
\begin{aligned}
Y_i^{Simplified}(\eta_T)=Y_0+1-(1-\langle \eta \rangle)^i
\end{aligned}
\end{equation}
\noindent in which case, we are simply replacing the $\eta$ with a higher expected value $\langle \eta \rangle$, but the expression is in the same form as Eq. A4, and the received photon number distribution is still Poissonian. (Hence the decoy-state analysis and key rate expression still hold).
However, if we consider the more realistic case, post-selection might have a different effect on pulses with different photon number $i$. Therefore, to estimate the yield for each photon number, and analyze the photon number distribution after the channel and the post-selection, we should group up pulses with the same given photon number, and calculate the expected value of the yield for each given $i$. We can call this the "pulse-wise integration" model.
\begin{equation}
\begin{aligned}
Y_i^{Pulse-wise}(\eta_T)={{\int_{\eta_T}^{1}Y_i(\eta) p_{\eta_0,\sigma}(\eta)d\eta}\over{\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta}}=\langle Y_i(\eta) \rangle
\end{aligned}
\end{equation}
\noindent and the Gain $Q_{\mu}$ and QBER ${E_{\mu}}$ would become:
\begin{equation}
\begin{aligned}
Q_{\mu}&=\sum_{i=0}^{\infty}\langle Y_i(\eta) \rangle{\mu^i \over i!} e^{-\mu}=\sum_{i=0}^{\infty}\langle Y_i(\eta) \rangle P_i \\
E_\mu &= {1 \over Q_\mu} \sum_{i=0}^{\infty}e_i \langle Y_i(\eta) \rangle{\mu^i \over i!} e^{-\mu}={1 \over Q_\mu} \sum_{i=0}^{\infty}e_i \langle Y_i(\eta) \rangle P_i
\end{aligned}
\end{equation}
In the case of this ``pulse-wise integration'' model, $Q_\mu$ and $E_\mu$ can no longer be considered as arising from a Poissonian distribution with intensity $\eta\mu$, which is seemingly warning us that the decoy-state analysis might not hold true anymore. However, here we make the observation that for $i=0$, trivially,
\begin{equation}
\begin{aligned}
Y_0^{Simplified}=Y_0=Y_0^{Pulse-wise}
\end{aligned}
\end{equation}
\noindent and for $i=1$, the yield is a linear function of $\eta$, hence
\begin{equation}
\begin{aligned}
Y_1^{Simplified}=Y_0+\langle \eta \rangle = \langle(Y_0+\eta)\rangle=Y_1^{Pulse-wise}
\end{aligned}
\end{equation}
While for all multi-photon cases where $i\geq 2$, the function
\begin{equation}
\begin{aligned}
Y_i(\eta)=Y_0+1-(1-\eta)^i
\end{aligned}
\end{equation}
is a strictly concave function on the domain $[0,1]$. Therefore, from Jensen's Inequality, the expected value of a concave function is strictly smaller than the function ($Y_i$) of the expected value, i.e.
\begin{equation}
\begin{aligned}
Y_i^{Pulse-wise}=\langle Y_i(\eta)\rangle < Y_i(\langle \eta \rangle) = Y_i^{Simplified}, i\geq 2
\end{aligned}
\end{equation}
This means that, with the simplified model, with the Poissonian photon number distribution assumption and the standard decoy-state key rate analysis, we are correctly estimating the vacuum and single-photon contributions, but always \textit{over-estimating} the multi-photon contributions. This will in fact result in an \textit{under-estimated} key rate for the simplified model compared with the realistic case (the pulse-wise integration model). Therefore, we make the ``validity argument'' here that, although post-selection will result in a non-Poissonian photon number distribution, by using the simplified model and the same decoy-state analysis, we will never incorrectly over-estimate the key rate, and can be confident in the improvement in performance from using P-RTS.
\section{Proof of Rate-Wise Integration Model as Upper Bound}
To better compare the models, let us first simplify the notation, and define the $\langle f(\eta)\rangle$ operator as taking the expected value of $f(\eta)$ over $p_{\eta_0,\sigma}(\eta)$ (in the case of using post-selection, the distribution is truncated, and will be normalized by dividing by $\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta$). The expected value $\langle f(\eta) \rangle$ can be expressed as:
\begin{equation}
\langle f(\eta) \rangle={{\int_{\eta_T}^{1}f(\eta) p_{\eta_0,\sigma}(\eta)d\eta}\over{\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta}}
\end{equation}
Then, we can easily see that, mathematically, the two models we proposed so far, the rate-wise integration model and the simplified model, are only different in that they apply the "expected value" operator at different levels of the function. We can simply write $R^{\text{Rate-wise}}$ and $R^{\text{Simplified}}$ as:
\begin{equation}
\begin{aligned}
R^{\text{Rate-wise}} (\eta_T)&=\langle R(\eta) \rangle\\
R^{\text{Simplified}} (\eta_T)&=R(\langle \eta \rangle)
\end{aligned}
\end{equation}
Now, we introduce the \textit{Jensen's Inequality}: \\
\textit {For a random variable X following a probability distribution p(X), and for any given convex function f(x), we always have}
\begin{equation}
\langle f(X) \rangle \geq f(\langle X \rangle)
\end{equation}
\noindent the equal sign is taken when the function $f(x)$ is linear.
For decoy-state BB84, $R_{GLLP}(\eta)$ is a convex (and increasing) function of $\eta$, therefore we have $\langle R(\eta) \rangle \geq R(\langle \eta \rangle)$, i.e.
\begin{equation}
R^{\text{Rate-wise}}(0) \geq R^{\text{Simplified}}(0)
\end{equation}
\noindent This holds true even after a threshold is applied, too, since we can simply replace the distribution $p(\eta)$ with the truncated distribution on domain $[\eta_T,1]$, and normalize it by dividing by the constant $\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta$. Since $R(\eta)$ is non-concave on all sections of $[0,1]$, the Jensen's Inequality always holds true, regardless of the threshold. i.e.
\begin{equation}
R^{\text{Rate-wise}}(0) \geq R^{\text{Rate-wise}}(\eta_T) \geq R^{\text{Simplified}}(\eta_T)
\end{equation}
\noindent here we also include Eq. 3's result that $R^{\text{Rate-wise}}(\eta_T)$ is non-increasing with $\eta_T$.
Therefore, we see that $R^{\text{Rate-wise}}$ serves as an upper bound for the possible rate in a turbulent channel, as it is the maximum achievable rate when we know all transmittance information and make use of the entire PDTC. The simplified model always has a rate no higher than this upper bound. This means that, when we use $R^{\text{Simplified}}$ to calculate the rate, we \textit{never overestimate} the performance of the protocol. When we demonstrate the improvements we gain by using P-RTS in decoy-state BB84, the actual possible rate will be even higher, thus the validity argument for the usage of the simplified model in estimating the rate.
\section{Proof of Optimality of Critical Transmittance as Threshold for Simplified Model}
Following the argument in Section II.C, here we give a rigorous proof that $\eta_T=\eta_{critical}$ is indeed the optimal threshold for the simplified model, given that $R_{S-P}(\eta)$ (and similarly for $R_{GLLP}(\eta)$) is nearly linear. For simplified model, we showed that
\begin{equation}
R^{\text{Simplified}}(\eta_T)=\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta \times R_{S-P}(\langle \eta \rangle)
\end{equation}
\noindent where $\langle \eta \rangle$ satisfies:
\begin{equation}
\langle \eta \rangle={{\int_{\eta_T}^{1}\eta p_{\eta_0,\sigma}(\eta)d\eta}\over{\int_{\eta_T}^{1}p_{\eta_0,\sigma}(\eta)d\eta}}
\end{equation}
\noindent Then, using the Leibniz Integration Rule, and taking derivative with respect to $\eta_T$ (here we omit the subscript of $\eta_0,\sigma$ for the PDTC, and $S-P$ (or $GLLP$) for the rate), we have
\begin{equation}
\begin{aligned}
{d \over d\eta_T}{\langle \eta \rangle}={{p(\eta_T)}\over{\int_{\eta_T}^{1}p(\eta)d\eta}}({\langle \eta \rangle} - \eta_T)
\end{aligned}
\end{equation}
\noindent using the chain rule,
\begin{equation}
\begin{aligned}
{d \over d\eta_T}R(\langle \eta \rangle) &= {dR(\eta) \over {d\langle \eta \rangle}} {{d\langle \eta \rangle}\over d\eta_T}\\
&= R'(\langle \eta \rangle) {{p(\eta_T)}\over{\int_{\eta_T}^{1}p(\eta)d\eta}}({\langle \eta \rangle} - \eta_T)\\
\end{aligned}
\end{equation}
\noindent Maximizing $R^{\text{Simplified}}$ requires that
\begin{equation}
{d \over d\eta_T}R^{\text{Simplified}}(\eta_T)=0
\end{equation}
\noindent expanding the derivative using Eq. C1 gives us
\begin{equation}
\begin{aligned}
&{d \over d\eta_T}R^{\text{Simplified}}(\eta_T)\\
&= \left( \int_{\eta_T}^{1}p(\eta)d\eta \right) \times {d \over d\eta_T}R(\langle \eta \rangle) - p(\eta_T)R(\langle \eta \rangle) \\
&= R'(\langle \eta \rangle) {p(\eta_T)}({\langle \eta \rangle} - \eta_T) - p(\eta_T)R(\langle \eta \rangle) \\
&= p(\eta_T)[({\langle \eta \rangle} - \eta_T) R'(\langle \eta \rangle) - R(\langle \eta \rangle)]
\end{aligned}
\end{equation}
\noindent Therefore, the optimal threshold requires that
\begin{equation}
({\langle \eta \rangle} - \eta_T) R'(\langle \eta \rangle) = R(\langle \eta \rangle)
\end{equation}\\
When $R(\eta)$ is a linear function on the domain $[\eta_{critical}, 1]$ and $R(\eta_{critical})=0$, there is
\begin{equation}
R'(\langle \eta \rangle) = {{R(\langle \eta \rangle) - R(\eta_T)}\over{({\langle \eta \rangle} - \eta_T)}}
\end{equation}
\noindent combined with Eq. C7, we have
\begin{equation}
R(\eta_T)=0
\end{equation}
\noindent for $\eta \in [\eta_{critical}, 1]$, there is one and only one point satisfying $R(\eta_T)=0$, that is
\begin{equation}
\eta_T=\eta_{critical}
\end{equation}
\noindent For $\eta \in [0, \eta_{critical})$, on the other hand,
\begin{equation}
R(\langle \eta \rangle) - R(\eta_T) < R'(\langle \eta \rangle) ({\langle \eta \rangle} - \eta_T) = R(\langle \eta \rangle)
\end{equation}
\noindent which becomes
\begin{equation}
R(\eta_T) > 0
\end{equation}
\noindent but $R(\eta)=0$ for all $\eta \leq \eta_{critical}$, so no $\eta_T\in [0, \eta_{critical})$ satisfies the zero-derivative requirement. This means that, when $R_{GLLP}(\eta)$ is nearly linear on $[\eta_{critical}, 1]$, we have
\begin{equation}
\eta_T=\eta_{critical}
\end{equation}
\noindent as the one and only optimal threshold for $R^{\text{Simplified}}$.\\
Additionally, if we do not ignore the convexity of $R(\eta)$, consider the tangent line for $R(\eta)$ at $\langle \eta \rangle$, since $R(\eta)$ is a convex function of $\eta$,
\begin{equation}
({\langle \eta \rangle} - \eta_T) R'(\langle \eta \rangle) > R(\langle \eta \rangle) - R(\eta_T)
\end{equation}
\noindent optimal threshold requires that
\begin{equation}
R(\langle \eta \rangle) > R(\langle \eta \rangle) - R(\eta_T)
\end{equation}
\noindent i.e. $R(\eta_T) > 0$, which means that the optimal threshold position will be shifted rightward from $\eta_{critical}$, the actual amount of shift depends on how much $R$ deviates from linearity (in numerical simulations, we see that since $R(\eta)$ is very close to linear, this shift is very small). Also, although $R^{\text{Rate-wise}}$ is not affected for a threshold no larger than $\eta_{critical}$, using a threshold larger than $\eta_{critical}$ will cause $R^{\text{Rate-wise}}$ to decrease, since "bins" with positive rate are discarded. Therefore, the maximum point for $R^{\text{Simplified}}$ is no longer the maximum $R^{\text{Rate-wise}}$, but slightly smaller than it. This also explains why in the numerical results, the optimal $R^{\text{Simplified}}$ is always slightly lower than upper bound, due to non-linearity of $R(\eta)$.
Also, a small note is that Jensen's inequality requires the function to be differentiable at every point, while the turning point of $R$ at $\eta_{critical}$ is a sharp point. To address this, we can construct another function $R_2$ with an infinitesimally small yet smooth ``turn'' at $\eta_{critical}$ replacing the sharp point; since the ``turn'' is infinitesimally small, integrating $R$ and $R_2$ over any region yields arbitrarily close results. Therefore the structure of the turning point does not affect the above results.
\section{Analytical Expression for Optimal Threshold}
\subsection{Single-Photon Case}
Let $\eta_{sys}=\eta\times \eta_d$. The single-photon Shor-Preskill rate is
\begin{equation}
R_{S-P}=(Y_0+\eta_{sys})\{1-2h_2[e(\eta_{sys})]\}
\end{equation}
where the single-photon QBER is
\begin{equation}
e(\eta_{sys})={{{1\over 2}Y_0+e_d \eta_{sys}}\over{Y_0+\eta_{sys}}}
\end{equation}
For the rate to be zero, we require:
\begin{equation}
R_{S-P}=0
\end{equation}
hence
\begin{equation}
1-2h_2[e(\eta_{sys})]=0
\end{equation}
or, $h_2[e(\eta_{sys})]={1\over 2}$. This numerically corresponds to $e(\eta_{sys})=11\%=e_{critical}$ (which is the QBER threshold for Shor-Preskill rate). Therefore, substituting into Eq. D2, we have
\begin{equation}
\eta_{sys}={{{1\over 2}-e_{critical}}\over{e_{critical}-e_d}}Y_0
\end{equation}
expressing it in channel transmittance $\eta$
\begin{equation}
\eta_{critical}={Y_0 \over \eta_d}{{{1\over 2}-e_{critical}}\over{e_{critical}-e_d}}
\end{equation}
\noindent or, if we substitute $e_{critical}=11\%$ into the equation, we have
\begin{equation}
\eta_{critical}={Y_0 \over \eta_d}{0.39\over{0.11-e_d}}
\end{equation}
This is the analytical expression for the critical transmittance in the single-photon case. Also, we can see that the critical transmittance is proportional to the background count rate (i.e. the noise) in the system:
\begin{equation}
\eta_{critical} \propto{Y_0 \over \eta_d}
\end{equation}
\subsection{Decoy-State BB84}
Consider the asymptotic case of decoy-state BB84, with infinite number of decoys (i.e. the only significant intensity is the signal intensity $\mu$). Using the GLLP rate,
\begin{equation}
\begin{aligned}
R_{GLLP} = q\{-fQ_\mu h_2(E_\mu)+Q_1[1-h_2(e_1)]\}
\end{aligned}
\end{equation}
\noindent we would like to find $\eta_{critical}$ such that
\begin{equation}
R(\eta_{critical})=0
\end{equation}
\noindent hence
\begin{equation}
fQ_\mu h_2(E_\mu)=Q_1[1-h_2(e_1)]
\end{equation}
\noindent or
\begin{equation}
h_2(e_1)+f{Q_\mu \over Q_{1}} h_2(E_\mu)=1
\end{equation}
Let $\eta_{sys}=\eta\times \eta_d$; the observables and single-photon contributions can be written as:
\begin{equation}
\begin{aligned}
Q_\mu &= Y_0+1-\exp(-\mu\eta_{sys})\\
E_\mu &= {{{1\over 2}Y_0+e_d(1-\exp(-\mu\eta_{sys}))}\over{Y_0+1-\exp(-\mu\eta_{sys})}}\\
Q_1 &=\mu \exp(-\mu)(Y_0+\eta_{sys})\\
e_1 &={{{1\over 2}Y_0+e_d\eta_{sys}}\over{Y_0+\eta_{sys}}}
\end{aligned}
\end{equation}
Now, if $\eta \ll 1$, we can use the approximation $1-\exp(-\mu\eta_{sys})\approx\mu\eta_{sys}$. If the dark/background count rate $Y_0$ also satisfies $Y_0 \ll \eta_{sys}$ (which is a reasonable approximation, since with the parameters in Table II, $Y_0$ is of the order of $10^{-5}$, while $\eta_d \eta_{critical}$ is of the order of $10^{-3}$), we can write
\begin{equation}
\begin{aligned}
{Q_\mu \over Q_1 }&\approx {{Y_0+\mu\eta_{sys}}\over{\mu \exp(-\mu)(Y_0+\eta_{sys})}} \approx \exp(\mu)\\
e_1 &\approx {1\over 2}{Y_0 \over \eta_{sys}}+e_d \\
E_\mu &\approx {1\over {2\mu}}{Y_0 \over \eta_{sys}}+e_d
\end{aligned}
\end{equation}
\noindent substituting back into Eq. D12, and defining
\begin{equation}
x={Y_0 \over \eta_{sys}}={Y_0\over{\eta_d\eta}}
\end{equation}
\noindent we can have
\begin{equation}
h_2({1\over 2}x+e_d)+fe^\mu h_2({1\over {2\mu}}x+e_d)=1
\end{equation}
\noindent this is a function that is only determined by $e_d$ and $\mu$. We can write its solution for x as
\begin{equation}
x_{critical}=\mathcal{F}(e_d,\mu)
\end{equation}
Then the critical transmittance (i.e. optimal threshold position) can be written as
\begin{equation}
\eta_{critical}={{Y_0}\over{\eta_d}} [{1\over\mathcal{F}(e_d,\mu)}]
\end{equation}
\noindent where $\mathcal{F}(e_d,\mu)$ does not have an explicit analytical expression, because the $h_2$ function cannot be analytically expanded. (One can, however, numerically use a linear fit to expand $h_2$, given the approximate range of the experimental parameters $e_d$ and $\mu$.) The important observation here, however, is that for the decoy-state case, we can still have:
\begin{equation}
\eta_{critical} \propto{Y_0 \over \eta_d}
\end{equation}
\noindent which points out that the critical threshold is directly proportional to the dark (or background) count rate of the experimental devices, and inversely proportional to the detector efficiency.
\section{PDTC parameters}
In our simulations, we have fixed several typical values for $\sigma$ for free-space QKD, corresponding to the case of weak-to-medium level turbulence, and have considered the PDTC to be a fixed distribution for a given $\sigma$ regardless of the channel loss. In reality, though, $\sigma$ is distance-dependent, too. A commonly used estimation for $\sigma$ is the ``Rytov approximation''~\cite{rytov}
\begin{equation}
\sigma^2 = 1.23 C_n^2 k^{7/6} L^{11/6}
\end{equation}
\noindent which relates $\sigma$ both to the distance $L$ and the refractive index structure constant $C_n^2$ (which is determined by atmospheric conditions).
Also, with simulation software such as MODTRAN~\cite{modtran}, it is possible to simulate the relationship between $\eta_0$ and $L$ for a given free-space channel. Therefore, one necessary next step would also be to estimate performance for cases with realistic values for $\eta_0$ and $\sigma$, both from literature and from simulations, as well as to study the possible correlation between $\sigma$ and $\eta_0$ (both related to $L$) in simulations.
\end{document} |
\begin{document}
\title{\bf Frames of translates with prescribed fine structure \\ in shift invariant spaces} \author{Mar\'\i a J. Benac $^{*}$, Pedro G. Massey $^{*}$, and Demetrio Stojanoff \footnote{Partially supported by CONICET (PIP 0435/10) and Universidad Nacional de La Plata (UNLP 11X681) } \
\footnote{ e-mail addresses: [email protected] , [email protected] , [email protected]} \\ {\small Depto. de Matem\'atica, FCE-UNLP and IAM-CONICET, Argentina }} \date{} \maketitle
\begin{abstract}
For a given finitely generated shift invariant (FSI) subspace ${\cal W}\subset L^2(\mathbb{R}^k)$ we obtain a simple criterion for the existence of shift generated (SG) Bessel sequences $E(\mathcal F)$ induced by finite sequences of vectors $\mathcal F\in {\cal W}^n$ that have a prescribed fine structure, i.e., such that the norms of the vectors in $\mathcal F$ and the spectra of $S_{E(\mathcal F)}$ are prescribed in each fiber of $\text{Spec}({\cal W})\subset \mathbb{T}^k$. We complement this result by developing an analogue of the so-called sequences of eigensteps from finite frame theory in the context of SG Bessel sequences, that allows for a detailed description of all sequences with prescribed fine structure. Then, given $0<\alpha_1\leq \ldots\leq \alpha_n$ we characterize the finite sequences $\mathcal F\in{\cal W}^n$ such that $\|f_i\|^2=\alpha_i$, for $1\leq i\leq n$, and such that the fine spectral structure of the shift generated Bessel sequences $E(\mathcal F)$ has minimal spread (i.e. we show the existence of optimal SG Bessel sequences with prescribed norms); in this context the spread of the spectra is measured in terms of the convex potential $P^{\cal W}_\varphi$ induced by ${\cal W}$ and an arbitrary convex function $\varphi:\mathbb{R}_+\rightarrow \mathbb{R}_+$. \end{abstract}
\noindent AMS subject classification: 42C15.
\noindent Keywords: frames of translates, shift invariant subspaces, Schur-Horn theorem, frame design problems, convex potentials.
\tableofcontents
\section{Introduction} Let ${\cal W}$ be a closed subspace of a separable complex Hilbert space $\mathcal{H}$ and let $\mathbb I$ be a finite or countable infinite set. A sequence $\mathcal F=\{f_i\}_{i\in \mathbb I}$ in ${\cal W}$ is a frame for ${\cal W}$ if there exist positive constants $0<a\leq b$ such that $$
a \, \|f\|^2\leq \sum_{i\in \mathbb I}|\langle f,f_i\rangle |^2\leq b\,\|f\|^2 \peso{for every} f\in {\cal W}\, . $$ If we can choose $a=b$ then we say that $\mathcal F$ is a tight frame for ${\cal W}$. A frame $\mathcal F$ for ${\cal W}$ allows for linear (typically redundant) and stable encoding-decoding schemes of vectors (signals) in ${\cal W}$. Indeed, if ${\cal V}$ is a closed subspace of $\mathcal{H}$ such that ${\cal V}\oplus{\cal W}^\perp=\mathcal{H}$ (e.g. ${\cal V}={\cal W}$) then it is possible to find frames $\mathcal G=\{g_i\}_{i\in \mathbb I}$ for ${\cal V}$ such that \begin{equation}\label{eq: intro duals} f=\sum_{i\in \mathbb I}\langle f,g_i\rangle \ f_i \ , \quad \text{ for } f\in{\cal W}\,. \end{equation} The representation above lies within the theory of oblique duality (see \cite{YEldar3,CE06,YEldar1,YEldar2}). In applied situations, it is usually desired to develop encoding-decoding schemes as above, with some additional features related with stability of the scheme.
In some cases, we search for schemes such that the sequence of norms $\{\|f_i\|^2\}_{i\in \mathbb I}$ as well as the spectral properties of the family $\mathcal F$ are given in advance, leading to what is known in the literature as frame design problem (see \cite{AMRS,BF,CFMPS,CasLeo,MR08,Pot} and the papers \cite{FMP,MRS13,MRS14,MRS13b} for the more general frame completions problem with prescribed norms). It is well known that both the spread of the sequences of norms as well as the spread of the spectra of the frame $\mathcal F$ are linked with numerical properties of $\mathcal F$. Once we have constructed a frame $\mathcal F$ for ${\cal W}$ with the desired properties, we turn our attention to the construction of frames $\mathcal G$ for ${\cal V}$ satisfying Eq.\eqref{eq: intro duals} and having some prescribed features related with their numerical stability (see \cite{BMS14,BMS15,CE06,MRS13,MRS13b}).
\noi It is well known that the frame design problem has an equivalent formulation in terms of the relation between the main diagonal of a positive semi-definite operator and its spectra; in the finite dimensional setting this relation is characterized in the Schur-Horn theorem from matrix analysis. There have been recent important advances in both the frame design problems as well as the Schur-Horn theorems in infinite dimensions, mainly due to the interactions of these problems (see \cite{AMRS,BoJa,BoJa2,BoJa3,Jas,KaWe}). There are also complete parametrizations of all finite frames with prescribed norms and eigenvalues (of their frame operators) in terms of the so-called eigensteps sequences \cite{CFMPS}. On the other hand, the spectral structure of oblique duals (that include classical duals) of a fixed frame can be described in terms of the relations between the spectra of a positive semi-definite operator and the spectra of its compressions to subspaces. In the finite dimensional context (see \cite{BMS14,MRS13}) these relations are known as the Fan-Pall inequalities (that include the so-called interlacing inequalities as a particular case). Yet, in general, the corresponding results in frame theory do not take into consideration any additional structure of the frame. For example, regarding the frame design problem, it seems natural to wonder whether we can construct a structured frame (e.g., wavelet, Gabor or a shift generated frame) with prescribed structure; similarly, in case we fix a structured frame $\mathcal F$ for ${\cal W}$ it seems natural to wonder whether we can construct structured oblique dual frames with further prescribed properties.
\noi In \cite{BMS15}, as a first step towards a detailed study of the spectral properties of structured oblique duals of shift generated systems induced by finite families of vectors in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$, we extended the Fan-Pall theory to the context of measurable fields of positive semi-definite matrices and their compressions by measurable selections of subspaces; this allowed us to give an explicit description of what we called {\it fine spectral structure} of the shift generated duals of a fixed shift generated (SG) frame for a finitely generated shift invariant (FSI) subspace ${\cal W}$ of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$. Given a convex function $\varphi:\mathbb{R}}\def\C{\mathbb{C}_+\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ we also introduced the convex potential associated to pair the $(\varphi,{\cal W})$, that is a functional on SG Bessel sequences that measures the spread of the fine spectral structure of the sequence; there we showed that these convex potentials detect tight frames as their minimizers (under some normalization conditions). Yet, our analysis was based on the fine spectral structure of a given SG Bessel sequence in a FSI subspace ${\cal W}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$.
\noi
In this paper, building on an extension of the Schur-Horn theorem for measurable fields of positive semi-definite matrices, we characterize the possible {\it fine structures} of SG Bessel sequences in FSI subspaces (see Section \ref{SI cosas} for preliminaries on SG Bessel sequences, Remark \ref{rem sobre estruc fina} and Theorem \ref{teo sobre disenio de marcos});
thus, we solve a frame design problem, where the prescribed features of the SG Bessel sequences are described in terms of some internal (or fine) structure, relative to a finitely generated shift invariant subspace ${\cal W}$. We also show that the Fan-Pall theory for fields of positive semi-definite matrices can be used to obtain a detailed description of SG Bessel sequences with prescribed fine structure, similar to that obtained in terms of the eigensteps in \cite{CFMPS}. In turn, we use these results to show that
given a FSI subspace ${\cal W}$, a convex function $\varphi:\mathbb{R}_+\rightarrow \mathbb{R}_+$ and a finite sequence of positive numbers $\alpha_1\geq \ldots\geq \alpha_n>0$, there exist vectors $f_i\in{\cal W}$ such that $\|f_i\|^2=\alpha_i$, for $1\leq i\leq n$, and such that the SG Bessel sequence induced by these vectors minimizes the convex potential associated to the pair $(\varphi,{\cal W})$, among all such SG Bessel sequences (for other optimal design problems in shift invariant spaces see \cite{AlCHM1,AlCHM2}). The existence of these $(\varphi,{\cal W})$-optimal shift generated frame designs with prescribed norms is not derived using a direct ``continuity + compactness'' argument. Actually, their existence follows from the discrete nature of their spectral structure; we make use of this fact to reduce the problem of describing the structure of optimal designs, to an optimization problem in a finite dimensional setting. As a tool, we consider the waterfilling construction in terms of majorization in general probability spaces. It is worth pointing out that there has been interest in the structure of finite sequences of vectors that minimize convex potentials in the finite dimensional context (see \cite{CKFT,FMP,MR08,MR10}), originating from the seminal paper \cite{BF}; our present situation is more involved and, although we reduce the problem to a finite dimensional setting, this reduction is not related to the techniques or the results of the previous works on finite families of vectors.
\noi The paper is organized as follows. In Section \ref{sec prelim}, after fixing the general notations used in the paper, we present some preliminary material on frames, shift invariant subspaces and shift generated Bessel sequences; we end this section with the general notion of majorization in probability spaces. In Section \ref{subsec exac charac} we obtain an exact characterization of the existence of shift generated Bessel sequences with prescribed fine structure in terms of majorization relations; this result is based on a version of the Schur-Horn theorem for measurable fields of positive semi-definite matrices (defined on measure spaces) that is developed in the appendix (see Section \ref{Appendixity}). In Section \ref{subsec eigensteps}, building on the Fan-Pall inequalities from \cite{BMS15}, we obtain a detailed description of all shift generated Bessel sequences with prescribed fine structure that generalizes the so-called eigensteps construction in the finite dimensional setting. In Section \ref{sec opti frames with prescribed norms} we show that for a fixed sequence of positive numbers $\alpha_1\geq \ldots\geq \alpha_n>0$, a
convex function $\varphi:\mathbb{R}}\def\C{\mathbb{C}_+\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ and a FSI subspace ${\cal W}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ there exist vectors $f_i\in{\cal W}$ such that $\|f_i\|^2=\alpha_i$, for $1\leq i\leq n$, and such that $\mathcal F$ minimizes the convex potential associated to the pair $(\varphi,{\cal W})$ among all such finite sequences; in order to do this, we first consider in Section \ref{subse uniform} the uniform case in which the dimensions of the fibers of ${\cal W}$ are constant on the spectrum of ${\cal W}$. The general case of the optimal design problem with prescribed norms in a FSI is studied in Section \ref{subsec gral mi gral}; our approach is based on a reduction of the problem to an optimization procedure in the finite dimensional setting. The paper ends with an Appendix, in which we consider a measurable version of the Schur-Horn theorem needed in Section \ref{subsec exac charac} as well as some technical aspects of an optimization problem needed in Section \ref{subsec gral mi gral}.
\section{Preliminaries} \label{sec prelim}
In this section we recall some basic facts related with frames for subspaces and shift generated frames for shift invariant (SI) subspaces of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$. At the end of this section we describe majorization between functions in arbitrary probability spaces.
\def\mathbb {I} _m{\mathbb {I} _m} \def\mathbb {I} _d{\mathbb {I} _d} \def\mathds{1}{\mathds{1}}
\noi {\bf General Notations}
\noi Throughout this work we shall use the following notation: the space of complex $d\times d$ matrices is denoted by $\mathcal{M}_d(\mathbb{C})$, the real subspace of self-adjoint matrices is denoted $\mathcal{H}(d)$ and $\mat^+$ denotes the set of positive semi-definite matrices; $\mathcal{G}\textit{l}\,(d)$ is the group of invertible elements of $\mathcal{M}_d(\mathbb{C})$, $\mathcal{U}(d)$ is the subgroup of unitary matrices and $\mathcal{G}\textit{l}\,(d)^+ = \mat^+ \cap \mathcal{G}\textit{l}\,(d)$. If $T\in \mathcal{M}_d(\mathbb{C})$, we denote by
$\|T\|$ its spectral norm, by $\text{\rm rk}\, T= \dim R(T) $ the rank of $T$, and by $\tr T$ the trace of $T$.
\noi Given $d \in \mathbb{N}$ we denote by $\mathbb {I} _d = \{1, \dots , d\} \subseteq \mathbb{N}$ and we set $\mathbb{I}_0=\emptyset$. For a vector $x \in \mathbb{R}}\def\C{\mathbb{C}^d$ we denote by $x^\downarrow\in \mathbb{R}}\def\C{\mathbb{C}^d$
the rearrangement of $x$ in non-increasing order. We denote by $(\mathbb{R}}\def\C{\mathbb{C}^d)^\downarrow = \{ x\in \mathbb{R}}\def\C{\mathbb{C}^d : x = x^\downarrow\}$ the set of downwards ordered vectors. Given $S\in \mathcal{H}(d)$, we write $\lambda(S) = \lambda^\downarrow(S)= (\lambda_1(S) \, , \, \dots \, , \, \lambda_d(S)\,) \in (\mathbb{R}}\def\C{\mathbb{C}^d)^\downarrow$ for the vector of eigenvalues of $S$ - counting multiplicities - arranged in decreasing order.
\noi If $W\subseteq \C^d$ is a subspace we denote by $P_W \in \mat^+$ the orthogonal projection onto $W$. Given $x\, , \, y \in \C^d$ we denote by $x\otimes y \in \mathcal{M}_d(\mathbb{C})$ the rank one matrix given by \begin{equation} \label{tensores} x\otimes y \, (z) = \langle z\, , \, y\rangle \, x \peso{for every} z\in \C^d \ .
\end{equation} Note that, if $x\neq 0$, then the projection $P_x \ \stackrel{\mbox{\tiny{def}}}{=}\ P_{\gen\{x\}}= \|x\|^{-2} \, x\otimes x \,$.
\subsection{Frames for subspaces}\label{sec defi frames subespacios}
In what follows $\mathcal{H}$ denotes a separable complex Hilbert space and $\mathbb I$ denotes a finite or countable infinite set. Let ${\cal W}$ be a closed subspace of $\mathcal{H}$: recall that a sequence $\mathcal F=\{f_i\}_{i\in \mathbb I}$ in ${\cal W}$ is a {\it frame} for ${\cal W}$ if there exist positive constants $0<a\leq b$ such that \begin{equation}\label{defi frame}
a \, \|f\|^2\leq \sum_{i\in \mathbb I}|\langle f,f_i\rangle |^2\leq b\,\|f\|^2 \peso{for every} f\in {\cal W}\, . \end{equation} In general, if $\mathcal F$ satisfies the inequality to the right in Eq. \eqref{defi frame} we say that $\mathcal F$ is a $b$-Bessel sequence for ${\cal W}$. Moreover, we shall say that a sequence $\mathcal G=\{g_i\}_{i\in\mathbb I}$ in $\mathcal{H}$ is a Bessel sequence - without explicit reference to a closed subspace - whenever
$\mathcal G$ is a Bessel sequence for its closed linear span; notice that this is equivalent to the fact that $\mathcal G$ is a Bessel sequence for $\mathcal{H}$.
\noi Given a Bessel sequence $\mathcal F=\{f_i\}_{i\in \mathbb I}$ we consider its {\it synthesis operator} $T_\mathcal F\in L(\ell^2(\mathbb I),\mathcal{H})$ given by $T_\mathcal F((a_i)_{i\in \mathbb I})=\sum_{i\in \mathbb I} a_i\ f_i$ which, by hypothesis on $\mathcal F$, is a bounded linear transformation. We also consider $T_\mathcal F^*\in L(\mathcal{H},\ell^2(\mathbb I))$ called the {\it analysis operator} of $\mathcal F$, given by $T_\mathcal F^*(f)=(\langle f,f_i\rangle )_{i\in \mathbb I}$ and the {\it frame operator} of $\mathcal F$ defined by $S_\mathcal F=T_\mathcal F\,T_\mathcal F^*$. It is straightforward to check that $$
\langle S_\mathcal F f,f \rangle =\sum_{i\in \mathbb I}|\langle f,f_i\rangle |^2 \peso{for every}
f\in \mathcal{H}\ .
$$ Hence, $S_\mathcal F$ is a positive semi-definite bounded operator; moreover, a Bessel sequence $\mathcal F$ in ${\cal W}$ is a frame for ${\cal W}$ if and only if $S_\mathcal F$ is an invertible operator when restricted to ${\cal W}$ or equivalently, if the range of $T_\mathcal F$ coincides with ${\cal W}$.
\noi If ${\cal V}$ is a closed subspace of $\mathcal{H}$ such that ${\cal V}\oplus{\cal W}^\perp=\mathcal{H}$ (e.g. ${\cal V}={\cal W}$) then it is possible to find frames $\mathcal G=\{g_i\}_{i\in \mathbb I}$ for ${\cal V}$ such that $$f=\sum_{i\in \mathbb I}\langle f,g_i\rangle \ f_i \ , \quad \text{ for } f\in{\cal W}\,. $$ The representation above lies within the theory of oblique duality (see \cite{YEldar3,CE06,YEldar1,YEldar2}). In this note we shall not be concerned with oblique duals; nevertheless, notice that the numerical stability of the encoding-decoding scheme above depends both on the numerical stability corresponding to $\mathcal F$ and $\mathcal G$ as above. One way to measure stability of the encoding or decoding algorithms is to measure the spread of the spectra of the frame operators corresponding to $\mathcal F$ and $\mathcal G$. Therefore both the task of constructing optimally stable $\mathcal F$ together with obtaining optimally stable duals $\mathcal G$ of $\mathcal F$ are of fundamental interest in frame theory.
\subsection{SI subspaces, frames of translates and their convex potentials}\label{SI cosas}
In what follows we consider $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ (with respect to Lebesgue measure) as a separable and complex Hilbert space. Recall that a closed subspace ${\cal V}\subseteq L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ is {\it shift-invariant} (SI) if $f\in {\cal V}$ implies $T_\ell f \in {\cal V}$ for any $\ell\in \mathbb{Z}^k$, where $T_yf(x)=f(x-y)$ is the translation by $y \in \mathbb{R}}\def\C{\mathbb{C}^k$. For example, take a subset $\mathcal{A} \subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and set $$
{\cal S}(\mathcal{A})= \overline{\text{span}}\,\{T_\ell f:\ f\in\mathcal{A}\, , \ \ell\in\mathbb Z^k\} \,. $$ Then, ${\cal S}(\mathcal{A})$ is a shift-invariant subspace called the {\it SI subspace generated by $\mathcal{A}$}; indeed, ${\cal S}(\mathcal{A})$ is the smallest SI subspace that contains $\mathcal{A}$. We say that a SI subspace ${\cal V}$ is {\it finitely generated} (FSI) if there exists a finite set $\mathcal{A}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ such that ${\cal V}={\cal S}(\mathcal{A})$. We further say that ${\cal W}$ is a principal SI subspace if there exists $f\in L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ such that ${\cal W}={\cal S}(f)$.
\noi In order to describe the fine structure of a SI subspace we consider the following representation of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ (see \cite{BDR,Bo,RS95} and \cite{CabPat} for extensions of these notions to the more general context of actions of locally compact abelian groups). Let $\mathbb{T}=[-1/2,1/2)$ endowed with the Lebesgue measure and let $L^2(\mathbb{T}^k, \ell^2(\mathbb{Z}^k))$ be the Hilbert space of square integrable $\ell^2(\mathbb{Z}^k)$-valued functions that consists of all vector valued measurable functions $\phi: \mathbb{T}^k \to \ell^2(\mathbb{Z}^k)$ with the norm
$$\| \phi\|^2= \int_{\mathbb{T}^k} \| \phi(x)\|_{\ell^2(\mathbb{Z}^k)}^{2} \ dx< \infty.$$ Then, $\Gamma: L^2(\mathbb{R}}\def\C{\mathbb{C}^k)\to L^2(\mathbb{T}^k, \ell^2(\mathbb{Z}^k))$ defined for $f\in L^1(\mathbb{R}}\def\C{\mathbb{C}^k)\cap L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ by \begin{equation}\label{def: iso} \Gamma f: \mathbb{T}^k \to \ell^2(\mathbb{Z}^k)\ ,\quad\Gamma f(x)= (\hat{f}(x+\ell))_{\ell\in \mathbb{Z}^k}, \end{equation} extends uniquely to an isometric isomorphism between $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and $L^2(\mathbb{T}^k, \ell^2(\mathbb{Z}^k))$; here $$\hat f(x)= \int_{\mathbb{R}}\def\C{\mathbb{C}^k} f(y) \ e^{-2\pi\, i\,\langle y,\,x\rangle} \ dy \quad \text{ for } \quad x\in\mathbb{R}}\def\C{\mathbb{C}^k\, , $$ denotes the Fourier transform of $f\in L^1(\mathbb{R}}\def\C{\mathbb{C}^k)\cap L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$.
\noi Let ${\cal V}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ be a SI subspace. Then, there exists a function $J_{\cal V}:\mathbb{T}^k\rightarrow\{$ closed subspaces of $\ell^2(\mathbb{Z}^k)\}$ such that: if $ P_{J_{\cal V}(x)}$ denotes the orthogonal projection onto $J_{\cal V}(x)$ for $x\in\mathbb{T}^k$, then for every $\xi,\,\eta\in \ell^2(\mathbb{Z}^k)$ the function $x\mapsto \langle P_{J_{\cal V}(x)} \,\xi\, , \, \eta\rangle$ is measurable and \begin{equation}\label{pro: V y J} {\cal V}=\{ f\in L^2(\mathbb{R}}\def\C{\mathbb{C}^k): \Gamma f(x) \in J_{\cal V}(x) \,\ \text{for a.e.}\,\ x\in \mathbb{T}^k\}. \end{equation} The function $J_{\cal V}$ is the so-called {\it measurable range function} associated with ${\cal V}$. By \cite[Prop.1.5]{Bo}, Eq. \eqref{pro: V y J} establishes a bijection between SI subspaces of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and measurable range functions. In case ${\cal V}=S(\mathcal A) \subseteq L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ is the SI subspace generated by $\mathcal A=\{h_i:i\in \mathbb I\}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$, where $\mathbb I$ is a finite or countable infinite set, then for a.e. $x\in\mathbb{T}^k$ we have that \begin{equation}\label{eq Jv} J_{\cal V}(x)=\overline{\text{span}}\,\{\Gamma h_i(x): \ i\in \mathbb I\}\,. \end{equation}
\noi Recall that a bounded linear operator $S\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))$ is {\it shift preserving} (SP) if $T_\ell \, S=S\,T_\ell$ for every $\ell\in\mathbb{Z}^k$. In this case (see \cite[Thm 4.5]{Bo}) there exists a (weakly) measurable field of operators $[S]_{(\cdot)}:\mathbb{T}^k\rightarrow \ell^2(\mathbb{Z}^k)$ (i.e. such that for every $\xi,\,\eta\in \ell^2(\mathbb{Z}^k)$ the function $\mathbb{T}^k\ni x\mapsto \langle [S]_x\, \xi\, , \, \eta \rangle $
is measurable) and essentially bounded (i.e. the function $\mathbb{T}^k\ni x\mapsto \|\,[S]_{x}\,\|$ is essentially bounded) such that \begin{equation}\label{defi hatS}
[S]_x \big(\Gamma f(x)\,\big)=\Gamma (Sf) (x) \quad \text{ for a.e. }x\in\mathbb{T}^k\ , \ \ f\in L^2(\mathbb{R}}\def\C{\mathbb{C}^k)\,.
\end{equation} Moreover, $\|S\|={\mathrm{ess}\sup}_{x\in\mathbb{T}^k} \|\, [S]_x\, \|$. Conversely, if $s:\mathbb{T}^k\rightarrow L(\ell^2(\mathbb{Z}^k))$ is a weakly measurable and essentially bounded field of operators then,
there exists a unique bounded operator $S\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))$ that is SP and such that $[S]=s$. For example, let ${\cal V}$ be a SI subspace and consider $P_{\cal V}\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))$, the orthogonal projection onto ${\cal V}$; then, $P_{\cal V}$ is SP so that $[P_{\cal V}]{} :\mathbb{T}^k\rightarrow L(\ell^2(\mathbb{Z}^k))$ is given by $[P_{\cal V}]{}_x=P_{J_{\cal V}(x)}$ i.e., the orthogonal projection onto $J_{\cal V}(x)$, for a.e. $x\in\mathbb{T}^k$.
\noi The previous notions associated with SI subspaces and SP operators allow to develop a detailed study of frames of translates. Indeed, let $\mathcal F=\{f_i\}_{i\in \mathbb I}$ be a (possibly finite) sequence in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$. In what follows we consider the sequence of integer translates of $\mathcal F$, denoted $E(\mathcal F)$ and given by
$$E(\mathcal F)=\{T_\ell \, f_i\}_{(\ell,\, i)\,\in\, \mathbb{Z}^{k}\times \mathbb I}\,.$$
For $x\in \mathbb{T}^k$, let $\Gamma\mathcal F(x)=\{\Gamma f_i(x)\}_{i\in \mathbb I}$ which is a (possibly finite) sequence in $\ell^2(\mathbb{Z}^k)$. Then $E(\mathcal F)$ is a $b$-Bessel sequence if and only if $\Gamma\mathcal F(x)$ is a $b$-Bessel sequence for a.e. $x\in \mathbb{T}^k$ (see \cite{Bo,RS95}). In this case, we consider the synthesis operator $T_{\Gamma\mathcal F(x)}:\ell^2(\mathbb I)\rightarrow \ell^2(\mathbb{Z}^k)$ and frame operator $S_{\Gamma\mathcal F(x)}:\ell^2(\mathbb{Z}^k)\rightarrow \ell^2(\mathbb{Z}^k)$ of $\Gamma\mathcal F(x)$, for $x\in\mathbb{T}^k$. It is straightforward to check that $S_{E(\mathcal F)}$ is a SP operator.
\noi If $\mathcal F=\{f_i\}_{i\in \mathbb I}$ and $\mathcal G=\{g_i\}_{i\in \mathbb I}$ are such that $E(\mathcal F)$ and $E(\mathcal G)$ are Bessel sequences then (see \cite{HG07,RS95}) the following fundamental relation holds: \begin{equation}\label{eq:fourier} [T_{E(\mathcal G)}\,T^*_{E(\mathcal F)}]_x = T_{\Gamma\mathcal G(x)}\,T^*_{\Gamma\mathcal F(x)}\ , \quad \text{for a.e }\, x \in \mathbb{T}^k \,. \end{equation} These equalities have several consequences. For example, if ${\cal W}$ is a SI subspace of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and we assume further that $\mathcal F,\,\mathcal G\in{\cal W}^n$ then, for every $f,\,g\in L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$, $$ \langle S_{E(\mathcal F)}\, f,\,g\rangle =\int_{\mathbb{T}^k} \langle S_{\Gamma\mathcal F(x)} \ \Gamma f(x),\,\Gamma g(x)\rangle_{\ell^2(\mathbb{Z}^k)}\ dx\ . $$ This last fact implies that $[S_{E(\mathcal F)}]_x=S_{\Gamma \mathcal F(x)}$ for a.e. $x\in\mathbb{T}^k$. Moreover, $E(\mathcal F)$ is a frame for ${\cal W}$ with frame bounds $0<a\leq b$ if and only if $\Gamma\mathcal F(x)$ is a frame for $J_{\cal W}(x)$ with frame bounds $0<a\leq b$ for a.e. $x\in \mathbb{T}^k$ (see \cite{Bo}).
\noi We end this section with the notion of convex potentials in FSI introduced in \cite{BMS15}\,; in order to describe these potentials we consider the sets \begin{equation}\label{def convf} \convf = \{ \varphi:\mathbb{R}}\def\C{\mathbb{C}_+\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+\ , \ \varphi \ \mbox{ is a convex function} \ \} \end{equation} and $\convfs = \{\varphi\in \convf \ , \ \varphi$ is strictly convex $\}$.
\begin{fed} \label{defi pot conv}\rm Let ${\cal W}$ be a FSI subspace in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$, let $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ be such that $E(\mathcal F)$ is a Bessel sequence and consider $\varphi\in \convf$. The convex potential associated to $(\varphi,{\cal W})$ on $E(\mathcal F)$, denoted $P_\varphi^{\cal W}(E(\mathcal F))$, is given by \begin{equation}\label{eq defi pot} P_\varphi^{\cal W}(E(\mathcal F))=\int_{\mathbb{T}^k} \tr(\varphi(S_{\Gamma \mathcal F(x)})\, [P_{\cal W}]_x) \ dx \end{equation} where $\varphi(S_{\Gamma \mathcal F(x)})$ denotes the functional calculus of the positive and finite rank operator $S_{\Gamma \mathcal F(x)}\in L(\ell^2(\mathbb{Z}^k))^+$ and $\tr(\cdot)$ denotes the usual semi-finite trace in $L(\ell^2(\mathbb{Z}^k))$.
$\triangle$ \end{fed}
\begin{exa} Let ${\cal W}$ be a FSI subspace of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and let $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$. If we set $\varphi(x)=x^2$ for $x\in \mathbb{R}}\def\C{\mathbb{C}_+$ then, the corresponding potential on $E(\mathcal F)$, that we shall denote $\FP(E(\mathcal F))$, is given by $$ \FP(E(\mathcal F))= \int_{\mathbb{T}^k}
\tr( S_{\Gamma \mathcal F(x)}^2) \ dx= \int_{\mathbb{T}^k} \ \sum_{i,\,j\in \mathbb {I} _n} |\langle \Gamma f_i(x),\Gamma f_j(x)\rangle|^2 \ dx\,, $$ where we have used the fact that $\varphi(0)=0$ in this case. Hence, $\FP(E(\mathcal F))$ is a natural extension of the Benedetto-Fickus frame potential (see \cite{BF}).
$\triangle$ \end{exa}
\noi With the notation of Definition \ref{defi pot conv}, it is shown in \cite{BMS15} that $P_\varphi^{\cal W}(E(\mathcal F))$ is a well defined functional on the class of Bessel sequences $E(\mathcal F)$ induced by a finite sequence $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ as above. The main motivation for considering convex potentials is that, under some natural normalization hypothesis, they detect tight frames as their minimizers (see \cite[Theorem 3.9.]{BMS15} or Corollary \ref{cororo1} below); that is, convex potentials provide simple scalar measures of stability that can be used to compare shift generated frames. Therefore, the convex potentials for FSI are natural extensions of the convex potentials in finite dimensions introduced in \cite{MR10}. In what follows, we shall consider the existence of tight frames $E(\mathcal F)$ for the FSI ${\cal W}$ with prescribed norms. It turns out that there are natural restrictions for the existence of such frames (see Theorem \ref{teo sobre disenio de marcos} below). In case these restrictions are not fulfilled then, the previous remarks show that minimizers of convex potentials associated to a pair $(\varphi,\,{\cal W})$ within the class of frames with prescribed norms are natural substitutes of tight frames.
\subsection{Majorization in probability spaces}\label{2.3}
Majorization between vectors (see \cite{Bhat,MaOl}) has played a key role in frame theory. On the one hand, majorization allows to characterize the existence of frames with prescribed properties (see \cite{AMRS,CFMPS,CasLeo}). On the other hand, majorization is a preorder relation that implies a family of tracial inequalities; this last fact can be used to explain the structure of minimizers of general convex potentials, that include the Benedetto-Fickus' frame potential (see \cite{BF,CKFT,MR08,MR10,MRS13,MRS14,MRS13b}). We will be dealing with convex potentials in the context of Bessel families of integer translates of finite sequences; accordingly, we will need the following general notion of majorization between functions in probability spaces.
\noi Throughout this section the triple $(X,\mathcal{X},\mu)$ denotes a probability space i.e.
$\mathcal{X}$ is a $\sigma$-algebra of sets in $X$ and $\mu$ is a probability measure defined on $\mathcal{X}$. We shall denote by $L^\infty(X,\mu)^+ = \{f\in L^\infty(X,\mu): f\ge 0\}$. For $f\in L^\infty(X, \mu)^+$, the {\it decreasing rearrangement} of $f$ (see \cite{MaOl}), denoted $f^*:[0,1)\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$, is given by \begin{equation}\label{eq:reord} f^*(s ) \ \stackrel{\mbox{\tiny{def}}}{=}\ \sup \,\{ t\in \mathbb{R}}\def\C{\mathbb{C}_+ : \ \mu \{x\in X:\ f(x)>t\} >s\} \peso{for every} s\in [0,1)\, . \end{equation}
\begin{rem} \label{rem:prop rear elem}We mention some elementary facts related with the decreasing rearrangement of functions that we shall need in the sequel. Let $f\in L^\infty(X,\mu)^+$, then: \begin{enumerate} \item $f^*$ is a right-continuous and non-increasing function.
\item $f$ and $f^*$ are equimeasurable i.e. for every Borel set $A\subset \mathbb{R}}\def\C{\mathbb{C}$ then $\mu(f^{-1}(A))=|(f^*)^{-1}(A)|$, where $|B|$ denotes the Lebesgue measure of the Borel set $B\subset \mathbb{R}}\def\C{\mathbb{C}$. In turn, this implies that for every continuous $\varphi:\mathbb{R}}\def\C{\mathbb{C}_+\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ then: $\varphi\circ f\in L^\infty(X,\mu)$ iff $\varphi\circ f^*\in L^\infty([0,1])$ and in this case \begin{equation}\label{reor int} \int_X \varphi\circ f\ d\mu =\int_{0}^1 \varphi\circ f^*\ dx\ . \end{equation} \item If $g\in L^\infty(X,\mu)$ is such that $f\leq g$ then $0\leq f^*\leq g^*$; moreover, in case $f^*=g^*$ then $f=g$.
$\triangle$ \end{enumerate} \end{rem}
\begin{fed}\rm Let $f, g\in L^\infty (X, \mu)^+$ and let $f^*,\,g^*$ denote their decreasing rearrangements. We say that $f$ \textit{submajorizes} $g$ (in $(X,\mathcal{X},\mu)$), denoted $g \prec_w f$, if \begin{eqnarray*}\label{eq: mayo de func} \int_{0}^{s} g^*(t) \,\ dt &\leq& \int_{0}^{s} f^*(t)\,\ dt \peso{for every} 0\leq s\leq 1 \,. \end{eqnarray*} If in addition $\int_{0}^{1} g^*(t)\,\ dt = \int_{0}^{1} f^*(t)\,\ dt$
we say that $f$ \textit{majorizes} $g$ and write $g \prec f$.
$\triangle$ \end{fed}
\noi In order to check that majorization holds between functions in probability spaces, we can consider the so-called {\it doubly stochastic maps}. Recall that a linear operator $D$ acting on $L^\infty(X,\mu)$ is a doubly-stochastic map if $D$ is unital, positive and trace preserving i.e. $$ D(1_X)=1_X \ , \ \ D\big(\, L^\infty (X, \mu)^+ \, \big)\subseteq L^\infty (X, \mu)^+ \peso{and} \int_X D(f)(x)\ d\mu(x) =\int_X f(x)\ d\mu(x) $$ for every $f\in L^\infty(X,\mu)$. It is worth pointing out that $D$ is necessarily a contractive map.
\noi Our interest in majorization lies in its relation to integral inequalities in terms of convex functions. The following result summarizes this relation as well as the role of the doubly stochastic maps (see for example \cite{Chong,Ryff}). Recall that $\convf$ and $\convfs$ (see Eq. \eqref{def convf}) denote the sets of convex and strictly convex functions $\varphi:\mathbb{R}_+\rightarrow \mathbb{R}_+$, respectively.
\begin{teo}\label{teo porque mayo} \rm Let $f,\,g\in L^\infty (X, \mu)^+$. Then the following conditions are equivalent: \begin{enumerate} \item $g\prec f$; \item There is a doubly stochastic map $D$ acting on $L^\infty(X,\mu)$ such that $D(f)=g$; \item For every $\varphi \in \convf$ we have that \begin{equation}\label{eq teo:desi mayo}
\int_X \varphi(g(x)) \ d\mu(x)\leq \int_X \varphi(f(x))\ d\mu(x)\ . \end{equation} \end{enumerate}
Similarly, $g\prec_w f \iff $ Eq. \eqref{eq teo:desi mayo} holds for every {non-decreasing} convex function $\varphi$. \qed \end{teo}
\noi The following result plays a key role in the study of the structure of minimizers of $\prec_w$ within (appropriate) sets of functions.
\begin{pro}[\cite{Chong}]\label{pro int y reo} \rm Let $f,\,g\in L^\infty(X,\mu)^+$ such that $g\prec_w f$. If there exists $\varphi\in\convfs$ such that \begin{equation} \int_X \varphi(f(x))\ d\mu(x) =\int_X \varphi(g(x)) \ d\mu(x) \peso{then} g^*=f^* \ . \tag*{
$\square$} \end{equation} \end{pro}
\noi
\section{Existence of shift generated frames with prescribed fine structure}
In this section we characterize the fine structure of a Bessel sequence $E(\mathcal F)$, where $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$. By the fine (or relative) structure of $E(\mathcal F)$ we mean the sequence of norms of the vectors $\Gamma \mathcal F(x)=(\Gamma f_i(x)) _{i\in\mathbb {I} _n}$ and the sequence of eigenvalues of $[S_{E(\mathcal F)}]_x$ for $x\in\mathbb{T}^k$ (see Remark \ref{rem sobre estruc fina} for a precise description). As we shall see, the possible fine structure of $E(\mathcal F)$ can be described in terms of majorization relations.
\subsection{A complete characterization in terms of majorization relations}\label{subsec exac charac}
\noi We begin by showing the existence of measurable spectral representations of self-adjoint SP operators with range lying in a FSI subspace (see Lemma \ref{lem spect represent ese}), which follow from results from \cite{RS95} regarding the existence of measurable fields of eigenvectors and eigenvalues (counting multiplicities and arranged in non-increasing order) of measurable fields $M:\mathbb{T}^k \rightarrow \mathcal{H}(d)$ of selfadjoint matrices. In order to do that, we first recall some notions and results from \cite{Bo}.
\noi Given ${\cal W}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ a FSI subspace, we say that $f\in {\cal W}$ is a quasi-orthogonal generator of ${\cal W}$ if \begin{equation}\label{eq:}
\|g\|^2=\sum_{\ell\in \mathbb{Z}^k} |\langle T_\ell f, g \rangle|^2\ , \peso{for every} g\in {\cal W}\, . \end{equation} The next theorem, which is a consequence of results from \cite{Bo}, provides a decomposition of any FSI subspace of $L^2(\mathbb{R}^k)$ into a finite orthogonal sum of principal SI subspaces with quasi-orthogonal generators.
\begin{teo}[\cite{Bo}]\label{teo:la descom de Bo} \rm Let ${\cal W}$ be a FSI subspace of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$, with $d=\supes_{x\in \mathbb{T}^k} d(x)$, where $d(x)=\dim J_{{\cal W}}(x)$ for $x\in\mathbb{T}^k$. Then there exist $h_1,\ldots,h_d\in{\cal W}$ such that ${\cal W}$ can be decomposed as an orthogonal sum \begin{eqnarray} {\cal W}=\bigoplus_{j\in\mathbb{I}_{d}} {\cal S}(h_j), \end{eqnarray} where $h_j$ is a quasi orthogonal generator of ${\cal S}(h_j)$ for $j\in\mathbb{I}_{d}\,$, and $\text{Spec}({\cal S}(h_{j+1}))\subseteq \text{Spec}({\cal S}(h_j))$ for $j\in \mathbb{I}_{d-1}\,$. Moreover, in this case $\{\Gamma h_j(x)\}_{j\in\mathbb{I}_{d(x)}}$ is a ONB of $J_{\cal W}(x)$ for a.e. $x\in\mathbb{T}^k$. \qed \end{teo}
\begin{lem}\label{lem spect represent ese} Let ${\cal W}$ be a FSI subspace in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ with $d=\supes_{x\in \mathbb{T}^k} d(x)$, where $d(x)=\dim J_{{\cal W}}(x)$ for $x\in\mathbb{T}^k$. Let $S\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))$ be a SP self-adjoint operator such that $R(S)\subseteq{\cal W}$. Then, there exist: \begin{enumerate} \item measurable vector fields $v_j:\mathbb{T}^k\rightarrow \ell^2(\mathbb{Z}^k)$ for $j\in\mathbb{I}_{d}$ such that $v_j(x)=0$ if $j>d(x)$ and $\{v_j(x)\}_{j\in\mathbb{I}_{d(x)}}$ is an ONB for $J_{\cal W}(x)$ for a.e. $x\in\mathbb{T}^k$; \item bounded, measurable functions $\lambda_j:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ for $j\in\mathbb{I}_{d}\,$, such that $\lambda_1\geq \ldots\geq\lambda_d$, $\lambda_j(x)=0$ if $j>d(x)$ and \begin{equation}\label{lem repre espec S} [S]_x =\sum_{j\in\mathbb{I}_{d(x)}}\lambda_j(x)\ v_j(x)\otimes v_j(x) \ , \peso{for a.e.}\ x\in\mathbb{T}^k\,. \end{equation} \end{enumerate} \end{lem} \begin{proof} By considering a convenient finite partition of $\mathbb{T}^k$ into measurable sets we can assume, without loss of generality, that $d(x)=d$ for a.e. $x\in \mathbb{T}^k$. In this case, by Theorem \ref{teo:la descom de Bo} we have that $$ {\cal W}=\bigoplus_{j\in \mathbb{I}_{d}} {\cal S}(h_j) \ , $$ where $h_j\in {\cal W}$, for $j\in\mathbb{I}_{d}$, are such that $\{\Gamma h_j(x)\}_{j\in\mathbb{I}_{d}}$ is a ONB of $J_{{\cal W}}(x)$ for a.e. $x\in \mathbb{T}^k$. Consider the measurable field of self-adjoint matrices $M(\cdot):\mathbb{T}^k\to \mathcal{H}(d)$ given by $$ M(x)=\big(\langle [S]_{x} \,\ \Gamma h_j(x) ,\,\ \Gamma h_i(x)\rangle\big)_{i,j\,\in \mathbb{I}_{d}}\ . 
$$ By \cite{RS95}, we can consider measurable functions $\lambda_j:\mathbb{T}^k\to \mathbb{R}}\def\C{\mathbb{C}_+$ for $j\in \mathbb{I}_d\,$, such that $\lambda_1\geq \ldots\geq \lambda_d$ and measurable vector fields $w_j:\mathbb{T}^k\to \C^d$ for $j\in \mathbb{I}_{d}\,$, such that $\{w_j(x)\}_{j\in \mathbb{I}_{d}}$ is a ONB of $\C^d$ and \begin{equation}\label{ecuac agreg1} M(x)=\sum_{j\in \mathbb{I}_{d}} \lambda_j(x)\,\ w_j(x) \otimes w_j(x) \peso{for a.e.} \ x\in \mathbb{T}^k\, . \end{equation} If $w_j(x)=(w_{ij}(x))_{i\in\mathbb{I}_{d}}$ for $j\in\mathbb{I}_{d}\,$, consider the measurable vector fields $v_j:\mathbb{T}^k \to \ell^2(\mathbb{Z}^k)$ for $j\in \mathbb{I}_{d}\,$, given by $$ v_j(x) =\sum_{i\in \mathbb{I}_{d}} w_{ij}(x)\,\ \Gamma h_i(x)\,\ \text{for}\,\ x\in \mathbb{T}^k\ . $$ Then, it is easy to see that $\{v_j(x)\}_{j\in \mathbb{I}_{d}}$ is ONB of $J_{\cal W}(x)$ for a.e. $x\in\mathbb{T}^k$; moreover, Eq. \eqref{ecuac agreg1} implies that Eq. \eqref{lem repre espec S} holds in this case. \end{proof}
\begin{rem}\label{rem sobre estruc fina} Let ${\cal W}$ be a FSI subspace with $d(x)=\dim J_{\cal W}(x)$ for $x\in\mathbb{T}^k$, and let $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ be a finite sequence in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ such that $E(\mathcal F)$
is a Bessel sequence. In what follows we consider: \begin{enumerate} \item the {\it fine spectral structure of} $E(\mathcal F)$, that is the weakly measurable function $$ \mathbb{T}^k\ni x\mapsto (\lambda_j([S_{E(\mathcal F)}]_x )\,)_{j\in\mathbb{N}}\in \ell^1_+(\mathbb{Z}^k) \ , $$ with $\lambda_j([S_{E(\mathcal F)}]_x )=\lambda_j(x)$ as in Lemma \ref{lem spect represent ese} for $j\in\mathbb{I}_{d(x)}\,$, and $\lambda_j([S_{E(\mathcal F)}]_x )=0$ for $j\geq d(x)+1$
and $x\in \mathbb{T}^k$. Thus, the fine spectral structure of $\mathcal F$ describes the eigenvalues of the positive finite rank operator $[S_{E(\mathcal F)}]_x =S_{\Gamma \mathcal F(x)}\in L(\ell^2(\mathbb{Z}^k))$, counting multiplicities and arranged in non-increasing order.
\item The {\it fine structure of} $E(\mathcal F)$ given by the fine spectral structure together with the measurable vector valued function $\mathbb{T}^k\ni x\mapsto (\|\Gamma f_i(x)\|^2)_{i\in\mathbb {I} _n}\in\mathbb{R}}\def\C{\mathbb{C}_+^n\,$.
$\triangle$ \end{enumerate} \end{rem}
\noi In order to state our main result of this section we shall need the notion of vector majorization from matrix analysis. Recall that given $a=(a_i)_{i\in \mathbb {I} _n}\in \mathbb{R}^n$ and $b=(b_i)_{i\in \mathbb {I} _m}\in \mathbb{R}^m$ we say that $a$ is majorized by $b$, denoted $a\prec b$, if \begin{equation}\label{eq: mayo dif} \sum_{i\in \mathbb{I}_k} a_i \leq \sum_{i\in \mathbb{I}_k} b_i\ , \ \ 1\leq k \leq \min\{n, m\} \ \ \text{and} \ \ \sum_{i\in \mathbb{I}_n} a_i = \sum_{i\in \mathbb{I}_m} b_i\,. \end{equation}
\begin{teo}[Existence of shift generated sequences with prescribed fine structure] \label{teo sobre disenio de marcos}
Let ${\cal W}$ be a FSI subspace in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and let $d(x)=\dim J_{{\cal W}}(x)$ for $x\in\mathbb{T}^k$. Given measurable functions $\alpha_j:\mathbb{T}^k \to \mathbb{R}}\def\C{\mathbb{C}_+$ for $j\in \mathbb {I} _n$ and $\lambda_j:\mathbb{T}^k \to \mathbb{R}}\def\C{\mathbb{C}_+$ for $j\in \mathbb{N}$,
the following conditions are equivalent: \begin{enumerate} \item There exists $\mathcal F=\{f_j\}_{j\in \mathbb {I} _n} \in {\cal W}^n$ such that $E(\mathcal F)$ is a Bessel sequence and: \begin{enumerate}
\item $\|\Gamma f_j(x)\|^2=\alpha_j(x)$ for $j\in \mathbb {I} _n$ and a.e. $x\in\mathbb{T}^k$; \item $\lambda_j([S_{E(\mathcal F)}]_x ) =\lambda_j(x)$ for $j\in\mathbb{N}$ and a.e. $x\in\mathbb{T}^k$. \end{enumerate} \item The following admissibility conditions hold: \begin{enumerate} \item $\lambda_j(x)=0$ for a.e. $x\in\mathbb{T}^k$ such that $j\geq\min\{d(x),n\}+1$. \item $(\alpha_j(x))_{j\in\mathbb {I} _n}\prec (\lambda_j(x))_{j\in\mathbb{I}_{d(x)}}$ for a.e. $x\in\mathbb{T}^k$. \end{enumerate} \end{enumerate} \end{teo}
\noi Our proof of Theorem \ref{teo sobre disenio de marcos} is based on the following extension of a basic result in matrix analysis related with the Schur-Horn theorem (for its proof, see section \ref{Appendixity} - Appendix). In what follows we let $D_b\in \mathcal{M}_d(\mathbb{C})$ be the diagonal matrix with main diagonal $b\in\C^d$.
\begin{teo}\label{teo:mayo equiv} \rm Let $b:\mathbb{T}^k\rightarrow (\mathbb{R}}\def\C{\mathbb{C}_+)^d$ and $c:\mathbb{T}^k\rightarrow (\mathbb{R}}\def\C{\mathbb{C}_+)^n$ be measurable vector fields. The following statements are equivalent: \begin{enumerate} \item For a.e. $x\in \mathbb{T}^k$ we have that $c(x)\prec b(x)$.
\item There exist measurable vector fields $u_j: \mathbb{T}^k\to \C^d$ for $j\in\mathbb {I} _n$ such that $\|u_j(x)\|=1$ for a.e. $x\in \mathbb{T}^k$ and $j\in \mathbb{I}_n\,$, and such that \begin{equation} D_{b(x)}=\sum_{j\in \mathbb{I}_n} c_j(x)\,\ u_j(x) \otimes u_j(x) \ , \peso{for a.e.} \ x\in \mathbb{T}^k\ . \tag*{
$\square$} \end{equation} \end{enumerate} \end{teo}
\begin{proof}[Proof of Theorem \ref{teo sobre disenio de marcos}]
Assume that there exists $\mathcal F=\{f_j\}_{j\in \mathbb {I} _n} \in {\cal W}^n$ such that $\|\Gamma f_j(x)\|^2=\alpha_j(x)$, for $j\in \mathbb {I} _n\,$, and $\lambda_j([S_{E(\mathcal F)}]_x ) =\lambda_j(x)$ for $j\in\mathbb{N}$ and a.e. $x\in\mathbb{T}^k$. Consider the measurable field of positive semi-definite matrices $G:\mathbb{T}^k\to {\cal M}_n(\C)^+$ given by the Gramian $$ G(x)=\Big(\, \big\langle \Gamma f_i(x)\, , \, \Gamma f_j(x) \big\rangle\, \Big)_{i,j\in \mathbb {I} _n} \ , \peso{for} x\in \mathbb{T}^k \ . $$ Notice that $G(x)$ is the matrix representation of $T^*_{\Gamma \mathcal F(x)}T_{\Gamma \mathcal F(x)}\in L(\C^n)$ with respect to the canonical basis of $\C^n$ for $x\in \mathbb{T}^k$; using the fact that the finite rank operators $T^*_{\Gamma \mathcal F(x)}T_{\Gamma \mathcal F(x)}$ and $T_{\Gamma \mathcal F(x)}T^*_{\Gamma \mathcal F(x)}=[S_{E(\mathcal F)}]_x $ have the same positive eigenvalues (counting multiplicities) we see that
$$ \lambda_j(G(x)) = \begin{cases} \lambda_j(x) & \peso{for} 1\leq j\leq \min\{d(x),n\} \\ \ \ \ 0 & \peso{for} \min\{d(x),n\}+1\leq j\leq n \end{cases} \peso{for a.e.} x\in\mathbb{T}^k \ . $$
On the other hand, the main diagonal of $G(x)$ is $(\|\Gamma f_j(x)\|^2)_{j\in\mathbb {I} _n}=(\alpha_j(x))_{j\in\mathbb {I} _n}\,$; hence, by the classical Schur-Horn theorem (see \cite{HJ13}) we see that $$ (\alpha_j(x))_{j\in\mathbb {I} _n}\prec \lambda(G(x))\in \mathbb{R}}\def\C{\mathbb{C}_+ ^n \implies (\alpha_j(x))_{j\in\mathbb {I} _n}\prec (\lambda_j(x))_{j\in \mathbb{I}_{d(x)}} \peso{for a.e.} x\in\mathbb{T}^k \ . $$
\noi Conversely, assume that $(\alpha_j(x))_{j\in \mathbb {I} _n} \prec (\lambda_i(x))_{i\in \mathbb{I}_{d(x)}}$ for a.e. $x\in \mathbb{T}^k$.
By considering a convenient finite partition of $\mathbb{T}^k$ into measurable subsets we can assume, without loss of generality, that $d(x)=d$ for $x\in\mathbb{T}^k$. Therefore, by Theorem \ref{teo:mayo equiv}, there exist measurable vector fields $u_j: \mathbb{T}^k\to \C^d$ for $j\in\mathbb {I} _n$ such that $\|u_j(x)\|=1$ for a.e. $x\in \mathbb{T}^k$ and $j\in \mathbb{I}_n$, and such that \begin{equation} \label{diago} D_{\lambda(x)}=\sum_{j\in \mathbb{I}_n} \alpha_j(x)\,\ u_j(x) \otimes u_j(x) \ , \peso{for a.e.} \ x\in \mathbb{T}^k\ , \end{equation} where $\lambda(x)=(\lambda_j(x))_{j\in\mathbb{I}_d}$ for $x\in \mathbb{T}^k$. Now, by Theorem \ref{teo:la descom de Bo} there exist measurable vector fields $v_j:\mathbb{T}^k\rightarrow \ell^2(\mathbb{Z}^k)$ for $j\in\mathbb{I}_d$ such that $\{v_j(x)\}_{j\in\mathbb{I}_d}$ is a ONB of $J_{\cal W}(x)$ for a.e. $x\in\mathbb{T}^k$. Let $u_j(x)=(u_{ij}(x))_{i\in\mathbb{I}_d}$ for $j\in\mathbb {I} _n$ and $x\in\mathbb{T}^k$; then we consider the finite sequence $\mathcal F=\{f_j\}_{j\in\mathbb {I} _n}\in{\cal W}^n$ determined by $\Gamma f_j(x)=\alpha_j^{1/2}(x) \sum_{i\in\mathbb{I}_d} u_{ij}(x) \ v_i(x)$ for $j\in\mathbb {I} _n$ and $x\in\mathbb{T}^k$. It is clear that $$
\|\Gamma f_j(x)\|^2=\|\alpha_j^{1/2}(x)\ u_j(x)\|^2=\alpha_j(x) \peso{ for a.e. } x\in\mathbb{T}^k,\quad j\in\mathbb {I} _n\ . $$ Moreover, using Eq. \eqref{diago} it is easy to see that $$\left(\sum_{j\in\mathbb {I} _n} \Gamma f_j(x)\otimes \Gamma f_j(x) \right)\, v_i(x)= \lambda_i(x)\,v_i(x) \peso{for} i\in\mathbb{I}_d \ \ \text { and \ a.e. }\ x\in\mathbb{T}^k\,. $$ Hence, $\lambda_j([S_{E(\mathcal F)}]_x ) =\lambda_j(x)$ for $j\in\mathbb{N}$ and a.e. $x\in\mathbb{T}^k$. \end{proof}
\begin{rem}\label{se puede incluso con op SP}
Let ${\cal W}$ be a FSI subspace in $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and let $d(x)=\dim J_{{\cal W}}(x)$ for $x\in\mathbb{T}^k$. Let $\alpha_j:\mathbb{T}^k \to \mathbb{R}}\def\C{\mathbb{C}_+$, $j\in \mathbb {I} _n\,$, be measurable functions and let $S\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))^+$ be a positive SP operator such that $R(S)\subseteq {\cal W}$. Let $\mathbb{T}^k\ni x\mapsto (\lambda_j([S]_x) )_{j\in\mathbb{N}}$ be the fine spectral structure of $S$ (which is well defined by Lemma \ref{lem spect represent ese}). Assume that for a.e. $x\in\mathbb{T}^k$ we have that $$ (\alpha_j(x))_{j\in\mathbb {I} _n}\prec (\lambda_j([S]_x) )_{j\in\mathbb{I}_{d(x)}}\ . $$ Then, there exists $\mathcal F=\{f_j\}_{j\in \mathbb {I} _n} \in {\cal W}^n$ such that $E(\mathcal F)$ is a Bessel sequence, $$
S_{E(\mathcal F)}=S \peso{and} \|\Gamma f_j(x)\|^2=\alpha_j(x) \peso{for a.e.} x\in\mathbb{T}^k \, , \, j\in\mathbb {I} _n \ . $$ Indeed, if in the proof of Theorem \ref{teo sobre disenio de marcos} above we take the measurable vector fields $v_j:\mathbb{T}^k\rightarrow \ell^2(\mathbb{Z}^k)$ such that $\{v_j(x)\}_{j\in\mathbb{I}_d}$ is a ONB of $J_{\cal W}(x)$ and such that $[S]_x \, v_j(x)=\lambda_j([S]_x)\ v_j$ for a.e. $x\in\mathbb{T}^k$ (notice that this can always be done by Lemma \ref{lem spect represent ese}) then we conclude, as before, that \begin{equation} [S_{E(\mathcal F)}]_x \ v_j(x)= \lambda_j([S]_x)\,v_j(x) \peso{for} j\in\mathbb{I}_d \implies [S_{E(\mathcal F)}]_x = [S]_x \peso{for a.e.} x\in\mathbb{T}^k\ . \tag*{\EOE} \end{equation} \end{rem}
\noi As a first application of Theorem \ref{teo sobre disenio de marcos} we show the existence of shift generated uniform tight frames for an arbitrary FSI. In turn, this allows us to strengthen some results from \cite{BMS15} (see also Corollary \ref{coro tight 2}).
\begin{cor}\label{cororo1} Let $\{0\}\neq{\cal W}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ be a FSI subspace and let $d(x)=\dim J_{\cal W}(x)$ for $x\in\mathbb{T}^k$. Assume that $n\geq \supes_{x\in\mathbb{T}^k}d(x)$,
let $Z_i=d^{-1}(i)$ for $i\in\mathbb {I} _n\,$ and set $C_{\cal W}= \sum_{i\in\mathbb {I} _n} i\cdot |Z_i|>0$. Then: \begin{enumerate}
\item There exists a sequence $\mathcal F=\{f_j\}_{j\in\mathbb {I} _n}\in{\cal W}^n$ such that $\|f_j\|^2=n^{-1}$ for $j\in\mathbb {I} _n$ and such that $E(\mathcal F)$ is a uniform tight frame for ${\cal W}$.
\item For any sequence $\mathcal G=\{g_j\}_{j\in\mathbb {I} _n}\in{\cal W}^n$ such that $E(\mathcal G)$ is a Bessel sequence and such that $\sum_{j\in\mathbb {I} _n} \|g_j\|^2=1$, and for every $\varphi\in\convf$ we get that: \begin{equation}\label{del item 2} P^{\cal W}_\varphi(E(\mathcal G))\geq C_{\cal W} \ \varphi(C_{\cal W}^{-1})=P^{\cal W}_\varphi(E(\mathcal F))\, . \end{equation} Moreover, if we assume that $\varphi\in\convfs$ then $P^{\cal W}_\varphi(E(\mathcal G))=C_{\cal W} \ \varphi(C_{\cal W}^{-1})$ if and only if $E(\mathcal G)$ is a tight frame for ${\cal W}$. \end{enumerate} \end{cor} \begin{proof}
Let $p_i=|Z_i|$ (where $|A|$ denotes the Lebesgue measure of $A\subset \mathbb{T}^k$ and $Z_i=d^{-1}(i)$) for $1\leq i\leq n$; then $C_{\cal W}= \sum_{i\in\mathbb {I} _n} i\cdot p_i$. Notice that by hypothesis Spec ${\cal W}=\cup_{i\in\mathbb {I} _n} Z_i\,$. For $x\in\mathbb{T}^k$ set $\alpha(x)=0$ if $x\in \mathbb{T}^k\setminus\text{Spec}({\cal W})$ and: \begin{equation} \label{defi alfas} \alpha(x):=\left\{
\begin{array}{ccc}
\frac{j\cdot C_{\cal W}^{-1} }{n}
& {\rm if} & x\in Z_j \ \text{ and } \ p_j>0\,; \\
0 & {\rm if} & x\in Z_j \ \text{ and } \ p_j=0 \, .\\
\end{array}
\right. \end{equation}
Then, it is easy to see that $(\alpha(x))_{i\in\mathbb {I} _n}\prec (C_{\cal W}^{-1})_{i\in\mathbb{I}_{d(x)}}$ for every $x\in \mathbb{T}^k$; hence, by Theorem \ref{teo sobre disenio de marcos} we see that there exists $\mathcal F=\{f_j\}_{j\in\mathbb {I} _n}\in{\cal W}^n$ such that $\|\Gamma f_j(x)\|^2=\alpha(x)$ for $j\in\mathbb {I} _n$ and such that $S_{\Gamma \mathcal F(x)}=C_{\cal W}^{-1}\ P_{J_{\cal W}(x)}$ for a.e. $x\in\mathbb{T}^k$. Therefore, $S_{E(\mathcal F)}=C_{\cal W}^{-1}\, P_{\cal W}$ and $$
\|f_j\|^2=\int_{\mathbb{T}^k} \alpha(x)\ dx= \frac{C_{\cal W}^{-1}}{n} \, \sum_{i\in\mathbb {I} _n} \int_{Z_i} i\ dx=\frac{C_{\cal W}^{-1}}{n}\ \sum_{i\in\mathbb {I} _n} i \cdot p_i=\frac{1}{n}\ . $$ If $\mathcal G$ is as in item 2. then, by \cite{BMS15}, we get the inequality \eqref{del item 2}. Notice that the lower bound is attained at $\mathcal F$ (since it is tight); the last part of the statement was already shown in \cite{BMS15}. \end{proof}
\subsection{Generalized (measurable) eigensteps}\label{subsec eigensteps}
In this section we derive a natural extension of the notion of eigensteps introduced in \cite{CFMPS}, that allows us to describe a procedure to inductively construct finite sequences $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ such that the fine structure of $E(\mathcal F)$ (that is, the fine spectral structure of $E(\mathcal F)$ and the finite sequence of measurable functions
$\|\Gamma f_i(\cdot)\|^2:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$, $i\in\mathbb{I}_n$) are prescribed. Hence, we obtain an in-depth description of a step-by-step construction of Bessel sequences $E(\mathcal F)$ with prescribed fine structure. We point out that our techniques are not based on those from \cite{CFMPS}; indeed, our approach is based on an additive model developed in \cite{BMS15}.
\begin{rem}\label{rem hay eigensteps}Let ${\cal W}$ be a FSI subspace and let $d(x)=\dim J_{\cal W}(x)$ for $x\in\mathbb{T}^k$; let $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ be such that $E(\mathcal F)$ is a Bessel sequence and set: \begin{enumerate}
\item $\alpha_i(x)=\|\Gamma f_i(x)\|^2$, for $x\in\mathbb{T}^k$ and $i\in\mathbb {I} _n\,$. \item $\lambda_i(x)=\lambda_i([S_{E(\mathcal F)}]_x )$ , for $x\in\mathbb{T}^k$ and $i\in\mathbb {I} _n\,$, where $\mathbb{T}^k\ni x\mapsto (\lambda_i([S_{E(\mathcal F)}]_x )\,)_{i\in\mathbb{N}}$ denotes the fine spectral structure of $E(\mathcal F)$. \end{enumerate} By Theorem \ref{teo sobre disenio de marcos} these functions satisfy the following admissibility conditions: \begin{enumerate} \item[Ad.1] $\lambda_i(x)=0$ for a.e. $x\in\mathbb{T}^k$ such that $i\geq\min\{n,\,d(x)\}+1$. \item[Ad.2] $(\alpha_i(x))_{i\in\mathbb {I} _n}\prec (\lambda_i(x))_{i\in\mathbb{I}_{d(x)}}$ for a.e. $x\in\mathbb{T}^k$. \end{enumerate} For $j\in\mathbb {I} _n$ consider the sequence $\mathcal F_j=\{f_i\}_{i\in\mathbb{I}_{j}}\in{\cal W}^j$. In this case $E(\mathcal F_j) =\{T_\ell f_i\}_{(\ell,i)\in\mathbb{Z}^k\times \mathbb{I}_{j}}$ is a Bessel sequence and $S_j=S_{E(\mathcal F_j)}$ is a SP operator such that $$ [S_j]_x=S_{\Gamma \mathcal F_j(x)} =\sum_{i\in\mathbb{I}_{j}} \Gamma f_i(x)\otimes \Gamma f_i(x)\in L(\ell^2(\mathbb{Z}^k))^+ \peso{for a.e. } x\in\mathbb{T}^k\ , \ \ j\in\mathbb {I} _n\ . $$ For $j\in\mathbb {I} _n$ and $i\in\mathbb{I}_{j}\,$, consider the measurable function $\lambda_{i,j}:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ given by $$ \lambda_{i,j}(x)=\lambda_i([S_j]_x)\peso{for} x\in\mathbb{T}^k\ , $$ where $\mathbb{T}^k\ni x\mapsto (\lambda_i([S_j]_x))_{i\in\mathbb{N}}$ denotes the fine spectral structure of $E(\mathcal F_j)$ (notice that by construction $\lambda_i([S_j]_x)=0$ for $i\geq j+1$). Then, it is well known (see \cite{CFMPS}) that
$(\lambda_{i,j}(x))_{i\in\mathbb{I}_{j}}$ interlaces $(\lambda_{i,(j+1)}(x))_{i\in\mathbb{I}_{j+1}}$ i.e. $$ \lambda_{i,(j+1)}(x)\geq \lambda_{i,j}(x)\geq \lambda_{(i+1),(j+1)}(x) \peso{for} i\in\mathbb{I}_{j}\ ,\ j\in\mathbb{I}_{n-1}\ , \ \text{ and a.e. } x\in\mathbb{T}^k\ . $$ Notice that for a.e. $x\in\mathbb{T}^k$, $$ \sum_{i\in\mathbb{I}_{j}} \lambda_{i,j}(x)= \tr([S_j]_x)
= \sum_{i\in\mathbb{I}_{j}}\|\Gamma f_i(x)\|^2 =\sum_{i\in\mathbb{I}_{j}}\alpha_i(x) \peso{for} j\in\mathbb {I} _n\ . $$ Finally notice that by construction $S_n=S_{E(\mathcal F)}$ and hence, $\lambda_{i,n}(x)=\lambda_i(x)$ for $i\in\mathbb {I} _n$ and $x\in \mathbb{T}^k$. These facts motivate the following extension of the notion of eigensteps introduced in \cite{CFMPS}.
$\triangle$ \end{rem}
\begin{fed}\label{defi measiegen} \rm Let ${\cal W}$ be a FSI subspace and let $\lambda_i\, , \, \alpha_i:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ for $i\in\mathbb {I} _n$ be measurable functions satisfying the admissibility assumptions Ad.1 and Ad.2 in Remark \ref{rem hay eigensteps}.
A sequence of eigensteps for $(\lambda,\alpha)$ is a doubly-indexed sequence of measurable functions $\lambda_{i,j}:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ for $i\in\mathbb{I}_{j}$ and $j\in\mathbb {I} _n$ such that: \begin{enumerate} \item $\lambda_{i,(j+1)}(x)\geq \lambda_{i,j}(x)\geq \lambda_{(i+1),(j+1)}(x)$ for $i\in\mathbb{I}_{j} \, , \, j\in\mathbb{I}_{n-1}\, , \, $
and a.e. $x\in\mathbb{T}^k$; \item $\sum_{i\in\mathbb{I}_{j}} \lambda_{i,j}(x)= \sum_{i\in\mathbb{I}_{j}}\alpha_i(x)$ for $j\in\mathbb {I} _n$ and a.e. $x\in \mathbb{T}^k$; \item $\lambda_{i,n}(x)=\lambda_i(x)$ for $i\in\mathbb {I} _n$ and a.e. $x\in \mathbb{T}^k$.
$\triangle$ \end{enumerate} \end{fed}
\begin{rem}\label{rem hay eigensteps2} Consider the notations and terminology from Remark \ref{rem hay eigensteps}. Then $((\lambda_{i,j}(\cdot))_{i\in\mathbb{I}_{j}})_{j\in\mathbb {I} _n}$ is a sequence of eigensteps for $(\lambda,\alpha)$. We say that $((\lambda_{i,j}(\cdot))_{i\in\mathbb{I}_{j}})_{j\in\mathbb {I} _n}$ is the sequence of eigensteps for $(\lambda,\alpha)$ associated to $\mathcal F$.
$\triangle$ \end{rem}
\noi In what follows we show that every sequence of eigensteps is associated to some $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ such that $E(\mathcal F)$ is a Bessel sequence (see Theorem \ref{teo todo eigen echachi} below). In order to show this, we recall an additive (operator) model from \cite{BMS15}.
\begin{fed}\label{el conjunto U}\rm Let ${\cal W}$ be a FSI subspace and let $d:\mathbb{T}^k\rightarrow \mathbb{N}_{\geq 0}$ be the measurable function given by $d(x)=\dim J_{{\cal W}}(x)$ for $x\in\mathbb{T}^k$. Let $S\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))^+$ be SP and such that $R(S)\subset {\cal W}$. Given a measurable function $m:\mathbb{T}^k\rightarrow \mathbb{Z}$ such that $m(x)\leq d(x)$ for a.e. $x\in\mathbb{T}^k$ we consider $$U^{\cal W}_m(S) = \Big\{S+ B:B\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))^+ \text{ is SP},\, R(B)\subset {\cal W},\, \text{\rm rk}([B]_x)\leq d(x)-m(x) \ \text{for a.e. } x\in \mathbb{T}^k \Big\} \,. $$
$\triangle$ \end{fed}
\begin{teo}[Appendix of \cite{BMS15}]\label{estructdelU} \rm Consider the notations from Definition \ref{el conjunto U}. Given a measurable function $\mu:\mathbb{T}^k\rightarrow \ell^1(\mathbb{N})^+$ the following are equivalent:
\begin{enumerate}
\item There exists $C\in U^{\cal W}_m(S)$ such that $\lambda(\hat C_x)=\mu(x)$, for a.e. $x\in \mathbb{T}^k$;
\item For a.e. $x\in\mathbb{T}^k\setminus \text{Spec}({\cal W})$ we have that $\mu(x)=0$; for a.e. $x\in \text{Spec}({\cal W})$ we have that $\mu_i(x)=0$ for $i\geq d(x)+1$ and
\begin{enumerate}
\item in case $m(x)\leq 0$, $\mu_i(x)\geq \lambda_i([S]_ x)$ for $i\in\mathbb{I}_{d(x)}\,$;
\item in case $m(x)\in\mathbb{I}_{d(x)}\,$, $\mu_i(x)\geq \lambda_i([S] _x)$ for $i\in\mathbb{I}_{d(x)}\,$ and \begin{equation} \lambda_i([S]_x)\geq \mu_{(d(x)-m(x))+i}(x) \peso{for} i\in\mathbb{I}_{m(x)}\ . \tag*{
$\square$} \end{equation}
\end{enumerate}
\end{enumerate} \end{teo}
\begin{rem} We point out that Theorem \ref{estructdelU} is obtained in terms of a natural extension of the Fan-Pall interlacing theory from matrix theory, to the context of measurable fields of positive matrices (see \cite[Appendix]{BMS15}); we also notice that the result is still valid for fields (of vectors and operators) defined in measurable subsets of $\mathbb{T}^k$. The original motivation for considering the additive model above was the fact that it describes the set of frame operators of oblique duals of a fixed frame. In the present setting, this additive model will also allow us to link the sequences of eigensteps with the construction of SG Bessel sequences with prescribed fine structure.
$\triangle$ \end{rem}
\begin{teo}\label{teo todo eigen echachi} Let ${\cal W}$ be a FSI subspace and let $\lambda_i,\, \alpha_i:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ for $i\in\mathbb {I} _n$ be measurable functions satisfying the admissibility conditions Ad.1 and Ad.2 in Remark \ref{rem hay eigensteps}. Consider a sequence of eigensteps $((\lambda_{i,j}(\cdot))_{i\in\mathbb{I}_{j}})_{j\in\mathbb {I} _n}$ for $(\lambda,\alpha)$. Then, there exists $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ such that $E(\mathcal F)$ is a Bessel sequence and $((\lambda_{i,j}(\cdot))_{i\in\mathbb{I}_{j}})_{j\in\mathbb {I} _n}$ is the sequence of eigensteps associated to $\mathcal F$. \end{teo}
\begin{proof} First notice that both the assumptions as well as the properties of the objects that we want to construct are checked point-wise; hence, by considering a convenient partition of $\mathbb{T}^k$ into measurable sets we can assume (without loss of generality) that $d(x)=d\geq 1$, for $x\in\mathbb{T}^k$. Now, we argue by induction on $j$. Notice that by hypothesis for $i=j=1$, we see that $\lambda_{1,1}(x)=\alpha_1(x)$ for a.e. $x\in\mathbb{T}^k$. Let $f_1\in{\cal W}$ be such that $\|\Gamma f_1(x)\|^2=\alpha_1(x)$ for a.e. $x\in\mathbb{T}^k$; indeed, we can take $f_1\in{\cal W}$ determined by the condition $\Gamma f_1(x)=\alpha^{1/2}(x)\ \Gamma h_1(x)$, where $\{h_i\}_{i\in\mathbb{I}_{\ell}}$ are the quasi orthogonal generators for the orthogonal sum decomposition of ${\cal W}$ as in Theorem \ref{teo:la descom de Bo}. Then, by construction $\|\Gamma f_1(x)\|^2=\alpha_1(x)$ and $\lambda_{1,1}(x)=\|\Gamma f_1(x)\|^2=\lambda_1([S_{E(f_1)}]_x)$ for a.e. $x\in \mathbb{T}^k$.
\noi Assume that for $j\in\mathbb{I}_{n-1}$ we have constructed $\mathcal F_j=\{f_i\}_{i\in\mathbb{I}_{j}}\in{\cal W}^j$ such that
\begin{equation}\label{eq induc} \lambda_{i,\ell}(x)=\lambda_i([S_{E(\mathcal F_\ell)}]_x) \peso{for} i\in\mathbb{I}_{\ell} \ , \ \ \ell\in\mathbb{I}_{j}\ \ \text{and a.e. } x\in\mathbb{T}^k\,. \end{equation} We now construct $f_{j+1}$ as follows: set $\mu_i=\lambda_{i,j+1}$ for $i\in\mathbb{I}_{j+1}$ and $\mu_i=0$ for $i>j+1$; set $\mu=(\mu_i)_{i\in\mathbb{N}}:\mathbb{T}^k\rightarrow \ell^1(\mathbb{N})^+$ which is a measurable function. Further, set $S=S_{E(\mathcal F_j)}\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))^+$ which is a SP operator with $R(S)\subset {\cal W}$ and set $m(x)=m=d-1$. Moreover, by taking $\ell=j$ in Eq. \eqref{eq induc} above we see that $\lambda_i([S]_x)=\lambda_{i,j}(x)$ for $i\in\mathbb{I}_{j}$ and $\lambda_i([S]_x)=0$ for $i\geq j+1$, for a.e. $x\in\mathbb{T}^k$ .
\noi By hypothesis, we see that $\lambda_{i,j+1}\leq \lambda_{i,j+2}\leq \ldots\leq \lambda_{i,n}=\lambda_i$; since the admissibility conditions in Remark \ref{rem hay eigensteps} hold, we conclude that $\mu_i=\lambda_{i,j+1}=0$ whenever $i\geq d+1$. On the other hand, since $d-m=1$ we see that the conditions in item 2. in Theorem \ref{estructdelU} can be put together as the interlacing relations $$ \mu_i(x)\geq \lambda_i([S]_x)\geq\mu_{i+1}(x) \peso{for} i\in\mathbb{I}_{j} \ \text{ and a.e. }\ x\in\mathbb{T}^k\,, $$ which hold by hypothesis (see condition 1. in Definition \ref{defi measiegen}); therefore, by Definition \ref{el conjunto U} and Theorem \ref{estructdelU}, there exists a SP operator $B\in L(L^2(\mathbb{R}}\def\C{\mathbb{C}^k))^+$ such that $R(B)\subset {\cal W}$, $\text{\rm rk}([B] _x )\leq 1$ for a.e. $x\in\mathbb{T}^k$ and such that $\lambda_i([S+B]_x)=\mu_i(x)=\lambda_{i,j+1}(x)$ for $i\in\mathbb{I}_{j+1}\,$, for a.e. $x\in\mathbb{T}^k$. The previous conditions on $B$ imply that there exists $f_{j+1}\in{\cal W}$ such that $B=S_{E(f_{j+1})}$; indeed, $f_{j+1}$ is such that it satisfies: $\Gamma f_{j+1}(x)\otimes \Gamma f_{j+1}(x)=[B] _x$ for a.e. $x\in\mathbb{T}^k$.
Finally, if we set $\mathcal F_{j+1}=\{f_i\}_{i\in\mathbb{I}_{j+1}}$ then $S_{E(\mathcal F_{j+1})}=S_{E(\mathcal F_{j})}+S_{E(f_{j+1})}=S+B$ and hence $\lambda_{i,j+1}(x)= \lambda_i([S_{E(\mathcal F_{j+1})}]_x)$ for $i\in\mathbb{I}_{j+1}$ and a.e. $x\in\mathbb{T}^k$. This completes the inductive step. \end{proof}
\noi We end this section with the following remark. With the notations and terminology in Theorem \ref{teo todo eigen echachi}, notice that the constructed sequence $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}$ is such that its fine structure is prescribed by $(\lambda,\,\alpha)$: indeed, $\lambda_i([S_{E(\mathcal F)}]_x)=\lambda_{i,\,n}(x)=\lambda_i(x)$ and
$\|\Gamma f_i(x)\|^2=\alpha_i(x)$ for $i\in\mathbb {I} _n$ and a.e. $x\in\mathbb{T}^k$ (this last fact can be checked using induction and item 2. in Definition \ref{defi measiegen}). That is, the measurable eigensteps provide a detailed description of Bessel sequences $E(\mathcal F)$ with prescribed fine structure.
\section{An application: optimal frames with prescribed norms for FSI subspaces}\label{sec opti frames with prescribed norms}
In order to describe the main problem of this section we consider the following: \begin{fed}\label{defi bal} \rm Let ${\cal W}$ be a FSI subspace of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and let $\alpha=(\alpha_i)_{i\in\mathbb {I} _n}\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$. We let \begin{equation}
\mathfrak {B}_\alpha({\cal W})=\{\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n:\ E(\mathcal F) \ \text{is a Bessel sequence }, \ \|f_i\|^2=\alpha_i\,,\ i\in\mathbb {I} _n\}\ , \end{equation} the set of SG Bessel sequences in ${\cal W}$ with norms prescribed by $\alpha$.
$\triangle$ \end{fed}
\noi
Notice that the restrictions on the families $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in \mathfrak {B}_\alpha({\cal W})$
(namely $\|f_i\|^2=\alpha_i$ for $i\in\mathbb {I} _n$) are of a {\it global} nature. Our problem is to describe those $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ such that the encoding schemes associated to their corresponding Bessel sequences $E(\mathcal F)$ are as stable as possible. Ideally, we would search for sequences $\mathcal F$ such that $E(\mathcal F)$ are tight frames for ${\cal W}$; yet, Theorem \ref{teo sobre disenio de marcos} shows that there are obstructions to the existence of such sequences (see Corollary \ref{coro tight 2} below).
\noi By a simple re-scaling argument, we can assume that $\sum_{i\in\mathbb {I} _n}\alpha_i=1$; then Corollary \ref{cororo1} (see also \cite[Theorem 3.9.]{BMS15}) shows that if there exists $\mathcal F_0\in \mathfrak {B}_\alpha({\cal W})$ such that $E(\mathcal F_0)$ is a tight frame for ${\cal W}$ then $E(\mathcal F_0)$ is a minimizer in $\mathfrak {B}_\alpha({\cal W})$ of every frame potential $P_\varphi^{\cal W}$ for any convex function $\varphi\in\convf$ and $P_\varphi^{\cal W} (E(\mathcal F_0))= C_{\cal W} \ \varphi(C_{\cal W}^{-1})$; moreover, in case $\varphi\in\convfs$ is a strictly convex function, then every such $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ for which $P_\varphi^{\cal W}(E(\mathcal F))=C_{\cal W} \ \varphi(C_{\cal W}^{-1})$ is such that $E(\mathcal F)$ is a tight frame. This suggests that in the general case, in order to search for $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ such that the encoding schemes associated to their corresponding Bessel sequences $E(\mathcal F)$ are as stable as possible, we could study the minimizers in $\mathfrak {B}_\alpha({\cal W})$ of the convex potential $P_\varphi^{\cal W}$ associated to a strictly convex function $\varphi\in\convfs$.
\noi Therefore, given $\varphi\in\convf$, in what follows we show the existence of finite sequences $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$ such that $$ P_\varphi^{\cal W}(E(\mathcal F^{\rm op}))=\min\{P_\varphi^{\cal W}(E(\mathcal F)): \ \mathcal F\in \mathfrak {B}_\alpha({\cal W})\}\,. $$ Moreover, in case $\varphi\in\convfs$ then we describe the fine spectral structure of the frame operator of $E(\mathcal F^{\rm op})$. In case $\varphi(x)=x^2$, our results extend some results from \cite{BF,CKFT,MR10} for the frame potential to the context of SG Bessel sequences lying in a FSI subspace ${\cal W}$.
\noi Let us fix some general notions and notation for future reference: \begin{notas}\label{nota impor2}
In what follows we consider: \begin{enumerate} \item A FSI subspace ${\cal W}\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$; \item $d(x)=\dim J_{\cal W}(x)\leq \ell\in\mathbb{N}$, for a.e. $x \in \mathbb{T}^k$;
\item The Lebesgue measure on $\mathbb{R}}\def\C{\mathbb{C}^k$, denoted $|\cdot|$ ; $Z_i=d^{-1}(i)\subseteq \mathbb{T}^k$ and $p_i=|Z_i|$, $i\in\mathbb{I}_{\ell}\,$.
\item The spectrum of ${\cal W}$ is the measurable set $\text{Spec}({\cal W}) = \bigcup_{i\in\mathbb{I}_{\ell}} Z_i = \{x\in \mathbb{T}^k: d(x)\neq 0\}$. \end{enumerate}
$\triangle$ \end{notas}
\subsection{The uniform dimension case}\label{subse uniform}
Consider the Notations \ref{nota impor2}. In this section we obtain the fine spectral structure of minimizers of convex potentials in $\mathfrak B_\alpha({\cal W})$ under the assumption that $d(x)=d$ for a.e. $x\in\text{Spec}({\cal W})$. In order to deal with this particular case, we recall some notions and constructions from \cite{BMS15}.
\begin{rem}[Waterfilling in measure spaces]\label{recordando waterfilling} Let $(X,\mathcal{X},\mu)$ denote a probability space and let $L^\infty(X,\mu)^+ = \{g\in L^\infty(X,\mu): g\ge 0\}$. Recall that for $f\in L^\infty(X,\mu)^+ $ and $c \geq \infes f\geq 0$ we consider the {\it waterfilling} of $f$ at level $c$, denoted $f_c\in L^\infty(X,\mu)^+$, given by $f_c= \max\{f,\,c\}=f + (c-f)^+$, where $g^+$ denotes the positive part of a real function $g$. Recall the decreasing rearrangement of non-negative functions defined in Eq. \eqref{eq:reord}. It is straightforward to check that if \begin{equation}\label{eq lem reord} s_0= \mu\{x\in X:\ f(x)>c\} \peso{then} f_c^*(s)=\left\{
\begin{array}{ccc}
f^*(s) & \text{if} & 0\leq s <s_0 \,; \\
c & \text{if} & s_0\leq s \leq 1\, .
\end{array}
\right.
\end{equation} We further consider $\phi_f: [\infes f, \infty)\to \mathbb{R}}\def\C{\mathbb{C}_+$ given by $$ \phi_f(c)=\int_X f_c\ d\mu= \int_X f(x) + (c-f(x))^+\ d\mu(x)\,. $$ Then, it is easy to see that: \begin{enumerate} \item $\phi_f(\infes f)=\int_X f\ d\mu$ and $\lim _{c\to +\infty} \phi_f(c)= +\infty$; \item $\phi_f$ is continuous and strictly increasing. \end{enumerate} Hence, for every $v\geq \int_X f\ d\mu$ there exists a unique $c=c(v)\geq \infes f$ such that $\phi_f(c)=v$. With the previous notations then, by \cite[Theorem 5.5.]{BMS15} we get that if $h\in L^\infty(X,\mu)^+ $ is such that \begin{equation}\label{eq teo 5.5} f\leq h \peso{and} v\leq \int_X h\ d\mu \peso{then} f_{c(v)}\prec_w h\ .\end{equation}
$\triangle$ \end{rem}
\begin{lem}\label{lem: wat er filling} Let $(X,\mathcal{X},\mu)$ denote a probability space and let $f,\,g\in L^\infty(X,\mu)^+$ be such that $f\prec_w g$. Let $c,\,d\geq 0$ be such that $\int_X f_c\ d\mu=\int_X g_d\ d\mu$, where $f_c$ and $g_d$ denote the waterfillings of $f$ and $g$ at levels $c$ and $d$ respectively. Then $f_c\prec g_d$ in $(X,\mu)$. \end{lem} \begin{proof} Set $s_0=\mu\{x\in X:\ f(x)>c\}\in[0,1]$; notice that by construction $g\leq g_d$ in $X$ so that, by Remark \ref{rem:prop rear elem}, $g^*\leq (g_d)^*$ in $[0,1]$. Hence, for every $s\in[0,s_0]$ we have \begin{equation}\label{eq desi11} \int_0^s(f_c)^*\ dt=\int_0^s f^*\ dt\leq \int_0^s g^*\ dt\leq \int_0^s (g_d)^*\ dt\,. \end{equation} On the other hand, $$ \int_0^{s_0} (g_d)^* \ dt\geq \int_0^{s_0} g^* \ dt\geq \int_0^{s_0} f^* \ dt \implies \omega \ \stackrel{\mbox{\tiny{def}}}{=}\ \int_0^{s_0} (g_d)^* \ dt-\int_0^{s_0} f^* \ dt\geq 0\, .$$ Using Remark \ref{recordando waterfilling} and the hypothesis we get that $$ \int_0^{s_0} f^* \ dt + (1-s_0) \, c = \int_0^1 f_c^* \ dt \stackrel{\eqref{reor int}}{=}\int_X f_c\ d\mu=\int_X g_d\ d\mu \stackrel{\eqref{reor int}}{=} \int_0^{s_0} (g_d)^* \ dt + \int_{s_0}^1 (g_d)^* \ dt $$ $$ \implies \quad \quad (1-s_0) \, c=\int_{s_0}^1 \Big[ \ (g_d)^* + \frac{\omega}{1-s_0} \ \Big]\ dt\,.$$ Thus, by \cite[Lemma 5.3.]{BMS15} we get that for $s\in[s_0\, , \, 1]$: $$ (s-s_0)\,c\leq \int_{s_0}^s \Big[ \ (g_d)^* + \frac{\omega}{1-s_0} \ \Big]\ dt
\leq \int_{s_0}^s (g_d)^* \ dt+ \omega\ . $$ This last inequality and Remark \ref{recordando waterfilling} show that for $s\in[s_0\, , \, 1]$, \begin{equation} \label{eq desi22} \int_0^s (f_c)^*\ dt= \int_0^{s_0} (g_d)^*\ dt-\omega + (s-s_0)\, c\leq \int_0^{s_0} (g_d)^*\ dt + \int_{s_0}^s (g_d)^*\ dt\,. \end{equation} The lemma is a consequence of Eqs. \eqref{eq desi11} and \eqref{eq desi22}. \end{proof}
\begin{rem}\label{rem: sobre medidas}
Let $(Z\, , \, \mathcal Z\, , \, |\, \cdot \,|)$ be a (non-zero) measure subspace of $(\mathbb{T}^k\, , \, \mathcal B\, , \, |\, \cdot \,|)$ and consider $(\mathbb{I}_r, \mathcal P(\mathbb{I}_r),\#(\cdot))$, i.e., $\mathbb{I}_r$ endowed with the counting measure. In what follows we consider the product
space $X\ \stackrel{\mbox{\tiny{def}}}{=}\ Z\times \mathbb{I}_r$ endowed with the product measure $\mu\ \stackrel{\mbox{\tiny{def}}}{=}\ |\cdot |\times \#(\cdot)$.
$\triangle$ \end{rem}
\begin{lem}\label{lem utilisima} Consider the notations in Remark \ref{rem: sobre medidas} and let $\alpha: Z\rightarrow \mathbb{R}}\def\C{\mathbb{C}^r$ be a measurable function. Let $\breve\alpha:X\rightarrow \mathbb{R}}\def\C{\mathbb{C}$ be given by $$ \breve \alpha(x,i)=\alpha_i(x) \peso{for} x\in Z \peso{and} i\in\mathbb{I}_r\ . $$ Then $\breve \alpha$ is a measurable function and we have that: \begin{enumerate} \item If $\varphi\in\convf$ then
$\int_X \varphi\circ \breve \alpha \ d\mu = \sum\limits_{i\in\mathbb{I}_r} \int_{Z} \varphi(\alpha_i(x))\ dx \ .$ \item Let $\beta: Z\rightarrow \mathbb{R}}\def\C{\mathbb{C}^r$ be a measurable function and let $\breve \beta:X\rightarrow \mathbb{R}}\def\C{\mathbb{C}$ be constructed analogously. Then $$ \alpha(x)\prec\beta(x) \peso{for a.e.} x\in Z \ \implies \ \ \breve \alpha\prec \breve\beta $$
in the probability space $(X,\mathcal X,\tilde \mu)$, where $\tilde \mu=(r\cdot|Z|)^{-1}\,\mu$. \item Similarly, $\alpha(x)\prec_w\beta(x)$ for a.e. $x\in Z$ implies that $\breve \alpha\prec_w \breve \beta$ in $(X,\mathcal X,\tilde \mu)$. \end{enumerate} \end{lem} \begin{proof} The proof of the first part of the statement is straightforward. In order to see item 2., notice that if $\varphi\in\convf$ then $\alpha(x)\prec\beta(x)$ implies that $\sum_{i\in\mathbb{I}_r}\varphi(\alpha_i(x))\leq \sum_{i\in\mathbb{I}_r}\varphi(\beta_i(x))$ for a.e. $x\in Z$. Then, using item 1. we get that $$
\int_X \varphi\circ \breve \alpha\ d\tilde \mu=(r\cdot |Z|)^{-1} \int_{Z} \sum_{i\in\mathbb{I}_r}\varphi(\alpha_i(x)) \ dx
\leq (r\cdot |Z|)^{-1} \int_{Z} \sum_{i\in\mathbb{I}_r}\varphi(\beta_i(x))\ dx = \int_X \varphi\circ \breve \beta\ d\tilde \mu\ . $$ Since $\varphi\in\convf$ is arbitrary, Theorem \ref{teo porque mayo} shows that $\breve \alpha\prec \breve \beta$. Item 3. follows using similar arguments, based on the characterization of submajorization in terms of integral inequalities involving non-decreasing convex functions given in Theorem \ref{teo porque mayo} (see also \cite{Chong}). \end{proof}
\noi The following is the first main result of this section.
\begin{teo}[Existence of optimal sequences in $\mathfrak {B}_\alpha({\cal W})$]\label{teo dim unif}
Consider the Notations \ref{nota impor2}. Let $\alpha=(\alpha_i)_{i\in\mathbb {I} _n}\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$ and assume that ${\cal W}$ is such that $d(x)=d$ for a.e. $x\in \text{Spec}({\cal W})$; set $r=\min\{n,d\}$. Let $p=p_d=|\text{Spec}({\cal W})|$. Then there exist $c=c(\alpha,\,d,\,p)\geq 0$ and $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$ such that: \begin{enumerate} \item For a.e. $x\in \text{Spec}({\cal W})$ we have that \begin{equation}\label{eq defi la op unif dim} \lambda_j([S_{E(\mathcal F^{\rm op})}]_x) =\left\{
\begin{array}{ccc}
\max\{\frac{\alpha_j}{p}\, , \, c\} & \text{if} & j\in\mathbb{I}_{r} \ ; \\
0 & \text{if} & r+1 \le j\le d\ .
\end{array}
\right.
\end{equation}
In particular, if $d\leq n$ (i.e. $r=d$) then $E(\mathcal F^{\rm op})$ is a frame for ${\cal W}$. \item For every $\varphi\in\convf$ and every $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ then \begin{equation}\label{eq prop c}
p\cdot \sum_{j\in\mathbb{I}_{r}} \varphi (\max\{\frac{\alpha_j}{p}\, , \, c\}) + p \, (d-r)\, \varphi(0) =P_\varphi^{\cal W}(E(\mathcal F^{\rm op}))\leq P_\varphi^{\cal W}(E(\mathcal F)) \,. \end{equation} \end{enumerate} \end{teo} \begin{proof} Consider $\text{Spec}({\cal W})$ as a (non-zero, otherwise the result is trivial) measure subspace of the $k$-torus endowed with Lebesgue measure. Then, we consider
$X=\text{Spec}({\cal W})\times \mathbb{I}_r$ endowed with the product measure $\mu=|\cdot|\times \#(\cdot)$, where $\#(\cdot)$ denotes the counting measure on $\mathbb{I}_r$ (as in Remark \ref{rem: sobre medidas}). We also consider the normalized measure $\tilde \mu=\frac{1}{p\cdot r}\ \mu$ on $X$.
Let $\mathcal F=\{f_j\}_{j\in\mathbb {I} _n}\in \mathfrak {B}_\alpha({\cal W})$ and set $\beta_j(x)=\|\Gamma f_j(x)\|^2$ for $x\in \text{Spec}({\cal W})$ and $j\in\mathbb {I} _n\, $. Notice that \begin{equation}\label{ecua betaj}
\int_{\text{Spec}({\cal W})} \beta_j(x)\ dx=\|f_j\|^2=\alpha_j\ , \peso{for} j\in\mathbb {I} _n\ . \end{equation} Let $\breve\gamma\, , \, \breve\beta:X\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ be the measurable functions determined by $$ \breve \gamma (x,j)=\frac{\alpha_j}{p} \peso{and} \breve \beta(x,j)=\beta_j(x) \peso{for} x\in \text{Spec}({\cal W}) \peso{and} j\in\mathbb{I}_{r} \ . $$ Consider the map $D:L^\infty(X,\tilde \mu) \rightarrow L^\infty(X,\tilde \mu)$ given by $$ D(h)(x,j)=r\cdot\int_{\text{Spec}({\cal W})\times \{j\}} h \ d\tilde \mu = \frac 1p \ \int_{\text{Spec}({\cal W})} h(x,j) \ dx \peso{for} x\in \text{Spec}({\cal W}) \peso{and} j \in \mathbb{I}_{r}\ . $$ Then, it is easy to see that $D$ is positive, unital and trace preserving i.e. $D$ is a doubly stochastic map; moreover, by Eq. \eqref{ecua betaj}, $D(\breve \beta)=\breve \gamma$ and by Theorem \ref{teo porque mayo} we conclude that $\breve \gamma\prec \breve \beta\,$.
\noi Now, consider the measurable vector-valued function $\beta^\downarrow(x)=(\beta^\downarrow_j(x))_{j\in\mathbb {I} _n}$ obtained by re-arrangement of the entries of the vector $\beta(x)=(\beta_j(x))_{j\in\mathbb {I} _n}$, for $x\in \text{Spec}({\cal W})$ independently. By construction we get the submajorization relations $(\beta_j(x))_{j\in\mathbb{I}_{r}}\prec_w (\beta^\downarrow_j(x))_{j\in\mathbb{I}_{r}}$ for every $x\in \text{Spec}({\cal W})$ (notice that we are considering just the first $r$ entries of these $n$-tuples).
\noi Thus, if we consider the measurable function $\breve {\beta^\downarrow} :X\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ determined by $\breve{\beta^\downarrow}(x,j)=\beta^\downarrow_j(x)$ if $x\in \text{Spec}({\cal W})$ and $j\in\mathbb{I}_{r}\,$, then Lemma \ref{lem utilisima} shows that $\breve\beta \prec_w \breve{\beta ^\downarrow}$ in $(X,\tilde \mu)$. By transitivity, we conclude that $\breve \gamma\prec_w \breve{\beta^\downarrow}$. By Remark \ref{recordando waterfilling} there exists a unique $b\geq \text{ess-}\inf\limits_{x\in X} \breve{\beta^\downarrow} (x)$ such that the waterfilling of $\breve{\beta^\downarrow}$ at level $b$, denoted $\breve{\beta^\downarrow}_b$, satisfies $$ \int_X \breve{\beta^\downarrow} _b \ d\tilde \mu=(r\cdot p)^{-1}\, \sum_{i\in\mathbb {I} _n} \alpha_i \geq \int_X \breve{\beta^\downarrow} \ d\tilde \mu \ . $$ Similarly, let $c\geq \text{ess-}\inf\limits_{x\in X} \,\breve \gamma(x)$ be such that the waterfilling of $\breve \gamma$ at level $c$, denoted $\breve \gamma_c\,$, satisfies $$ \int_X \breve\gamma_c \ d\tilde \mu=(r\cdot p)^{-1}\, \sum_{i\in\mathbb {I} _n} \alpha_i\geq \int_X \breve\gamma \ d\tilde \mu\ . $$ Therefore, by Lemma \ref{lem: wat er filling}, we see that \begin{equation}\label{eq relac fc fbetaparaabajo} \breve\gamma_c\prec \breve{\beta^\downarrow} _b\peso{in} (X,\tilde \mu)\ . \end{equation} By Lemma \ref{lem spect represent ese} there exist measurable functions $\lambda_j:\mathbb{T}^k\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ for $j\in\mathbb{I}_{d}$ such that we have a representation of $[S_{E(\mathcal F)}]_x=S_{\Gamma \mathcal F(x)}$ as in Eq. \eqref{lem repre espec S}, in terms of some measurable vector fields $v_j:\mathbb{T}^k\rightarrow \ell^2(\mathbb{Z}^k)$ for $j\in\mathbb{I}_d$, such that $\{v_j(x)\}_{j\in\mathbb{I}_d}$ is a ONB of $J_{\cal W}(x)$ for a.e. $x\in \text{Spec}({\cal W})$; indeed, in this case $\lambda_j(x)=0$ for $j\geq r+1$ and a.e. $x\in \text{Spec}({\cal W})$.
\noi
If we let $e(x)\geq 0$ be determined by the condition $$ \sum_{i\in\mathbb{I}_r}\max\{\beta^\downarrow _i(x),e(x)\}=\sum_{i\in\mathbb{I}_{r}}\lambda_i(x)\ \Big(\, =\sum_{i\in\mathbb{I}_{d}}\lambda_i(x)\, \Big) \ , \peso{for a.e.} x\in \text{Spec}({\cal W}) $$ then by \cite{MR10} (also see \cite{MRS13,MRS14b,MRS14}) we have that \begin{equation}\label{eq rel mayo MR} (\delta_i(x))_{i\in\mathbb{I}_{r}}\ \stackrel{\mbox{\tiny{def}}}{=}\ (\max\{ \beta^\downarrow_i(x),\, e(x)\} )_{i\in\mathbb{I}_{r}}\prec (\lambda_i(x))_{i\in\mathbb{I}_{r}} \ , \peso{for a.e.} x\in \text{Spec}({\cal W})\,. \end{equation} Notice that the vector $(\delta_i(x))_{i\in\mathbb{I}_{r}}$ can be considered as the (discrete) waterfilling of the vector $(\beta^\downarrow_j(x))_{j\in\mathbb{I}_{r}}$ at level $e(x)$, for $x\in \text{Spec}({\cal W})$. If $\breve\delta \, , \, \breve\lambda:X\rightarrow \mathbb{R}}\def\C{\mathbb{C}_+$ are the measurable functions given by $$ \breve\delta(x,j)=\delta_j(x) \peso{and} \breve\lambda(x,j)=\lambda_j(x) \peso{for} x\in \text{Spec}({\cal W}) \peso{and} j\in\mathbb{I}_{r} $$ then, by Lemma \ref{lem utilisima}, we get that $\breve\delta\prec \breve\lambda$ in $(X,\tilde\mu)$. Notice that by construction, $\breve\delta\geq \breve{\beta^\downarrow} $ and $$\int_X \breve\delta\ d\tilde \mu =(r\cdot p)^{-1}\,\sum_{i\in\mathbb {I} _n}\alpha_i \,.$$ Hence, by Remark \ref{recordando waterfilling}, we get that $\breve{\beta^\downarrow}_b\prec \breve\delta\,$. Putting all the pieces together, we now see that \begin{equation}\label{eq relac major func} \breve \gamma_c\prec \breve{\beta^\downarrow}_b\prec \breve\delta\prec \breve\lambda\ , \peso{in} (X,\tilde\mu)\ . \end{equation} Recall that by construction, we have that \begin{equation}\label{eq la pinta de fc}
\breve \gamma_c(x)=\max\{ \frac{\alpha_j}{p} \, , \, c\} \ , \peso{for} x\in \text{Spec}({\cal W})\times \{j\}\subset X \, , \ j\in\mathbb{I}_{r}\ . \end{equation} Then, it is straightforward to check that \begin{equation}\label{eq. c es el correcto} (r\cdot p)^{-1}\,\sum_{i\in\mathbb {I} _n}\alpha_i=\int_X \breve\gamma_c\ d\tilde\mu= r^{-1} \cdot \sum_{j\in\mathbb{I}_{r}}\max\{ \frac{\alpha_j}{p} \, , \, c\} \implies (\frac{\alpha_j}{p})_{j\in\mathbb {I} _n}\prec (\max\{\frac{\alpha_j}{p} \, , \, c\})_{j\in\mathbb{I}_{r}}\ . \end{equation} Thus, by Theorem \ref{teo sobre disenio de marcos}, there exists a Bessel sequence $\mathcal F^{\rm op}=\{f^{\rm op}_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n$ such that the fine spectral structure $(\lambda_j([S_{E(\mathcal F^{\rm op})}]_x)\,)_{j\in\mathbb{N}}$ satisfies Eq. \eqref{eq defi la op unif dim} and such that
$\|\Gamma f^{\rm op}_i(x)\|^2=\frac{\alpha_i}{p}\,$, for $i\in\mathbb {I} _n\,$, and $x\in \text{Spec}({\cal W})$. In particular, $\|f^{\rm op}_i\|^2=\alpha_i$ for $i\in\mathbb {I} _n\,$, so $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$. If $\varphi\in\convf$ then, by the majorization relations in Eq. \eqref{eq relac major func} and Lemma \ref{lem utilisima}, \begin{eqnarray*}\label{eq desi poten} P_\varphi^{\cal W}(E(\mathcal F^{\rm op}))&=&\int_{\text{Spec}({\cal W})} [ \sum_{j\in\mathbb{I}_{r}}\varphi(\max\{\frac{\alpha_j}{p} \, , \, c\}) + (d-r)\, \varphi(0) ]\ dx= \int_X \varphi\circ \breve \gamma_c\ d\mu + p\,(d-r)\, \varphi(0) \\ &\leq & \int_X \varphi\circ \breve\lambda\ d\mu + p\,(d-r)\, \varphi(0)
=P_\varphi^{\cal W}(E(\mathcal F))\,. \end{eqnarray*} Hence, $\mathcal F^{\rm op}$ satisfies items 1. and 2. in the statement. \end{proof}
\noi The previous result shows that there are indeed structural optimal frames with prescribed norms in the sense that these frames minimize any frame potential within $\mathfrak {B}_\alpha({\cal W})$; along its proof we showed several majorization relations that allow us to prove that the spectral structure of any such structural optimal frame is described by Eq. \eqref{eq defi la op unif dim}.
\begin{teo}[Fine spectral structure of optimal sequences in $\mathfrak {B}_\alpha({\cal W})$] \label{teo struct fina dim hom} With the hypothesis and notations from Theorem \ref{teo dim unif}, assume that $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ is such that there exists $\varphi\in\convfs$ with $P_\varphi^{\cal W}(E(\mathcal F))=P_\varphi^{\cal W}(E(\mathcal F^{\rm op}))$. Then, for a.e. $x\in \text{Spec}({\cal W})$ we have that \begin{equation}\label{eq defi la op unif dim2} \lambda_j([S_{E(\mathcal F)}]_x) =\left\{
\begin{array}{ccc}
\max\{\frac{\alpha_j}{p} \, , \, c\} = \max\{ \beta^\downarrow_j(x)\, ,\, c\}& \text{if} & j\in\mathbb{I}_{r} \ ; \\
0 & \text{if} & r+1 \le j\le d\ ,
\end{array}
\right.
\end{equation} where $\beta^\downarrow_1(x)\geq \ldots\beta^\downarrow_n(x)\geq 0$ are obtained by re-arranging the sequence $$ \beta(x) = \big( \, \beta_1(x) \, , \, \ldots \, , \, \beta_n(x)\,\big)
=\big(\, \|\Gamma f_1(x)\|^2 \, , \, \ldots \, , \, \|\Gamma f_n(x)\|^2 \,\big) \in \mathbb{R}}\def\C{\mathbb{C}^n $$ in non-increasing order, independently for each $x\in\text{Spec}({\cal W})$. \end{teo} \begin{proof} We continue to use the notations and terminology from the proof of Theorem \ref{teo dim unif}. Assume further that $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ is such that there exists $\varphi\in\convfs$ with $$ p\cdot \sum_{j\in\mathbb{I}_{r}} \varphi (\max\{\frac{\alpha_j}{p}\, , \, c\}) + p\,(d-r)\,\varphi(0) = P_\varphi^{\cal W}(E(\mathcal F))\ . $$ Then, using this last fact and Lemma \ref{lem utilisima} we see that $$ (r\cdot p)\,\int_X \varphi\circ \breve \gamma_c\ d\tilde\mu= (r\cdot p)\,\int_X \varphi\circ \breve\lambda\ d\tilde\mu\ . $$ Hence, by Eq. \eqref{eq relac major func} we have that $$ \int_X \varphi\circ \breve \gamma_c\ d\tilde\mu= \int_X \varphi\circ \breve{\beta^\downarrow }_b\ d\tilde\mu = \int_X \varphi\circ \breve\delta\ d\tilde\mu= \int_X \varphi\circ \breve\lambda\ d\tilde\mu\ . $$ Thus, by Proposition \ref{pro int y reo} the functions $\breve \gamma_c,\,\breve{\beta^\downarrow}_b,\,\breve\delta,\,\breve\lambda$ are equimeasurable. On the one hand, Eq. \eqref{eq rel mayo MR}, together with the equality above imply that $\max\{ \beta^\downarrow_j(x)\, , \, e(x)\} =\lambda_j(x)$, for $j\in\mathbb{I}_{r}$ and a.e. $x\in \text{Spec}({\cal W})$ and hence, by construction, $\breve\delta=\breve\lambda\,$. On the other hand, by \cite[Corollary 5.6]{BMS15} we also get that $\breve{\beta^\downarrow} _b=\breve\delta\,$. Therefore, $\breve{\beta^\downarrow} _b=\breve\delta=\breve\lambda\,$; in particular, we get that $\max\{ \beta^\downarrow_j(x)\, , \, b\} =\lambda_j(x)$, for $j\in\mathbb{I}_{r}$ and a.e. $x\in \text{Spec}({\cal W})$.
\noi Notice that, since $\breve \gamma_c$ and $\breve\lambda$ are equimeasurable, we have
$|\breve\lambda^{-1}(\max\{\frac{\alpha_j}{p}\, , \, c\})|=|{\breve \gamma_c} ^{-1}(\max\{\frac{\alpha_j}{p} \, , \, c\})|$ for $j\in\mathbb{I}_{r}\,$; thus, $\breve\lambda$ takes the values $\max\{\frac{\alpha_j}{p},\,c\}$ for $j\in\mathbb{I}_{r}$ (off a zero-measure set). As $\breve\lambda$ and $\breve \gamma_c$ are both induced by the vector-valued functions $$ \text{Spec}({\cal W})\ni x\mapsto (\max\{\frac{\alpha_j}{p} \, , \, c\})_{j\in\mathbb{I}_{r}}\in(\mathbb{R}}\def\C{\mathbb{C}_+^r)^\downarrow \peso{and} \text{Spec}({\cal W})\ni x\mapsto (\lambda_j(x))_{j\in\mathbb{I}_{r}}\in(\mathbb{R}}\def\C{\mathbb{C}_+^r)^\downarrow $$ respectively, we conclude that $$ (\max\{\frac{\alpha_j}{p}\, , \, c\})_{j\in\mathbb{I}_{r}}=(\lambda_j(x))_{j\in\mathbb{I}_{r}} =(\max\{ \beta^\downarrow_j(x)\, , \, b\} )_{j\in\mathbb{I}_{r}} \ , \peso{for} x\in \text{Spec}({\cal W})\ . $$ From this last fact, we see that we can set $b=c$ and the result follows. \end{proof}
\begin{rem}\label{interpret de prop dim unif} Consider the notations and terminology from Theorem \ref{teo dim unif}. We point out that there is a simple formula for the constant $c$. Indeed, notice that if $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$ is the structural solution of the optimization problem considered in Theorem \ref{teo dim unif} then $$ \sum_{j\in\mathbb{I}_r}\lambda_j([S_{E(\mathcal F^{\rm op})}]_ x)
=\tr([S_{E(\mathcal F^{\rm op})}]_ x)=\sum_{j\in\mathbb {I} _n}\|\Gamma f^{\rm op}_j(x)\|^2\peso{for a.e.} x\in\mathbb{T}^k $$ Therefore, \begin{equation}\label{formu c}
\sum_{i\in\mathbb{I}_r}\max\{\frac{\alpha_i}{p} \, , \, c\}=\frac 1p \ \sum_{j\in\mathbb {I} _n}\alpha_j\ , \end{equation} which shows that $c$ is obtained by the previous discrete waterfilling condition.
$\triangle$ \end{rem}
\noi Tight frames play a central role in applications. On the one hand, they give rise to simple reconstruction formulas; on the other hand, they have several robustness properties related to numerical stability of the encoding-decoding scheme that they induce. It is therefore important to have conditions that assure the existence of tight frames with prescribed norms: in the finite dimensional context (i.e. finite frame theory) this problem is solved in \cite{CKFT} in terms of the so-called fundamental inequality. As a consequence of Remark \ref{interpret de prop dim unif}, we obtain conditions for the existence of tight SG frames with norms given by a finite sequence of positive numbers, in the uniform dimensional case.
\begin{cor} \label{coro tight 2} Consider the notations and hypothesis of Theorem \ref {teo dim unif}. In the uniform dimensional case (so in particular, $d(x)=d$ for a.e. $x\in \text{Spec}({\cal W})\,$), we have that $$ \text{\rm there exist {\bf tight} frames in $\mathfrak {B}_\alpha({\cal W})$ } \ \iff \ \ d=r\le n \peso{ \rm and} d \cdot\alpha_1 \le \sum_{j\in\mathbb {I} _n}\alpha_j\ . $$
\end{cor} \proof It is a direct consequence of Eqs. \eqref{eq defi la op unif dim} and \eqref{formu c}. \qed \subsection{Existence and structure of $P^{\cal W}_\varphi$-minimizers in $\mathfrak {B}_\alpha({\cal W})$: the general case}\label{subsec gral mi gral}
It turns out that Theorem \ref{teo dim unif} allows us to reduce the study of the spectral structure of minimizers of convex potentials in FSI subspaces with norm restrictions to a finite dimensional model. Indeed, consider the Notations \ref{nota impor2} and, for the sake of simplicity, assume that $p_i>0$ for every $i\in\mathbb{I}_{\ell}\,$. Consider $\alpha\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$ and let $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$. For each $i\in\mathbb{I}_\ell$ let ${\cal W}_i\subset L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ be the closed FSI subspace whose fibers coincide with those of ${\cal W}$ in $Z_i=d^{-1}(i)$ and are the zero subspace elsewhere, and let $\mathcal F_i=\{f_{i,j}\}_{j\in\mathbb {I} _n}\in{\cal W}_i^n$ be determined by $$ \Gamma f_{i,j}(x)=\chi_{Z_i}(x)\ \Gamma f_j(x) \peso{for a.e.} x\in\mathbb{T}^k \peso{and} j\in\mathbb {I} _n\ , $$ where $\chi_Z$ denotes the characteristic function of a measurable set $Z\subset\mathbb{T}^k$. Fix a convex function $\varphi\in\convf$. Since each ${\cal W}_i$ is also a uniform FSI, it satisfies the hypothesis of Theorem \ref{teo dim unif}. Then we conclude that for each $i\in\mathbb{I}_\ell$ there exists $\mathcal F_i^{\rm dis}=\{f_{i,j}^{\rm dis}\}_{j\in\mathbb {I} _n}\in{\cal W}_i ^n$ such that $$
\|f_{i,j}^{\rm dis}\|^2=\|f_{i,j}\|^2 \peso{for} j\in\mathbb{I}_n \peso{and} P^{{\cal W}_i}_\varphi (E(\mathcal F_i^{\rm dis}))\leq P^{{\cal W}_i}_\varphi (E(\mathcal F_i)) \peso{for} i\in\mathbb{I}_\ell\ . $$ We can recover the initial family $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}$ by gluing together the families $\mathcal F_i$ for $i\in\mathbb{I}_\ell\,$. Similarly, if we glue the families $\mathcal F_i^{\rm dis}$ we get a family $\mathcal F^{\rm dis}$ (in such a way that $(\mathcal F^{\rm dis})_i=\mathcal F_i^{\rm dis}\in {\cal W}_i^n$ as before, for $i\in\mathbb{I}_\ell$). Notice that $\mathcal F^{\rm dis}\in \mathfrak {B}_\alpha({\cal W})$ since $$
\|f_j^{\rm dis}\|^2=\sum_{i\in\mathbb{I}_\ell}\|f_{i,j}^{\rm dis}\|^2= \|f_j\|^2=\alpha_j \peso{for} j\in\mathbb{I}_n \ , $$ using the fact that the subspaces $\{{\cal W}_i\}_{i\in\mathbb{I}_\ell}$ are mutually orthogonal. Also $$ P^{{\cal W}}_\varphi (E(\mathcal F^{\rm dis}))= \sum_{i\in\mathbb{I}_\ell} P^{{\cal W}_i}_\varphi (E(\mathcal F_i^{\rm dis}))\leq \sum_{i\in\mathbb{I}_\ell} P^{{\cal W}_i}_\varphi (E(\mathcal F_i))= P^{{\cal W}}_\varphi (E(\mathcal F))\ . $$ Now, the fine spectral structure of $\mathcal F_i^{\rm dis}$ is of a discrete nature (as described in Theorem \ref{teo dim unif}). Moreover, this fine structure is explicitly determined in terms of the matrix \begin{equation}\label{las B}
B=(p_i^{-1}\, \|f_{i,j}\|^2)_{i\in \mathbb{I}_\ell,\,j\in\mathbb {I} _n} \in \mathbb{R}_{+}^{\ell\times n} \peso{fulfilling the identity} p^T\,B=\alpha \ , \end{equation} where $p=(p_i)_{i\in\mathbb{I}_\ell}$ and $\alpha=(\alpha_i)_{i\in\mathbb {I} _n}\,$. Notice that the set of all such matrices forms a convex compact subset of $\mathbb{R}_{+}^{\ell\times n}$. The advantage of this approach is that we can use simple tools such as convexity, compactness and continuity in a finite dimensional context, to show existence of optimal spectral structure within our reduced model. Nevertheless, the reduced model has a rather combinatorial nature (see the definition of $\Lambda_{\alpha,\,p}^{\rm op}(\delta)$ below), so we build it in steps.
\begin{notas}\label{muchas nots} In order to simplify the exposition of the next result, we introduce the following notations that are motivated by the remarks above. Let $ m \, , \, n\in\mathbb{N}$: \begin{enumerate} \item Inspired in Eq. \eqref{las B}, for finite sequences $\alpha\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$ and $p=(p_i)_{i\in\mathbb{I}_{m}}\in \mathbb{R}}\def\C{\mathbb{C}_{>0}^m$ we consider the set of weighted partitions $$ \begin{array}{rl} W_{\alpha,\,p} &=\{ B\in \mathbb{R}}\def\C{\mathbb{C}_+^{m\times n } \ : \ p^T\, B = \alpha \, \} \ . \end{array} $$ It is straightforward to check that $W_{\alpha,\,p}$ is a convex compact set. \item \label{item2} Given $d\in \mathbb{N}$ we define the map $L_d : \mathbb{R}}\def\C{\mathbb{C}_+^n \to (\mathbb{R}}\def\C{\mathbb{C}_+^d)^\downarrow$ given by \begin{equation}\label{eq defi gammacd} L_d (\gamma ) \ \stackrel{\mbox{\tiny{def}}}{=}\ \left\{
\begin{array}{ccc}
(\max\{\gamma^\downarrow_i\, , \, c_d(\gamma) \})_{i\in\mathbb{I}_{d}} &
& \text{if } \ d\leq n \\
(\gamma^\downarrow,0_{d-n}) & & \text{if } \ d> n
\end{array} \peso{for every} \gamma \in \mathbb{R}}\def\C{\mathbb{C}_+^n \ ,
\right. \end{equation} where the constant $c_d(\gamma) \in \mathbb{R}}\def\C{\mathbb{C}_+$ is uniquely determined by $\tr \, L_d (\gamma ) = \tr \, \gamma$, in case $d\le n$. By \cite[Prop. 2.3]{MR10} we know that $\gamma\prec L_d (\gamma )\,$, and $L_d (\gamma )\prec \beta$ for every $\beta\in\mathbb{R}}\def\C{\mathbb{C}^d$ such that $\gamma\prec \beta$. \item\label{item3} Let $\delta=(d_i)_{i\in\mathbb{I}_{m}}\in\mathbb{N}^m$ be such that $1 \le d_1< \ldots< d_m$. For each $B \in W_{\alpha,\,p}$ consider \begin{equation}\label{Bdelta} B_{\delta}= \big[ \, L_{d_i} (R_i(B)\,)\, \big]_{i\in\mathbb{I}_{m}} \in \prod_{i\in\mathbb{I}_{m}} (\mathbb{R}}\def\C{\mathbb{C}_+^{d_i})^\downarrow \ , \end{equation} where $R_i(B)\in \mathbb{R}}\def\C{\mathbb{C}_+^n$ denotes the $i$-th row of $B$. Moreover, using the previous notations we introduce the {\it reduced model (for optimal spectra)} $$ \Lambda^{\rm op}_{\alpha,\,p}(\delta)\ \stackrel{\mbox{\tiny{def}}}{=}\ \{ B_\delta :\ B\in W_{\alpha,\,p}\} \subset \prod_{i\in\mathbb{I}_{m}} (\mathbb{R}}\def\C{\mathbb{C}_+^{d_i})^\downarrow\, . $$ In general, $\Lambda^{\rm op}_{\alpha,\,p}(\delta)$ is not a convex set and indeed, the structure of this set seems rather involved; notice that item 2 above shows that the elements of $\Lambda^{\rm op}_{\alpha,\,p}(\delta)$ are $\prec$-minimizers within appropriate sets.
$\triangle$ \end{enumerate} \end{notas}
\noi The following result describes the existence and uniqueness of the solution to an optimization problem in the reduced model for a fixed $\varphi\in\convfs$, which corresponds to the minimization of the convex potential $P^{\cal W}_\varphi$ in $\mathfrak {B}_\alpha({\cal W})$ for a FSI subspace ${\cal W}$ and a sequence of weights $\alpha\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$. The proof of this result is presented in section \ref{subsec reduced} (Appendix).
\begin{teo}\label{teo estruc prob reducido unificado} Let $ m,\, n\in\mathbb{N}$, $\alpha\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$, $p=(p_i)_{i\in\mathbb{I}_{m}}\in \mathbb{R}}\def\C{\mathbb{C}_{>0}^m$ and $\delta=(d_i)_{i\in\mathbb{I}_{m}}\in\mathbb{N}^m$ be such that $1\leq d_1< \ldots< d_m$. If $\varphi\in\convf$ then there exists $\Psi^{\rm op}=[\psi_i^{\rm op}]_{i\in\mathbb{I}_{m}}\in \Lambda^{\rm op}_{\alpha,\,p}(\delta)$ such that $$ \sum_{i\in\mathbb{I}_{m}} {p_i}\,\tr(\varphi(\psi_i^{\rm op}) ) \leq \sum_{i\in\mathbb{I}_{m}} {p_i}\,\tr(\varphi(\psi_i) )
\peso{for every} \Psi=[\psi_i]_{i\in\mathbb{I}_{m}}\in \Lambda^{\rm op}_{\alpha,\,p}(\delta)\,.$$ Moreover: \begin{enumerate} \item If $\varphi\in\convfs$ then such $\Psi^{\rm op}$ is unique; \item If $n\geq d_m$ and $\varphi\in\convfs$ is differentiable in $\mathbb{R}}\def\C{\mathbb{C}_+\,$ then $\Psi^{\rm op}\in \prod_{i\in\mathbb{I}_m} (\mathbb{R}}\def\C{\mathbb{C}_{>0}^{d_i})^\downarrow$. \qed \end{enumerate} \end{teo}
\noi We now turn to the statement and proof of our main result in this section (Theorem \ref{teo min pot fsi generales} below). Hence, we let ${\cal W}$ be an arbitrary FSI subspace of $L^2(\mathbb{R}}\def\C{\mathbb{C}^k)$ and let $\alpha=(\alpha_i)_{i\in\mathbb {I} _n}\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$. Recall that $$
\mathfrak {B}_\alpha({\cal W})=\{\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in{\cal W}^n:\ E(\mathcal F) \ \text{is a Bessel sequence }, \ \|f_i\|^2=\alpha_i\,,\ i\in\mathbb {I} _n\}\,. $$ Given $\varphi\in\convf$, in what follows we show the existence of finite sequences $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$ such that $$ P_\varphi^{\cal W}(E(\mathcal F^{\rm op}))=\min\{P_\varphi^{\cal W}(E(\mathcal F)): \ \mathcal F\in \mathfrak {B}_\alpha({\cal W})\}\,. $$ Moreover, in case $\varphi\in\convfs$ then we describe the fine spectral structure of the frame operator of $E(\mathcal F^{\rm op})$ of any such $\mathcal F^{\rm op}$.
\begin{teo}\label{teo min pot fsi generales}
Let $\alpha=(\alpha_i)_{i\in\mathbb {I} _n}\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$, consider the Notations \ref{nota impor2}
and fix $\varphi\in\convf$. Then, there exists $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$ such that: \begin{enumerate} \item $\lambda_j([S_{E(\mathcal F^{\rm op})}]_x)=:\psi^{\rm op}_{i,j}\in\mathbb{R}}\def\C{\mathbb{C}_+$ is a.e. constant for $x\in Z_i$, $j\in \mathbb{I}_{i}$ and $i\in\mathbb{I}_\ell$;
\item For every $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ we have that $$ \sum_{i\in\mathbb{I}_{\ell}} {p_i}\left( \sum_{j\in\mathbb{I}_i}\varphi(\psi^{\rm op}_{i,j})\right)=P_\varphi^{{\cal W}}(E(\mathcal F^{\rm op}))\leq P_\varphi^{{\cal W}}(E(\mathcal F))\ . $$ \end{enumerate}
If we assume that $\varphi\in\convfs$ then: \begin{enumerate} \item[a)] If $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ is such that $P_\varphi^{{\cal W}}(E(\mathcal F))=P_\varphi^{{\cal W}}(E(\mathcal F^{\rm op}))$ then $S_{E(\mathcal F)}$ has the same fine spectral structure as $S_{E(\mathcal F^{\rm op})}$. \item[b)] If we assume further that $\varphi$ is differentiable in $\mathbb{R}}\def\C{\mathbb{C}_+$ and that
$n\geq i$ for every $i\in\mathbb{I}_\ell$ such that $p_i=|Z_i|>0$, then $E(\mathcal F)$ is a frame for ${\cal W}$. \end{enumerate} \end{teo} \begin{proof}
Without loss of generality, we can assume that there exists an $m\leq \ell $ such that $p_i=|Z_i|>0$ for $i\in\mathbb{I}_{m}$ and $p_i=|Z_i|=0$ for $m+1\leq i\leq \ell$
(indeed, the general case follows by restricting the argument given below to the set of indexes $i\in\mathbb{I}_\ell$ for which $p_i=|Z_i|>0$). We set $p=(p_i)_{i\in\mathbb{I}_m}\in\mathbb{R}}\def\C{\mathbb{C}^m_{>0}$ and consider $\mathcal F=\{f_i\}_{i\in\mathbb {I} _n}\in \mathfrak {B}_\alpha({\cal W})$. For $i\in\mathbb{I}_{m}$ and $j\in\mathbb {I} _n$ set $$
B_{i,j}\ \stackrel{\mbox{\tiny{def}}}{=}\ \frac{1}{ p_i}\,\int_{ Z_i} \|\Gamma f_j(x)\|^2\ dx \implies \sum_{i\in\mathbb{I}_{m}} p_i\,B_{i,j}
=\int_{\text{Spec}({\cal W})}\|\Gamma f_j(x)\|^2\ dx=\|f_j\|^2=\alpha_j \ , $$ for every $j\in\mathbb {I} _n\,$, since $\text{Spec}({\cal W})=\cup_{i\in\mathbb{I}_m}Z_i$. Then $p^T\, B=\alpha $ so using Notations \ref{muchas nots}, $B\in W_{\alpha,\, p}\,$.
\noi Now, fix $i\in\mathbb{I}_{m}$ and consider the weights $\beta^i= p_i\, R_{i}(B)^\downarrow\in\mathbb{R}}\def\C{\mathbb{C}_+^n\,$. For the sake of simplicity we assume, without loss of generality, that $\beta^i=p_i\, R_{i}(B)$. For $i\in\mathbb{I}_m\,$, let ${\cal W}_i$ be the FSI subspace whose fibers coincide with those of ${\cal W}$ inside $Z_i$ and that are the zero subspace elsewhere; hence, Spec$({\cal W}_i)= Z_i$ and $\dim J_{{\cal W}_i}(x)=i$ for $x\in\text{Spec}({\cal W}_i)$. For $i\in\mathbb{I}_m\,$, set $\mathcal F_i=\{f_{i,j}\}_{j\in\mathbb {I} _n}$ where $\Gamma f_{i,j}(x)=\Gamma f_{j}(x)$ for $x\in Z_i$ and $\Gamma f_{i,j}(x)=0$ elsewhere; then $\mathcal F_i\in \mathfrak B_{\beta^i}({\cal W}_i)$ and $$ [S_{E(\mathcal F_i)}]_x=S_{\Gamma \mathcal F_i(x)}=[S_{E(\mathcal F)}]_x \peso{for} x\in Z_i=\text{Spec}({\cal W}_i) \ , \quad i\in\mathbb{I}_m\, .$$ If we consider the minimization of $P_\varphi^{{\cal W}_i}$ in $\mathfrak B_{\beta^i}({\cal W}_i)$ then, Theorem \ref{teo dim unif} and Remark \ref{interpret de prop dim unif} imply that there exists $c_i\geq 0$ such that \begin{equation} \label{desi caso uniforme}
p_i\, \sum_{j\in\mathbb{I}_{i}} \varphi (\max\{B_{i,j}\, , \, c_i\}) \leq P_\varphi^{{\cal W}_i}(E(\mathcal F_i)) \peso{and} \sum_{j\in \mathbb{I}_{i}} \max\{B_{i,j}\, , \, c_i\}=\sum_{j\in\mathbb {I} _n} B_{i,j} \,. \end{equation} Using Notations \ref{muchas nots} and Eq. \eqref{eq. c es el correcto}, we get that for $i\in\mathbb{I}_m$ $$ L_{i}(R_{i}(B))= (\max\{B_{i,j}\, , \, c_i\})_{j\in \mathbb{I}_{i}} \implies B_{\delta}=[(\max\{B_{i,j}\, , \, c_i\})_{j\in \mathbb{I}_{i}}]_{i\in\mathbb{I}_{m}}\in \Lambda^{\rm op}_{\alpha,\,p}(\delta)\,,$$
where $\delta=(i)_{i\in\mathbb{I}_m}$. Notice that ${\cal W}=\oplus_{i\in\mathbb{I}_{m}}{\cal W}_i$ (orthogonal sum) and hence $$ \sum_{i\in\mathbb{I}_{m}} p_i\, \sum_{j\in\mathbb{I}_{i}} \varphi (\max\{B_{i,j}\, , \, c_i\}) \le \sum_{i\in\mathbb{I}_{m}} P_\varphi^{{\cal W}_i}(E(\mathcal F_i))=P_\varphi^{\cal W}(E(\mathcal F))\,. $$
\noi Let $[\psi^{\rm op}_i]_{i\in\mathbb{I}_{m}}=\Psi^{\rm op}
\,\in \Lambda_{\alpha,\,p}^{\rm op}(\delta)$ be as in
Theorem \ref{teo estruc prob reducido unificado}. Then \begin{equation}\label{desi potop} \sum_{i\in\mathbb{I}_{m}} { p_i}\, \tr(\varphi(\psi^{\rm op}_i)) = \sum_{i\in\mathbb{I}_{m}} {p_i}\left( \sum_{j\in\mathbb{I}_i}\varphi(\psi^{\rm op}_{i,j})\right) \leq \sum_{i\in\mathbb{I}_{m}} p_i\, \sum_{j\in\mathbb{I}_{i}} \varphi (\max\{B_{i,j}\, , \, c_i\})\leq P_\varphi^{\cal W}(E(\mathcal F))\ . \end{equation} Recall that by construction, there exists $B^{\rm op}=(\gamma_{i,j})_{(i\, , \, j)\in\mathbb{I}_{m}\times\mathbb {I} _n}\in W_{\alpha,\,p}$ such that $B^{\rm op}_{\delta}=\Psi^{\rm op}$ (see item \ref{item3} in Notations \ref{muchas nots}). In this case, $$ \psi^{\rm op}_i=L_{i}(\,(\gamma_{i,j})_{j\in\mathbb {I} _n}) \implies (\gamma_{i,j})_{j\in\mathbb {I} _n}\prec \psi^{\rm op}_i \peso{for} i\in\mathbb{I}_{m}\ . $$ Let $\gamma:\text{Spec}({\cal W})\rightarrow \mathbb{R}}\def\C{\mathbb{C}^n$ be given by $\gamma(x) =R_i(B^{\rm op})= (\gamma_{i,j})_{j\in\mathbb {I} _n}$ if $x\in Z_i$, for $i\in\mathbb{I}_{m}\,$; similarly, let $\lambda:\text{Spec}({\cal W})\rightarrow \coprod_{i\in\mathbb{I}_{m}}\mathbb{R}}\def\C{\mathbb{C}^{i}$, $\lambda(x)=\psi^{\rm op}_i$ if $x\in Z_i$, for $i\in\mathbb{I}_{m}\,$. Then, by the previous remarks we get that $\gamma(x)\prec \lambda(x)$ for $x\in\text{Spec}({\cal W})$.
\noi Hence, by Theorem \ref{teo sobre disenio de marcos} there exists $\mathcal F^{\rm op}=\{f_j^{\rm op}\}_{j\in\mathbb {I} _n}$ such that $$
\|\Gamma f_j^{\rm op}(x)\|^2=\gamma_{i,j} \peso{and} \lambda_j([S_{E(\mathcal F^{\rm op})}]_x)=\psi_{i\, , \, j}^{\rm op} \peso{for} x\in Z_i\, , \ \ j\in\mathbb{I}_i \peso{and} i\in\mathbb{I}_{m}\ . $$ Since $B^{\rm op}\in W_{\alpha,\,p}$ then
$$\|f_j^{\rm op}\|^2=\int_{\text{Spec}({\cal W})}\|\Gamma f_j^{\rm op}(x)\|^2\ dx=\sum_{i\in\mathbb{I}_{m}} p_i \ \gamma_{i,j}=\alpha_j \peso{for} j\in\mathbb {I} _n\implies \mathcal F^{\rm op}\in \mathfrak B_{\alpha}({\cal W})$$ and \begin{equation}\label{eq casi estamos} P_\varphi^{{\cal W}}(E(\mathcal F^{\rm op}))=\int_{\text{Spec}({\cal W})} \tr(\varphi(\lambda(x)))\ dx=\sum_{i\in\mathbb{I}_{m}} p_i \ \tr(\varphi(\psi_i^{\rm op})),
\end{equation} then by Eq. \eqref{desi potop} we see that $P_\varphi^{{\cal W}}(E(\mathcal F^{\rm op}))\leq P_\varphi^{{\cal W}}(E(\mathcal F))\,.$ Since $\mathcal F\in \mathfrak B_{\alpha}({\cal W})$ was arbitrary, the previous facts show that $\mathcal F^{\rm op}$ satisfies items 1. and 2. in the statement.
\noi Assume further that $\varphi\in\convfs$ and $\mathcal F\in \mathfrak {B}_\alpha({\cal W})$ is such that $P_\varphi^{{\cal W}}(E(\mathcal F))=P_\varphi^{{\cal W}}(E(\mathcal F^{\rm op}))$. Then, by Eqs. \eqref{desi caso uniforme}, \eqref{desi potop} and \eqref{eq casi estamos} we see that $$
p_i\, \sum_{j\in\mathbb{I}_{i}} \varphi (\max\{B_{i,j}\, , \, c_i\}) = P_\varphi^{{\cal W}_i}(E(\mathcal F_i)) \peso{for} i\in\mathbb{I}_{m}\ . $$ Therefore, by the case of equality in Theorem \ref{teo struct fina dim hom} and the uniqueness of $\Psi^{\rm op}$ from Theorem \ref{teo estruc prob reducido unificado} we conclude that $$ \lambda_j([S_{E(\mathcal F)}]_x) =\lambda_j([S_{E(\mathcal F_i)}]_x)=\psi^{\rm op}_{i,\,j} \peso{for} x\in Z_i\, , \ j\in\mathbb{I}_{i}\, , \ i\in\mathbb{I}_{m}\ . $$ Finally, in case $\varphi\in\convfs$ is differentiable in $\mathbb{R}}\def\C{\mathbb{C}_+$ and $n\geq m$ then, again by Theorem \ref{teo estruc prob reducido unificado}, we see that $S_{E(\mathcal F)}$ is bounded from below in ${\cal W}$ (since the vectors in $\Psi^{\rm op}$ have no zero entries) and hence $E(\mathcal F)$ is a frame for ${\cal W}$. \end{proof}
\noi We end this section with the following remarks. With the notations of Theorem \ref{teo min pot fsi generales}, notice that the optimal Bessel sequence $\mathcal F^{\rm op}\in \mathfrak {B}_\alpha({\cal W})$ depends on the convex function $\varphi\in\convf$, which was fixed in advance. That is, unlike the uniform case, we are not able to show that there exists $\mathcal F^{\rm univ}\in \mathfrak {B}_\alpha({\cal W})$ such that $\mathcal F^{\rm univ}$ is a $P^{\cal W}_\varphi$-minimizer in $\mathfrak {B}_\alpha({\cal W})$ for every $\varphi\in\convf$. It is natural to wonder whether there exists such a universal solution $\mathcal F^{\rm univ}\in \mathfrak {B}_\alpha({\cal W})$; we conjecture that this is always the case.
\section{Appendix}\label{Appendixity}
\subsection{The Schur-Horn theorem for measurable fields of self-adjoint matrices and applications}
The simple notion of majorization between real vectors has played an important role in finite frame theory in finite dimensions. In particular, it is well known that the existence of finite sequences with prescribed norms and frame operator can be characterized in terms of majorization, applying the Schur-Horn theorem.
\noi Next we develop a Schur-Horn type theorem for measurable fields of self-adjoint matrices and use this result to prove Theorem \ref{teo:mayo equiv}.
Our proof is an adaptation of that given in \cite{HJ13} for the classical Schur-Horn theorem. We will use the existence of measurable eigenvalues and eigenvectors (i.e. diagonalization by measurable fields of unitary matrices) of measurable fields of self-adjoint matrices from \cite{RS95}. In what follows we consider a measure subspace $(X,\, \mathcal X,\, |\,\cdot|)$ of the measure space $(\mathbb{T}^k,\,\mathcal B(\mathbb{T}^k),\,|\,\cdot|)$ of the $k$-torus with Lebesgue measure on Borel sets.
\begin{teo} \label{teo:mayo y el unitario} Let $A(\cdot): X \to \mathcal{H}(n)$ be a measurable field of self-adjoint matrices with associated measurable eigenvalues $b_j:X\to \mathbb{R}}\def\C{\mathbb{C}$ for $j\in \mathbb{I}_n$ such that $b_1\geq \cdots \geq b_n\,$. Let $c_j:X\to \mathbb{R}}\def\C{\mathbb{C}$ be measurable functions for $j\in \mathbb{I}_n\,$. The following statements are equivalent: \begin{enumerate} \item $c(x)=(c_1(x)\, , \, \cdots \, , \, c_n(x))\prec b(x)=(b_1(x)\, , \, \cdots\, , \, b_n(x))$, for a.e. $x\in X$. \item There exists a measurable field of unitary matrices $U(\cdot):X\to {\cal U}(n)$, such that \begin{equation}\label{eq SH} d(U(x)^*\,A(x)\ U(x))=c(x)\, , \peso{for a.e.} x\in X\,, \end{equation} where $d(B)\in \C^n$ denotes the main diagonal of the matrix $B\in {\cal M}_n(\C)$. \end{enumerate} \end{teo} \begin{proof} First notice that the implication $2.\implies 1.$ follows from the classical Schur theorem.
\noi $1.\implies 2.\,$: By considering a convenient measurable field of permutation matrices, we can (and will) assume that the entries of the vector $c(x)$ are also arranged in non-increasing order: $c_1(x)\geq c_2(x)\geq \ldots \geq c_n(x)$. By the results from \cite{RS95} showing the existence of a measurable field of unitary matrices diagonalizing the field $A$, we can assume without loss of generality that $A(x)=D_{b(x)}$ where $D_{b(x)}$ is the diagonal matrix with main diagonal $(b_1(x)\, , \, \ldots \, , \, b_n(x))$ for a.e. $x\in X$.
\noi We will argue by induction on $n$. For $n=1$ the result is trivial.
Hence, we may assume that $n\geq 2$. Since $c(x)\prec b(x)$, we have $b_1(x)\geq c_1(x)\geq c_n(x)\geq b_n(x)$, so if $b_1(x)=b_n(x)$ it follows that all the entries of $c(x)$ and $b(x)$ coincide, $A(x)=c_1(x) I_n\,$, and we can take $U(x)=I_n\,$ for every such $x \in X$. By considering a convenient partition of $X$ we may therefore assume that $b_1(x)>b_n(x)$ in $X$. Similarly, in case $c_1(x)=c_n(x)$ then the unitary matrix $U(x)=n^{-1/2}\, (w^{j\,k})_{j,k\in\mathbb {I} _n}\,$, where $w=e^{-2\pi i/n}$, satisfies that $d(U(x)^*\, D_{b(x)}\ U(x))=(c_1(x)\, , \, \ldots \, , \, c_n(x))$. Therefore, by considering a convenient partition of $X$ we may assume that $c_1(x)>c_n(x)$ in $X$.
\noi For $n=2$, we have $b_1(x)>b_2(x)$ and $b_1(x)\geq c_1(x)\geq c_2(x)= (b_1(x)- c_1(x))+b_2(x)\geq b_2(x).$ Consider the matrix
$$U(x)=\frac{1}{\sqrt{b_1(x)-b_2(x)}} \begin{pmatrix} \sqrt{b_1(x)-c_2(x)} &-\sqrt{c_2(x)-b_2(x)} \\ \sqrt{c_2(x)-b_2(x)} &\sqrt{b_1(x)-c_2(x)} \end{pmatrix} \peso{for a.e.} x\in X\,. $$ Notice that $U(\cdot):X \to {\cal M}_2(\mathbb{C})$ is a measurable field of matrices and an easy computation reveals that $U(x)^*\, U(x)=I_2$, so $U(x)$ is unitary for a.e. $x\in X$. A further computation shows that $$U(x)^*\, A(x)\ U(x)= \begin{pmatrix} c_1(x) & * \\ * & c_2(x) \end{pmatrix} \peso{for a.e.} x\in X\,. $$ That is, $d(U(x)^*\, A(x)\, U(x))=(c_1(x)\, , \, c_2(x))$ and $U(\cdot)$ has the desired properties.
\noi Suppose that $n\geq 3$ and assume that the theorem is true if the vectors $c(x)$ and $b(x)$ have size at most $n-1$. For each $x\in X$ let $k(x)$ be the largest integer $k\in\mathbb {I} _n$ such that $b_k(x)\geq c_1(x)$. Since $b_1(x)\geq c_1(x)>c_n(x)\geq b_n(x)$, we see that $1\leq k(x)\leq n-1$. Then, by considering a convenient partition of $X$ into measurable sets we can assume that $k(x)=k$ for $x\in X$. Therefore, by definition of $k$ we get that $b_k(x)\geq c_1(x)>b_{k+1}(x)$ for $x\in X$. Let $\eta(x)=b_k(x)+b_{k+1}(x)-c_1(x)$ and observe that $\eta(x)=(b_k(x)-c_1(x))+b_{k+1}(x)\geq b_{k+1}(x)$. Then, the measurable vector $(b_k(x), b_{k+1}(x))$ majorizes the measurable vector $(c_1(x), \eta(x))$ and $b_k(x)>b_{k+1}(x)$ for a.e. $x\in X$.
Let $$D_1(x)=\begin{pmatrix} &b_k(x)& &0& \\ &0& &b_{k+1}(x)& \end{pmatrix} \peso{for a.e.} x\in X\,.$$ By the case $n=2$ we obtain a measurable field of unitary matrices $U_1(\cdot):X\rightarrow {\cal U}(2)$ such that $$d(U_1(x)^*\, D_1(x)\, U_1(x))= (c_1(x), \eta(x)) \peso{for a.e.} x\in X\,.$$ Since $b_k(x)=\eta(x)+(c_1(x)-b_{k+1}(x))>\eta(x)$, we have:
\noi If $k=1$ then $b_1(x)>\eta(x)\geq b_2(x)\geq \cdots \geq b_n(x)$; if we let $D_2(x)\in \mathbb {M}_{n-2}(\C)$ be the diagonal matrix with main diagonal $(b_3(x)\, , \, \ldots\, , \, b_n(x))$ then $D_{b(x)}=D_1(x)\oplus D_2(x)$ and $$\begin{pmatrix} U_1(x)& 0 \\ 0 &I_{n-2} \end{pmatrix}^* \begin{pmatrix} D_1(x)& 0 \\ 0 &D_2(x) \end{pmatrix} \begin{pmatrix} U_1(x) &0 \\ 0 &I_{n-2} \end{pmatrix}=\begin{pmatrix}c_1(x) &Z(x)^*\\ Z(x) &V_1(x)\end{pmatrix}$$ where $Z(x)^*=(\overline{z(x)}\, , \, 0\, , \, \ldots\, , \, 0)\in M_{1,(n-1)}(\C)$, $z(\cdot):X\rightarrow \C$ is a measurable function and $V_1(x)\in\mathbb {M}_{n-1}(\C)$ is the diagonal matrix with main diagonal $(\eta(x)\, , \, b_3(x)\, , \, \ldots\, , \, b_n(x))$. Moreover, in this case it turns out that $(\eta(x)\, , \, b_3(x)\, , \, \cdots \, , \, b_n(x))$ majorizes $(c_2(x)\, , \, \cdots\, , \, c_n(x))$ for a.e. $x\in X$ (see \cite{HJ13}). By the inductive hypothesis there exists a measurable field $U_2(\cdot):X\rightarrow {\cal U}(n-1)$ such that $d(U_2(x)^* V_1(x) U_2(x))=(c_2(x)\, , \, \cdots\, , \, c_n(x))$. Hence, if we set $U(x)=(U_1(x)\oplus I_{n-2})\cdot (1\oplus U_2(x))$ for $x\in X$ then $U(\cdot):X\rightarrow {\cal U}(n)$ has the desired properties.
\noi If $k>1$ then $b_1(x)\geq\ldots \geq b_{k-1}(x)\geq b_{k}(x)>\eta(x)\geq b_{k+1}(x)\geq \ldots \geq b_n(x)$. Let $D_2(x)\in \mathbb {M}_{n-2}(\C)$ be the diagonal matrix with main diagonal $$\beta(x)\ \stackrel{\mbox{\tiny{def}}}{=}\ (b_1(x)\, , \, \ldots\, , \, b_{k-1}(x)\, , \, b_{k+2}(x)\, , \, \ldots\, , \, b_n(x))\in\mathbb{R}}\def\C{\mathbb{C}^{n-2}.$$ Notice that in this case $$\begin{pmatrix} U_1(x)& 0 \\ 0 &I_{n-2} \end{pmatrix}^* \begin{pmatrix} D_1(x)& 0 \\ 0 &D_2(x) \end{pmatrix} \begin{pmatrix} U_1(x) &0 \\ 0 &I_{n-2} \end{pmatrix}=\begin{pmatrix}c_1(x) &W(x)^*\\ W(x) &V_2(x)\end{pmatrix}$$ where $W(x)^*=(\overline{w(x)}\, , \, 0\, , \, \ldots\, , \, 0)\in M_{1,(n-1)}(\C)$, $w(\cdot):X\rightarrow \C$ is a measurable function and $V_2(x)\in M_{n-1}(\C)$ is the diagonal matrix with main diagonal $$ \gamma(x)\ \stackrel{\mbox{\tiny{def}}}{=}\ (\eta(x)\, , \, b_1(x)\, , \, \ldots\, , \, b_{k-1}(x)\, , \, b_{k+2}(x)\, , \, \ldots\, , \, b_n(x)) \peso{for a.e.} x\in X \ . $$ It turns out that $(c_2(x)\, , \, \ldots\, , \, c_n(x))\prec \gamma(x)$ for a.e. $x\in X$; by the inductive hypothesis there exists a measurable field $U_2(\cdot):X\rightarrow {\cal U}(n-1)$ such that $d(U_2(x)^* V_2(x) U_2(x))=(c_2(x)\, , \, \ldots\, , \, c_n(x))$ for a.e. $x\in X$. Notice that there exists a permutation matrix $P\in{\cal U}(n)$ such that $P^*(x) D_{b(x)} P=D_1\oplus D_2\,$. Hence, if we set $U(x)=P\cdot (U_1(x)\oplus I_{n-2})\cdot (1\oplus U_2(x))$ for a.e. $x\in X$ then, $U(\cdot):X\rightarrow {\cal U}(n)$ has the desired properties. \end{proof}
\noi Next we prove Theorem \ref{teo:mayo equiv}, based on the Schur-Horn theorem for measurable field i.e. Theorem \ref{teo:mayo y el unitario} above. Our approach is an adaptation of some known results in finite frame theory (see \cite{AMRS}).
\noi {\bf Theorem \ref{teo:mayo equiv}} \it Let $b:\mathbb{T}^k\rightarrow (\mathbb{R}}\def\C{\mathbb{C}_+)^d$ and $c:\mathbb{T}^k\rightarrow (\mathbb{R}}\def\C{\mathbb{C}_+)^n$ be measurable vector fields. The following statements are equivalent: \begin{enumerate} \item For a.e. $x\in \mathbb{T}^k$ we have that $c(x)\prec b(x)$.
\item There exist measurable vector fields $u_j: \mathbb{T}^k\to \C^d$ for $j\in\mathbb {I} _n$ such that $\|u_j(x)\|=1$ for a.e. $x\in \mathbb{T}^k$ and $j\in \mathbb{I}_n\,$, and such that $$ D_{b(x)}=\sum_{j\in \mathbb{I}_n} c_j(x)\,\ u_j(x) \otimes u_j(x) \ , \peso{for a.e.} \ x\in \mathbb{T}^k\ . $$ \end{enumerate} \rm \begin{proof} First notice that the implication $2.\implies 1.$ follows from well known results in finite frame theory (see \cite{AMRS}) in each point $x\in \mathbb{T}^k$. Hence, we show $1.\implies 2.$ We assume, without loss of generality, that the entries of the vectors $b(x)$ and $c(x)$ are arranged in non-increasing order. We now consider the following two cases:
\noi {\bf Case 1:} assume that $n<d$. We let $\tilde c:\mathbb{T}^k\rightarrow \C^d$ be given by $\tilde c(x)=(c(x)\, , \, 0_{d-n})$ for $x\in \mathbb{T}^k$. Then, $\tilde c(x)\prec b(x)$ for $x\in\mathbb{T}^k$ and therefore, by Theorem \ref{teo:mayo y el unitario} there exists a measurable field $U(\cdot):\mathbb{T}^k\rightarrow {\cal U}(d)$ such that \begin{equation} \label{eq aplic SH11} d(U(x)^* D_{b(x)} \, U(x))=(c_1(x)\, , \, \ldots\, , \, c_n(x)\, , \, 0_{d-n})\peso{for a.e.} x\in\mathbb{T}^k\,. \end{equation} Let $v_1(x)\, , \, \ldots\, , \, v_d(x)\in\C^d$ denote the columns of $C(x)=D_{b(x)}^{1/2}\,U(x)$, for $x\in\mathbb{T}^k$. Then, Eq. \eqref{eq aplic SH11} implies that:
$$ \|v_j(x)\|^2= c_j(x) \peso{for} j\in\mathbb{I}_n \ , \quad v_j(x)=0 \peso{for} n+1\leq j\leq d $$ $$ \peso{and} D_{b(x)}= C(x)\, C(x)^*=\sum_{j\in\mathbb{I}_n} v_j(x)\otimes v_j(x) \peso{for a.e.} x\in\mathbb{T}^k \,.$$ Thus, the vectors $u_j(x)$ are obtained from $v_j(x)$ by normalization, for a.e. $x\in\mathbb{T}^k$ and $j\in\mathbb{I}_n\,$.
\noi {\bf Case 2:} assume that $n\geq d$. We let $\tilde b:\mathbb{T}^k\rightarrow \C^n$ be given by $\tilde b(x)=(b(x)\, , \, 0_{n-d})$ for $x\in \mathbb{T}^k$. Then, $c(x)\prec \tilde b(x)$ for $x\in\mathbb{T}^k$ and therefore, by Theorem \ref{teo:mayo y el unitario} there exists a measurable field $U(\cdot):\mathbb{T}^k\rightarrow {\cal U}(n)$ such that \begin{equation} \label{eq aplic SH1} d(U(x)^* D_{\tilde b(x)} \, U(x))=(c_1(x)\, , \, \ldots\, , \, c_n(x))\peso{for a.e.} x\in\mathbb{T}^k\ . \end{equation} Let $\tilde v_1(x)\, , \, \ldots\, , \, \tilde v_n(x)\in\C^n$ denote the columns of $C(x)=D_{\tilde b(x)}^{1/2}U(x)$, for $x\in\mathbb{T}^k$. As before, Eq. \eqref{eq aplic SH1} implies that $$
\|\tilde v_j(x)\|^2= c_j(x) \peso{for} j\in\mathbb{I}_n \peso{and} D_{\tilde b(x)}= \sum_{j\in\mathbb{I}_n} \tilde v_j(x)\otimes \tilde v_j(x) \peso{for a.e.} x\in\mathbb{T}^k \ . $$ If we let $\tilde v_j(x)=(\tilde v_{i,j}(x))_{i\in\mathbb {I} _n}$ then the second identity above implies that $\tilde v_{i,j}(x)=0$ for a.e. $x\in\mathbb{T}^k$ and every $d+1\leq i\leq n$. If we let $v_j(x)=(\tilde v_{i,j}(x))_{i\in\mathbb{I}_d}$ for a.e. $x\in\mathbb{T}^k$ and $j\in\mathbb {I} _n\,$, we get that
$$ \|v_j(x)\|^2= c_j(x) \peso{for} j\in\mathbb{I}_n \peso{and} D_{b(x)}= \sum_{j\in\mathbb{I}_n} v_j(x)\otimes v_j(x) \peso{for a.e.} x\in\mathbb{T}^k \,.$$ Thus, the vectors $u_j(x)$ are obtained from $v_j(x)$ by normalization, for a.e. $x\in\mathbb{T}^k$ and $j\in\mathbb{I}_n\,$. \end{proof}
\subsection{The reduced finite-dimensional model: proof of Theorem \ref{teo estruc prob reducido unificado}}\label{subsec reduced}
In this section we present the proof of Theorem \ref{teo estruc prob reducido unificado}, divided into two parts (namely, Propositions \ref{teo estruc prob reducido} and \ref{era facilongo nomas} below).
\begin{pro}\label{teo estruc prob reducido} Let $ m,\, n\in\mathbb{N}$, $\alpha\in (\mathbb{R}}\def\C{\mathbb{C}_{>0}^n)^\downarrow$, $p=(p_i)_{i\in\mathbb{I}_{m}}\in \mathbb{R}}\def\C{\mathbb{C}_{>0}^m$ and $\delta=(d_i)_{i\in\mathbb{I}_{m}}\in\mathbb{N}^m$ be such that $1\leq d_1< \ldots< d_m$. If $\varphi\in\convf$ then there exists $\Psi^{\rm op}=[\psi_i^{\rm op}]_{i\in\mathbb{I}_{m}}\in \Lambda^{\rm op}_{\alpha,\,p}(\delta)$ such that $$ \sum_{i\in\mathbb{I}_{m}} {p_i}\,\tr(\varphi(\psi_i^{\rm op}) ) \leq \sum_{i\in\mathbb{I}_{m}} {p_i}\,\tr(\varphi(\psi_i) )
\peso{for every} \Psi=[\psi_i]_{i\in\mathbb{I}_{m}}\in \Lambda^{\rm op}_{\alpha,\,p}(\delta)\,.$$ Moreover, if $\varphi\in\convfs$ then such $\Psi^{\rm op}$ is unique. \end{pro} \begin{proof} Let us consider the set $$ \Lambda_{\alpha,\,p}(\delta)\ \stackrel{\mbox{\tiny{def}}}{=}\ \bigcup_{B\in W_{\alpha,\,p}} M(B) \subseteq \prod_{i\in\mathbb{I}_{m}} (\mathbb{R}}\def\C{\mathbb{C}_+^{d_i})^\downarrow \ , $$ where $$ M(B)\ \stackrel{\mbox{\tiny{def}}}{=}\ \{ [\lambda_i]_{i\in\mathbb{I}_{m}}\in \prod_{i\in\mathbb{I}_{m}} (\mathbb{R}}\def\C{\mathbb{C}_+^{d_i})^\downarrow:\ R_i(B)\prec \lambda_i\ , \ i\in\mathbb{I}_{m}\} \ . $$ Notice that by construction $\Lambda_{\alpha,\,p}^{\rm op}(\delta)\subseteq \Lambda_{\alpha,\,p}(\delta)$.
\noi We claim that $\Lambda_{\alpha,\,p}(\delta)$ is a convex set. Indeed, let $[\lambda_i]_{i\in\mathbb{I}_{m}}\in M(B_1)$, $[\mu_i]_{i\in\mathbb{I}_{m}}\in M(B_2)$ for $B_1 $, $B_2\in W_{\alpha,\,p}$ and $t\in [0,1]$. Take the matrix $B = t\, B_1 + (1-t)\,B_2\in W_{\alpha,\,p}\,$ (since $ W_{\alpha,\,p}$ is a convex set). Then $$ [\,\gamma_i\,]_{i\in\mathbb{I}_{m}} = [\,t\,\lambda_i+(1-t)\, \mu_i\,]_{i\in\mathbb{I}_{m}}\in M(B) \subseteq \Lambda_{\alpha,\,p}(\delta) \ : $$ on the one hand, $\gamma_i\in(\mathbb{R}_+^{d_i})^\downarrow$, $i\in\mathbb{I}_m$; on the other hand, by Lidskii's additive inequality (see \cite{Bhat}) we have that, for each $i\in\mathbb{I}_{m}\,$ $$ R_i(B)= t\,R_i(B_1)+ (1-t)\,R_i(B_2)\prec t\, R_i(B_1)^\downarrow + (1-t)\, R_i(B_2)^\downarrow \in (\mathbb{R}_+^n)^\downarrow \ . $$ Moreover, by the hypothesis (and the definition of majorization) one deduces that $$ R_i(B_1)^\downarrow\prec \lambda_i \peso{and} R_i(B_2)^\downarrow\prec \mu_i \implies R_i(B) \prec t\, \lambda_i + (1-t)\, \mu_i= \gamma_i $$ for every $i\in \mathbb{I}_{m}\,$. This proves the claim, so $\Lambda_{\alpha,\,p}(\delta)$ is a convex set. Moreover, by the compactness of $W_{\alpha,\,p}$ and by the conditions defining $M(B)$ for $B\in W_{\alpha,\,p}\,$, it follows that $\Lambda_{\alpha,\,p}(\delta)$ is a compact set. Let $$ \varphi_p:\Lambda_{\alpha,\,p}(\delta)\rightarrow \mathbb{R}_+ \peso{given by} \varphi_p(\Psi)\ \stackrel{\mbox{\tiny{def}}}{=}\ \sum_{i\in\mathbb{I}_{m}} {p_i}\,\tr \,\varphi(\psi_i) \ , $$ for $\Psi=[\psi_i]_{i\in\mathbb{I}_{m}} \in \Lambda_{\alpha,\,p}(\delta)\,$. It is easy to see that $\varphi_p$ is a convex function, which is strictly convex whenever $\varphi\in\convfs$.
Using this last fact it follows that there exists $ \Psi_0\in \Lambda_{\alpha,\,p}(\delta)$ that satisfies $$ \varphi_p(\Psi_0)\leq \varphi_p(\Psi) \peso{for every} \Psi\in \Lambda_{\alpha,\,p}(\delta)\ , $$ and such $ \Psi_0$ is unique whenever $\varphi\in\convfs$. Notice that by construction there exists some $B\in W_{\alpha,\,p}$ such that $\Psi_0=[\psi_i^0]_{i\in\mathbb{I}_{m}} \in M(B)$. Then, by item \ref{item2} of Notation \ref{muchas nots}, $$ R_i(B)\prec \psi_i^0 \implies L_{d_i}(R_i(B))\prec \psi_i^0 \implies \tr \, \varphi(L_{d_i}(R_i(B)))\leq \tr \,\varphi(\psi_i^0) \peso{for} i\in\mathbb{I}_{m}\ . $$ Hence, the sequence $B_\delta$ defined in Eq. \eqref{Bdelta} using this matrix $B$ satisfies that $\varphi_p(B_\delta)\le \varphi_p(\Psi_0)$. So we define $\Psi^{\rm op}\ \stackrel{\mbox{\tiny{def}}}{=}\ B_\delta\in \Lambda_{\alpha,\,p}^{\rm op}(\delta)\subset \Lambda_{\alpha,\,p}(\delta)$, that has the desired properties. Finally, the previous remarks show that $\Psi_0= \Psi^{\rm op}\in \Lambda_{\alpha,\,p}^{\rm op}(\delta)$ whenever $\varphi\in\convfs$. \end{proof}
\begin{pro}\label{era facilongo nomas} With the notations and terminology of Proposition \ref{teo estruc prob reducido}, assume further that $n\geq d_m$ and that $\varphi\in\convfs$ is differentiable in $\mathbb{R}}\def\C{\mathbb{C}_+\,$. Then $$ \Psi^{\rm op}\in \prod_{i\in\mathbb{I}_m} (\mathbb{R}}\def\C{\mathbb{C}_{>0}^{d_i})^\downarrow\,.$$ \end{pro} \begin{proof} Let $\Psi^{\rm op}=[\psi_i^{\rm op}]_{i\in\mathbb{I}_{m}}$ where each vector $\psi_i^{\rm op} \in (\mathbb{R}}\def\C{\mathbb{C}_{+}^{d_i})^\downarrow \,$,
and assume that there exists $i_0\in\mathbb{I}_m$ such that $\psi_{i_0}^{\rm op}=(\psi_{i_0,j}^{\rm op})_{j\in \mathbb{I}_{d_{i_0}}}$ satisfies that $\psi^{\rm op}_{i_0,k}=0$ for some $1\leq k\leq d_{i_0}$; let $1\leq k_0\leq d_{i_0}$ be the smallest such index. Let $B\in W_{\alpha,\,p}$ be such that $B_\delta=\Psi^{\rm op}$. Recall from Eq. \eqref{eq defi gammacd} that, if we denote $c_i = c_{d_{i}}(R_{i}(B))$ for every $i \in \mathbb{I}_{m}\,$, then $$ \psi^{\rm op}_{i_0,j}= L_{d_{i_0}} (R_{i_0}(B))_j = \max\{R_{i_0}(B)^\downarrow_j\, , \, c_{i_0}\} \peso{for} j\in \mathbb{I}_{d_{i_0}} \ , $$ since $n\geq d_{i_0}$ by hypothesis. Hence, in this case $c_{i_0}=
0$ and $R_{i_0}(B)^\downarrow_{k_0}=0$. Let $j_0\in \mathbb {I} _n $ be such that $0=R_{i_0}(B)^\downarrow_{k_0} = B_{i_0\, , \, j_0}\,$. By construction $\sum_{i\in\mathbb{I}_m}p_i\ B_{i\, , \, j_0}=\alpha_{j_0}>0$ so that there exists $i_1\in\mathbb{I}_m$ such that $B_{i_1,\,j_0}>0$. Let $\{e_j\}_{j\in\mathbb {I} _n}$ denote the canonical basis of $\mathbb{R}^n$. For every $t\in I=[0,\frac{B_{i_1,\,j_0} \ p_{i_1}}{p_{i_0}}]$ consider the matrix $B(t)$ defined by its rows as follows: \begin{itemize} \item $R_{i_0}(B(t))=R_{i_0}(B)+t\, e_{j_0} $ \item $R_{i_1}(B(t))= R_{i_1}(B)- \frac{p_{i_0}\, t}{p_{i_1}}\ e_{j_0}\,$ \item $R_{i}(B(t))=R_{i}(B)$ for $i\in\mathbb{I}_m\setminus\{i_0,\,i_1\}$. \end{itemize} It is straightforward to check that $B(t)\in W_{\alpha,\,p}$ for $t\in I$ and that $B(0)=B$. Set $\Psi(t)=[\psi_i(t)]_{i\in\mathbb{I}_m}=B(t)_\delta\in \Lambda_{\alpha,\, p}^{\rm op}(\delta)$ for $t\in I$ and notice that $\Psi(0)=\Psi^{\rm op}$. We now consider two cases:
\noi {\bf Case 1:\ }\ $B_{i_1,\,j_0}> c_{i_1}$ (recall that $\psi^{\rm op}_{i_1\, , \, j}= L_{d_{i_1}} (R_{i_1}(B))_j = \max\{R_{i_1}(B)^\downarrow_j\, , \, c_{i_1}\} $). Therefore $B_{i_1,\,j_0} =R_{i_1}(B)^\downarrow_{k}$ for some $1\leq k\leq d_{i_1}$ and we let $1\leq k_1\leq d_{i_1}$ be the largest such $k$. It is straightforward to check that in this case there exists $\varepsilon >0$ such that $$\psi_{i_0}(t)=\psi^{\rm op}_{i_0}+t\, e_{k_0} \peso{and} \psi_{i_1}(t)=\psi^{\rm op}_{i_1}-\frac{p_{i_0}}{p_{i_1}}\, t\, e_{k_1} \peso{for} t\in [0,\epsilon]\,.$$ Therefore, for $t\in [0,\epsilon]$ we have that $$ f(t)=\varphi_p(\Psi(t))- \varphi_p(\Psi^{\rm op})=p_{i_0}\ (\varphi(t)-\varphi(0))+p_{i_1}\ (\varphi(B_{i_1,\, j_0} - \frac{p_{i_0}}{p_{i_1}} t ) - \varphi(B_{i_1,\, j_0}))\ . $$ Hence $f(0)=0$ and by hypothesis $f(t)\geq 0$ for $t\in[0,\epsilon]$. On the other hand, $$ f'(0)= p_{i_0}\ (\varphi'(0) - \varphi'(B_{i_1,\, j_0}) )<0 $$ since by the hypothesis $\varphi'$ is strictly increasing and $B_{i_1,\, j_0}>0$. This condition contradicts the previous facts about $f$. From this we see that the vectors in $\Psi^{\rm op}$ have no zero entries.
\noi {\bf Case 2:\ }\ $B_{i_1,\,j_0}\le c_{i_1}$. Hence, in this case $0<c_{i_1}$ and there exists $0\leq r\leq d_{i_1}-1$ such that $$ \psi^{\rm op}_{i_1}=(R_{i_1}(B)^\downarrow_1\, , \, \ldots \, , \, R_{i_1}(B)^\downarrow_r \, , \, c_{i_1}\, , \, \ldots\, , \, c_{i_1}) $$ so that there exists $\varepsilon>0$ such that for $t\in[0,\varepsilon]$ we have that $$ \psi_{i_1}(t)=(R_{i_1}(B)^\downarrow_1\, , \, \ldots \, , \, R_{i_1}(B)^\downarrow_r \, , \, c_{i_1}\, , \, \ldots\, , \, c_{i_1})-\frac{p_{i_0}\ t}{(d_{i_1}-r)\ p_{i_1}}\sum_{j=r+1}^{d_{i_1}} e_j\,.
$$ Therefore, for $t\in [0,\varepsilon]$ we have that $$f(t)=\varphi_p(\Psi(t))- \varphi_p(\Psi^{\rm op})= p_{i_0}\ (\varphi(t)-\varphi(0))+p_{i_1} \ (d_{i_1}-r)\ (\varphi(c_{i_1}- \frac{p_{i_0} \, t}{(d_{i_1}-r)\,p_{i_1} } ) - \varphi(c_{i_1}))\, .$$ As before, $f(0)=0$ and $f(t)\geq 0$ for $t\in[0,\varepsilon]$; a simple computation shows that in this case we also have that $f'(0)<0$, which contradicts the previous facts; thus, the vectors in $\Psi^{\rm op}$ have no zero entries. \end{proof}
{\scriptsize
}
\end{document} |
\begin{document}
\title[Center of gravity of the associahedron]{The centers of gravity of the associahedron and of the permutahedron are the same}
\author[C. Hohlweg]{Christophe~Hohlweg} \address[Christophe Hohlweg]{Universit\'e du Qu\'ebec \`a Montr\'eal\\ LaCIM et D\'epartement de Math\'ematiques\\ CP 8888 Succ. Centre-Ville\\ Montr\'eal, Qu\'ebec, H3C 3P8\\ CANADA} \email{[email protected]} \urladdr{http://www.lacim.uqam.ca/\~{}hohlweg}
\author[J. Lortie]{Jonathan~Lortie} \address[Jonathan Lortie]{Universit\'e du Qu\'ebec \`a Montr\'eal\\ LaCIM et D\'epartement de Math\'ematiques\\ CP 8888 Succ. Centre-Ville\\ Montr\'eal, Qu\'ebec, H3C 3P8\\ CANADA} \email{[email protected]}
\author[A. Raymond]{Annie~Raymond} \address[Annie Raymond]{Berlin Mathematical School\\ Strasse des 17. Juni 136\\ Berlin, 10623, Germany} \email{[email protected]}
\date{\today}
\thanks{$^*$ This work is supported by FQRNT and NSERC. It is the result of a summer undergraduate research internship supported by LaCIM}
\begin{abstract} \noindent In this article, we show that Loday's realization of the associahedron has the same center of gravity as the permutahedron. This proves an observation made by F.~Chapoton.
We also prove that this result holds for the associahedron and the cyclohedron as realized by the first author and C.~Lange.
\end{abstract}
\maketitle
\section{Introduction.}\label{se:Intro}
In 1963, J.~Stasheff discovered the associahedron~\cite{stasheff,stasheff2}, a polytope of great importance in algebraic topology.
The associahedron in $\mathbb R^n$ is a simple $(n-1)$-dimensional convex polytope. The classical realization of the associahedron given by
S.~Shnider and S.~Sternberg in \cite{shnider_sternberg} was
completed by J.~L.~Loday in 2004~\cite{loday}. Loday gave a
combinatorial algorithm to compute the integer coordinates of the
vertices of the associahedron, and showed that it can be obtained
naturally from the classical permutahedron of dimension $n-1$.
F.~Chapoton observed that the centers of gravity of the
associahedron and of the permutahedron are the same \cite[Section 2.11]{loday}.
As far as we know, this property of Loday's realization has never been proved.
In 2007, the first author and C.~Lange gave a family of realizations of the associahedron that contains the classical realization of the associahedron. Each of these realizations is also obtained naturally from the classical permutahedron \cite{realisation1}. They conjectured that for any of these realizations, the center of gravity coincides with the center of gravity of the permutahedron. In this article, we prove this conjecture to be true.
The associahedron fits in a larger family of polytopes, {\em generalized associahedra}, introduced by S.~Fomin and A.~Zelevinsky in \cite{fomin_zelevinsky} within the framework of cluster algebras (see \cite{chapoton_fomin_zelevinsky,realisation2} for their realizations).
In 1994, R.~Bott and C.~Taubes discovered the cyclohedron~\cite{bott_taubes} in connection with knot theory. It was rediscovered independently by R. Simion \cite{simion}. In \cite{realisation1}, the first author and C.~Lange also gave a family of realizations for the cyclohedron, starting with the permutahedron of type $B$.
We also show that the centers of gravity of the cyclohedron and of the permutahedron of type $B$ are the same.
The article is organized as follows. In \S\ref{se:1}, we first recall the realization of the permutahedron and how to compute its center of gravity. Then we compute the center of gravity of Loday's realization of the associahedron. In order to do this, we partition its vertices into isometry classes of triangulations, which parameterize the vertices, and we show that the center of gravity for each of those classes is the center of gravity of the
permutahedron.
In \S\ref{se:2}, we show that the computation of the center of
gravity of any of the realizations given by the first author and
C.~Lange is reduced to the computation of the center of gravity of the classical
realization of the associahedron. We do the same for the cyclohedron in \S\ref{se:3}.
We are grateful to Carsten Lange for allowing us to use some of the pictures he made in~\cite{realisation1}.
\section{Center of gravity of the classical permutahedron and associahedron}\label{se:1}
\subsection{The permutahedron} Let $S_n$ be the symmetric group acting on the set $[n]=\{1,2,\dots,n\}$. The {\em permutahedron} ${\mathsf{Perm}}(S_n)$ is the classical $(n-1)$-dimensional simple convex polytope defined as the convex hull of the points $$ M(\sigma)=(\sigma(1),\sigma(2),\dots, \sigma (n))\in\mathbb R^n,\qquad \forall \sigma\in S_n. $$ The {\em center of gravity} (or {\em isobarycenter}) is the unique point $G$ of $\mathbb R^n$ such that $$ \sum_{\sigma\in S_n} \vect{GM(\sigma)}=\vect 0. $$ Since the permutation $w_0:i\mapsto n+1-i$ preserves ${\mathsf{Perm}}(S_n)$, we see, by sending $M(\sigma)$ to $$ M(w_0\sigma)=(n+1-\sigma(1),n+1-\sigma(2),\dots, n+1-\sigma (n)), $$ that the center of gravity is $ G=(\frac{n+1}{2},\frac{n+1}{2},\dots,\frac{n+1}{2}). $
\subsection{Loday's realization} We present here the realization of the associahedron given by J.~L.~Loday \cite{loday}. However, instead of using planar binary trees, we use triangulations of a regular polygon to parameterize the vertices of the associahedron (see \cite[Remark 1.2]{realisation1}).
\subsubsection{Triangulations of a regular polygon} Let $P$ be a regular $(n+2)$-gon in the Euclidean plane with vertices $A_0,A_1,\dots,A_{n+1}$ in counterclockwise direction. A {\em triangulation of $P$} is a set of $n$ noncrossing diagonals of $P$.
Let us be more explicit. A {\em triangle of $P$} is a triangle whose vertices are vertices of $P$. Therefore a side of a triangle of $P$ is either an edge or a diagonal of $P$. A triangulation of $P$ is then a collection of $n$ distinct triangles of $P$ with noncrossing sides. Any of the triangles in $T$ can be described as $A_i A_j A_k$ with $0\leq i<j<k\leq n+1$. Each $1\leq j\leq n$ corresponds to a unique triangle $\Delta_j(T)$ in $T$ because the sides of triangles in $T$ are noncrossing.
Therefore we write $T=\{\Delta_1(T),\dots, \Delta_n(T)\}$ for a triangulation $T$, where $\Delta_j(T)$ is the unique triangle in $T$ with vertex $A_j$ and the two other vertices $A_i$ and $A_k$ satisfying the inequalities $0\leq i<j<k\leq n+1$.
Denote by ${\mathcal T}_{n+2}$ the set of triangulations of $P$.
\subsubsection{Loday's realization of the associahedron}
Let $T$ be a triangulation of $P$. The {\em weight} $\delta_j(T)$ of the triangle $\Delta_j(T)=A_i A_jA_k$, where $i<j<k$, is the positive number $$ \delta_j(T)=(j-i)(k-j). $$ The weight $\delta_j(T)$ of $\Delta_j(T)$ represents the product of the number of boundary edges of $P$ between $A_i$ and $A_j$ passing through vertices indexed by smaller numbers than $j$ with the number of boundary edges of $P$ between $A_j$ and $A_k$ passing through vertices indexed by larger numbers than $j$.
The {\em classical associahedron} ${\mathsf{Asso}}(S_n)$ is obtained as the convex hull of the points $$ M(T)=(\delta_1(T),\delta_2(T),\dots, \delta_n(T))\in \mathbb R^n,\quad\forall T\in{\mathcal T}_{n+2}. $$ We are now able to state our first result.
\begin{thm}\label{thm:Main} The center of gravity of ${\mathsf{Asso}}(S_n)$ is $G=(\frac{n+1}{2},\frac{n+1}{2},\dots,\frac{n+1}{2})$.
\end{thm}
In order to prove this theorem, we need to study closely a certain partition of the vertices of $P$.
\subsection{Isometry classes of triangulations}\label{se:centergravity} As $P$ is a regular $(n+2)$-gon, its isometry group is the dihedral group ${\mathcal D}_{n+2}$ of order $2(n+2)$. So ${\mathcal D}_{n+2}$ acts on the set ${\mathcal T}_{n+2}$ of all triangulations of $P$: for $f\in{\mathcal D}_{n+2}$ and $T\in{\mathcal T}_{n+2}$, we have $f\cdot T\in{\mathcal T}_{n+2}$. We denote by $\mathcal O (T)$ the orbit of $T\in{\mathcal T}_{n+2}$ under the action of ${\mathcal D}_{n+2}$.
We know that $G$ is the center of gravity of ${\mathsf{Asso}}(S_n)$ if and only if $$ \sum_{T\in{\mathcal T}_{n+2}} \vect{GM(T)} =\vect 0. $$ As the orbits of the action of ${\mathcal D}_{n+2}$ on ${\mathcal T}_{n+2}$ form a partition of the set ${\mathcal T}_{n+2}$, it is sufficient to compute $$ \sum_{T\in\mathcal O} \vect{GM(T)} $$ for any orbit $\mathcal O$. The following key observation implies directly Theorem~\ref{thm:Main}.
\begin{thm}\label{thm:key} Let $\mathcal O$ be an orbit of the action of ${\mathcal D}_{n+2}$ on ${\mathcal T}_{n+2}$, then $G$ is the center of gravity of $\{M(T)\,|\, T\in\mathcal O\}$. In particular, $ \sum_{T\in\mathcal O} \vect{GM(T)}=\vect 0. $ \end{thm}
Before proving this theorem, we need to prove the following result.
\begin{prop}\label{prop:canonique} Let $T\in{\mathcal T}_{n+2}$ and $j\in [n]$, then $\displaystyle{\sum_{f\in {\mathcal D}_{n+2}} \delta_j(f\cdot T) = (n+1)(n+2)}$. \end{prop}
\begin{proof}
We prove this proposition by induction on $j\in [n]$. For any triangulation $T'$, we denote by
$a_j(T')<j<b_j(T')$ the indices of the vertices of $\Delta_j(T')$. Let $H$ be the group of rotations
in ${\mathcal D}_{n+2}$. It is well-known that for any reflection $s\in {\mathcal D}_{n+2}$, the classes $H$ and
$sH$ form a partition of ${\mathcal D}_{n+2}$ and that $|H|=n+2$. We also consider the unique reflection $s_k\in{\mathcal D}_{n+2}$ which maps $A_x$ to $A_{n+3+k-x}$, where the values of the indices are taken modulo $n+2$. In particular, $s_k(A_0)=A_{n+3+k}=A_{k+1}$, $s_k(A_1)=A_k$, $s_k(A_{k+1})=A_{n+2}=A_0$, and so on.
\noindent {\bf Basic step $j=1$:} We know that $a_1(T')=0$ for any triangulation $T'$, hence the weight of $\Delta_1(T')$ is $\delta_1(T')=(1-0)(b_1(T')-1)=b_1(T')-1$.
The reflection $s_0\in {\mathcal D}_{n+2}$ maps $A_x$ to $A_{n+3-x}$ (where $A_{n+2}=A_0$ and $A_{n+3}=A_1$). In other words, $s_0(A_0)=A_1$ and $s_0(\Delta_1(T'))$ is a triangle in $s_0\cdot T'$. Since $$ s_0(\Delta_1( T'))= s_0(A_0A_1A_{b_1(T')})= A_0A_1A_{n+3-b_1(T')} $$ and $0<1<n+3-b_1(T')$, $s_0(\Delta_1(T'))$ has to be $\Delta_1(s_0\cdot T')$. In consequence, we obtain that $$ \delta_1(T')+\delta_1(s_0\cdot T')= (b_1(T')-1)+(n+3-b_1(T')-1)=n+1, $$ for any triangulation $T'$. Therefore $$ \sum_{f\in {\mathcal D}_{n+2}} \delta_1(f\cdot T) = \sum_{g\in H}\big(
(\delta_1(g\cdot T)+\delta_1(s_0\cdot (g\cdot T))\big)= |H| (n+1)=(n+1)(n+2), $$ proving the initial case of the induction.
\noindent {\bf Inductive step:} Assume that, for a given $1\leq j<n$, we have $$ \sum_{f\in {\mathcal D}_{n+2}}\delta_j(f\cdot T) = (n+1)(n+2). $$ We will show that $$ \sum_{f\in {\mathcal D}_{n+2}}\delta_{j+1}(f\cdot T) = \sum_{f\in {\mathcal D}_{n+2}}\delta_j(f\cdot T). $$ Let $r\in H\subseteq {\mathcal D}_{n+2}$ be the unique rotation mapping $A_{j+1}$ to $A_{j}$. In particular, $r(A_0)=A_{n+1}$. Let $T'$ be a triangulation of $P$. We have two cases:
\noindent {\bf Case 1.} If $a_{j+1}(T')>0$ then $a_{j+1}(T')-1<j<b_{j+1}(T')-1$ are the indices of the vertices of the triangle $r(\Delta_{j+1}(T'))$ in $r\cdot T'$. Therefore, by unicity, $r(\Delta_{j+1}(T'))$ must be $\Delta_j(r\cdot T')$. Thus
\begin{eqnarray*}
\delta_{j+1}(T')&=&(b_{j+1}(T')-(j+1))(j+1-a_{j+1}(T'))\\
&=&\big((b_{j+1}(T')-1)-j\big)(j-(a_{j+1}(T')-1))\\
&=&\delta_j(r\cdot T'). \end{eqnarray*} In other words: \begin{eqnarray}\label{equ:1} \sum_{{f\in {\mathcal D}_{n+2},\atop a_{j+1}(f\cdot T)\not = 0}}\delta_{j+1}(f\cdot T) & =& \sum_{{f\in {\mathcal D}_{n+2},\atop a_{j+1}(f\cdot T)\not = 0}}\delta_j(r\cdot(f\cdot T))\\\nonumber & =& \sum_{{g\in {\mathcal D}_{n+2},\atop b_{j}(g\cdot T)\not = n+1}}\delta_j(g\cdot T). \end{eqnarray}
\noindent {\bf Case 2.} If $a_{j+1}(T')=0$, then $j<b_{j+1}(T')-1<n+1$ are the indices of the vertices of $r(\Delta_{j+1}(T'))$, which is therefore not $\Delta_j(r\cdot T')$: it is $\Delta_{b_{j+1}(T')-1}(r\cdot T')$. To handle this, we need to use the reflections $s_j$ and $s_{j-2}$.
On one hand, observe that $j+1<n+3+j-b_{j+1}(T')$ because $b_{j+1}(T')<n+1$.
Therefore $$ s_j(\Delta_{j+1}(T'))=A_{j+1}A_0 A_{n+3+j-b_{j+1}(T')}=\Delta_{j+1}(s_j\cdot T'). $$ Hence \begin{eqnarray*} \delta_{j+1}(T')+\delta_{j+1}(s_j\cdot T')&=&(j+1)(b_{j+1}(T')-(j+1))\\ &&+(j+1)(n+3+j-b_{j+1}(T')-(j+1))\\ &=&(j+1)(n+1-j). \end{eqnarray*}
On the other hand, consider the triangle $\Delta_j(r\cdot T')$ in $r\cdot T'$. Since $$ r(\Delta_{j+1}(T'))=A_{j}A_{b_{j+1}(T')-1}A_{n+1}=\Delta_{b_{j+1}(T')-1}(r\cdot T') $$ is in $r\cdot T'$, $[j,n+1]$ is a diagonal in $r\cdot T'$. Hence $b_j(r\cdot T')=n+1$. Thus $\Delta_j(r\cdot T')=A_{a_j(r\cdot T')}A_j A_{n+1}$ and $\delta_j(r\cdot T')=(j-a_j(r\cdot T'))(n+1-j)$. We have $s_{j-2}(A_j)=A_{n+1}$, $s_{j-2}(A_{n+2})=A_j$ and $s_{j-2}(A_{a_j(r\cdot T')})=A_{n+1+j-a_j(r\cdot T')}=A_{j-a_j(r\cdot T')-1}$ since $a_j(r\cdot T')<j$. Therefore $s_{j-2}(\Delta_j(r\cdot T'))=A_{j-a_j(r\cdot T')-1}A_jA_{n+1}=\Delta_j(s_{j-2}r\cdot T')$ and $\delta_j(s_{j-2}r\cdot T')=(a_j(r\cdot T')+1)(n+1-j)$. Finally we obtain that
\begin{eqnarray*}
\delta_{j}(r\cdot T')+\delta_{j}(s_{j-2}r\cdot T')&=&(j-a_j(r\cdot T'))(n+1-j)+(a_j(r\cdot T')+1)(n+1-j)\\ &=&(j+1)(n+1-j).
\end{eqnarray*}
Since $\{H,s_k H\}$ forms a partition of ${\mathcal D}_{n+2}$ for any $k$, we have \begin{eqnarray}\label{equ:2}
\sum_{{f\in {\mathcal D}_{n+2},\atop a_{j+1}(f\cdot T)=0}}\delta_{j+1}(f\cdot T) & =& \sum_{{f\in H,\atop a_{j+1}(f\cdot T)=0}}\big(\delta_{j+1}(f\cdot T) +\delta_{j+1}(s_j f\cdot T)\big)\\ \nonumber
&=& \sum_{{f\in H,\atop a_{j+1}(f\cdot T)=0}} (j+1)(n+1-j)\\ \nonumber
&=& \sum_{{rf\in H,\atop b_{j}(rf\cdot T)=n+1}}\big(\delta_{j}(rf\cdot T) +\delta_{j}(s_{j-2} rf\cdot T)\big),\ \textrm{since }r\in H\\ \nonumber
&=& \sum_{{g\in H,\atop b_{j}(g\cdot T)=n+1}}\delta_{j}(g\cdot T). \end{eqnarray}
\noindent We conclude the induction by adding Equations~(\ref{equ:1}) and (\ref{equ:2}). \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:key}] We have to prove that $$ \vect u=\sum_{T'\in\mathcal O(T)} \vect{GM(T')}=\vect 0. $$
Denote by ${\textnormal{Stab}}(T')=\{f\in{\mathcal D}_{n+2}\,|\, f\cdot T'=T'\}$ the stabilizer of $T'$, then $$
\sum_{f\in {\mathcal D}_{n+2}} M(f\cdot T) = \sum_{T'\in \mathcal O(T)} |{\textnormal{Stab}}(T')| M(T'). $$
Since $T'\in\mathcal O(T)$, $|{\textnormal{Stab}}(T')|=|{\textnormal{Stab}}(T)|=\frac{2(n+2)}{|\mathcal O(T)|}$, we have $$
\sum_{f\in {\mathcal D}_{n+2}} M(f\cdot T) = \frac{2(n+2)}{|\mathcal O(T)|} \sum_{T'\in \mathcal O(T)} M(T') . $$ Therefore by Proposition~\ref{prop:canonique} we have for any $i\in [n]$ \begin{equation}\label{equ:3}
\sum_{T'\in \mathcal O(T)} \delta_i(T')= \frac{|\mathcal O(T)|}{2(n+2)}(n+1)(n+2)=\frac{|\mathcal O(T)|(n+1)}{2}. \end{equation}
Denote by $O$ the point of origin of $\mathbb R^n$. Then $\vect{OM}=M$ for any point $M$ of $\mathbb R^n$. By Chasles' relation we have finally
$$
\vect u=\sum_{T'\in\mathcal O(T)} \vect{GM(T')}= \sum_{T'\in\mathcal O(T)} (M(T')-G) =\sum_{T'\in\mathcal O(T)} M(T') - |\mathcal O(T)| G. $$
So the $i^{th}$ coordinate of $\vect u$ is $ \sum_{T'\in \mathcal O(T)} \delta_i(T')- \frac{|\mathcal O(T)|(n+1)}{2}=0 $, hence $\vect u =\vect 0$ by (\ref{equ:3}). \end{proof}
\section{Center of gravity of generalized associahedra of type $A$ and $B$}\label{se:2}
\subsection{Realizations of associahedra} As a Coxeter group (of type $A$), $S_n$ is generated by the simple transpositions $\tau_i=(i,\, i+1)$, $i\in [n-1]$. The Coxeter graph $\Gamma_{n-1}$ is then
Let ${\mathscr A}$ be an orientation of $\Gamma_{n-1}$. We distinguish between {\em up} and {\em down} elements of $[n]$~: an element $i\in [n]$ is {\em up} if the edge $\{\tau_{i-1}, \tau_i\}$ is directed from $\tau_i$ to $\tau_{i-1}$ and {\em down} otherwise (we set $1$ and $n$ to be down). Let ${\mathsf D}_{\mathscr A}$ be the set of down elements and let ${\mathsf U}_{\mathscr A}$ be the set of up elements (possibly empty).
The notion of up and down induces a labeling of the $(n+2)$-gon $P$ as follows. Label $A_0$ by $0$. Then the vertices of $P$ are, in counterclockwise direction, labeled by the down elements in increasing order, then by $n+1$, and finally by the up elements in decreasing order. An example is given in Figure~\ref{fig:example_labelling}. \begin{figure}\label{fig:example_labelling}
\end{figure}
We recall here a construction due to Hohlweg and Lange~\cite{realisation1}. Consider $P$ labeled according to a fixed orientation~${\mathscr A}$ of $\Gamma_{n-1}$. For each $l\in [n]$ and any triangulation $T$ of $P$, there is a unique triangle $\Delta^{\mathscr A}_l(T)$ whose vertices are labeled by $k<l<m$. Now, count the number of edges of $P$ between $l$ and $k$, whose vertices are labeled by smaller numbers than $l$. Then multiply it by the number of edges of $P$ between $l$ and $m$, whose vertices are labeled by greater numbers than $l$. The result $\omega_l^{\mathscr A}(T)$ is called the {\em weight} of $\Delta_l^{\mathscr A}(T)$. The injective map \begin{align*}
M_{{\mathscr A}}: {\mathcal T}_{n+2} &\longrightarrow {\mathbb R}^n \\
T &\longmapsto (x^{\mathscr A}_1(T),x^{\mathscr A}_2(T),\dots,x^{\mathscr A}_n(T)) \end{align*} that assigns explicit coordinates to a triangulation is defined as follows: \[
x^{\mathscr A}_j(T) := \begin{cases}
\omega_j^{\mathscr A} (T) & \textrm{if } j\in{\mathsf D}_{\mathscr A}\\
n+1-\omega_j^{\mathscr A}(T) & \textrm{if } j\in{\mathsf U}_{\mathscr A}.
\end{cases} \]
Hohlweg and Lange showed that the convex hull ${\mathsf{Asso}}_{\mathscr A}(S_n)$
of~$\{M_{{\mathscr A}}(T)\,|\,T\in {\mathcal T}_{n+2}\}$ is a realization of the associahedron with integer coordinates \cite[Theorem 1.1]{realisation1}. Observe that if the orientation ${\mathscr A}$ is {\em canonic}, that is, if ${\mathsf U}_{\mathscr A}=\emptyset$, then ${\mathsf{Asso}}_{\mathscr A}(S_n)={\mathsf{Asso}}(S_n)$.
The key is now to observe that the weight of $\Delta_{j}^{\mathscr A}(T)$ in $T$ is precisely the weight of $\Delta_j (T')$ where $T'$ is a triangulation in the orbit of $T$ under the action of ${\mathcal D}_{n+2}$, as stated in the next proposition.
\begin{prop}\label{prop:weight} Let ${\mathscr A}$ be an orientation of $\Gamma_{n-1}$. Let $j\in [n]$ and let $A_l$ be the vertex of $P$ labeled by $j$. There is an isometry $r_j^{\mathscr A}\in \mathcal D_{n+2}$ such that: \begin{enumerate} \item[(i)] $r_j^{\mathscr A}(A_l)=A_j$;
\item[(ii)] the label of the vertex $A_k$ is smaller than $j$ if and only if the index $i$ of the vertex $A_i=r_j^{\mathscr A}(A_k)$ is smaller than $j$.
\end{enumerate} Moreover, for any triangulation $T$ of $P$ we have $\omega_j^{\mathscr A}(T)=\delta_j(r_j^{\mathscr A}\cdot T).$ \end{prop} \begin{proof} If ${\mathscr A}$ is the canonical orientation, then $r_j^{\mathscr A}$ is the identity, and the proposition is straightforward. In the following proof, we suppose therefore that ${\mathsf U}_{\mathscr A}\not=\emptyset$.
\noindent Case 1: Assume that $j\in{\mathsf D}_{\mathscr A}$. Let $\alpha$ be the greatest up element smaller than $j$ and let $A_{\alpha+1}$ be the vertex of $P$ labeled by $\alpha$. Then by construction of the labeling, $A_{\alpha}$ is labeled by a larger number than $j$, and $[A_{\alpha},A_{\alpha+1}]$ is the
unique edge of $P$ such that $A_{\alpha+1}$ is labeled by a smaller number than $j$. Denote by $\Lambda_{\mathscr A}$ the path from $A_l$ to $A_{\alpha+1}$ passing through vertices of $P$ labeled by smaller numbers than $j$. This is the path going from $A_l$ to $A_{\alpha+1}$ in clockwise direction on the boundary of $P$.
By construction, $A_k\in \Lambda_{\mathscr A}$ if and only if the label of $A_k$ is smaller than $j$. In other words, the path $\Lambda_{\mathscr A}$ consists of {\em all} vertices of $P$ labeled by smaller numbers than $j$. Therefore the cardinality of $\Lambda_{\mathscr A}$ is $j+1$.
Consider $r_j^{\mathscr A}$ to be the rotation mapping $A_l$ to $A_j$. Recall that a rotation is an isometry preserving the orientation of the plane. Then the path $\Lambda_{\mathscr A}$, which is obtained by walking on the boundary of $P$ from $A_l$ to $A_{\alpha+1}$ in clockwise direction, is sent to the path $\Lambda$ obtained by walking on the boundary of $P$ in clockwise direction from $A_j$ and going through $j+1=|\Lambda_{\mathscr A}|$ vertices of $P$. Therefore $\Lambda=\{A_0,A_1,\dots, A_j\}$, thus proving the first claim of our proposition in this case.
\noindent Case 2: assume that $j\in {\mathsf U}_{\mathscr A}$. The proof is almost the same as in the case of a down element. Let $\alpha$ be the greatest down element smaller than $j$ and let $A_{\alpha}$ be the vertex of $P$ labeled by $\alpha$. Then by construction of the labeling, $A_{\alpha+1}$ is labeled by a larger number than $j$, and $[A_{\alpha},A_{\alpha+1}]$ is the unique edge of $P$ such that $A_{\alpha}$ is labeled by a smaller number than $j$. Denote by $\Lambda_{\mathscr A}$ the path from $A_l$ to $A_{\alpha}$ passing through vertices of $P$ labeled by smaller numbers than $j$. This is the path going from $A_{\alpha}$ to $A_l$
in clockwise direction on the boundary of $P$.
As above, $A_k\in \Lambda_{\mathscr A}$ if and only if the label of $A_k$ is smaller than $j$. In other words, the path $\Lambda_{\mathscr A}$ consists of all the vertices of $P$ labeled by smaller numbers than $j$.
Therefore, again, the cardinality of $\Lambda_{\mathscr A}$ is $j+1$.
Let $r_j^{\mathscr A}$ be the reflection mapping $A_\alpha$ to $A_0$ and $A_{\alpha+1}$ to $A_{n+1}$. Recall that a reflection is an isometry reversing the orientation of the plane. Then the path $\Lambda_{\mathscr A}$, which is obtained by walking on the boundary of $P$ from $A_\alpha$ to $A_{l}$ in clockwise direction, is sent to the path $\Lambda$ obtained by walking on the boundary of $P$ in clockwise direction from $A_\alpha$ and going through
$j+1=|\Lambda_{\mathscr A}|$ vertices of $P$. Therefore $\Lambda=\{A_0,A_1,\dots, A_j\}$. Hence $r_j^{\mathscr A}(A_l)$ is sent on the final vertex of the path $\Lambda$ which is $A_j$, proving the first claim of our proposition.
Thus it remains to show that for a triangulation $T$ of $P$ we have $\omega_j^{\mathscr A}(T)=\delta_j(r_j^{\mathscr A}\cdot T).$ We know that $\Delta_j^{\mathscr A}(T)=A_k A_l A_m$ such that the label of $A_k$ is smaller than $j$, which is smaller than the label of $A_m$. Write
$A_a=r_j^{\mathscr A}(A_k)$ and $A_b=r_j^{\mathscr A}(A_m)$. By the first claim of the proposition, $a<j<b$ and therefore $$ r_j^{\mathscr A}(\Delta_j^{\mathscr A}(T))= A_a A_jA_b=\Delta_j(r_j^{\mathscr A}\cdot T). $$ So $(j-a)$ is the number of edges of $P$ between $A_l$ and $A_k$, whose vertices are labeled by smaller numbers than $j$. Similarly, $(b-j)$ is the number of edges of $P$ between $A_l$ and $A_m$, whose vertices are labeled by larger numbers than $j$. So $\omega_j^{\mathscr A}(T)=(j-a)(b-j)=\delta_j(r_j^{\mathscr A}\cdot T)$. \end{proof}
\begin{cor}\label{cor:Canon} For any orientation ${\mathscr A}$ of the Coxeter graph of $S_n$ and for any $j\in [n]$, we have $$ \sum_{f\in {\mathcal D}_{n+2}} x^{\mathscr A}_j(f\cdot T) = (n+1)(n+2). $$ \end{cor} \begin{proof} Let $r_j^{\mathscr A}\in \mathcal D_{n+2}$ be as in Proposition~\ref{prop:weight}.
Suppose first that $j\in {\mathsf U}_{\mathscr A}$, then \begin{eqnarray*} \sum_{f\in {\mathcal D}_{n+2}} x^{\mathscr A}_j(f\cdot T) &=&2(n+2)(n+1)-\sum_{f\in {\mathcal D}_{n+2}} \omega_j^{\mathscr A}(f\cdot T)\\ &=&2(n+2)(n+1)-\sum_{f\in {\mathcal D}_{n+2}} \delta_j(fr_j^{\mathscr A}\cdot T),\ \textrm{by Proposition~\ref{prop:weight}} \\ &=&2(n+2)(n+1)-\sum_{g\in {\mathcal D}_{n+2}} \delta_j(g\cdot T),\ \textrm{since $r_j^{\mathscr A}\in\mathcal D_{n+2}$} \\ &=& (n+1)(n+2),\ \textrm{by Proposition~\ref{prop:canonique}} \end{eqnarray*}
If $j\in {\mathsf D}_{\mathscr A}$, the result follows from a similar calculation. \end{proof}
\subsection{Center of gravity of associahedra}
\begin{thm}\label{thm:Main2} The center of gravity of ${\mathsf{Asso}}_{\mathscr A}(S_n)$ is $G=(\frac{n+1}{2},\frac{n+1}{2},\dots,\frac{n+1}{2})$ for any orientation ${\mathscr A}$.
\end{thm}
By following precisely the same arguments as in \S\ref{se:centergravity}, we just have to show the following generalization of Theorem~\ref{thm:key}.
\begin{thm}\label{thm:keyGenAss} Let $\mathcal O$ be an orbit of the action of ${\mathcal D}_{n+2}$ on ${\mathcal T}_{n+2}$, then $G$ is the center of gravity of $\{M_{\mathscr A}(T)\,|\, T\in\mathcal O\}$. In particular, $\sum_{T\in\mathcal O} \vect{GM_{\mathscr A}(T)}=\vect 0. $ \end{thm} \begin{proof} The proof is entirely similar to the proof of Theorem~\ref{thm:key}, using Corollary~\ref{cor:Canon} instead of Proposition~\ref{prop:canonique}. \end{proof}
\section{Center of gravity of the cyclohedron}\label{se:3}
\subsection{The type $B$-permutahedron}
The hyperoctahedral group $W_n$ is defined by $W_n=\{\sigma\in S_{2n}\,|\, \sigma(i)+\sigma(2n+1-i)=2n+1,\ \forall i\in[n]\}$. The {\em type $B$-permutahedron} ${\mathsf{Perm}}(W_n)$ is the simple $n$-dimensional convex polytope defined as the convex hull of the points $$ M(\sigma)=(\sigma(1),\sigma(2),\dots, \sigma (2n))\in\mathbb R^{2n},\qquad \forall \sigma\in W_n. $$ As $w_0=(2n,2n-1,\dots,3,2,1)\in W_n$, we deduce from the same argument as in the case of ${\mathsf{Perm}}(S_n)$ that the center of gravity of ${\mathsf{Perm}}(W_n)$ is $$G=(\frac{2n+1}{2},\frac{2n+1}{2},\dots,\frac{2n+1}{2}).$$
\subsection{Realizations of the cyclohedron} An orientation~${\mathscr A}$ of~$\Gamma_{2n-1}$ is {\em symmetric} if the edges $\{\tau_i,\tau_{i+1}\}$ and $\{\tau_{2n-i-1},\tau_{2n-i}\}$ are oriented in \emph{opposite directions} for all~$i\in [2n-2]$. There is a bijection between symmetric orientations of~$\Gamma_{2n-1}$ and orientations of the Coxeter graph of $W_n$ (see \cite[\S1.2]{realisation1}). A triangulation $T\in {\mathcal T}_{2n+2}$ is {\em centrally symmetric} if~$T$, viewed as a triangulation of $P$, is centrally symmetric. Let ${\mathcal T}_{2n+2}^B$ be the set of the centrally symmetric triangulations of $P$. In \cite[Theorem 1.5]{realisation1} the authors show that, for any symmetric orientation ${\mathscr A}$ of $\Gamma_{2n-1}$, the convex hull
${\mathsf{Asso}}_{\mathscr A}(W_{n})$ of $\{M_{{\mathscr A}}(T)\,|\,T\in {\mathcal T}^B_{2n+2}\}$ is a realization of the cyclohedron with integer coordinates.
Since the full orbit of symmetric triangulations under the action of ${\mathcal D}_{2n+2}$ on triangulations provides vertices of ${\mathsf{Asso}}_{\mathscr A}(W_{n})$, and vice-versa, Theorem~\ref{thm:keyGenAss} implies the following corollary.
\begin{cor}\label{cor:Main} Let ${\mathscr A}$ be a symmetric orientation of $\Gamma_{2n-1}$, then the center of gravity of ${\mathsf{Asso}}_{\mathscr A}(W_n)$ is $G=(\frac{2n+1}{2},\frac{2n+1}{2},\dots,\frac{2n+1}{2})$.
\end{cor}
\end{document} |
\begin{document}
\title{Critical ($P_5$,bull)-free graphs}
\begin{abstract}
Given two graphs $H_1$ and $H_2$, a graph is $(H_1,H_2)$-free if it contains no induced subgraph isomorphic to $H_1$ or $H_2$. Let $P_t$ and $C_t$ be the path and the cycle on $t$ vertices, respectively. A bull is the graph obtained from a triangle by adding two disjoint pendant edges. In this paper, we show that there are finitely many 5-vertex-critical ($P_5$,bull)-free graphs.
{\bf Keywords.} coloring; critical graphs; forbidden induced subgraphs; strong perfect graph theorem; polynomial-time algorithms.
\end{abstract}
\section{Introduction}
All graphs in this paper are finite and simple. We say that a graph $G$ {\em contains} a graph $H$ if $ H $ is isomorphic to an induced subgraph of $G$. A graph $G$ is {\em H-free} if it does not contain $H$. For a family of graphs $\mathcal{H}$, $G$ is {\em $\mathcal{H}$-free} if $G$ is $H$-free for every $H\in \mathcal{H}$. When $\mathcal{H}$ consists of two graphs, we write $(H_1,H_2)$-free instead of $\{H_1,H_2\}$-free.
A $k$-{\em coloring} of a graph $G$ is a function $\phi:V(G)\rightarrow\{1,...,k\}$ such that $\phi(u)\neq\phi(v)$ whenever $u$ and $v$ are adjacent in $G$. Equivalently, a $k$-coloring of $G$ is a partition of $V(G)$ into $k$ independent sets. We call a graph $k$-{\em colorable} if it admits a $k$-coloring. The {\em chromatic number} of $G$, denoted by $\chi(G)$, is the minimum number $k$ for which $G$ is $k$-colorable. The {\em clique number} of $G$, denoted by $\omega(G)$, is the size of a largest clique in $G$.
A graph $G$ is said to be $k$-{\em chromatic} if $\chi(G)=k$. We say that $G$ is {\em critical} if $\chi(H)<\chi(G)$ for every proper subgraph $H$ of $G$. A $k$-{\em critical} graph is one that is $k$-chromatic and critical. An easy consequence of the definition is that every critical graph is connected. Critical graphs were first investigated by Dirac \cite{Di51,Di52,Di52i} in 1951, and then by Lattanzio and Jensen \cite{L02,J02} among others, and by Goedgebeur \cite{GS18} in recent years.
Vertex-criticality is a weaker notion. Suppose that $G$ is a graph. Then $G$ is said to be $k$-{\em vertex-critical} if $G$ has chromatic number $k$ and removing any vertex from $G$ results in a graph that is $(k-1)$-colorable. For a set $\mathcal{H}$ of graphs, we say that $ G $ is {\em k-vertex-critical $\mathcal{H}$-free} if it is $k$-vertex-critical and $\mathcal{H}$-free. The following problem arouses our interest.
{\noindent} \textbf{The finiteness problem.} Given a set $\mathcal{H}$ of graphs and an integer $k\ge 1$, are there only finitely many $ k $-vertex-critical $\mathcal{H}$-free graphs?
This problem is meaningful because the finiteness of the set has a fundamental algorithmic implication.
\begin{theorem}[Folklore]\label{Folklore}
If the set of all $k$-vertex-critical $\mathcal{H}$-free graphs is finite, then there is a polynomial-time algorithm to determine whether an $\mathcal{H}$-free graph is $(k-1)$-colorable. \qed
\end{theorem}
Let $K_n$ be the complete graph on $n$ vertices. Let $ P_t $ and $ C_t $ denote the path and the cycle on $t$ vertices, respectively. The {\em complement} of $G$ is denoted by $\overline{G}$. For $s,r\ge 1$, let $K_{r,s}$ be the complete bipartite graph with one part of size $r$ and the other part of size $s$. A class of graphs that has been extensively studied recently is the class of $P_t$-free graphs. In \cite{BHS09}, it was shown that there are finitely many 4-vertex-critical $P_5$-free graphs. This result was later generalized to $P_6$-free graphs \cite{CGSZ16}. In the same paper, an infinite family of 4-vertex-critical $P_7$-free graphs was constructed. Moreover, for every $k\ge 5$, an infinite family of $k$-vertex-critical $P_5$-free graphs was constructed in \cite{HMRSV15}. This implies that the finiteness of $k$-vertex-critical $P_t$-free graphs for $t\ge 1$ and $k\ge 4$ has been completely solved by researchers. We summarize the results in the following table.
\begin{table}[!ht]
\centering
\caption{The finiteness of $k$-vertex-critical $P_t$-free graphs.}
\renewcommand\arraystretch{1.5}
\setlength{\tabcolsep}{4mm}{}
\begin{tabular}{|c|p{1.6cm}<{\centering}|p{1.9cm}<{\centering}|p{1.6cm}<{\centering}|p{1.7cm}<{\centering}|}
\hline
\diagbox{$k$}{$t$} & $\le4$ & 5 & 6 & $\ge7$\\
\hline
4 & finite & finite \cite{BHS09}& finite \cite{CGSZ16}& infinite \cite{CGSZ16}\\
\hline
$\ge 5$ & finite & infinite \cite{HMRSV15}& infinite & infinite \\
\hline
\end{tabular}
\end{table}
Because there are infinitely many 5-vertex-critical $P_5$-free graphs, many researchers have investigated the finiteness problem of $k$-vertex-critical $(P_5,H)$-free graphs. Our research is mainly motivated by the following dichotomy result.
\begin{theorem}[\cite{CGHS21}]
Let $H$ be a graph of order 4 and $k\ge 5$ be a fixed integer. Then there are infinitely many $k$-vertex-critical $(P_5,H)$-free graphs if and only if $H$ is $2P_2$ or $P_1+K_3$.
\end{theorem}
This theorem completely solves the finiteness problem of $k$-vertex-critical $(P_5,H)$-free graphs for graphs of order 4. In \cite{CGHS21}, the authors also posed the natural question of which five-vertex graphs $H$ lead to finitely many $k$-vertex-critical $(P_5,H)$-free graphs. It is known that there are exactly 13 5-vertex-critical $(P_5,C_5)$-free graphs \cite{HMRSV15}, and that there are finitely many 5-vertex-critical ($P_5$,banner)-free graphs \cite{CHLS19,HLS19}, and finitely many $k$-vertex-critical $(P_5,\overline{P_5})$-free graphs for every fixed $k$ \cite{DHHMMP17}. In \cite{CGS}, Cai, Goedgebeur and Huang show that there are finitely many $k$-vertex-critical ($P_5$,gem)-free graphs and finitely many $k$-vertex-critical ($P_5,\overline{P_3+P_2}$)-free graphs. Hell and Huang proved that there are finitely many $k$-vertex-critical $(P_6,C_4)$-free graphs \cite{HH17}. This was later generalized to $(P_5,K_{r,s})$-free graphs in the context of $H$-coloring \cite{KP17}. This gives an affirmative answer for $H=K_{2,3}$.
\noindent {\bf Our contributions.} We continue to study the finiteness of vertex-critical $(P_5,H)$-free graphs when $H$ has order 5. The {\em bull} graph (see \autoref{bull}) is the graph obtained from a triangle with two disjoint pendant edges. In this paper, we prove that there are only finitely many 5-vertex-critical ($P_5$,bull)-free graphs.
\begin{figure}
\caption{The bull graph.}
\label{bull}
\end{figure}
To prove the result on bull-free graphs, we performed a careful structural analysis combined with the pigeonhole principle based on the properties of 5-vertex-critical graphs.
The remainder of the paper is organized as follows. We present some preliminaries in Section \ref{Preliminarlies} and give structural properties around an induced $C_5$ in a ($P_5$,bull)-free graph in Section \ref{structure}. We then show that there are finitely many 5-vertex-critical ($P_5$,bull)-free graphs in Section \ref{bull-free}.
\section{Preliminaries}\label{Preliminarlies}
For general graph theory notation we follow \cite{BM08}. For $k\ge 4$, an induced cycle of length $k$ is called a {\em $k$-hole}. A $k$-hole is an {\em odd hole} (respectively {\em even hole}) if $k$ is odd (respectively even). A {\em $k$-antihole} is the complement of a $k$-hole. Odd and even antiholes are defined analogously.
Let $G=(V,E)$ be a graph. For $S\subseteq V$ and $u\in V\setminus S$, let $d(u,S)={min}_{v\in S}d(u,v)$, where $d(u,v)$ denotes the length of the shortest path from $u$ to $v$. If $uv\in E$, we say that $u$ and $v$ are {\em neighbors} or {\em adjacent}, otherwise $u$ and $v$ are {\em nonneighbors} or {\em nonadjacent}. The {\em neighborhood} of a vertex $v$, denoted by $N_G(v)$, is the set of neighbors of $v$. For a set $X\subseteq V$, let $N_G(X)=\cup_{v\in X}N_G(v)\setminus X$. We shall omit the subscript whenever the context is clear. For $x\in V$ and $S\subseteq V$, we denote by $N_S(x)$ the set of neighbors of $x$ that are in $S$, i.e., $N_S(x)=N_G(x)\cap S$. For two sets $X,S\subseteq V(G)$, let $N_S(X)=\cup_{v\in X}N_S(v)\setminus X$. For $X,Y\subseteq V$, we say that $X$ is {\em complete} (resp. {\em anticomplete}) to $Y$ if every vertex in $X$ is adjacent (resp. nonadjacent) to every vertex in $Y$. If $X=\{x\}$, we write ``$x$ is complete (resp. anticomplete) to $Y$'' instead of ``$\{x\}$ is complete (resp. anticomplete) to $Y$''. If a vertex $v$ is neither complete nor anticomplete to a set $S$, we say that $v$ is {\em mixed} on $S$. For a vertex $v\in V$ and an edge $xy\in E$, if $v$ is mixed on $\{x,y\}$, we say that $v$ is {\em mixed} on $xy$. For a set $H\subseteq V$, if no vertex in $V-H$ is mixed on $H$, we say that $H$ is a {\em homogeneous set}, otherwise $H$ is a {\em nonhomogeneous set}. A vertex subset $S\subseteq V$ is {\em independent} if no two vertices in $S$ are adjacent. A {\em clique} is the complement of an independent set. Two nonadjacent vertices $u$ and $v$ are said to be {\em comparable} if $N(v)\subseteq N(u)$ or $N(u)\subseteq N(v)$. A vertex subset $K\subseteq V$ is a {\em clique cutset} if $G-K$ has more connected components than $G$ and $K$ is a clique. For an induced subgraph $A$ of $G$, we write $G-A$ instead of $G-V(A)$. For $S\subseteq V$, the subgraph \emph{induced} by $S$ is denoted by $G[S]$. 
For $S\subseteq V$ and an induced subgraph $A$ of $G$, we may write $S$ instead of $G[S]$ and $A$ instead of $V(A)$ for the convenience of writing whenever the context is clear.
We proceed with a few useful results that will be needed later. The first one is well-known in the study of $k$-vertex-critical graphs.
\begin{lemma}[Folklore]\label{lem:xy}
A $k$-vertex-critical graph contains no clique cutsets.
\end{lemma}
Another folklore property of vertex-critical graphs is that such graphs contain no comparable vertices. In \cite{CGHS21}, a generalization of this property was presented.
\begin{lemma}[\cite{CGHS21}]\label{lem:XY}
Let $G$ be a $k$-vertex-critical graph. Then $G$ has no two nonempty disjoint subsets $X$ and $Y$ of $V(G)$ that satisfy all the following conditions.
\begin{itemize}
\item $X$ and $Y$ are anticomplete to each other.
\item $\chi(G[X])\le\chi(G[Y])$.
\item Y is complete to $N(X)$.
\end{itemize}
\end{lemma}
A property on bipartite graphs is shown as follows.
\begin{lemma}[\cite{F93}]\label{2K2}
Let $G$ be a connected bipartite graph. If $G$ contains a $2K_2$, then $G$ must contain a $P_5$.
\end{lemma}
As we mentioned earlier, there are finitely many 4-vertex-critical $P_5$-free graphs.
\begin{theorem}[\cite{BHS09,MM12}]\label{thm:finite4Critical}
If $G=(V,E)$ is a 4-vertex-critical $P_5$-free graph, then $|V|\le 13$.
\end{theorem}
A graph $G$ is {\em perfect} if $\chi(H)=\omega(H)$ for every induced subgraph $H$ of $G$. Another result we use is the well-known Strong Perfect Graph Theorem.
\begin{theorem}[The Strong Perfect Graph Theorem~\cite{CRST06}]\label{thm:SPGT}
A graph is perfect if and only if it contains no odd holes or odd antiholes.
\end{theorem}
Moreover, we prove a property about homogeneous sets, which will be used frequently in the proof of our results.
\begin{lemma}\label{lem:homogeneous}
Let $G$ be a 5-vertex-critical $P_5$-free graph and $S$ be a homogeneous set of $V(G)$. For each component $A$ of $G[S]$,
\begin{enumerate}[(i)]
\item if $\chi(A)=1$, then $A$ is a $K_1$;
\item if $\chi(A)=2$, then $A$ is a $K_2$;
\item if $\chi(A)=3$, then $A$ is a $K_3$ or a $C_5$.
\end{enumerate} \end{lemma}
\begin{proof}
(i) is clearly true. Moreover, since $V(A)\subseteq S$, $V(A)$ is also a homogeneous set. Next we prove (ii) and (iii).
(ii) Since $\chi(A)=2$, let $\{x,y\}\subseteq V(A)$ induce a $K_2$. Suppose that there is another vertex $z$ in $A$. Because $G$ is 5-vertex-critical, $G-z$ is 4-colorable. Since $\chi(A)=2$, let $\{V_1,V_2,V_3,V_4\}$ be a 4-coloring of $G-z$ where $V(A)\setminus\{z\}\subseteq V_1\cup V_2$. Since $A$ is homogeneous, $\{V_1\cup \{z\},V_2,V_3,V_4\}$ or $\{V_1,V_2\cup \{z\},V_3,V_4\}$ is a 4-coloring of $G$, a contradiction. Thus $A$ is a $K_2$.
(iii) We first show that $A$ must contain a $K_3$ or a $C_5$. If $A$ is $K_3$-free, then $\omega(A)<\chi(A)=3$ and so $A$ is imperfect. Since $A$ is $P_5$-free, $A$ must contain a $C_5$ by \autoref{thm:SPGT}. Thus $A$ contains either a $K_3$ or a $C_5$.
If $A$ contains a $K_3$ induced by $\{x,y,z\}$, suppose that there is another vertex $s$ in $A$. Because $G$ is 5-vertex-critical, $G-s$ is 4-colorable. Since $\chi(A)=3$, let $\{V_1,V_2,V_3,V_4\}$ be a 4-coloring of $G-s$ where $V(A)\setminus\{s\}\subseteq V_1\cup V_2\cup V_3$. Since $A$ is homogeneous, $\{V_1\cup \{s\},V_2,V_3,V_4\}$, $\{V_1,V_2\cup \{s\},V_3,V_4\}$ or $\{V_1,V_2,V_3\cup \{s\},V_4\}$ is a 4-coloring of $G$, a contradiction. Thus $A$ is a $K_3$. Similarly, $A$ is a $C_5$ if $A$ contains a $C_5$. \end{proof}
\section{Structure around a 5-hole}\label{structure}
Let $G=(V,E)$ be a graph and $H$ be an induced subgraph of $G$. We partition $V\setminus V(H)$ into subsets with respect to $H$ as follows: for any $X\subseteq V(H)$, we denote by $S(X)$ the set of vertices in $V\setminus V(H)$ that have $X$ as their neighborhood among $V(H)$, i.e.,
$$S(X)=\{v\in V\setminus V(H): N_{V(H)}(v)=X\}.$$
\noindent For $0\le m\le|V(H)|$, we denote by $S_m$ the set of vertices in $V\setminus V(H)$ that have exactly $m$ neighbors in $V(H)$. Note that $S_m=\cup_{X\subseteq V(H):|X|=m}S(X)$.
Let $G$ be a ($P_5$,bull)-free graph and $C=v_1,v_2,v_3,v_4,v_5$ be an induced $C_5$ in $G$. We partition $V\setminus C$ with respect to $C$ as above. All subscripts below are modulo five. Clearly, $S_1=\emptyset$ and so $V(G)=V(C)\cup S_0\cup S_2\cup S_3\cup S_4\cup S_5$. Since $G$ is ($P_5$,bull)-free, it is easy to verify that $S(v_i,v_{i+1})=S(v_{i-2},v_i,v_{i+2})=\emptyset$. So $S_2=\cup_{1\le i\le 5}S(v_{i-1},v_{i+1})$ and $S_3=\cup_{1\le i\le 5}S(v_{i-1},v_{i},v_{i+1})$. Note that $S_4=\cup_{1\le i\le 5}S(v_{i-2},v_{i-1},v_{i+1},v_{i+2})$. In the following, we write $S_2(i)$ for $S(v_{i-1},v_{i+1})$, $S_3(i)$ for $S(v_{i-1},v_{i},v_{i+1})$ and $S_4(i)$ for $S(v_{i-2},v_{i-1},v_{i+1},v_{i+2})$. We now prove a number of useful properties of $S(X)$ using the fact that $G$ is ($P_5$,bull)-free. All properties are proved for $i=1$ due to symmetry. In the following, if we say that $\{r,s,t,u,v\}$ induces a bull, it means that $r,v$ are two pendant vertices, $s$ is the neighbor of $r$, $u$ is the neighbor of $v$, and $stu$ is a triangle. If we say that $\{r,s,t,u,v\}$ induces a $P_5$, it means that any two consecutive vertices are adjacent.
\begin{enumerate}[label=\bfseries (\arabic*)]
\item {$S_2(i)$ is complete to $S_2(i+1)\cup S_3(i+1)$.}\label{S2(i)S2(i+1)}
Let $x\in S_2(1)$ and $y\in S_2(2)\cup S_3(2)$. If $xy\notin E$, then $\{x,v_5,v_4,v_3,y\}$ induces a $P_5$.
\item {$S_2(i)$ is anticomplete to $S_2(i+2)$.}\label{S2(i)S2(i+2)}
Let $x\in S_2(1)$ and $y\in S_2(3)$. If $xy\in E$, then $\{v_3,v_2,y,x,v_5\}$ induces a bull.
\item {$S_2(i)$ is anticomplete to $S_3(i+2)$.}\label{S2(i)S3(i+2)}
Let $x\in S_2(1)$ and $y\in S_3(3)$. If $xy\in E$, then $\{v_1,v_2,x,y,v_4\}$ induces a bull.
\item {$S_2(i)$ is anticomplete to $S_4(i)$.}\label{S2(i)S4(i)}
Let $x\in S_2(1)$ and $y\in S_4(1)$. If $xy\in E$, then $\{v_1,v_2,x,y,v_4\}$ induces a bull.
\item {$S_2(i)\cup S_3(i)$ is complete to $S_4(i+2)$.}\label{S2(i)S4(i+2)}
Let $x\in S_2(1)\cup S_3(1)$ and $y\in S_4(3)$. If $xy\notin E$, then $\{v_3,v_4,y,v_5,x\}$ induces a bull.
\item {$S_2(i)$ is complete to $S_4(i+1)\cup S_5$.}\label{S2S5}
Let $x\in S_2(1)$ and $y\in S_4(2)\cup S_5$. If $xy\notin E$, then $\{v_3,y,v_1,v_5,x\}$ induces a bull.
\item {$S_3(i)$ is complete to $S_3(i+1)$.}\label{S3(i)S3(i+1)}
Let $x\in S_3(1)$ and $y\in S_3(2)$. If $xy\notin E$, then $\{x,v_5,v_4,v_3,y\}$ induces a $P_5$.
\end{enumerate}
\section{The main result}\label{bull-free}
Let $\mathcal{F}$ be the set of graphs shown in \autoref{fig:5vertexcritical}. It is easy to verify that all graphs in $\mathcal{F}$ are 5-vertex-critical.
\begin{figure}
\caption{$F_1$.}
\caption{$F_2$.}
\caption{$F_3$.}
\caption{$F_4$.}
\caption{$F_5$.}
\caption{$F_6$.}
\caption{$F_7$.}
\caption{$F_8$.}
\caption{$F_9$.}
\caption{Some 5-vertex-critical graphs.}
\label{K5}
\label{F1}
\label{F2}
\label{F3}
\label{F4}
\label{F5}
\label{F6}
\label{F7}
\label{F8}
\label{F9}
\label{fig:5vertexcritical}
\end{figure}
\begin{theorem}\label{th}
There are finitely many 5-vertex-critical ($P_5$,bull)-free graphs.
\end{theorem}
\begin{proof}
Let $G=(V,E)$ be a 5-vertex-critical ($P_5$,bull)-free graph. We show that $|G|$ is bounded. If $G$ has a subgraph isomorphic to a member $F\in\mathcal{F}$, then $|V(G)|=|V(F)|$ by the definition of vertex-critical graphs and so we are done. Hence, we assume in the following that $G$ has no subgraph isomorphic to a member in $\mathcal{F}$. Since there are exactly 13 5-vertex-critical $(P_5,C_5)$-free graphs \cite{HMRSV15}, the proof is completed if $G$ is $C_5$-free. So assume that $G$ contains an induced $C_5$ in the following. Let $C=v_1,v_2,v_3,v_4,v_5$ be an induced $C_5$. We partition $V(G)$ with respect to $C$.
\begin{claim}\label{S5}
$S_5$ is an independent set.
\end{claim}
\begin{proof}
Suppose that $x,y\in S_5$ and $xy\in E$. Then $G$ contains $F_1$, a contradiction.
\end{proof}
\begin{claim}\label{coloring number}
For each $1\le i\le 5$, some properties of $G$ are as follows:
\begin{itemize}
\item $\chi(G[S_3(i)])\le 2$.
\item $\chi(G[S_2(i)\cup S_3(i)])\le 3$.
\item $\chi(G[S_4(i)])\le 2$.
\item $\chi(G[S_5\cup S_0])\le 4$.
\end{itemize}
\end{claim}
\begin{proof}
It suffices to prove the case $i=1$. Suppose that $\chi(G[S_3(1)])\ge3$. Then $\chi(G-v_3)\ge 5$, contradicting that $G$ is 5-vertex-critical. So $\chi(G[S_3(1)])\le2$. Similarly, we can prove the other three properties.
\end{proof}
We first bound $S_0$.
\begin{claim}\label{S0}
{$N(S_0)\subseteq S_5$}.
\end{claim}
\begin{proof}
Let $x\in N(S_0)$ and $y\in S_0$ be a neighbor of $x$. Then we show that $x\in S_5$. Let $1\le i\le5$. If $x\in S_2(i)\cup S_3(i)$, then $\{y,x,v_{i+1},v_{i+2},v_{i+3}\}$ induces a $P_5$. If $x\in S_4(i)$, then $\{v_i,v_{i+1},v_{i+2},x,y\}$ induces a bull. Therefore, $x\notin S_2\cup S_3\cup S_4$. It follows that $x\in S_5$.
\end{proof}
\begin{claim}\label{S0 color}
If $A$ is a component of $G[S_0]$, then $\chi(A)=4$.
\end{claim}
\begin{proof}
By \autoref{coloring number}, $\chi(A)\le4$. Suppose that $\chi(A)\le 3$. So $\chi(C)\ge \chi(A)$. Combined with the fact that $C$ is anticomplete to $A$, we know that $C$ is not complete to $N(A)$ by \autoref{lem:XY}. This contradicts the facts that $C$ is complete to $S_5$ and $N(A)\subseteq S_5$. Thus $\chi(A)=4$.
\end{proof}
\begin{claim}\label{S0 connected}
$G[S_0]$ is connected.
\end{claim}
\begin{proof}
Suppose that there are two components $A_1$ and $A_2$ in $G[S_0]$. Since $G$ is connected, there must exist $w_1\in N(A_1)$ and so $w_1\in S_5$ by \autoref{S0}. By \autoref{coloring number}, $w_1$ cannot be complete to $A_1$ and $A_2$. So $w_1$ is mixed on an edge $x_1y_1\in E(A_1)$. Similarly, there exists $w_2\in S_5$ mixed on an edge $x_2y_2\in E(A_2)$ and not complete to $A_1$. So $w_2$ is anticomplete to $A_1$, otherwise if $w_2$ is mixed on an edge $z_1z_2\in E(A_1)$, then $\{z_1,z_2,w_2,x_2,y_2\}$ induces a $P_5$. It follows that $w_2$ is anticomplete to $\{x_1,y_1\}$. Then $\{y_1,x_1,w_1,v_1,w_2\}$ induces a $P_5$, a contradiction.
\end{proof}
By Claims \ref{S0 color}-\ref{S0 connected}, we obtain the following claim.
\begin{claim}\label{S0 4-chromatic}
$G[S_0]$ is a connected 4-chromatic graph.
\end{claim}
\begin{claim}
$N(S_0)=S_5$.
\end{claim}
\begin{proof}
Suppose that $w_1\in S_5$ is anticomplete to $S_0$. Since $G$ is connected, there must exist $w_2\in S_5$ that has a neighbor in $S_0$. By \autoref{coloring number}, $w_2$ is not complete to $S_0$ and so mixed on an edge $xy$ in $G[S_0]$. Thus, $\{w_1,v_1,w_2,x,y\}$ induces a $P_5$, a contradiction.
\end{proof}
To bound $S_0$, we partition $S_0$ into two parts. Let $L=S_0\cap N(S_5)$ and $R=S_0\setminus L$.
\begin{claim}\label{L S5}
If $R\neq\emptyset$, then (i) $L$ is complete to $S_5$; (ii) $N(R)=L$.
\end{claim}
\begin{proof}
Let $L_i=\{l\in L|d(l,R)=i\}$, where $i\ge 1$. Let $l\in L_1$. There exists $r\in R$, which is adjacent to $l$. Let $u\in S_5$ be a neighbor of $l$. Note that if $|S_5|=1$, $S_5$ is a clique cutset of $G$, contradicting \autoref{lem:xy}. So $|S_5|\ge 2$. For each $u'\in S_5\setminus\{u\}$, $u'$ is adjacent to $l$, otherwise $\{r,l,u,v_1,u'\}$ induces a $P_5$. Hence, $L_1$ is complete to $S_5$. Let $l_2\in L_2$. By the definition of $L_2$, there must exist $l_1\in L_1$ such that $l_2$ is adjacent to $l_1$. Let $r_1\in R$ and $u_2\in S_5$ be neighbors of $l_1$ and $l_2$, respectively. Since $d(l_2,R)=2$, $l_2r_1\notin E$. Since $L_1$ is complete to $S_5$, $l_1u_2\in E$. Thus $\{v_1,u_2,l_2,l_1,r_1\}$ induces a bull, a contradiction. So $L_2=\emptyset$ and thus $L_i=\emptyset$ for each $i\ge 3$. Then $L=L_1$. Therefore, $L$ is complete to $S_5$ and $N(R)=L$.
\end{proof}
\begin{claim}\label{LR components}
Let $L'$ and $R'$ be components of $G[L]$ and $G[R]$, respectively. Then $L'$ is complete or anticomplete to $R'$.
\end{claim}
\begin{proof}
Let $u\in S_5$. By \autoref{L S5}, $u$ is complete to $L'$. Assume $L'$ is not anticomplete to $R'$. We show that $L'$ is complete to $R'$ in the following. Let $l_1\in V(L')$ and $r_1\in V(R')$ be adjacent. If $l_1$ is mixed on $R'$, then $l_1$ must be mixed on an edge $x_1y_1$ in $R'$ and so $\{v_1,u,l_1,x_1,y_1\}$ induces a $P_5$, a contradiction. So $l_1$ is complete to $R'$. Suppose that $l_2\in V(L')$ is not complete to $R'$, then there exists $r_2\in V(R')$ not adjacent to $l_2$. Since $l_1r_2\in E$, $r_2$ is mixed on $L'$ and so mixed on an edge $x_2y_2$ in $L'$. Thus $\{v_1,u,x_2,y_2,r_2\}$ induces a bull, a contradiction. It follows that $L'$ is complete to $R'$.
\end{proof}
\begin{claim}\label{R finite}
$|R|\le 8$.
\end{claim}
\begin{proof}
Let $R'$ and $R''$ be two arbitrary components of $G[R]$. Let $u_1\in S_5$. If there exists $l_1,l_2\in L$ such that $l_1\in N(R')\setminus N(R'')$ and $l_2\in N(R'')\setminus N(R')$, then $\{u_1,l_1,l_2\}\cup R'\cup R''$ contains an induced bull or an induced $P_5$, depending on whether $l_1l_2\in E$. So $N(R')\subseteq N(R'')$ or $N(R'')\subseteq N(R')$. We may assume $N(R')\subseteq N(R'')$. By \autoref{LR components}, $R''$ is complete to $N(R')$. It follows from \autoref{lem:XY} that $\chi(R'')<\chi(R')$. By \autoref{S0 4-chromatic} and \autoref{LR components}, for each component of $G[R]$, there must exist a vertex in $L$ complete to this component. Since $G[S_0]$ is 4-chromatic, the chromatic number of components of $G[R]$ is at most 3. So there are at most three components $R_1,R_2$ and $R_3$ in $G[R]$. Assume that $\chi(R_1)=1,\chi(R_2)=2$ and $\chi(R_3)=3$. By \autoref{LR components} and the definition of $R$, we know that $R_1,R_2$ and $R_3$ are all homogeneous. By \autoref{lem:homogeneous}, we know that $|R_1|=1$, $|R_2|=2$ and $|R_3|\le 5$. Therefore, $|R|\le 8$.
\end{proof}
\begin{claim}\label{L finite}
If $R\neq \emptyset$, then $|L|\le 8$.
\end{claim}
\begin{proof}
Let $L'$ and $L''$ be two arbitrary components of $G[L]$. By \autoref{L S5}, $L',L''\subseteq N(R)$. Let $u_1\in S_5$. By \autoref{L S5}, \autoref{LR components} and \autoref{coloring number}, each component of $G[L]$ must be complete to some component of $G[R]$ and so $\chi(G[L])\le 3$. Suppose that there exists $r_1,r_2\in R$ such that $r_1\in N(L')\setminus N(L'')$ and $r_2\in N(L'')\setminus N(L')$. Then $r_1$ and $r_2$ belong to different components of $R$ by \autoref{LR components}. So $r_1r_2\notin E$. Then $\{u_1,r_1,r_2\}\cup L'\cup L''$ contains an induced $P_5$, a contradiction. Combined with \autoref{L S5}, we know that $N(L')\subseteq N(L'')$ or $N(L'')\subseteq N(L')$. We may assume $N(L')\subseteq N(L'')$. By \autoref{LR components}, $L''$ is complete to $N(L')$. It follows from \autoref{lem:XY} that $\chi(L'')<\chi(L')$. Note that $\chi(G[L])\le 3$. So there are at most three components $L_1,L_2$ and $L_3$ in $G[L]$. Assume that $\chi(L_1)=1,\chi(L_2)=2$ and $\chi(L_3)=3$. By \autoref{LR components} and \autoref{L S5}, we know that $L_1,L_2$ and $L_3$ are all homogeneous. By \autoref{lem:homogeneous}, we know that $|L_1|=1$, $|L_2|=2$ and $|L_3|\le 5$. Therefore, $|L|\le 8$.
\end{proof}
By Claims \ref{R finite}-\ref{L finite}, we obtain the following claim.
\begin{claim}\label{cla:S0 1}
If $R\neq\emptyset$, then $|S_0|\le 16$.
\end{claim}
Next, we bound $S_0$ when $R=\emptyset$.
\begin{claim}\label{cla:S0 2}
If $R=\emptyset$, then $|S_0|\le 13$.
\end{claim}
\begin{proof}
Since $R=\emptyset$, $S_0\subseteq N(S_5)$. For each $v\in S_0$, $\chi(G-v)=4$ since $G$ is 5-vertex-critical. Let $\pi$ be a 4-coloring of $G-v$. By the fact that $\chi(C)=3$ and $S_5$ is complete to $C$, all vertices in $S_5$ must be colored with the same color in $\pi$. Since $S_0\subseteq N(S_5)$, the vertices in $S_0\setminus\{v\}$ must be colored with the remaining three colors, i.e., $\chi(G[S_0]-v)\le 3$. Combined with \autoref{S0 4-chromatic}, $G[S_0]$ is a $P_5$-free 4-vertex-critical graph. By \autoref{thm:finite4Critical}, $|S_0|\le 13$.
\end{proof}
By Claims \ref{cla:S0 1}-\ref{cla:S0 2}, $|S_0|\le 16$. Next, we bound $S_5$.
\begin{claim}\label{S4(i)S5}
For at most one value of $i$, where $1\le i\le 5$, $S_4(i)$ is not anticomplete to $S_5$.
\end{claim}
\begin{proof}
Suppose that $S_4(i)$ and $S_4(j)$ are not anticomplete to $S_5$, where $1\le i<j\le5$. Then $G$ must have a subgraph isomorphic to $F_2,F_3,F_4$ or $F_5$, a contradiction.
\end{proof}
\begin{claim}
$|S_5|\le 2^{16}$.
\end{claim}
\begin{proof}
Suppose that $|S_5|> 2^{|S_0|}$. By the pigeonhole principle, there are two vertices $u,v\in S_5$ that have the same neighborhood in $S_0$. Since $u$ and $v$ are not comparable, there exist $x\in N(u)\setminus N(v)$ and $y\in N(v)\setminus N(u)$. Clearly, $x,y\in S_3\cup S_4(i)$ by \autoref{S4(i)S5} and \ref{S2S5}, for some $1\le i\le 5$. By symmetry, we assume $i=1$.
Suppose that $x,y\in S_4(1)$. Then $xy\notin E$, otherwise $G$ has a subgraph isomorphic to $F_8$. So $\{x,u,v_1,v,y\}$ induces a $P_5$, a contradiction.
Suppose that $x,y\in S_3$. Without loss of generality, we assume $x\in S_3(1)$. If $y\in S_3(3)\cup S_3(4)$, $G$ must have a subgraph isomorphic to $F_7$, a contradiction. If $y\in S_3(2)\cup S_3(5)$, then $xy\in E$ by \ref{S3(i)S3(i+1)} and so $G$ contains $F_8$, a contradiction. If $y\in S_3(1)$, then $xy\notin E$, otherwise $G$ has a subgraph isomorphic to $F_6$. Then $\{x,u,v_3,v,y\}$ induces a $P_5$, a contradiction.
So we assume that $x\in S_4(1)$ and $y\in S_3$. If $y\in S_3(1)\cup S_3(2)\cup S_3(5)$, then $G$ has a subgraph isomorphic to $F_7$, a contradiction. Thus $y\in S_3(3)\cup S_3(4)$. From \ref{S2(i)S4(i+2)} we know that $xy\in E$. Note that $G$ has a subgraph isomorphic to $F_8$, a contradiction.
Therefore, $|S_5|\le 2^{|S_0|}\le 2^{16}$.
\end{proof}
Next, we bound $S_2$. By \ref{S2(i)S2(i+1)}-\ref{S2S5} and \autoref{S0}, for each $1\le i\le 5$, all vertices in $V\setminus S_2(i)$ are complete or anticomplete to $S_2(i)$, except those in $S_3(i)$. So we divide $S_2(i)$ into two parts. Let $R(i)=S_2(i)\cap N(S_3(i))$ and $L(i)=S_2(i)\setminus R(i)$.
\begin{claim}\label{P3 conclusion1}
If $G[R(i)]$ contains a $P_3$, then the two endpoints of the $P_3$ have the same neighborhood in $S_3(i)$.
\end{claim}
\begin{proof}
Let $uvw$ be a $P_3$ contained in $R(i)$. Let $u'\in S_3(i)$ be a neighbor of $w$. Then $uu'\in E$, otherwise $\{u,v,w,u',v_i\}$ induces a bull or a $P_5$, depending on whether $vu'\in E$. So $N_{S_3(i)}(w)\subseteq N_{S_3(i)}(u)$. Similarly, $N_{S_3(i)}(u)\subseteq N_{S_3(i)}(w)$. Therefore, $u$ and $w$ have the same neighborhood in $S_3(i)$.
\end{proof}
\begin{claim}\label{L(i)}
$|L(i)|\le 8$.
\end{claim}
\begin{proof}
If $S_3(i)=\emptyset$ or $R(i)=\emptyset$, then $S_2(i)$ is homogeneous. If there are two components $X$ and $Y$ in $G[S_2(i)]$, then $Y$ is complete to $N(X)$ and $X$ is complete to $N(Y)$, contradicting \autoref{lem:XY}. So $G[S_2(i)]$ is connected. By \autoref{coloring number} and \autoref{lem:homogeneous}, $G[S_2(i)]$ is a $K_1$, a $K_2$, a $K_3$ or a $C_5$. Thus $|L(i)|\le 5$.
So we assume that $S_3(i)\neq \emptyset$ and $R(i)\neq \emptyset$. Let $u$ be an arbitrary vertex in $R(i)$ and $u'$ be its neighbor in $S_3(i)$. Then $u$ is not mixed on any edge $xy$ in $L(i)$, otherwise $\{y,x,u,u',v_i\}$ induces a $P_5$. Then $u$ is complete or anticomplete to any component of $L(i)$ and so all components of $L(i)$ are homogeneous. By \autoref{lem:homogeneous}, each component of $L(i)$ is a $K_1$, a $K_2$, a $K_3$ or a $C_5$.
We show that there is at most one 3-chromatic component in $L(i)$. Suppose that $X_1$ and $Y_1$ are two 3-chromatic components in $L(i)$. Note that $X_1$ and $Y_1$ are homogeneous. Since $\chi(G[S_2(i)])\le 3$, $X_1$ and $Y_1$ are anticomplete to $R(i)$. So $Y_1$ is complete to $N(X_1)$ and $X_1$ is complete to $N(Y_1)$, which contradicts \autoref{lem:XY}. So, there is at most one 3-chromatic component in $L(i)$.
Then we show that there is at most one $K_2$-component in $L(i)$. Suppose that $X_2=x_1y_1$ and $Y_2=x_2y_2$ are two $K_2$-components in $L(i)$. Note that $X_2$ and $Y_2$ are homogeneous. By \autoref{lem:XY}, there must exist $u_1,u_2\in R(i)$ such that $u_1$ is complete to $X_2$ and anticomplete to $Y_2$ and $u_2$ is complete to $Y_2$ and anticomplete to $X_2$ . Let $u_1',u_2'\in S_3(i)$ be the neighbor of $u_1$ and $u_2$, respectively. Clearly, $u_1'$ and $u_2'$ are not the same vertex, otherwise $\{x_1,u_1,u_1',u_2,x_2\}$ induces a bull or a $P_5$, depending on whether $u_1u_2\in E$. So $u_1'u_2\notin E$ and $u_2'u_1\notin E$. It follows that $u_1u_2\notin E$, otherwise $\{x_2,u_2,u_1,u_1',v_i\}$ induces a $P_5$. Then $\{u_1,u_1',v_i,u_2',u_2\}$ induces a bull or a $P_5$, depending on whether $u_1'u_2'\in E$, a contradiction. So, there is at most one $K_2$-component in $L(i)$.
Similarly, there is at most one $K_1$-component in $L(i)$. It follows that $|L(i)|\le 8$. The proof is completed.
\end{proof}
\begin{figure}
\caption{The graph contained in $G[R(i)]$.}
\label{fig:uvwst}
\end{figure}
\begin{claim}\label{P3 conclusion2}
If $G[R(i)]$ contains $P_3=uvw$, then $G[R(i)]$ must contain the graph induced by $\{u,v,w,s,t\}$ in \autoref{fig:uvwst}. Moreover, $u,w,s$ and $t$ have the same neighborhood in $S_3(i)$ and $N_{S_3(i)}(u)\cap N_{S_3(i)}(v)=\emptyset$.
\end{claim}
\begin{proof}
Let $u'$ be an arbitrary neighbor of $w$ in $S_3(i)$. By \autoref{P3 conclusion1} we know that $N_{S_3(i)}(u)=N_{S_3(i)}(w)$ and so $uu'\in E$. Since $u$ and $w$ are not comparable, there must exist $s\in N(u)\setminus N(w)$ and $t\in N(w)\setminus N(u)$. Clearly, $s,t\in L(i)\cup R(i)$.
\noindent{\bf Case 1. }$s,t\in L(i)$. Then $st\notin E$, otherwise $\{s,t,w,u',v_i\}$ induces a $P_5$. Moreover, $sv\notin E$, otherwise $\{s,v,w,u',v_i\}$ induces a bull or a $P_5$, depending on whether $vu'\in E$. Similarly, $tv\notin E$. So $\{s,u,v,w,t\}$ induces a $P_5$, a contradiction.
\noindent{\bf Case 2. }One vertex of $\{s,t\}$ belongs to $L(i)$ and the other belongs to $R(i)$. We assume that $s\in L(i)$ and $t\in R(i)$. Then $sv\notin E$, otherwise $\{s,v,w,u',v_i\}$ induces a bull or a $P_5$, depending on whether $vu'\in E$. So $vu'\notin E$, otherwise $\{s,u,v,u',v_i\}$ induces a bull. Let $z'$ be a neighbor of $v$ in $S_3(i)$. Clearly, $\{s,u,v,z',v_i\}$ induces a bull or a $P_5$, depending on whether $uz'\in E$, a contradiction.
\noindent{\bf Case 3. }$s,t\in R(i)$. Suppose that $sv\notin E$. Then $suv$ is a $P_3$ and so $u'$ is complete or anticomplete to $\{s,v\}$ by \autoref{P3 conclusion1}. Suppose that $u'$ is complete to $\{s,v\}$. If $vt\in E$, then $uvt$ is a $P_3$ and so $tu'\in E$ by \autoref{P3 conclusion1}. Then $\{t,v,w,u'\}$ induces a $K_4$, contradicting that $\chi(G[S_2(i)\cup S_3(i)])\le 3$. So $vt\notin E$. Hence $vwt$ is a $P_3$ and then $tu'\in E$ by \autoref{P3 conclusion1}. Then $st\in E$, otherwise $\{s,u,v,w,t\}$ induces a $P_5$. It is easy to verify that $\{s,u,v,w,t,u'\}$ induces a 4-chromatic subgraph, contradicting that $\chi(G[S_2(i)\cup S_3(i)])\le 3$. So $u'$ must be anticomplete to $\{s,v\}$. Then $st\notin E$, otherwise $\{s,t,w,u',v_i\}$ induces a bull or a $P_5$, depending on whether $tu'\in E$. Hence $tv\in E$, otherwise $\{s,u,v,w,t\}$ induces a $P_5$. Let $z'$ be an arbitrary neighbor of $v$ in $S_3(i)$. Since $suv$ is a $P_3$, $sz'\in E$ by \autoref{P3 conclusion1}. Note that $uvt$ and $uvw$ are all $P_3$ and so $N_{S_3(i)}(u)=N_{S_3(i)}(w)=N_{S_3(i)}(t)$. Then $tz'\notin E$, otherwise $\{t,v,z',w\}$ induces a $K_4$. Note that $\{s,z',v_i,u',w\}$ induces a bull or a $P_5$, depending on whether $u'z'\in E$, a contradiction. Thus $sv\in E$. By symmetry, $tv\in E$.
Since $svw$ and $uvt$ are all $P_3$, we know that $u,w,s,t$ have the same neighborhood in $S_3(i)$ by \autoref{P3 conclusion1} and so $su',tu'\in E$. Then $vu'\notin E$, otherwise $\{v,w,t,u'\}$ induces a $K_4$. Since $u'$ is an arbitrary neighbor of $w$ in $S_3(i)$, $v$ is anticomplete to $N_{S_3(i)}(u)$. Thus $N_{S_3(i)}(u)\cap N_{S_3(i)}(v)=\emptyset$.
If $st\in E$, then $ust$ is a $P_3$. From the above proof we know that $s$ is anticomplete to $N_{S_3(i)}(u)$, which contradicts the fact that $su'\in E$. So $st\notin E$. It follows that $\{u,v,w,s,t\}$ induces the graph in \autoref{fig:uvwst}. This completes the proof of the claim.
\end{proof}
\begin{claim}\label{P3-free}
$G[R(i)]$ is $P_3$-free.
\end{claim}
\begin{proof}
Suppose that $G[R(i)]$ contains a $P_3=uvw$. By \autoref{P3 conclusion2}, $G[R(i)]$ contains a subgraph in \autoref{fig:uvwst} induced by $\{u,v,w,s,t\}$. Moreover, $u,w,s,t$ have the same neighborhood in $S_3(i)$ and $v$ is anticomplete to $N_{S_3(i)}(u)$. Let $u'$ and $v'$ be arbitrary neighbors of $u$ and $v$ in $S_3(i)$, respectively. Then $u'$ is complete to $\{u,w,s,t\}$ and nonadjacent to $v$, and $v'$ is anticomplete to $\{u,w,s,t\}$. It follows from \autoref{lem:XY} that $\{w,t\}$ is not complete to $N(\{u,s\})$. So there exists $a\in N(\{u,s\})$ such that $a$ is not complete to $\{w,t\}$. Clearly, $a\in L(i)\cup R(i)$.
Suppose $a\in L(i)$. Assume that $as\in E$. So $au\in E$, otherwise $\{a,s,u,u',v_i\}$ induces a bull. Then $av\in E$, otherwise $\{a,u,v,v',v_i\}$ induces a $P_5$. Note that $\{a,s,v,u\}$ induces a $K_4$, a contradiction. Thus $a\in R(i)$.
If $a$ is adjacent to only one vertex in $\{s,u\}$, then either $usa$ or $sua$ is a $P_3$ and so $N_{S_3(i)}(s)\cap N_{S_3(i)}(u)=\emptyset$ by \autoref{P3 conclusion2}, contradicting that $su',uu'\in E$. Thus $a$ is complete to $\{s,u\}$. Then $av\notin E$, otherwise $\{s,u,a,v\}$ induces a $K_4$. Because $auv$ is a $P_3$, we know that $au'\notin E$ and $av'\in E$ by \autoref{P3 conclusion2}. Since $a$ is not complete to $\{w,t\}$, we assume that $at\notin E$ by symmetry. Note that $\{t,u',v_i,v',a\}$ induces a bull or a $P_5$, depending on whether $u'v'\in E$, a contradiction.
Therefore, $G[R(i)]$ is $P_3$-free.
\end{proof}
Since $G[R(i)]$ is $P_3$-free, $G[R(i)]$ is a disjoint union of cliques. By \autoref{coloring number}, each component of $G[R(i)]$ is a $K_1$, a $K_2$ or a $K_3$. We next prove that the number of such components is bounded.
\begin{claim}\label{R(i) 1}
There are at most $2^{|L(i)|}$ $K_1$-components and 5 $K_2$-components in $G[R(i)]$.
\end{claim}
\begin{proof}
We first show that there are at most $2^{|L(i)|}$ $K_1$-components in $G[R(i)]$. Suppose there are more than $2^{|L(i)|}$ $K_1$-components in $G[R(i)]$. By the pigeonhole principle, there exist $u,v\in R(i)$ that have the same neighborhood in $L(i)$. Since $u$ and $v$ are not comparable, there exist $u',v'\in S_3(i)$ such that $u'\in N(u)\setminus N(v)$ and $v'\in N(v)\setminus N(u)$. Then $\{u,u',v_i,v',v\}$ induces a bull or a $P_5$, depending on whether $u'v'\in E$, a contradiction. So there are at most $2^{|L(i)|}$ $K_1$-components in $G[R(i)]$.
Next we show that there are at most 5 $K_2$-components in $G[R(i)]$.
Suppose that $A_1$ and $A_2$ are two homogeneous $K_2$-components of $G[R(i)]$. By \autoref{lem:XY}, there exists $x_1\in N(A_1)\setminus N(A_2)$ and $y_1\in N(A_2)\setminus N(A_1)$. Clearly, $x_1,y_1\in S_3(i)\cup L(i)$. Suppose that $x_1,y_1\in L(i)$. Let $w_1,w_2\in S_3(i)$ be the neighbor of $A_1$ and $A_2$, respectively. If $x_1y_1\in E$, then $\{y_1,x_1,w_1,v_i\}\cup A_1$ contains an induced $P_5$. So $x_1y_1\notin E$. Note that $w_2\notin N(A_1)$, otherwise $\{w_2,x_1,y_1\}\cup A_1\cup A_2$ contains an induced $P_5$. Similarly, $w_1\notin N(A_2)$. Then $\{v_i,w_1,w_2\}\cup A_1\cup A_2$ contains an induced bull or an induced $P_5$, depending on whether $w_1w_2\in E$, a contradiction. Suppose that $x_1\in L(i)$ and $y_1\in S_3(i)$. Let $w_3$ be the neighbor of $A_1$ in $S_3(i)$. Note that $w_3\in N(A_2)$, otherwise $\{v_i,w_3,y_1\}\cup A_1\cup A_2$ contains an induced bull or an induced $P_5$, depending on whether $w_3y_1\in E$. Then $w_3y_1\in E$, otherwise $\{x_1,y_1,w_3\}\cup A_1\cup A_2$ contains an induced $P_5$. Then $\{w_3,y_1\}\cup A_2$ induces a $K_4$, contradicting that $\chi(G[S_2(i)\cup S_3(i)])\le 3$. So $x_1,y_1\in S_3(i)$ and then $\{v_i,x_1,y_1\}\cup A_1\cup A_2$ contains an induced bull or an induced $P_5$, depending on whether $x_1y_1\in E$, a contradiction. Thus there is at most one homogeneous $K_2$-component in $G[R(i)]$.
Let $B_1=x_3y_3$ and $B_2=x_4y_4$ be two arbitrary nonhomogeneous $K_2$-components of $G[R(i)]$ and the vertices mixed on $B_1$ or $B_2$ are clearly in $L(i)\cup S_3(i)$. Suppose that each vertex in $S_3(i)$ is complete or anticomplete to $B_1$, then there exists $z'\in L(i)$ mixed on $B_1$. Let $t\in S_3(i)$ be complete to $B_1$, then $\{z',x_3,y_3,t,v_i\}$ induces a bull, a contradiction. So there must exist $z_3\in S_3(i)$ mixed on $B_1$. Similarly, there exists $z_4\in S_3(i)$ mixed on $B_2$. By symmetry, we assume $z_3x_3,z_4x_4\in E$ and $z_3y_3,z_4y_4\notin E$. Then $z_3$ is complete or anticomplete to $B_2$, otherwise $\{y_3,x_3,z_3,x_4,y_4\}$ induces a $P_5$. Similarly, $z_4$ is complete or anticomplete to $B_1$. If $z_3$ is anticomplete to $B_2$ and $z_4$ is anticomplete to $B_1$, then $\{x_3,z_3,v_i,z_4,x_4\}$ induces a bull or a $P_5$, depending on whether $z_3z_4\in E$. If $z_3$ is complete to $B_2$ and $z_4$ is complete to $B_1$, then $\{y_3,z_4,v_i,z_3,y_4\}$ induces a bull or a $P_5$, depending on whether $z_3z_4\in E$. So we assume $z_3$ is anticomplete to $B_2$ and $z_4$ is complete to $B_1$. It follows that $z_3z_4\in E$, otherwise $\{y_4,x_4,z_4,v_i,z_3\}$ induces a $P_5$. So there are at most 4 nonhomogeneous $K_2$-components in $R(i)$, otherwise the vertices in $S_3(i)$ mixed on them respectively can induce a $K_5$, a contradiction.
The above proof shows that there are at most $2^{|L(i)|}$ $K_1$-components and 5 $K_2$-components in $G[R(i)]$.
\end{proof}
\begin{claim}\label{R(i) 2}
There is at most one $K_3$-component in $G[R(i)]$.
\end{claim}
\begin{proof}
Suppose that $T_1=x_1y_1z_1$ and $T_2=x_2y_2z_2$ are two arbitrary $K_3$-components of $G[R(i)]$. Let $x',y'\in S_3(i)$ be neighbors of $T_1$ and $T_2$, respectively. Since $\chi(G[S_2(i)\cup S_3(i)])\le 3$, $x'$ is mixed on $T_1$ and $y'$ is mixed on $T_2$. By symmetry, we assume that $x'x_1,y'x_2\in E$ and $x'y_1,y'y_2\notin E$. So $x'$ is not mixed on $T_2$, otherwise $\{y_1,x_1,x'\}\cup T_2$ contains an induced $P_5$. Moreover, since $\chi(G[S_2(i)\cup S_3(i)])\le 3$, $x'$ is not complete to $T_2$. Thus $x'$ is anticomplete to $T_2$. Similarly, $y'$ is anticomplete to $T_1$. Then $\{x_1,x',v_i,y',x_2\}$ induces a bull or a $P_5$, depending on whether $x'y'\in E$, a contradiction.
Therefore, there is at most one $K_3$-component in $G[R(i)]$.
\end{proof}
By Claims \ref{L(i)}, \ref{R(i) 1} and \ref{R(i) 2}, $|L(i)|\le 8$ and $|R(i)|\le 2^{|L(i)|}+13$. So $|S_2|\le 5\times(2^8+21)$.
Finally, we bound $S_3$ and $S_4$.
\begin{claim}\label{S3 trivial}
For each $1\le i\le 5$, the number of $K_1$-components in $G[S_3(i)]$ is not more than $2^{|S_2(i)\cup S_5|}$.
\end{claim}
\begin{proof}
It suffices to prove for $i=1$. Suppose that the number of $K_1$-components in $G[S_3(1)]$ is more than $2^{|S_2(1)\cup S_5|}$. The pigeonhole principle shows that there are two $K_1$-components $u,v$ having the same neighborhood in $S_2(1)\cup S_5$. Since $u$ and $v$ are not comparable, there must exist $u'\in N(u)\setminus N(v)$ and $v'\in N(v)\setminus N(u)$. By \ref{S2(i)S2(i+1)}, \ref{S2(i)S3(i+2)}, \ref{S3(i)S3(i+1)} and \ref{S2(i)S4(i+2)}, $u',v'\in S_3(3)\cup S_3(4)\cup S_4(1)\cup S_4(2)\cup S_4(5)$. So $\{u,u',v_3,v',v\}$ induces a bull or a $P_5$, depending on whether $u'v'\in E$, a contradiction.
\end{proof}
\begin{claim}\label{S4 trivial}
For each $1\le i\le 5$, the number of $K_1$-components in $G[S_4(i)]$ is not more than $2^{|S_5|}$.
\end{claim}
\begin{proof}
It suffices to prove for $i=1$. Suppose that the number of $K_1$-components in $G[S_4(1)]$ is more than $2^{|S_5|}$. The pigeonhole principle shows that there are two $K_1$-components $u,v$ having the same neighborhood in $S_5$. Since $u$ and $v$ are not comparable, there must exist $u'\in N(u)\setminus N(v)$ and $v'\in N(v)\setminus N(u)$. By \ref{S2(i)S4(i)}, \ref{S2(i)S4(i+2)} and \ref{S2S5}, $u',v'\in (\cup_{i=1,2,5}S_3(i))\cup (\cup_{2\le i\le 5}S_4(i))$. So $\{u,u',v_1,v',v\}$ induces a bull or a $P_5$, depending on whether $u'v'\in E$, a contradiction.
\end{proof}
\begin{claim}\label{S4(i) 2-chromatic}
If $\chi(S_4(i))=2$ for some $1\le i\le 5$, then $S_3\cup S_4$ is bounded.
\end{claim}
\begin{proof}
Without loss of generality, we assume $\chi(S_4(1))=2$. It follows from \ref{S2(i)S4(i+2)} that $S_3(3)=S_3(4)=\emptyset$, otherwise $S_4(1)\cup S_3(3)\cup \{v_3,v_4\}$ contains an induced $K_5$. Since $G$ has no subgraph isomorphic to $F_9$, $\chi(S_4(i))\le 1$ for each $2\le i\le 5$ and $\chi(S_3(j))\le 1$ for each $j=1,2,5$. By Claims \ref{S3 trivial}-\ref{S4 trivial}, $S_3\cup(\cup_{2\le i\le 5}S_4(i))$ is bounded and the number of $K_1$-components in $G[S_4(1)]$ is also bounded.
We now show that the number of vertices in a 2-chromatic component of $G[S_4(1)]$ is bounded. Let $A$ be a 2-chromatic component of $G[S_4(1)]$ and so $A$ is bipartite. Let the bipartition of $A$ be $(X,Y)$. Suppose that $|X|>2^{|S_3\cup (\cup_{2\le i\le 5}S_4(i))\cup S_5|}$. By the pigeonhole principle, there exist two vertices $x_1,x_2\in X$ which have the same neighborhood in $S_3\cup (\cup_{2\le i\le 5}S_4(i))\cup S_5$. Since $x_1$ and $x_2$ are not comparable, there must exist $y_1\in N(x_1)\setminus N(x_2),y_2\in N(x_2)\setminus N(x_1)$. Clearly, $y_1,y_2\in Y$ and so $\{x_1,x_2,y_1,y_2\}$ induces a $2K_2$ in $A$. Since $A$ is connected and bipartite, $A$ contains a $P_5$ by \autoref{2K2}, a contradiction. Thus $|X|\le 2^{|S_3\cup (\cup_{2\le i\le 5}S_4(i))\cup S_5|}$. Similarly, $|Y|\le 2^{|S_3\cup (\cup_{2\le i\le 5}S_4(i))\cup S_5|}$. Thus the number of vertices in $A$ is bounded.
Then we show that there are at most five 2-chromatic components in $G[S_4(1)]$.
Suppose that $A_1$ and $A_2$ are two homogeneous 2-chromatic components of $G[S_4(1)]$. By \autoref{lem:XY}, $A_1$ is not complete to $N(A_2)$ and $A_2$ is not complete to $N(A_1)$. So there must exist $z_1\in N(A_1)\setminus N(A_2)$ and $z_2\in N(A_2)\setminus N(A_1)$. Clearly, $z_1,z_2\in (\cup_{i=1,2,5}S_3(i))\cup (\cup_{2\le i\le 5}S_4(i))\cup S_5$. Then $\{v_1,z_1,z_2\}\cup A_1\cup A_2$ contains an induced bull or an induced $P_5$, depending on whether $z_1z_2\in E$, a contradiction. Thus there is at most one homogeneous 2-chromatic component in $G[S_4(1)]$.
Let $B_1,B_2$ be two nonhomogeneous 2-chromatic components of $G[S_4(1)]$. So there exists $x'$ mixed on $B_1$ and $y'$ mixed on $B_2$. Let $x'$ be mixed on edge $x_3y_3$ in $B_1$ and $y'$ be mixed on edge $x_4y_4$ in $B_2$. By symmetry, assume that $x'x_3,y'x_4\in E$ and $x'y_3,y'y_4\notin E$. It is evident that $x'$ and $y'$ are not the same vertex, otherwise $\{y_3,x_3,x',x_4,y_4\}$ induces a $P_5$. Similarly, $x'$ is not mixed on $x_4y_4$ and $y'$ is not mixed on $x_3y_3$. Clearly, $x',y'\in (\cup_{i=1,2,5}S_3(i))\cup (\cup_{2\le i\le 5}S_4(i))\cup S_5$. If $x'$ is anticomplete to $\{x_4,y_4\}$ and $y'$ is anticomplete to $\{x_3,y_3\}$, then $\{x_3,x',v_1,y',x_4\}$ induces a bull or a $P_5$, depending on whether $x'y'\in E$. If $x'$ is complete to $\{x_4,y_4\}$ and $y'$ is complete to $\{x_3,y_3\}$, then $\{y_4,x',v_1,y',y_3\}$ induces a bull or a $P_5$, depending on whether $x'y'\in E$. So we assume that $x'$ is complete to $\{x_4,y_4\}$ and $y'$ is anticomplete to $\{x_3,y_3\}$. Then $x'y'\in E$, otherwise $\{y',x_4,y_4,x',x_3\}$ induces a bull. So the number of nonhomogeneous 2-chromatic components of $G[S_4(1)]$ is not more than 4, otherwise the vertices mixed on them respectively can induce a $K_5$.
So there are at most five 2-chromatic components in $G[S_4(1)]$. It follows that $S_3\cup S_4$ is bounded.
\end{proof}
\begin{claim}\label{S3(i) 2-chromatic}
If $\chi(S_3(i))=2$ for some $1\le i\le 5$, then $S_3\cup S_4$ is bounded.
\end{claim}
\begin{proof}
Without loss of generality, we assume $\chi(S_3(3))=2$. It follows from \ref{S3(i)S3(i+1)} that $S_3(2)=S_3(4)=\emptyset$, otherwise $S_3(3)\cup S_3(2)\cup \{v_2,v_3\}$ or $S_3(3)\cup S_3(4)\cup \{v_4,v_3\}$ contains an induced $K_5$. Similarly, it follows from \ref{S2(i)S4(i+2)} that $S_4(1)=S_4(5)=\emptyset$. Since $G$ has no subgraph isomorphic to $F_9$, $\chi(S_4(i))\le 1$ for each $2\le i\le 4$ and $\chi(S_3(j))\le 1$ for each $j=1,5$. By Claims \ref{S3 trivial}-\ref{S4 trivial}, $(\cup_{i=1,5}S_3(i))\cup S_4$ is bounded and the number of $K_1$-components in $G[S_3(3)]$ is also bounded.
We now show that the number of vertices in a 2-chromatic component of $G[S_3(3)]$ is bounded. Let $A$ be a 2-chromatic component of $G[S_3(3)]$ and so $A$ is bipartite. Let the bipartition of $A$ be $(X,Y)$. Suppose that $|X|>2^{|S_2(3)\cup S_5\cup (\cup_{i=1,5}S_3(i))\cup (\cup_{2\le i\le 4}S_4(i))|}$. By the pigeonhole principle, there exists two vertices $x_1,x_2\in X$ which have the same neighborhood in $S_2(3)\cup S_5\cup (\cup_{i=1,5}S_3(i))\cup (\cup_{2\le i\le 4}S_4(i))$. Since $x_1$ and $x_2$ are not comparable, there must exist $y_1\in N(x_1)\setminus N(x_2),y_2\in N(x_2)\setminus N(x_1)$. Clearly, $y_1,y_2\in Y$ and so $\{x_1,x_2,y_1,y_2\}$ induces a $2K_2$ in $A$. Since $A$ is connected and bipartite, $A$ contains a $P_5$ by \autoref{2K2}, a contradiction. Thus $|X|\le 2^{|S_2(3)\cup S_5\cup (\cup_{i=1,5}S_3(i))\cup (\cup_{2\le i\le 4}S_4(i))|}$. Similarly,
\[|Y|\le 2^{|S_2(3)\cup S_5\cup (\cup_{i=1,5}S_3(i))\cup (\cup_{2\le i\le 4}S_4(i))|}.\] Thus the number of vertices in $A$ is bounded.
Then we show that there are at most $(2^{|S_2(3)|}+4)$ 2-chromatic components in $G[S_3(3)]$.
Suppose that the number of homogeneous 2-chromatic components of $G[S_3(3)]$ is more than $2^{|S_2(3)|}$. By the pigeonhole principle, there are two 2-chromatic components $A_1,A_2$ such that $N_{S_2(3)}(A_1)=N_{S_2(3)}(A_2)$. By \autoref{lem:XY}, $A_1$ is not complete to $N(A_2)$ and $A_2$ is not complete to $N(A_1)$. So there must exist $z_1\in N(A_1)\setminus N(A_2)$ and $z_2\in N(A_2)\setminus N(A_1)$. Clearly, $z_1,z_2\in (\cup_{i=1,5}S_3(i))\cup (\cup_{2\le i\le 4}S_4(i))\cup S_5$. Then $\{v_1,z_1,z_2\}\cup A_1\cup A_2$ contains an induced bull or an induced $P_5$, depending on whether $z_1z_2\in E$, a contradiction. Thus there are at most $2^{|S_2(3)|}$ homogeneous 2-chromatic components in $G[S_3(3)]$.
Let $B_1,B_2$ be two nonhomogeneous 2-chromatic components of $G[S_3(3)]$. So there exists $x'$ mixed on $B_1$ and $y'$ mixed on $B_2$. Let $x'$ be mixed on edge $x_3y_3$ in $B_1$ and $y'$ be mixed on edge $x_4y_4$ in $B_2$. By symmetry, assume that $x'x_3,y'x_4\in E$ and $x'y_3,y'y_4\notin E$. It is evident that $x'$ and $y'$ are not the same vertex, otherwise $\{y_3,x_3,x',x_4,y_4\}$ induces a $P_5$. Similarly, $x'$ is not mixed on $x_4y_4$ and $y'$ is not mixed on $x_3y_3$. Clearly, $x',y'\in S_2(3)\cup S_5\cup (\cup_{i=1,5}S_3(i))\cup (\cup_{2\le i\le 4}S_4(i))$.
\noindent{\bf Case 1.} $x'$ is anticomplete to $\{x_4,y_4\}$ and $y'$ is anticomplete to $\{x_3,y_3\}$. Then $x'$ is nonadjacent to $y'$, otherwise $\{y_3,x_3,x',y',x_4,y_4\}$ induces a $P_6$. If $x',y'\notin S_2(3)$, then $\{x_3,x',v_1,y',x_4\}$ induces a $P_5$. If $x',y'\in S_2(3)$, then $\{x',x_3,v_3,x_4,y'\}$ induces a $P_5$. So assume $x'\in S_2(3)$ and $y'\notin S_2(3)$. Then $\{x_4,v_3,y_3,x_3,x'\}$ induces a bull, a contradiction.
\noindent{\bf Case 2.} $x'$ is complete to $\{x_4,y_4\}$ and $y'$ is anticomplete to $\{x_3,y_3\}$. Then $x'y'\in E$, otherwise $\{y',x_4,y_4,x',x_3\}$ induces a bull. So as the case when $x'$ is anticomplete to $\{x_4,y_4\}$ and $y'$ is complete to $\{x_3,y_3\}$.
\noindent{\bf Case 3.} $x'$ is complete to $\{x_4,y_4\}$ and $y'$ is complete to $\{x_3,y_3\}$. Suppose that $x',y'\notin S_2(3)$ and so $\{y_4,x',v_1,y',y_3\}$ induces a bull or a $P_5$, depending on whether $x'y'\in E$, a contradiction. If $x',y'\in S_2(3)$, then $x'y'\in E$, otherwise $\{x',y_4,v_3,y_3,y'\}$ induces a $P_5$. If $x'\in S_2(3)$ and $y'\notin S_2(3)$, then $x'y'\in E$, otherwise $\{v_1,y',y_3,x_3,x'\}$ induces a bull.
We now know that $x'$ must be adjacent to $y'$. So the number of nonhomogeneous 2-chromatic components of $G[S_3(3)]$ is not more than 4, otherwise the vertices mixed on them respectively can induce a $K_5$, a contradiction. It follows that there are at most $(2^{|S_2(3)|}+4)$ 2-chromatic components in $G[S_3(3)]$.
Therefore, $S_3\cup S_4$ is bounded.
\end{proof}
By Claims \ref{S3 trivial}-\ref{S3(i) 2-chromatic}, $S_3\cup S_4$ is bounded and so is $|G|$. This completes the proof of \autoref{th}.
\end{proof}
\end{document} |
\begin{document}
\title{Decidability of the Monadic Shallow Linear First-Order Fragment with Straight Dismatching Constraints}
\author{Andreas Teucke\inst{1,2} \and Christoph Weidenbach\inst{1}}
\institute{Max-Planck Institut f\"ur Informatik, Saarland Informatics Campus,
66123 Saarbr\"ucken
Germany \and Graduate School of Computer Science, Saarbr\"ucken, Germany }
\maketitle
\begin{abstract} The monadic shallow linear Horn fragment is well-known to be decidable and has many applications, e.g., in security protocol analysis, tree automata, or abstraction refinement. It was a long-standing open problem how to extend the fragment to the non-Horn case, preserving decidability, which would, e.g., make it possible to express non-determinism in protocols. We prove decidability of the non-Horn monadic shallow linear fragment via ordered resolution further extended with dismatching constraints and discuss some applications of the new decidable fragment. \end{abstract}
\section{Introduction} \label{sec:intro}
Motivated by the automatic analysis of security protocols, the monadic shallow linear Horn (MSLH) fragment was shown to be decidable in \cite{Weidenbach99cade}. In addition to the restriction to monadic Horn clauses, the main restriction of the fragment is positive literals of the form $S(f(x_1,\ldots,x_n))$ or $S(x)$ where all $x_i$ are different, i.e., all terms are shallow and linear. The fragment can be finitely saturated by superposition (ordered resolution) where negative literals with non-variable arguments are always selected. As a result, productive clauses with respect to the superposition model operator $\I{N}$ have the form $S_1(x_1),\ldots,S_n(x_n) \rightarrow S(f(x_1,\ldots,x_n))$. Therefore, the models of saturated MSLH clause sets can both be represented by tree automata \cite{tata2007} and shallow linear sort theories \cite{JacquemardMeyerEtAl98}. The models are typically infinite. The decidability result of MSLH clauses was rediscovered in the context of tree automata research \cite{GoubaultLarrecq05IPL} where in addition DEXPTIME-completeness of the MSLH fragment was shown. The fragment was further extended by disequality constraints \cite{journalsiplSeidlR11,Seidl12F} still motivated by security protocol analysis \cite{SeidlVerma2007}. Although from a complexity point of view, the difference between Horn clause fragments and the respective non-Horn clause fragments is typically reflected by membership in the deterministic vs.\ the non-deterministic respective complexity fragment, for monadic shallow linear clauses so far there was no decidability result for the non-Horn case.
The results of this paper close this gap. We show the monadic shallow linear non-Horn (MSL) clause fragment to be decidable by superposition (ordered resolution). From a security protocol application point of view, non-Horn clauses enable a natural representation of non-determinism. Our second extension to the fragment is unit clauses with disequations of the form $s\not\approx t$, where $s$ and $t$ are not unifiable. Due to the employed superposition calculus, such disequations do not influence saturation of an MSL clause set, but have an effect on potential models. They can rule out identification of syntactically different ground terms as it is, e.g., desired in the security protocol context for syntactically different messages or nonces. Our third extension to the fragment is straight dismatching constraints. These constraints are incomparable to the disequality constraints mentioned above \cite{journalsiplSeidlR11,Seidl12F}. They do not strictly increase the expressiveness of the MSL theory, but enable up to exponentially more compact saturations. For example, the constrained clause\newline \centerline{$(S(x), T(y) \rightarrow S(f(x,y)); y\neq f(x',f(a,y')))$} over constants $a, b$ describes the same set of ground clauses as the six unconstrained clauses\newline \centerline{$S(x), T(a) \rightarrow S(f(x,a)) \qquad S(x), T(b) \rightarrow S(f(x,b)) \qquad \ldots$} \centerline{$S(x), T(f(b,y')) \rightarrow S(f(x,f(b,y')))$} \centerline{$S(x), T(f(f(x'',y''),y')) \rightarrow S(f(x,f(f(x'',y''),y')))$.} Furthermore, for a satisfiability-equivalent transformation into MSL clauses, the nested terms in the positive literals would have to be factored out by the introduction of further predicates and clauses. E.g., the first clause is replaced by the two MSL clauses $S(x), T(a), R(y) \rightarrow S(f(x,y))$ and $R(a)$ where $R$ is a fresh monadic predicate. The constrained clause belongs to the MSL(SDC) fragment. 
Altogether, the resulting MSL(SDC) fragment is shown to be decidable in Section~\ref{sec:decide}.
The introduction of straight dismatching constraints (SDCs) enables an improved refinement step of our approximation refinement calculus~\cite{Teucke2015}. Before, several clauses were needed to rule out a specific instance of a clause in an unsatisfiable core. For example, if due to a linearity approximation from clause $S(x), T(x) \rightarrow S(f(x,x))$ to $S(x), T(x), S(y), T(y) \rightarrow S(f(x,y))$ an instance $\{x\mapsto f(a,x')$, $y\mapsto f(b,y')\}$ is used in the proof, before \cite{Teucke2015} several clauses were needed to replace $S(x), T(x) \rightarrow S(f(x,x))$ in a refinement step in order to rule out this instance. With straight dismatching constraints the clause $S(x), T(x) \rightarrow S(f(x,x))$ is replaced by the two clauses $S(f(a,x)), T(f(a,x)) \rightarrow S(f(f(a,x),f(a,x)))$ and $(S(x), T(x) \rightarrow S(f(x,x)); x\neq f(a,y))$. For the improved approximation refinement approach (FO-AR) presented in this paper, any refinement step results in just two clauses, see Section~\ref{sec:approx}. The additional expressiveness of constraint clauses comes almost for free, because necessary computations, like, e.g., checking emptiness of SDCs, can all be done in polynomial time, see Section~\ref{sec:prelim}.
In addition to the extension of the known MSLH decidability result and the improved approximation refinement calculus FO-AR, we discuss in Section~\ref{sec:experiments} the potential of the MSL(SDC) fragment in the context of FO-AR, Theorem~\ref{theo:refinement:scfoar}, and its prototypical implementation in SPASS-AR (\url{http://www.mpi-inf.mpg.de/fileadmin/inf/rg1/spass-ar.tgz}). It turns out that for clause sets containing certain structures, FO-AR is superior to ordered resolution/superposition~\cite{BachmairGanzinger94b} and instance generating methods~\cite{Korovin13ganzinger}. The paper ends with a discussion on challenges and future research directions, Section~\ref{sec:conclusion}.
\section{First-Order Clauses with Straight Dismatching Constraints: MSL(SDC)} \label{sec:prelim}
We consider a standard first-order language where letters $v,w,x,$ $y,z$ denote variables, $f,g,h$ functions, $a,b,c$ constants, $s,t$ terms, $p,q,r$ positions and Greek letters $\sigma,\tau,\rho,\delta$ are used for substitutions. $S,P,Q,R$ denote predicates, $\approx$ denotes equality, $A,B$ atoms, $E,L$ literals, $C,D$ clauses, $N$ clause sets and $\mathcal{V}$ sets of variables. $\overline L$ is the complement of $L$. The signature $\Sigma=(\mathcal{F},\mathcal{P})$ consists of two disjoint, non-empty, in general infinite sets of function and predicate symbols $\mathcal{F}$ and $\mathcal{P}$, respectively. The set of all \emph{terms} over variables $\mathcal{V}$ is $\mathcal{T}(\mathcal{F},\mathcal{V})$. If there are no variables, then terms, literals and clauses are called \emph{ground}, respectively. A \emph{substitution} $\sigma$ is denoted by pairs $\{x \mapsto t\}$ and its update at $x$ by $\sigma[ x \mapsto t]$. A substitution $\sigma$ is a \emph{grounding} substitution for $\mathcal{V}$ if $x\sigma$ is ground for every variable $x \in \mathcal{V}$.
The set of \emph{free} variables of an atom $A$ (term $t$) denoted by $\operatorname{vars}(A)$ ($\operatorname{vars}(t)$). A \emph{position} is a sequence of positive integers, where $\varepsilon$ denotes the empty position. As usual $t\vert_p = s$ denotes the subterm $s$ of $t$ at position $p$, which we also write as $t[s]_p$, and $t[p/s']$ then denotes the replacement of $s$ with $s'$ in $t$ at position $p$. These notions are extended to literals and multiple positions.
A predicate with exactly one argument is called \emph{monadic}. A term is \emph{complex} if it is not a variable and \emph{shallow} if it has at most depth one. It is called \emph{linear} if there are no duplicate variable occurrences. A literal, where every argument term is shallow, is also called \emph{shallow}. A variable and a constant are called \emph{straight}. A term $f(s_1,\ldots,s_n)$ is called \emph{straight}, if $s_1,\ldots,s_n$ are different variables except for at most one straight term $s_i$.
A \emph{clause} is a multiset of literals which we write as an implication $\Gamma \rightarrow \Delta$ where the atoms in the multiset $\Delta$ (the \emph{succedent}) denote the positive literals and the atoms in the multiset $\Gamma$ (the \emph{antecedent}) the negative literals. We write $\square$ for the empty clause. If $\Gamma$ is empty we omit $\rightarrow$, e.g., we can write $P(x)$ as an alternative to $\rightarrow P(x)$. We abbreviate disjoint set union with sequencing, for example, we write $\Gamma,\Gamma' \rightarrow \Delta,L$ instead of $\Gamma \cup \Gamma' \rightarrow \Delta \cup \{L\}$. A clause $E,E,\Gamma \rightarrow \Delta$ is equivalent to $E,\Gamma \rightarrow \Delta$ and we call them equal \emph{modulo duplicate literal elimination}. If every term in $\Delta$ is shallow, the clause is called \emph{positive shallow}. If all atoms in $\Delta$ are linear and variable disjoint, the clause is called \emph{positive linear}. A clause $\Gamma \rightarrow \Delta$ is called an \emph{MSL} clause, if it is (i)~positive shallow and linear, (ii)~all occurring predicates are monadic, (iii)~no equations occur in $\Delta$, and (iv)~no equations occur in $\Gamma$ or $\Gamma = \{s\approx t\}$ and $\Delta$ is empty where $s$ and $t$ are not unifiable. \emph{MSL} is the first-order clause fragment consisting of MSL clauses. Clauses $\Gamma,s\approx t \rightarrow \Delta$ where $\Gamma$, $\Delta$ are non-empty and $s,t$ are not unifiable could be added to the MSL fragment without changing any of our results. Considering the superposition calculus, it will select $s\approx t$. Since the two terms are not unifiable, no inference will take place on such a clause and the clause will not contribute to the model operator. In this sense such clauses do not increase the expressiveness of the fragment.
An \emph{atom ordering} $\prec$ is an irreflexive, well-founded, total ordering on ground atoms. It is lifted to literals by representing $A$ and $\neg A$ as multisets $\{A\}$ and $\{A,A\}$, respectively. The multiset extension of the literal ordering induces an ordering on ground clauses. The clause ordering is compatible with the atom ordering; if the maximal atom in $C$ is greater than the maximal atom in $D$ then $D \prec C$. We use $\prec$ simultaneously to denote an atom ordering and its multiset, literal, and clause extensions. For a ground clause set $N$ and clause $C$, the set $N^{\prec C}=\{D \in N \mid D \prec C\}$ denotes the clauses of $N$ smaller than $C$.
A \emph{Herbrand interpretation} $\I{}$ is a - possibly infinite - set of ground atoms. A ground atom $A$ is called \emph{true} in $\I{}$ if $A\in\I{}$ and \emph{false}, otherwise. $\I{}$ is said to \emph{satisfy} a ground clause $C= \Gamma \rightarrow \Delta$, denoted by $\I{}\vDash C$, if $\Delta \cap \I{} \neq \emptyset$ or $\Gamma \not\subseteq \I{}$. A non-ground clause $C$ is satisfied by $\I{}$ if $\I{}\vDash C\sigma$ for every grounding substitution $\sigma$. An interpretation $\I{}$ is called a \emph{model} of $N$, $\I{}\vDash N$, if $\I{}\vDash C$ for every $C\in N$. A model $\I{}$ of $N$ is considered \emph{minimal} with respect to set inclusion, i.e., if there is no model $\I{}'$ with $\I{}'\subset \I{}$ and $\I{}'\vDash N$. A set of clauses $N$ is \emph{satisfiable}, if there exists a model that satisfies $N$. Otherwise, the set is \emph{unsatisfiable}.
A disequation $t \neq s$ is an \emph{atomic straight dismatching constraint} if $s$ and $t$ are variable disjoint terms and $s$ is straight. A straight dismatching constraint $\pi$ is a conjunction of atomic straight dismatching constraints. Given a substitution $\sigma$, $\pi\sigma= \bwedge{i\in I} ~~ t_i\sigma \neq s_i $. $\mathrm{lvar}(\pi) := \bigcup_{i \in I} \operatorname{vars}(t_i)$ are the left-hand variables of $\pi$ and the depth of $\pi$ is the maximal term depth of the $s_i$. A \emph{solution} of $\pi$ is a grounding substitution $\delta$ such that for all $i\in I$, $t_i\delta$ is not an instance of $s_i$, i.e., there exists no $\sigma$ such that $t_i\delta = s_i\sigma$. A dismatching constraint is solvable if it has a solution and unsolvable, otherwise. Whether a straight dismatching constraint is solvable, is decidable in linear-logarithmic time \cite{DBLP:conf/cade/TeuckeW16}. $\top$ and $\bot$ represent the true and false dismatching constraint, respectively.
We define constraint normalization $\norm{\pi}$ as the normal form of the following rewriting rules over straight dismatching constraints.
\shortrules{\ }{$ ~\pi \wedge f(t_1,\ldots,t_n)\neq y $~~~~~~~~~~~~~~~}{$\bot$}{}{}{10}
\shortrules{\ }{$ \pi \wedge f(t_1,\ldots,t_n)\neq f(y_1,\ldots,y_n) $}{$\bot$}{}{}{10}
\shortrules{\ }{$ \pi \wedge f(t_1,\ldots,t_n)\neq f(s_1,\ldots,s_n) $}{$\pi \wedge t_i \neq s_i$ ~~if $s_i$ is complex}{}{}{10}
\shortrules{\ }{$ \pi \wedge f(t_1,\ldots,t_n)\neq g(s_1,\ldots,s_m) $}{$\pi$}{}{}{10}
\shortrules{\ }{$\pi \wedge x\neq s \wedge x\neq s\sigma $~~~~~~~~~~~~~~~~~\;}{$\pi \wedge x\neq s $ }{}{}{10} Note that $f(t_1,\ldots,t_n)\neq f(s_1,\ldots,s_n)$ normalizes to $t_i \neq s_i$ for some $i$,
where $s_i$ is the one straight complex argument of $f(s_1,\ldots,s_n)$. Furthermore, the depth of $\norm{\pi}$ is less or equal to the depth of $\pi$ and both have the same solutions.
A pair of a clause and a constraint $(C;\pi)$ is called a \emph{constrained clause}. Given a substitution $\sigma$, $(C;\pi)\sigma = (C\sigma;\pi\sigma)$. $C\delta$ is called a ground clause of $(C;\pi)$ if $\delta$ is a solution of $\pi$. $\ground{(C;\pi)}$ is the set of ground instances of $(C;\pi)$. If $\ground{(C;\pi)}\subseteq \ground{(C';\pi')}$, then $(C;\pi)$ is an instance of $(C';\pi')$. If $\ground{(C;\pi)} = \ground{(C';\pi')}$, then $(C;\pi)$ and $(C';\pi')$ are called variants. A Herbrand interpretation $\I{}$ satisfies $(C;\pi)$, if $\I{}\vDash \ground{(C;\pi)}$. A constrained clause $(C;\pi)$ is called \emph{redundant} in $N$ if for every $D \in \ground{(C;\pi)}$, there exist $D_1,\ldots,D_n$ in $\ground{N}^{\prec D}$ such that $D_1,\ldots,D_n \vDash D$. A constrained clause $(C';\pi')$ is called a \emph{condensation} of $(C;\pi)$ if $C' \subset C $ and there exists a substitution $\sigma$ such that, $\pi\sigma= \pi'$, $\pi' \subseteq \pi$, and for all $L \in C$ there is an $L'\in C'$ with $L\sigma=L'$. A finite unsatisfiable subset of $\ground{N}$ is called an unsatisfiable core of $N$.
An MSL clause with straight dismatching constraints is called an \emph{MSL(SDC)} clause with MSL(SDC) being the respective first-order fragment. Note that any clause set $N$ can be transformed into an equivalent constrained clause set by changing each $C\in N$ to $(C;\top)$.
\section{Decidability of the MSL(SDC) fragment}\label{sec:decide}
In the following we will show that the satisfiability of the MSL(SDC) fragment is decidable. For this purpose we will define ordered resolution with selection on constrained clauses \cite{DBLP:conf/cade/TeuckeW16} and show that with an appropriate ordering and selection function, saturation of an MSL(SDC) clause set terminates.
For the rest of this section we assume an atom ordering $\prec$ such that a literal $\neg Q(s)$ is not greater than a literal $P(t[s]_p)$, where $p \neq \varepsilon$. For example, a KBO where all symbols have weight one has this property.
\begin{definition}[sel]\label{def:decide:sel} Given an MSL(SDC) clause $(C;\pi) = (S_1(t_1),\dots,S_n(t_n) \rightarrow P_1(s_1),\dots, P_m(s_m);\pi)$. The Superposition Selection function $\mathrm{sel}$ is defined by $S_i(t_i)\in \mathrm{sel}(C)$ if (1) $t_i$ is not a variable or (2) $t_1,\ldots,t_n$ are variables and $t_i \notin \operatorname{vars}(s_1,\dots,s_m)$ or (3) $\{ t_1,\ldots,t_n\} \subseteq \operatorname{vars}(s_1,\dots,s_m)$ and for some $1 \leq j \leq m$, $s_j=t_i$. \end{definition}
The selection function $\mathrm{sel}$ (Definition \ref{def:decide:sel}) ensures that a clause $\Gamma \rightarrow \Delta$ can only be resolved on a positive literal if $\Gamma$ contains only variables, which also appear in $\Delta$ at a non-top position. For example:\newline \centerline{$\begin{array}{r@{\,=\,}l} \mathrm{sel}(P(f(x)),P(x), Q(z) \rightarrow Q(x),R(f(y))) & \{P(f(x))\}\\ \mathrm{sel}(P(x), Q(z) \rightarrow Q(x),R(f(y))) & \{Q(z)\}\\ \mathrm{sel}(P(x), Q(y) \rightarrow Q(x),R(f(y))) & \{P(x)\}\\ \mathrm{sel}(P(x), Q(y) \rightarrow Q(f(x)),R(f(y))) & \emptyset.\\ \end{array}$} Note that given an MSL(SDC) clause $(C;\pi) = (S_1(t_1),\dots,S_n(t_n)$ $ \rightarrow P_1(s_1),\dots, P_m(s_m);\pi)$, if some $S_i(t_i)$ is maximal in $C$, then at least one literal is selected.
\begin{definition} A literal $A$ is called \emph{[strictly] maximal} in a constrained clause $(C \vee A;\pi)$ if and only if there exists a solution $\delta$ of $\pi$ such that for all literals $B$ in $C$, $B\delta \preceq A\delta~ [ B\delta \prec A\delta ]$. \end{definition}
\begin{definition}[SDC-Resolution]\label{def:decide:resolution} $$ { \frac{( \Gamma_1 \rightarrow \Delta_1, A ~;~\pi_1) \qquad ( \Gamma_2 , B \rightarrow \Delta_2 ~;~\pi_2)}{ ((\Gamma_1,\Gamma_2 \rightarrow \Delta_1,\Delta_2)\sigma~; ~\norm{(\pi_1 \wedge \pi_2)\sigma}) }}~~~, \text{if} $$ \begin{tabular}{rlrl}
1. & $\sigma=\mathrm{mgu}(A,B)$ & 2. & $\norm{(\pi_1 \wedge \pi_2)\sigma}$ is solvable\\
3. & \multicolumn{3}{l}{$A\sigma$ is strictly maximal in $(\Gamma_1 \rightarrow \Delta_1, A;\pi_1)\sigma$ and $\mathrm{sel}(\Gamma_1 \rightarrow \Delta_1, A)=\emptyset$}\\
4. & $B \in \mathrm{sel}(\Gamma_2 , B \rightarrow \Delta_2)$ & & \\
5. & \multicolumn{3}{l}{$\mathrm{sel}(\Gamma_2 , B \rightarrow \Delta_2)=\emptyset$ and $\neg B\sigma$ maximal in $(\Gamma_2 , B \rightarrow \Delta_2;\pi_2)\sigma$}\\ \end{tabular} \end{definition}
\begin{definition}[SDC-Factoring]\label{def:decide:factoring} $$ {\frac{(\Gamma\rightarrow \Delta, A, B ~;~ \pi) }{ ((\Gamma\rightarrow \Delta, A)\sigma; \norm{\pi\sigma})}} ~~~, \text{if}$$ \begin{tabular}{rl@{$\quad$}rl}
1. & $\sigma=\mathrm{mgu}(A,B)$ &
2. & $\mathrm{sel}(\Gamma\rightarrow \Delta, A, B)=\emptyset$\\ 3. & $A\sigma$ is maximal in $(\Gamma\rightarrow \Delta, A, B;\pi)\sigma$ & 4. & $\norm{\pi\sigma}$ is solvable\\ \end{tabular} \end{definition}
Note that while the above rules do not operate on equations, we can actually allow unit clauses that consist of non-unifiable disequations, i.e., clauses $s \approx t \rightarrow$ where $s$ and $t$ are not unifiable. There are no potential superposition inferences on such clauses as long as there are no positive equations. So resolution and factoring suffice for completeness. Nevertheless, clauses such as $s \approx t \rightarrow$ affect the models of satisfiable problems. Constrained Resolution and Factoring are sound.
\begin{lemma}[Soundness]
SDC-Resolution and SDC-Factoring are sound. \end{lemma} \begin{proof}
Let $(\Gamma_1,\Gamma_2 \rightarrow \Delta_1,\Delta_2)\sigma\delta$ be a ground instance of $((\Gamma_1,\Gamma_2 \rightarrow \Delta_1,\Delta_2)\sigma; (\pi_1 \wedge \pi_2)\sigma)$.
Then, $\delta$ is a solution of $(\pi_1 \wedge \pi_2)\sigma$ and $\sigma\delta$ is a solution of $\pi_1$ and $\pi_2$.
Hence, $(\Gamma_1 \rightarrow \Delta_1, A)\sigma\delta$ and $(\Gamma_2 , B \rightarrow \Delta_2)\sigma\delta$ are ground instances of $(\Gamma_1 \rightarrow \Delta_1, A;\pi_1)$ and $(\Gamma_2 , B \rightarrow \Delta_2;\pi_2)$, respectively.
Because $A\sigma\delta= B \sigma\delta $, if $(\Gamma_1 \rightarrow \Delta_1, A)\sigma\delta$ and $(\Gamma_2 , B \rightarrow \Delta_2)\sigma\delta$ are satisfied, then $(\Gamma_1,\Gamma_2 \rightarrow \Delta_1,\Delta_2)\sigma\delta$ is also satisfied.
Therefore, SDC-Resolution is sound.
Let $(\Gamma\rightarrow \Delta, A)\sigma\delta$ be a ground instance of $((\Gamma\rightarrow \Delta, A)\sigma; \pi\sigma)$.
Then, $\delta$ is a solution of $\pi\sigma$ and $\sigma\delta$ is a solution of $\pi$.
Hence, $(\Gamma\rightarrow \Delta, A, B )\sigma\delta$ is a ground instance of $(\Gamma\rightarrow \Delta, A, B ;\pi)$.
Because $A\sigma\delta= B \sigma\delta $, if $(\Gamma\rightarrow \Delta, A, B )\sigma\delta$ is satisfied, then $(\Gamma\rightarrow \Delta, A)\sigma\delta$ is also satisfied.
Therefore, SDC-Factoring is sound.\qed \end{proof}
\begin{definition}[Saturation]\label{def:decide:saturated} A constrained clause set $N$ is called saturated up to redundancy, if for every inference between clauses in $N$ the result $(R;\pi)$ is either redundant in $N$ or $\ground{(R;\pi)} \subseteq \ground{N}$. \end{definition}
Note that our redundancy notion includes condensation and the condition $\ground{(R;\pi)} \subseteq \ground{N}$ allows ignoring variants of clauses.
\begin{lemma}\label{lem:decide:condensation}
Let constrained clause $(C';\pi')$ be a condensation of constrained clause $(C;\pi)$.
Then, (i) $(C;\pi) \vDash (C';\pi')$ and (ii) $(C;\pi)$ is redundant in $\{(C';\pi')\}$. \end{lemma}
\begin{proof}
Let $\sigma$ be a substitution such that $C' \subset C$, $\pi\sigma= \pi'$,
$\pi' \subseteq \pi$, and for all $L \in C$ there is an $L'\in C'$ with $L\sigma=L'$.
(i) Let $C'\delta \in \ground{(C';\pi')}$.
Then $\sigma\delta$ is a solution of $\pi$ and hence $C\sigma\delta \in \ground{(C;\pi)}$.
Let $\I{} \vDash C\sigma\delta$. Hence, there is an $L\sigma\delta \in \I{}$ for some $L \in C$ and thus $L'\delta \in \I{}$ for some $L' \in C'$ with $L\sigma=L'$.
Therefore, $\I{} \vDash C'\delta$. Since $\I{}$ and $C'\delta$ were arbitrary, $(C;\pi) \vDash (C';\pi')$.
(ii) Let $C\delta \in \ground{(C;\pi)}$. Because $\pi' \subseteq \pi$, $\delta$ is a solution of $\pi'$ and hence, $C'\delta \in \ground{(C';\pi')} $.
Therefore, since $C'\delta \subset C\delta$, $C'\delta \in \ground{\{(C';\pi')\}}^{\prec C\delta}$ and $C'\delta\vDash C\delta$.\qed \end{proof}
\begin{definition}[Partial Minimal Model Construction]\label{def:decide:model} Given a constrained clause set $N$, an ordering $\prec$ and the selection function $\mathrm{sel}$, we construct an interpretation $\I{N}$ for $N$, called a partial model, inductively as follows: \begin{align*}
\I{C} &:= \bigcup_{\substack{D \in \ground{N}\\ D\prec C}} \delta_D, \text{ where }C \in \ground{N}\\
\delta_D &:= \left\{
\begin{array}{l l}
\{A\} & \quad \text{if $D=\Gamma \rightarrow \Delta, A$ }\\
& \quad \text{$A$ strictly maximal, $\mathrm{sel}(D)=\emptyset$ and $\I{D}\not \vDash D$}\\
\emptyset & \quad \text{otherwise}
\end{array} \right. \\ \I{N} &:= \bigcup_{C \in \ground{N}} \delta_C \end{align*} Clauses $D$ with $\delta_D \neq \emptyset$ are called productive. \end{definition}
\begin{lemma}[Ordered SDC Resolution Completeness]\label{lem:decide:complete}
Let $N$ be a constrained clause set saturated up to redundancy by ordered SDC-resolution with selection. Then $N$ is unsatisfiable, if and only if $\square\in \ground{N}$. If $\square\not\in \ground{N}$ then $\I{N}\models N$. \end{lemma}
\begin{proof}
Assume $N$ is unsatisfiable but $\square\not\in \ground{N}$.
For the partial model $\I{N}$, there exists a minimal false clause $C\sigma\in \ground{(C;\pi)}$ for some $(C;\pi)\in N$.
$C\sigma$ is not productive, because otherwise $\I{N} \vDash C\sigma$.
Hence, either $\mathrm{sel}(C)\neq \emptyset$ or no positive literal in $C\sigma$ is strictly maximal.
Assume $C= \Gamma_2, B \rightarrow \Delta_2$ with $B \in \mathrm{sel}(C)$ or $\neg B\sigma$ maximal.
Then, $B\sigma \in \I{C\sigma}$ and there exists a ground instance $(\Gamma_1 \rightarrow \Delta_1 , A)\tau= D\tau \prec C\sigma$ of some clause $(D;\pi')\in N$,
which produces $A\tau=B\sigma$.
Therefore, there exists a $\rho=\mathrm{mgu}(A,B)$ and ground substitution $\delta$
such that $C\sigma=C\rho\delta$, $D\tau=D\rho\delta$.
Since $\rho\delta=\sigma$ is a solution of $\pi$ and $\pi'$, $\delta$ is a solution of $(\pi \wedge \pi')\rho$.
Under these conditions, SDC-Resolution can be applied to $(\Gamma_1 \rightarrow \Delta_1 , A;\pi')$ and $(\Gamma_2, B \rightarrow \Delta_2;\pi)$.
Their resolvent $(R;\pi_R)=((\Gamma_1, \Gamma_2 \rightarrow \Delta_1, \Delta_2 )\rho;(\pi \wedge \pi')\rho)$ is either redundant in $N$ or $\ground{(R;\pi_R)} \subseteq \ground{N}$.
Its ground instance $R\delta$ is false in $\I{N}$ and $R\delta \prec C\sigma$.
If $(R;\pi_R)$ is redundant in $N$, there exist $C_1,\ldots,C_n$ in $\ground{N}^{\prec R\delta}$ with $C_1,\ldots,C_n \vDash R\delta$.
Because $C_i \prec R\delta \prec C\sigma$, $\I{N} \vDash C_i$ and hence $\I{N} \vDash R\delta$, which contradicts $\I{N} \not \vDash R\delta$.
Otherwise, if $\ground{(R;\pi_R)} \subseteq \ground{N}$, then $R\delta\in \ground{N}$, which contradicts $C\sigma$ being minimal false.
Now, assume $\mathrm{sel}(C)= \emptyset$ and $C= \Gamma \rightarrow \Delta, B$ with $B\sigma$ maximal.
Then, $C= \Gamma \rightarrow \Delta', A, B$ with $A\sigma=B\sigma$.
Therefore, there exists a $\rho=\mathrm{mgu}(A,B)$ and ground substitution $\delta$
such that $C\sigma=C\rho\delta$ and $\rho\delta$ is a solution of $\pi$.
Hence, $\delta$ is a solution of $\pi\rho$.
Under these conditions, SDC-Factoring can be applied to $(\Gamma \rightarrow \Delta', A, B;\pi)$.
The result $(R;\pi_R)=((\Gamma \rightarrow \Delta', A)\rho;\pi\rho)$
is either redundant in $N$ or $\ground{(R;\pi_R)} \subseteq \ground{N}$.
Its ground instance $R\delta$ is false in $\I{N}$ and $R\delta \prec C\sigma$.
If $(R;\pi_R)$ is redundant in $N$, there exist $C_1,\ldots,C_n$ in $\ground{N}^{\prec R\delta}$ with $C_1,\ldots,C_n \vDash R\delta$.
Because $C_i \prec R\delta \prec C\sigma$, $\I{N} \vDash C_i$ and hence $\I{N} \vDash R\delta$, which contradicts $\I{N} \not \vDash R\delta$.
Otherwise, if $\ground{(R;\pi_R)} \subseteq \ground{N}$, then $R\delta\in \ground{N}$, which contradicts $C\sigma$ being minimal false.
Therefore, if $\square\not\in \ground{N}$, no minimal false clause exists and $\I{N}\models N$. \qed \end{proof}
\begin{lemma}\label{lem:decide:finite_base} Let $N$ be a set of MSL(SDC) clauses without variants or uncondensed clauses over a finite signature $\Sigma$. $N$ is finite if there exists an integer $d$ such that for every $(C;\pi) \in N$, depth($\pi$)$\leq d$ and\\ (1) $C= S_1(x_1),\dots,S_n(x_n),S'_1(t),\dots,S'_m(t) \rightarrow \Delta$ or \\ (2) $C= S_1(x_1),\dots,S_n(x_n),S'_1(t),\dots,S'_m(t) \rightarrow S(t),\Delta$\\ with $t$ shallow and linear, and $\operatorname{vars}(t) \cap \operatorname{vars}(\Delta) = \emptyset$. \end{lemma}
\begin{proof}
Let $(C;\pi) \in N$.
$(C;\pi)$ can be separated into variable disjoint components $(\Gamma_1,\ldots,\Gamma_n \rightarrow \Delta_1,\ldots,\Delta_n; \pi_1 \wedge \ldots \wedge \pi_n)$,
where $\vert \Delta_i \vert \leq 1$ and $\mathrm{lvar}(\pi_i) \subseteq \operatorname{vars}(\Gamma_i \rightarrow \Delta_i)$.
For each positive literal $P(s) \in \Delta$ there is a fragment
\begin{align*}
(A)~~ & (S_1(x_1),\dots,S_k(x_k) \rightarrow P(s);\pi') \\
\intertext{with $\{x_1,\dots,x_k\}\subseteq \operatorname{vars}(s)$. If $m > 0$, there is another fragment}
(B)~~ & (S_1(x_1),\dots,S_k(x_k),S'_1(t),\dots,S'_m(t) \rightarrow ;\pi') \\
\intertext{ or }
(C)~~ & (S_1(x_1),\dots,S_k(x_k),S'_1(t),\dots,S'_m(t) \rightarrow S(t);\pi') \\
\intertext{ with $\{x_1,\dots,x_k\}\subseteq \operatorname{vars}(t)$, respectively.
Lastly, for each variable $x\in \operatorname{vars}(C)$ with $x \notin \operatorname{vars}(t) \cup \operatorname{vars}(\Delta)$ there is a fragment }
(D)~~ & (S_1(x),\dots,S_k(x)\rightarrow;\pi').
\end{align*}
Since there are only finitely many terms $s$ with depth($s$)$\leq d$ modulo renaming,
there are only finitely many atomic constraints $x \neq s$ for a given variable $x$ different up to renaming $s$.
Thus, a normal constraint can only contain finitely many combinations of subconstraints $\bwedge{i\in \mathcal{I}} ~ x \neq s_i$ without some $s_i$ being an instance of another $s_j$.
Therefore, for a fixed set of variables $x_1,\dots,x_k$, there are only finitely many constraints $\pi=\bwedge{i\in \mathcal{I}} ~~ z_i \neq s_i$ with $\mathrm{lvar}(\pi)\subseteq \{x_1,\dots,x_k\}$ up to variants.
Since the number of predicates, function symbols, and their ranks is finite,
the number of possible shallow and linear atoms $S(t)$ different up to variants is finite.
For a given shallow and linear $t$, there exist only finitely many clauses of the form $({S_{1}(t),\dots,S_{n}(t) \rightarrow S(t)}; \pi)$ or $(S_{1}(t),\dots,S_{n}(t) \rightarrow;\pi)$ with $\mathrm{lvar}(\pi)\subseteq \operatorname{vars}(t)$ modulo condensation and variants.
For a fixed set of variables $x_1,\dots,x_k$, there exist only finitely many clauses of the form $(S_1(y_1),\dots,S_l(y_l) \rightarrow; \pi ) $ with $\{y_1,\dots,y_l\} \cup \mathrm{lvar}(\pi)\subseteq \{x_1,\dots,x_k\}$ modulo condensation and variants.
Therefore, there are only finitely many distinct clauses of each form (A)-(D) without variants or condensations.
If in the clause $(C;\pi)=(\Gamma_1,\ldots,\Gamma_n \rightarrow \Delta_1,\ldots,\Delta_n; \pi_1 \wedge \ldots \wedge \pi_n)$ for some $i\neq j$, $(\Gamma_i \rightarrow \Delta_i;\pi_i)$ is a variant of $(\Gamma_j \rightarrow \Delta_j;\pi_j)$,
then $(C;\pi)$ has a condensation and is therefore not part of $N$.
Hence, there can be only finitely many different $(C;\pi)$ without variants or condensations and thus $N$ is finite. \qed \end{proof}
\begin{lemma}[Finite Saturation]\label{lem:decide:saturation}
Let $N$ be an MSL(SDC) clause set. Then $N$ can be finitely saturated up to redundancy by SDC-resolution with selection function $\mathrm{sel}$. \end{lemma}
\begin{proof}
The general idea is that given the way $\mathrm{sel}$ is defined the clauses involved in constrained resolution and factoring can only fall into certain patterns.
Any result of such inferences then is either strictly smaller than one of its parents by some terminating measure
or falls into a set of clauses that is bounded by Lemma~\ref{lem:decide:finite_base}.
Thus, there can be only finitely many inferences before $N$ is saturated.
Let $d$ be an upper bound on the depth of constraints found in $N$ and $\Sigma$ be the finite signature consisting of the function and predicate symbols occurring in $N$.
Let $(\Gamma_1 \rightarrow \Delta_1, S(t);\pi_1)$ and $(\Gamma_2 , S(t') \rightarrow \Delta_2;\pi_2)$ be clauses in $N$ where SDC-Resolution applies with $\sigma=\mathrm{mgu}(S(t),S(t'))$ and resolvent $R=((\Gamma_1,\Gamma_2\rightarrow \Delta_1,\Delta_2)\sigma;\norm{(\pi_1 \wedge \pi_2)\sigma})$.
Because no literal is selected by $\mathrm{sel}$, $\Gamma_1 \rightarrow \Delta_1, S(t)$ can match only one of two patterns:
\begin{align*}
(A)~~ & S_1(x_1),\dots, S_n(x_n) \rightarrow S(f(y_1,\dots,y_k)),\Delta
\intertext{where $t=f(y_1,\dots,y_k)$ and $\{x_1,\dots,x_n\}\subseteq\{y_1,\dots,y_k\}\cup \operatorname{vars}(\Delta)$.}
(B) ~~ & S_1(x_1),\dots, S_n(x_n) \rightarrow S(y),\Delta
\intertext{where $t=y$ and $x_1,\dots,x_n$ are variables in $\operatorname{vars}(\Delta)$, i.e., $y$ occurs only once.}
\end{align*}
The literal $S(t')$ is selected by $\mathrm{sel}$ in $\Gamma_2 , S(t') \rightarrow \Delta_2$, and therefore $\Gamma_2 , S(t') \rightarrow \Delta_2$ can match only one of the following three patterns:
\begin{align*}
(1)~~ &S(f(t_1,\dots,t_k)),\Gamma' \rightarrow \Delta' \\
(2)~~ & S(y'),\Gamma' \rightarrow \Delta' \text{ where $\Gamma'$ has no function terms and $y'\notin \operatorname{vars}(\Delta')$. }\\
(3)~~ & S(y'),\Gamma' \rightarrow S'(y'),\Delta' \text{ where $\Gamma'$ has no function terms. }
\end{align*}
This means that the clausal part $(\Gamma_1,\Gamma_2\rightarrow \Delta_1,\Delta_2)\sigma$ of $R$ has one of six forms:
\begin{align*}
(A1)~~& S_1(x_1)\sigma,\dots, S_n(x_n)\sigma, \Gamma' \rightarrow \Delta,\Delta' \text{ with $\sigma=\{{y_1 \mapsto t_1},\dots \}$.}\\
\intertext{ $\Delta\sigma = \Delta$ because $S(f(y_1,\dots,y_k))$ and $\Delta$ do not share variables.}
(B1)~~& S_1(x_1),\dots, S_n(x_n), \Gamma' \rightarrow \Delta,\Delta'.\\
\intertext {The substitution $\{y \mapsto f(t_1,\dots,t_k) \}$ is irrelevant since $S(y)$ is the only literal with variable $y$.}
(A2)~~& S_1(x_1),\dots, S_n(x_n), \Gamma'\tau \rightarrow \Delta,\Delta' \text{ with $\tau=\{{y' \mapsto f(y_1,\dots,y_k)} \}$.}\\
\intertext{ $\Delta'\tau=\Delta'$ because $y' \notin \operatorname{vars}(\Delta')$.}
(B2)~~& S_1(x_1),\dots, S_n(x_n), \Gamma' \rightarrow \Delta,\Delta'. \\
(A3)~~& S_1(x_1),\dots, S_n(x_n), \Gamma'\tau \rightarrow S'(f(y_1,\dots,y_k)),\Delta,\Delta' \text{ with $\tau=\{y' \mapsto f(y_1,\dots,y_k) \}$.}\\
\intertext{ $\Delta'\tau=\Delta'$ because $y' \notin \operatorname{vars}(\Delta')$.}
(B3)~~& S_1(x_1),\dots, S_n(x_n), \Gamma' \rightarrow S'(y'),\Delta,\Delta'.
\end{align*}
In the constraint $\norm{(\pi_1 \wedge \pi_2) \sigma}$ the maximal depth of the subconstraints is less than or equal to the maximal depth of $\pi_1$ or $\pi_2$.
Hence, $d$ is also an upper bound on the constraint of the resolvent.
In each case, the resolvent is again an MSL(SDC) clause.
In the first and second case, the multiset of term depths of the negative literals in $R$ is strictly smaller than for the right parent.
In both cases, $\Gamma$ is the same in the right parent and the resolvent.
Only the $f(t_1,\dots,t_k)$ term is replaced by $x_1\sigma,\dots, x_n\sigma$ and $x_1,\dots, x_n$ respectively.
In the first case, the depth of the $x_i\sigma$ is either zero if $x_i\notin \{y_1,\dots,y_k\}$ or at least one less than $f(t_1,\dots,t_k)$ since $x_i\sigma=t_i$.
In the second case, the $x_i$ have depth zero which is strictly smaller than the depth of $f(t_1,\dots,t_k)$.
Since the multiset ordering on natural numbers is terminating, the first and second case can only be applied finitely many times by constrained resolution.
In the third to sixth case $R$ is of the form
$(S_1(x_1),\dots,S_l(x_l),S'_1(t),\dots,S'_m(t) \rightarrow \Delta;\pi)$ or
$(S_1(x_1),\dots,S_l(x_l),S'_1(t),\dots,S'_m(t) \rightarrow S(t),\Delta;\pi)$ with $t=f(y_1,\dots,y_k)$.
By Lemma \ref{lem:decide:finite_base}, there are only finitely many such clauses after condensation and removal of variants.
Therefore, these four cases can apply only finitely many times during saturation.
Let $(\Gamma \rightarrow \Delta, S(t), S(t');\pi)$ be a clause in $N$ where SDC-Factoring applies with $\sigma=\mathrm{mgu}(S(t),S(t'))$ and $R=((\Gamma \rightarrow \Delta, S(t))\sigma;\norm{\pi\sigma})$.
Because in $\Gamma \rightarrow \Delta, S(t), S(t')$ no literal is selected, $\Gamma \rightarrow \Delta, S(t), S(t')$ and $(\Gamma \rightarrow \Delta, S(t))\sigma$ can only match one of three patterns.
\begin{align*}
(A)~~& S_1(x_1),\dots, S_n(x_n) \rightarrow S(f(y_1,\dots,y_k)),S(f(z_1,\dots,z_k)),\Delta \\
\intertext {where $t=f(y_1,\dots,y_k)$, $t'=f(z_1,\dots,z_k)$, and $\{x_1,\dots,x_n\}\subseteq\{y_1,\dots,y_k\}\cup\{z_1,\dots,z_k\}\cup \operatorname{vars}(\Delta)$. The result is}
& S_1(x_1)\sigma,\dots, S_n(x_n)\sigma \rightarrow S(f(y_1,\dots,y_k)),\Delta \text{ with $\sigma=\{{z_1 \mapsto y_1},\dots \}$.}\\
(B)~~& S_1(x_1),\dots, S_n(x_n) \rightarrow S(f(y_1,\dots,y_k)),S(z),\Delta \\
\intertext{ where $t=f(y_1,\dots,y_k)$, $t'=z$ and $\{x_1,\dots,x_n\}\subseteq\{y_1,\dots,y_k\}\cup\operatorname{vars}(\Delta)$, i.e., $z$ occurs only once. The result is}
& S_1(x_1),\dots, S_n(x_n) \rightarrow S(f(y_1,\dots,y_k)),\Delta.\\
(C)~~& S_1(x_1),\dots, S_n(x_n) \rightarrow S(y),S(z),\Delta \\
\intertext{ where $t=y$, $t'=z$ and $\{x_1,\dots,x_n\}\subseteq\operatorname{vars}(\Delta)$, i.e., $y$ and $z$ occur only once. The result is }
& S_1(x_1),\dots, S_n(x_n) \rightarrow S(y),\Delta.
\end{align*}
In the new constraint $\norm{\pi \sigma}$ the maximal depth of the subconstraints is less than or equal to the maximal depth of $\pi$.
Hence $d$ is also an upper bound on the constraint of the resolvent.
In each case, the resolvent is again an MSL(SDC) clause.
Furthermore, in each case the clause is of the form $(S_1(x_1),\dots,S_l(x_l) \rightarrow \Delta; \pi)$.
By Lemma \ref{lem:decide:finite_base}, there are only finitely many such clauses after condensation and removal of variants.
Therefore, these three cases can apply only finitely many times during saturation.\qed \end{proof}
\begin{theorem}[MSL(SDC) Decidability]\label{theo:decide:main} Satisfiability of the MSL(SDC) first-order fragment is decidable. \end{theorem}
\begin{proof}
Follows from Lemmas \ref{lem:decide:saturation} and \ref{lem:decide:complete}.\qed \end{proof}
\section{Approximation and Refinement}\label{sec:approx}
In the following, we show how decidability of the MSL(SDC) fragment can be used to improve the approximation refinement calculus presented in \cite{Teucke2015}.
Our approach is based on a counter-example guided abstraction refinement (CEGAR) idea. The procedure loops through four steps: approximation, testing (un)satisfiability, lifting, and refinement. The approximation step transforms any first-order logic clause set into the decidable MSL(SDC) fragment while preserving unsatisfiability. The second step employs the decidability result for MSL(SDC), Section~\ref{sec:decide}, to test satisfiability of the approximated clause set. If the approximation is satisfiable, the original problem is satisfiable as well and we are done. Otherwise, the third step, lifting, tests whether the proof of unsatisfiability found for the approximated clause set can be lifted to a proof of the original clause set. If so, the original clause set is unsatisfiable and we are again done. If not, we extract a cause for the lifting failure that always amounts to two different
instantiations of the same variable in a clause from the original clause set. This is resolved by the fourth step, the refinement. The crucial clause in the original problem is replaced and instantiated in a satisfiability preserving way such that the different instantiations do not reoccur anymore in subsequent iterations of the loop.
As mentioned before, our motivation to use dismatching constraints is that for an unconstrained clause the refinement adds quadratically many new clauses to the clause set. In contrast, with constrained clauses the same can be accomplished with adding just a single new clause. This extension is rather simple as constraints are treated the same as the antecedent literals in the clause. Furthermore we present refinement as a separate transformation rule.
The second change compared to the previous version is the removal of the Horn approximation rule, where we have now shown in Section~\ref{sec:decide} that a restriction to Horn clauses is not required for decidability anymore. Instead, the linear and shallow approximations are extended to apply to non-Horn clauses instead.
The approximation consists of individual transformation rules $N \Rightarrow N'$ that are non-deterministically applied. They transform a clause that is not in the MSL(SDC) fragment in finite steps into MSL(SDC) clauses. Each specific property of MSL(SDC) clauses, i.e., monadic predicates, shallow and linear positive literals, is generated by a corresponding rule: the Monadic transformation encodes non-monadic predicates as functions, the shallow transformation extracts non-shallow subterms by introducing fresh predicates and the linear transformation renames non-linear variable occurrences.
Starting from a constrained clause set $N$ the transformation is parameterized by a single monadic projection predicate $T$, fresh to $N$ and for each non-monadic predicate $P$ a separate projection function $f_P$ fresh to $N$. The clauses in $N$ are called the original clauses while the clauses in $N'$ are the approximated clauses. We assume all clauses in $N$ to be variable disjoint.
\begin{definition}\label{def:approx:termencod} Given a predicate $P$, projection predicate $T$, and projection function $f_P$, define the injective function $\Proj{P}^T(P(\args{t})) := T(f_P(\args{t}))$ and $\Proj{P}^T(Q(\args{s})) := Q(\args{s})$ for $P \neq Q$. The function is extended to [constrained] clauses, clause sets and interpretations. Given a signature $\Sigma$ with non-monadic predicates $P_1,\ldots,P_n$, define $\Proj{\Sigma}^T(N):=\Proj{P_1}^T(\ldots(\Proj{P_n}^T(N))\ldots)$ and $\Proj{\Sigma}^T(\I{}):=\Proj{P_1}^T(\ldots(\Proj{P_n}^T(\I{}))\ldots)$. \end{definition}
\shortrules{Monadic}{$N$}{$\Proj{P}^T(N)$}{provided $P$ is a non-monadic predicate in the signature of $N$.}{MO}{15}
\shortrules{Shallow}{$N~\dot{\cup}~\{(\Gamma \rightarrow E[s]_{p},\Delta;\pi)\}$}{\\ $~~~~~~~~~~~~~~~~~~~~~~~ N\cup\{(S(x),\Gamma_l \rightarrow E[p/x],\Delta_l;\pi)$; $(\Gamma_r \rightarrow S(s),\Delta_r;\pi)\}$} {provided $s$ is complex, $\vert p\vert=2$, $x$ and $S$ fresh, $\Gamma_l\{x \mapsto s\} \cup \Gamma_r = \Gamma$, $\Delta_l {\cup} \Delta_r = \Delta$, $\{Q(y)\in \Gamma \mid {y \in \operatorname{vars}(E[p/x],\Delta_l)\}} \subseteq \Gamma_l$, $\{Q(y)\in \Gamma \mid {y \in \operatorname{vars}(s,\Delta_r) \}} \subseteq \Gamma_r$.}{SH}{15}
\shortrules{Linear 1}{$N~\dot{\cup}~\{(\Gamma \rightarrow \Delta, E'[x]_{p},E[x]_q;\pi)\}$}{\\ $~~~~~~~~~~~~~~~~~~~~~~\;N\cup\{(\Gamma\sigma,\Gamma \rightarrow \Delta, E'[x]_{p},E[q/x'];\pi \wedge \pi\sigma)\}$} {provided $x'$ is fresh and $\sigma= \{x \mapsto x'\}$.}{LI}{15}
\shortrules{Linear 2}{$N~\dot{\cup}~\{(\Gamma \rightarrow \Delta, E[x]_{p,q};\pi)\}$}{\\ $~~~~~~~~~~~~~~~~~~~~~~\;N\cup\{(\Gamma\sigma,\Gamma \rightarrow \Delta, E[q/x'];\pi \wedge \pi\sigma)\}$} {provided $x'$ is fresh, $p \neq q$ and $\sigma= \{x \mapsto x'\}$.}{LI}{15}
\shortrules{Refinement}{$N~\dot{\cup}~\{(C;\pi)\} $} {$N \cup \{(C;\pi \wedge x \neq t),(C;\pi)\{x \mapsto t\}\}$} {provided $x\in \operatorname{vars}(C)$, $t$ straight and $\operatorname{vars}(t) \cap \operatorname{vars}((C;\pi))=\emptyset$. }{ \operatorname{Ref}}{15}
Note that variables are not renamed unless explicitly stated in the rule. This means that original clauses and their approximated counterparts share variable names. We use this to trace the origin of variables in the approximation.
The refinement transformation $ \Rightarrow_{\operatorname{Ref}}$ is not needed to eventually generate MSL(SDC) clauses, but can be used to achieve a more fine-grained approximation of $N$, see below.
In the shallow transformation, $\Gamma$ and $\Delta$ are separated into $\Gamma_l$, $\Gamma_r$, $\Delta_l$, and $\Delta_r$, respectively. The separation can be almost arbitrarily chosen as long as no atom from $\Gamma$, $\Delta$ is skipped. However, the goal is to minimize the set of shared variables, i.e., the variables of $(\Gamma \rightarrow E[s]_{p},\Delta;\pi)$ that are inherited by both approximation clauses, $\operatorname{vars}(\Gamma_r,s,\Delta_r) \cap \operatorname{vars}(\Gamma_l,E[p/x],\Delta_l)$. If there are no shared variables, the shallow transformation is satisfiability equivalent. The conditions on $\Gamma_l$ and $\Gamma_r$ ensure that $S(x)$ atoms are not separated from the respective positive occurrence of $x$ in subsequent shallow transformation applications.
Consider the clause $Q(f(x),y) \rightarrow P(g(f(x),y))$. The simple shallow transformation $S(x'),Q(f(x),y) \rightarrow P(g(x',y)); S(f(x))$ is not satisfiability equivalent -- nor with any alternative partitioning of $\Gamma$. However, by replacing the occurrence of the extraction term $f(x)$ in $Q(f(x),y)$ with the fresh variable $x'$, the approximation $S(x'),Q(x',y) \rightarrow P(g(x',y)); S(f(x))$ is satisfiability equivalent. Therefore, we allow the extraction of $s$ from the terms in $\Gamma_l$ and require $\Gamma_l\{x \mapsto s\} \cup \Gamma_r = \Gamma$.
We consider Linear~1 and Linear~2 as two cases of the same linear transformation rule. Their only difference is whether the two occurrences of $x$ are in the same literal or not. The duplication of literals and constraints in $\Gamma$ and $\pi$ is not needed if $x$ does not occur in $\Gamma$ or $\pi$.
Further, consider a linear transformation $N \cup \{(C;\pi)\} \Rightarrow_{\operatorname{LI}} N \cup \{(C_a;\pi_a)\}$, where a fresh variable $x'$ replaces an occurrence of a non-linear variable $x$ in $(C;\pi)$. Then, $(C_a;\pi_a)\{x' \mapsto x\}$ is equal to $(C;\pi)$ modulo duplicate literal elimination. A similar property can be observed of a resolvent of $(C_l;\pi)$ and $(C_r;\pi)$ resulting from a shallow transformation $N \cup \{(C;\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{(C_l;\pi), (C_r;\pi)\}$. Note that by construction, $(C_l;\pi)$ and $(C_r;\pi)$ are not necessarily variable disjoint. To simulate standard resolution, we need to rename at least the shared variables in one of them.
\begin{definition}[$ \Rightarrow_{\operatorname{AP}}$] \label{def:approx:apr} We define $ \Rightarrow_{\operatorname{AP}}$ as the priority rewrite system~\cite{Baeten89tcs} consisting of $ \Rightarrow_{\operatorname{Ref}}$, $\Rightarrow_{\operatorname{MO}}$, $\Rightarrow_{\operatorname{SH}}$ and $\Rightarrow_{\operatorname{LI}}$ with priority $ \Rightarrow_{\operatorname{Ref}} \,>\, \Rightarrow_{\operatorname{MO}} \,>\, \Rightarrow_{\operatorname{SH}} \,>\, \Rightarrow_{\operatorname{LI}}$, where $ \Rightarrow_{\operatorname{Ref}}$ is only applied finitely many times. \end{definition}
\begin{lemma}[$ \Rightarrow_{\operatorname{AP}}$ is a Terminating Over-Approximation]\label{lem:approx:sound} (i)~$ \Rightarrow_{\operatorname{AP}}^*$ terminates, (ii) if $N \Rightarrow_{\operatorname{AP}} N'$ and $N'$ is satisfiable, then $N$ is also satisfiable. \end{lemma}
\begin{proof}
(i)~The transformations can be considered sequentially, because of the imposed rule priority.
There are, by definition, only finitely many refinements at the beginning of an approximation $ \Rightarrow_{\operatorname{AP}}^*$.
The monadic transformation strictly reduces the number of non-monadic atoms.
The shallow transformation strictly reduces the multiset of term depths of the newly introduced clauses compared
to the removed parent clause.
The linear transformation strictly reduces the number of duplicate variable occurrences in positive literals.
Hence $ \Rightarrow_{\operatorname{AP}}$ terminates.
(ii) Let $N \cup \{ (C;\pi)\} \Rightarrow_{\operatorname{LI}} N \cup \{ (C_a;\pi_a)\}$ where an occurrence of a variable $x$ in $(C;\pi)$ is replaced by a fresh $x'$.
As $(C_a;\pi_a)\{x' \mapsto x\}$ is equal to $(C;\pi)$ modulo duplicate literal elimination,
$\I{} \models (C;\pi)$ if $\I{} \models (C_a;\pi_a)$.
Therefore, the linear transformation is an over-approximation.
Let $N \cup \{ (C;\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{ (C_l;\pi_l),(C_r;\pi_r)\}$
and $(C_a;\pi_a)$
be the shallow $\rho$-resolvent.
As $(C_a;\pi_a)\rho^{-1}$ equals $(C;\pi)$ modulo duplicate literal elimination,
$\I{} \models (C;\pi)$ if $\I{} \models (C_l;\pi_l), (C_r;\pi_r)$.
Therefore, the shallow transformation is an over-approximation.
Let $N \Rightarrow_{\operatorname{MO}} \Proj{P}(N)=N'$.
Then, $N=\R{P}(N') $.
Let $\I{}$ be a model of $N'$ and $(C;\pi) \in N$.
Since $ \Proj{P}((C;\pi)) \in N'$ , $\I{} \vDash \Proj{P}((C;\pi))$ and thus, $\R{P}(\I{})\vDash (C;\pi)$.
Hence, $\R{P}(\I{})$ is a model of $N$.
Therefore, the monadic transformation is an over-approximation. Actually, it
is a satisfiability preserving transformation.
Let $N \cup \{(C;\pi)\} \Rightarrow_{\operatorname{Ref}} N \cup \{(C;\pi \wedge x \neq t),(C;\pi)\{x \mapsto t\}\}$.
Let $C\delta \in \ground{(C;\pi)} $.
If $x\delta$ is not an instance of $t$, then $\delta$ is a solution of $\pi \wedge x \neq t$ and $C\delta \in \ground{(C;\pi \wedge x \neq t)}$.
Otherwise, $\delta=\{x\mapsto t\}\delta'$ for some substitution $\delta'$.
Then, $\delta$ is a solution of $\pi\{x\mapsto t\}$ and thus, $C\delta=C\{x\mapsto t\}\delta' \in \ground{(C\{x \mapsto t\};\pi\{x \mapsto t\})}$.
Hence, $ \ground{(C;\pi)} \subseteq \ground{(C;\pi \wedge x \neq t)} \cup \ground{(C;\pi)\{x \mapsto t\}}.$
Therefore, if $\I{}$ is a model of $N \cup \{(C;\pi \wedge x \neq t),(C;\pi)\{x \mapsto t\}\}$, then $\I{}$ is also a model of $N \cup \{(C;\pi)\}$. \qed \end{proof}
Note that $ \Rightarrow_{\operatorname{Ref}}$ and $ \Rightarrow_{\operatorname{MO}}$ are also satisfiability preserving transformations.
\begin{corollary}\label{cor:approx:sound} If $N \Rightarrow_{\operatorname{AP}}^* N'$ and $N'$ is satisfied by a model $\I{}$, then $\R{\Sigma}(\I{})$ is a model of $N$. \end{corollary} \begin{proof}
Follows from Lemma~\ref{lem:approx:sound}~(ii).\qed \end{proof}
On the basis of $ \Rightarrow_{\operatorname{AP}}$ we can define an ancestor relation $ \Rightarrow_{\operatorname{A}}$ that relates clauses, literal occurrences, and variables with respect to approximation. This relation is needed in order to figure out the exact clause, literal, variable for refinement.
\begin{definition}[The Shallow Resolvent]\label{def:approx:resolvent}
Let $N \cup \{(C;\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{(C_l;\pi), (C_r;\pi)\}$ with $C=\Gamma \rightarrow E[s]_{p},\Delta$, $C_l=S(x),\Gamma_l \rightarrow E[p/x],\Delta_l$ and $C_r= \Gamma_r \rightarrow S(s),\Delta_r$.
Let $x_1,\ldots,x_n$ be the variables shared between $C_l$ and $C_r$ and $\rho=\{x_1 \mapsto x'_1, \ldots, x_n \mapsto x'_n\}$ be a variable renaming with $x'_1,\ldots,x'_n$ fresh in $C_l$ and $C_r$.
We define $(\Gamma_l\{x \mapsto s\rho \},\Gamma_r\rho \rightarrow E[p/s\rho],\Delta_l,\Delta_r\rho;\pi \wedge \pi\rho)$ as the shallow $\rho$-resolvent. \end{definition}
Let $(C_a;\pi_a)$ be the shallow $\rho$-resolvent of $N \cup \{(C;\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{(C_l;\pi), (C_r;\pi)\}$. Note that for any two ground instances $C_l\delta_l$ and $C_r\delta_r$, their resolvent is a ground instance of $(C_a;\pi_a)$. Furthermore, using the reverse substitution $\rho^{-1}= \{x'_1 \mapsto x_1, \ldots, x'_n \mapsto x_n\}$, $(C_a;\pi_a)\rho^{-1}= (\Gamma_l\{x \mapsto s \},\Gamma_r \rightarrow E[s]_{p},\Delta_l,\Delta_r;\pi \wedge \pi)$ is equal to $(C;\pi)$ modulo duplicate literal elimination. This is because, $\Delta_l \cup \Delta_r = \Delta$ and $\Gamma_l\{x \mapsto s \} \cup \Gamma_r = \Gamma$ by definition of $ \Rightarrow_{\operatorname{SH}}$ and $\pi \wedge \pi$ is equivalent to $ \pi$.
Next, we establish parent relations that link original and approximated clauses, as well as their variables and literals. Together the parent, variable and literal relations will allow us to not only trace any approximated clause back to their origin, but also predict what consequences changes to the original set will have on its approximations.
For the following definitions, we assume that clause and literal sets are lists and that $\Proj{P}^T$ and substitutions act as mappings. This means we can uniquely identify clauses and literals by their position in those lists. Further, for every shallow transformation $N \Rightarrow_{\operatorname{SH}} N'$, we will also include the shallow resolvent in the parent relation as if it were a member of $N'$.
\begin{definition}[Parent Clause]\label{def:approx:pclause}
For an approximation step $N \Rightarrow_{\operatorname{AP}} N'$ and two clauses $(C;\pi)\in N$ and $(C';\pi')\in N'$,
we define $[(C;\pi), N] \Rightarrow_{\operatorname{A}} [(C';\pi'), N']$ expressing that $(C;\pi)$ in $N$ is the parent clause of $(C';\pi')$ in $N'$:\\
If $N \Rightarrow_{\operatorname{MO}} \Proj{P}^T(N)$, then
$[(C;\pi),N] \Rightarrow_{\operatorname{A}} [\Proj{P}^T((C;\pi)), \Proj{P}^T(N)]$ for all $(C;\pi) \in N$.\\
If $N=N'' \cup \{(C;\pi)\} \Rightarrow_{\operatorname{SH}} N'' \cup \{(C_l;\pi_l),(C_r;\pi_r)\}=N'$, then
$[(D,\pi'),N] \Rightarrow_{\operatorname{A}} [(D,\pi'),N']$ for all $(D,\pi') \in N''$ and
$[(C,\pi),N] \Rightarrow_{\operatorname{A}} [(C_l;\pi_l),N']$,
$[(C,\pi),N] \Rightarrow_{\operatorname{A}} [(C_r;\pi_r),N']$ and
$[(C,\pi),N] \Rightarrow_{\operatorname{A}} [(C_a;\pi_a),N']$ for any shallow resolvent $(C_a;\pi_a)$.\\
If $N=N'' \cup \{(C;\pi)\} \Rightarrow_{\operatorname{LI}} N'' \cup \{(C_a;\pi_a)\}=N'$, then
$[(D,\pi'),N] \Rightarrow_{\operatorname{A}} [(D,\pi'),N']$ for all $(D,\pi') \in N''$ and
$[(C,\pi),N] \Rightarrow_{\operatorname{A}} [(C_a,\pi_a),N']$. \\
If $N=N'' \cup \{(C;\pi)\} \Rightarrow_{\operatorname{Ref}} N'' \cup \{(C;\pi \wedge x \neq t),(C;\pi)\{x \mapsto t\}\}=N'$, then
$[(D,\pi'),N] \Rightarrow_{\operatorname{A}} [(D,\pi'),N']$ for all $(D,\pi') \in N''$ ,
$[(C,\pi),N] \Rightarrow_{\operatorname{A}} [(C;\pi \wedge x \neq t),N']$ and
$[(C,\pi),N] \Rightarrow_{\operatorname{A}} [(C;\pi)\{x \mapsto t\}, N'] $. \end{definition}
\begin{definition}[Parent Variable]\label{def:approx:pvar}
Let $N \Rightarrow_{\operatorname{AP}} N'$ be an approximation step and $[(C;\pi),N] \Rightarrow_{\operatorname{A}} [(C';\pi'),N']$.
For two variables $x$ and $y$,
we define $[x,(C;\pi), N] \Rightarrow_{\operatorname{A}} [y,(C';\pi'), N']$ expressing that $x \in \operatorname{vars}(C)$ is the parent variable of $y \in \operatorname{vars}(C')$:\\
If $x\in \operatorname{vars}((C;\pi))\cap \operatorname{vars}((C';\pi'))$, then
$[x,(C;\pi),N] \Rightarrow_{\operatorname{A}} [x,(C';\pi'),N']$.\\
If $N \Rightarrow_{\operatorname{SH}} N'$ and $(C',\pi')$ is the shallow $\rho$-resolvent,
$[x_i,(C;\pi),N] \Rightarrow_{\operatorname{A}} [x_i\rho,(C';\pi'),N']$ for each $x_i$ in the domain of $\rho$.\\
If $N \Rightarrow_{\operatorname{LI}} N'$, $C= \Gamma \rightarrow \Delta[x]_{p,q}$ and $C'=\Gamma\{x \mapsto x'\},\Gamma \rightarrow \Delta[q/x']$, then
$[x,(C;\pi),N] \Rightarrow_{\operatorname{A}} [x',(C';\pi'),N']$. \end{definition}
Note that if $N \Rightarrow_{\operatorname{SH}} N'$ and $x$ is the fresh extraction variable in $(C_l;\pi_l)$, then $x$ has no parent variable. For literals, we actually further specify the relation on the positions within literals of a clause $(C;\pi)$ using pairs $(L,r)$ of literals and positions. We write $(L,r)\in C$ to denote that $(L,r)$ is a literal position in $(C;\pi)$ if $L\in C$ and $r\in \mathrm{pos}(L)$. Note that a literal position $(L,r)$ in $(C;\pi)$ corresponds to the term $L\vert_r$.
\begin{definition}[Parent literal position]\label{def:approx:pterm}
Let $N \Rightarrow_{\operatorname{AP}} N'$ be an approximation step and $[(C;\pi),N] \Rightarrow_{\operatorname{A}} [(C';\pi'),N']$.
For two literal positions $(L,r)$ and $(L',r')$,
we define $[r,L,(C;\pi), N] \Rightarrow_{\operatorname{A}} [r',L',(C';\pi'), N']$ expressing that $(L,r)$ in $(C;\pi)$ is the parent literal position of $(L',r')$ in $(C';\pi')$:\\
If $(C;\pi)=(C';\pi')$, then
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in C$.\\
If $N \Rightarrow_{\operatorname{Ref}} N'$ and $(C',\pi')=(C;\pi \wedge x \neq t)$, then
$[r,L,(C;\pi),N ] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in C$.\\
If $N \Rightarrow_{\operatorname{Ref}} N'$ and $(C',\pi')=(C;\pi)\{x \mapsto t\}$, then
$[r,L,(C;\pi),N ] \Rightarrow_{\operatorname{A}} [ r,L\{x \mapsto t\},(C';\pi'),N']$ for all $(L,r)\in C$.\\
If $N \Rightarrow_{\operatorname{MO}} \Proj{P}^T(N)=N'$, then
$[\varepsilon,P(\args{t}),(C;\pi),N] \Rightarrow_{\operatorname{A}} [ \varepsilon,T(f_p(\args{t})),(C';\pi'),N']$ for all $P(\args{t})\in C$ and
$[r,P(\args{t}),(C;\pi),N] \Rightarrow_{\operatorname{A}} [ 1.r,T(f_p(\args{t})),(C';\pi'),N']$ for all $(P(\args{t}),r)\in C$.\\
If $N \Rightarrow_{\operatorname{SH}} N'$, $C= \Gamma \rightarrow E[s]_{p},\Delta$ and $C'=S(x),\Gamma_l \rightarrow E[p/x],\Delta_l$, then
$[r,E[s]_{p},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,E[p/x],(C';\pi'),N']$ for all $r\in \mathrm{pos}(E[p/x])$,
$[p,E[s]_{p},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,S(x),(C';\pi'),N']$ for all $r\in \mathrm{pos}(S(x))$,
$[r,L\{x \mapsto s\},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Gamma_l$,
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Delta_l$.\\
If $N \Rightarrow_{\operatorname{SH}} N'$, $C= \Gamma \rightarrow E[s]_{p},\Delta$ and $C'=\Gamma_r \rightarrow S(s),\Delta_r$, then
$[p,E[s]_{p},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ \varepsilon ,S(s),(C';\pi'),N']$,
$[pr,E[s]_{p},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ 1.r,S(s),(C';\pi'),N']$ for all $r\in \mathrm{pos}(s)$, and
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Gamma_r\cup\Delta_r$.\\
If $N \Rightarrow_{\operatorname{SH}} N'$, $C= \Gamma \rightarrow E[s]_{p},\Delta$ and $(C',\pi')$ is the shallow $\rho$-resolvent, then
$[r,E[s]_{p},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,E[p/s\rho],(C';\pi'),N']$ for all $r\in \mathrm{pos}(E[p/s\rho])$,
$[r,L\{x \mapsto s\},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L\{x \mapsto s\rho \},(C';\pi'),N']$ for all $(L,r)\in \Gamma_l$,
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L\rho,(C';\pi'),N']$ for all $(L,r)\in \Gamma_r\cup\Delta_r$, and
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Delta_l$.\\
If $N \Rightarrow_{\operatorname{LI}} N'$, $C= \Gamma \rightarrow \Delta, E'[x]_{p},E[x]_q$ and $C'=\Gamma\{x \mapsto x'\},\Gamma \rightarrow \Delta, E'[x]_{p},E[q/x']$,
$[r,E'[x]_{p},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,E'[x]_p,(C';\pi'),N']$ for all $r\in \mathrm{pos}(E'[x]_p)$,
$[r,E[x]_{q},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,E[q/x'],(C';\pi'),N']$ for all $r\in \mathrm{pos}(E[q/x'])$,
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L\{x \mapsto x'\},(C';\pi'),N']$ for all $(L,r)\in \Gamma$,
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Gamma$, and
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Delta$.\\
If $N \Rightarrow_{\operatorname{LI}} N'$, $C= \Gamma \rightarrow \Delta, E[x]_{p,q}$ and $C'=\Gamma\{x \mapsto x'\},\Gamma \rightarrow \Delta, E[q/x']$, then
$[r,E[x]_{p,q},(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,E[q/x'],(C';\pi'),N']$ for all $r\in \mathrm{pos}(E[q/x'])$,
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L\{x \mapsto x'\},(C';\pi'),N']$ for all $(L,r)\in \Gamma$,
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Gamma$, and
$[r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}} [ r,L,(C';\pi'),N']$ for all $(L,r)\in \Delta$. \end{definition}
\begin{figure}
\caption{Visual representation of the parent literal position relation (Definition~\ref{def:approx:pterm})}
\end{figure}
The transitive closures of each parent relation are called ancestor relations.
The over-approximation of a clause set $N$ can introduce resolution refutations that have no corresponding equivalent in $N$ which we consider a lifting failure. Compared to our previous calculus~\cite{Teucke2015}, the lifting process is identical with the exception that there is no case for the removed Horn transformation. We only update the definition of conflicting cores to consider constrained clauses.
\begin{definition}[Conflicting Core]\label{def:lifting:core}
A finite set of unconstrained clauses and a solvable constraint $(N^\bot;\pi)$ are a conflicting core if
$N^\bot\delta$ is unsatisfiable for all solutions $\delta$ of $\pi$ over $\operatorname{vars}(N^\bot)\cup \mathrm{lvar}(\pi)$.
A conflicting core $(N^\bot;\pi)$ is a conflicting core of the constrained clause set $N$ if for every $C\in N^\bot$ there is a clause $(C',\pi')\in N$
such that $(C;\pi)$ is an instance of $(C';\pi')$ modulo duplicate literal elimination.
The clause $(C';\pi')$ is then called the instantiated clause of $(C;\pi)$ in $(N^\bot;\pi)$.
We call $(N^\bot;\pi)$ complete if for every clause $C \in N^\bot$ and literal $L\in C$, there exists a clause $D\in N^\bot$ with $\overline L\in D$. \end{definition}
A conflicting core is a generalization of a ground unsatisfiability core that allows global variables to act as parameters. This enables more efficient lifting and refinement compared to a simple ground unsatisfiable core. We show some examples at the end of this section.
We discuss the potential lifting failures and the corresponding refinements only for the linear and shallow case because lifting the satisfiability equivalent monadic and refinement transformations always succeeds. To reiterate from our previous work: in the linear case, there exists a clause in the conflicting core that is not an instance of the original clauses. In the shallow case, there exists a pair of clauses whose resolvent is not an instance of the original clauses. We combine these two cases by introducing the notion of a lift-conflict.
\begin{definition}[Conflict]\label{def:refine:conflict} Let $N \cup \{(C,\pi)\} \Rightarrow_{\operatorname{LI}} N\cup \{(C_a,\pi_a)\}$ and $N^\bot$ be a complete ground conflicting core of $N\cup \{(C_a,\pi_a)\}$. We call a conflict clause $C_c \in N^\bot$ with the instantiated clause $(C_a,\pi_a)$ a lift-conflict if $C_c$ is not an instance of $(C,\pi)$ modulo duplicate literal elimination. Then, $C_c$ is an instance of $(C_a,\pi_a)$, which we call the conflict clause of $C_c$.
Let $N \cup \{(C,\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{(C_l,\pi_l),(C_r,\pi_r)\}$, $(C_a;\pi_a)$ be the shallow resolvent and $N^\bot$ be a complete ground conflicting core of $N\cup \{(C_l,\pi_l),(C_r,\pi_r)\}$. We call the resolvent $C_c$ of $C_l\delta_l\in N^\bot$ and $C_r\delta_r \in N^\bot$ a lift-conflict if $C_c$ is not an instance of $(C,\pi)$ modulo duplicate literal elimination. Then, $C_c$ is an instance of $(C_a;\pi_a)$, which we call the conflict clause of $C_c$. \end{definition}
The goal of refinement is to instantiate the original parent clause in such a way that is both satisfiability equivalent and prevents the lift-conflict after approximation. Solving the refined approximation will then either necessarily produce a complete saturation or a new refutation proof, because its conflicting core has to be different. For this purpose, we use the refinement transformation to segment the original parent clause $(C;\pi)$ into two parts $(C;\pi \wedge x \neq t)$ and $(C;\pi)\{x \mapsto t\}$.
For example, consider $N$ and its linear transformation $N'$.\newline \centerline{$\begin{array}{r@{\,\rightarrow\,}lcr@{\,\rightarrow\,}l}
& P(x,x) & \;\Rightarrow_{\operatorname{LI}}\; & &P(x,x')\\
P(a,b) & & \; \Rightarrow_{\operatorname{AP}}^0\; & P(a,b)&\\
\end{array}$} The ground conflicting core of $N'$ is\newline \centerline{$\begin{array}{r@{\,\rightarrow\,}l}
& P(a,b) \\
P(a,b)& \\
\end{array}$} Because $P(a,b)$ is not an instance of $P(x,x)$, lifting fails. $P(a,b)$ is the lift-conflict. Specifically, $\{x\mapsto a\}$ and $\{x \mapsto b\}$ are conflicting substitutions for the parent variable $x$. We pick $\{x\mapsto a\}$ to segment $P(x,x)$ into $ (P(x,x);x \neq a)$ and $P(x,x)\{x \mapsto a\}$. Now, any descendant of $(P(x,x);x \neq a)$ cannot have $a$ at the position of the first $x$, and any descendant of $P(x,x)\{x \mapsto a\}$ must have an $a$ at the position of the second $x$. Thus, $P(a,b)$ is excluded in both cases and no longer appears as a lift-conflict.
To show that the lift-conflict will not reappear in the general case, we use that the conflict clause and its ancestors have strong ties between their term structures and constraints.
\begin{definition}[Constrained Term Skeleton]\label{def:refine:termskel} The constrained term skeleton of a term $t$ under constraint $\pi$, $\mathrm{skt}(t,\pi)$, is defined as the normal form of the following transformation: \newline \centerline{$\begin{array}{c}
(t[x]_{p,q} ;\pi) \Rightarrow_{\mathrm{skt}} (t[q/x'] ;\pi \wedge \pi\{x \mapsto x'\}), \text{ where } p \neq q \text{ and $x'$ is fresh}.
\end{array}$} \end{definition}
The constrained term skeleton of a term $t$ is essentially a linear version of $t$ where the restrictions on each variable position imposed by $\pi$ are preserved. For $(t,\pi)$ and a solution $\delta$ of $\pi$, $t\delta$ is called a ground instance of $(t,\pi)$.
\begin{lemma}\label{lem:refinement:ancestor_skel} Let $N_0 \Rightarrow_{\operatorname{AP}}^* N_k$, $(C_k;\pi_k)$ in $N_k$ with the ancestor clause $(C_0;\pi_0)\in N_0$ and $N^\bot_k$ be a complete ground conflicting core of $N_k$. Let $\delta$ be a solution of $\pi_k$ such that $C_k\delta$ is in $N^\bot_k$. If $(L',q')$ is a literal position in $(C_k;\pi_k)$ with the ancestor $(L,q)$ in $(C_0,\pi_0)$, then (i) $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(L\vert_q,\pi_0)$, (ii) $q=q'$ if $L$ and $L'$ have the same predicate, and (iii) if $L'\vert_{q'}=x$ and there exists an ancestor variable $y$ of $x$ in $(C_0,\pi_0)$, then $L\vert_{q}=y$. \end{lemma}
\begin{proof}
By induction on the length of the approximation $N_0 \Rightarrow_{\operatorname{AP}}^* N_k$.
The base case $N_k=N_0$, is trivial.
Let $N_0= N \cup \{ (C;\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{ (C_l;\pi_l),(C_r;\pi_r)\}=N_k$, $(C_k;\pi_k)$ be the shallow $\rho$-resolvent and $C_k\delta$ be the resolvent of two instances of $(C_l;\pi_l)$ and $(C_r;\pi_r)$ in $N^\bot_k$.
Then, $(C_k;\pi_k)\rho^{-1}$ is equal to $ (C;\pi)$ modulo duplicate literal elimination.
Thus, by definition $(L,q)=(L',q')\rho^{-1}$.
Therefore, (i) $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(L\vert_q,\pi_0)$,
(ii) $q=q'$ if $L$ and $L'$ have the same predicate, and
(iii) if $L'\vert_{q'}=x$ and there exists an ancestor variable $y$ of $x$ in $(C_0,\pi_0)$, then $L\vert_{q}=y$.
Now, let $ N_0 \Rightarrow_{\operatorname{AP}} N_{1} \Rightarrow_{\operatorname{AP}}^* N_k$.
Since $(L',q')$ has an ancestor literal position in $(C_0,\pi_0)$,
the ancestor clause of $(C_k;\pi_k)$ in $N_1$, $(C_1,\pi_1)$, contains the ancestor literal position $(L_1,{q_1})$, which has $(L,q)$ as its parent literal position.
By the induction hypothesis on $N_{1} \Rightarrow_{\operatorname{AP}}^* N_k$,
(i) $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(L_1\vert_{q_1},\pi_1)$,
(ii) $q_1=q'$ if $L_1$ and $L'$ have the same predicate, and
(iii) if $L'\vert_{q'}=x$ and there is an ancestor variable $y_1$ of $x$ in $(C_1,\pi_1)$, then $L_1\vert_{q_1}=y_1$.
Let $N_0= N \cup \{ (C;\pi)\} \Rightarrow_{\operatorname{Ref}} N \cup \{ (C;\pi \wedge x \neq t),(C;\pi)\{x \mapsto t\}\}=N_1$.
If $(C_1,\pi_1)$ is neither $(C;\pi \wedge x \neq t)$ nor $(C;\pi)\{x \mapsto t\}$, then trivially $(C_0,\pi_0)=(C_1,\pi_1)$.
Otherwise, $(C_1,\pi_1)= (C;\pi \wedge x \neq t)$ or $(C_1,\pi_1)= (C;\pi)\{x \mapsto t\}$.
Then $(L_1,{q_1})= (L,q)$ or $(L_1,{q_1})=(L,q)\{x \mapsto t\}$.
In either case, (i) $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(L\vert_q,\pi_0)$,
(ii) $q=q'$ if $L$ and $L'$ have the same predicate, and
(iii) if $L'\vert_{q'}=x$ and there exists an ancestor variable $y$ of $x$ in $(C_0,\pi_0)$, then $L\vert_{q}=y$.
Let $N_0 \Rightarrow_{\operatorname{MO}} \Proj{P}(N)=N_1$.
If $P$ is not the predicate of $L$, then trivially $(L,q)=(L_1,{q_1})$.
If $P$ is the predicate of $L$, then $(L,q)=(P(t_1,\ldots,t_n),q)$ and $(L_1,{q_1})=(T(f_p(t_1,\ldots,t_n)),1.q)$.
Thus, (i) $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(L\vert_{q},\pi_0) =$ $\mathrm{skt}(T(f_p(t_1,\ldots,t_n))\vert_{1.q},\pi_0)$.
(ii) The predicate of $L'$ is not $P$ by definition.
(iii) Let $L'\vert_{q'}=x$ and $y$ be the ancestor variable of $x$ in $(C_0,\pi_0)$.
Then, $y$ is also the ancestor variable of $x$ in $(C_1,\pi_1)$ and $L_1\vert_{q_1}=y$.
Therefore, $L\vert_q=P(t_1,\ldots,t_n)\vert_{q}=T(f_p(t_1,\ldots,t_n))\vert_{1.q}=L_1\vert_{q_1}=y$.
Let $N_0= N \cup \{ (C;\pi)\} \Rightarrow_{\operatorname{LI}} N \cup \{ (C_a;\pi_a)\}=N_1 $ where an occurrence of a variable $x$ is replaced by a fresh $x'$.
If $(C_1,\pi_1) \neq (C_a;\pi_a)$, then trivially $(C_0,\pi_0)=(C_1,\pi_1)$.
Otherwise, $(C_1,\pi_1) = (C_a;\pi_a)$, $(C_0,\pi_0)=(C,\pi)$.
By definition, $(L,q)=(L_1\{x' \mapsto x\},q_1)$ and $\pi_0=\pi_1\{x' \mapsto x\}$.
Thus, $\mathrm{skt}(L\vert_{q},\pi_0)= \mathrm{skt}(L_1\vert_{q_1},\pi_1)$.
Therefore, $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(L\vert_{q},\pi_0)$.
Since $L$ and $L_1$ have the same predicate and $q=q_1$, $q=q'$ if $L$ and $L'$ have the same predicate.
Let $L'\vert_{q'}=z$ and $y$ be the ancestor variable of $z$ in $(C_1,\pi_1)$.
If $y \neq x'$, then $y$ is the ancestor variable of $z$ in $(C_0,\pi_0)$ and $L\vert_{q}=L_1\{x' \mapsto x\}\vert_{q_1}=y$.
Otherwise, $x$ is the ancestor variable of $z$ in $(C_0,\pi_0)$ and $L\vert_{q}=L_1\{x' \mapsto x\}\vert_{q_1}=x$.
Let $N_0= N \cup \{ (C;\pi)\} \Rightarrow_{\operatorname{SH}} N \cup \{ (C_l;\pi_l),(C_r;\pi_r)\}=N_1$ where a term $s$ is extracted from a positive literal $Q(s'[s]_p)$ via introduction of fresh predicate $S$ and variable $x$.
If $(C_1,\pi_1)$ is neither $(C_l;\pi_l)$ nor $(C_r;\pi_r)$, then trivially $(C_0,\pi_0)=(C_1,\pi_1)$.
If $(C_1,\pi_1)= (C_l;\pi_l)$ and $L_1 = S(x)$,
then $(C_0,\pi_0)=(C;\pi)$, $q_1=1$, $(L',q')=(S(x),1)$ and $(Q(s'[s]_p),1.p)$ is the parent literal position of $(S(x),1)$.
Let $L'\delta=S(t)$.
Because $N^\bot_k$ is complete and ground, there is a clause $C'_k\delta'\in N^\bot_k$ that contains the positive literal $S(t)$.
The ancestor of $(C'_k,\pi'_k)\in N_k$ in $N_1$ is $(C_r;\pi_r)$ because it is the only clause in $N_1$ with a positive $S$-literal.
Then, by the inductive hypothesis, $(S(s),1)$ in $(C_r;\pi_r)$ is the ancestor literal position of $(S(x),1)$ in $(C'_k,\pi'_k)$.
Thus, $t$ is an instance of $\mathrm{skt}(S(s)\vert_1,\pi_r)=\mathrm{skt}(s,\pi_r)$.
Therefore, $t=L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(Q(s'[s]_p)\vert_{1.p},\pi)=\mathrm{skt}(s,\pi_r)$.
Further, $Q$ and $S$ are not the same predicate because $S$ is fresh.
Since $x$ has no parent variable, $L'\vert_{q'}=x$ has no ancestor variable in $(C_0,\pi_0)$.
If $(C_1,\pi_1)= (C_l;\pi_l)$ and $L_1 = Q(s'[p/x])$,
then $(C_0,\pi_0)=(C;\pi)$ and $(Q(s'[s]_p),q_1)$ in $(C;\pi)$ is the parent literal position of $(L_1,q_1)$ in $(C_1,\pi_1)$ and ancestor literal position of $(L',q')$ in $(C_k,\pi_k)$.
If $q_1$ is not a position at or above $p$, the subterm at $p$ is irrelevant and thus $\mathrm{skt}(Q(s'[s]_p)\vert_{q_1},\pi)=\mathrm{skt}(Q(s'[p/x])\vert_{q_1},\pi_l)$.
Otherwise, let $r$ be a position such that $q_1r=1.p$.
Since $\vert p \vert=2$, no following shallow transformation step extracts a subterm of $s'[p/x]$ containing $x$.
Thus by definition of $ \Rightarrow_{\operatorname{AP}}$, $L'=Q(t'[x]_p)$ and $C_k$ also contains the negative literal $S(x)$.
Let $S(x)\delta=S(t)$.
Analogously to the previous case, $t$ is an instance of $\mathrm{skt}(s,\pi_r)$.
Combined with $L'\delta\vert_{q'}$ being an instance of $\mathrm{skt}(L_1\vert_{q_1},\pi_1)=\mathrm{skt}(Q(s'[p/x])\vert_{q_1},\pi_l)$ and $L'\delta\vert_{1.p}=t$,
$L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(Q(s'[s]_p)\vert_{q},\pi)$.
Since $L$ and $L_1$ have the same predicate and $q=q_1$, $q=q'$ if $L$ and $L'$ have the same predicate.
Let $L'\vert_{q'}=z$ and $y$ in $(C_1,\pi_1)$ be the ancestor variable of $z$ in $(C_k,\pi_k)$.
Since $x$ has no parent, $y\neq x$ and $y$ in $(C_0,\pi_0)$ is the ancestor variable of $z$.
Therefore, $Q(s'[s]_p)\vert_{q_1}=y$ because $Q(s'[p/x])\vert_{q_1}=y$.
If $(C_1,\pi_1)= (C_r;\pi_r)$ and $L_1 = S(s)$, let $q_1=1.q'_1$.
Then, $(C_0,\pi_0)=(C;\pi)$ and $(L,q)=(Q(s'[s]_p),1.pq'_1)$ in $(C_0,\pi_0)$ is the parent literal position of $(L_1,q_1)$ in $(C_1,\pi_1)$.
Thus, $L'\delta\vert_{q'}$ is an instance of $\mathrm{skt}(Q(s'[s]_p)\vert_{1.pq'_1},\pi)=\mathrm{skt}(s\vert_{q'_1},\pi)=\mathrm{skt}(L_1\vert_{q_1},\pi_r)$.
Because $S$ is fresh, $Q$ is not the predicate of $L'$.
Let $L'\vert_{q'}=z$ and $y$ in $(C_1,\pi_1)$ be the ancestor variable of $z$ in $(C_k,\pi_k)$.
Then, $y$ in $(C_0,\pi_0)$ is the ancestor variable of $z$
and $Q(s'[s]_p)\vert_{q}=s\vert_{q'_1}=y$ because $s\vert_{q'_1}=L_1\vert_{q_1}=y$.
Otherwise, $(L_1,q_1)$ in $(C_0,\pi_0)$ is the parent literal position of $(L_1,q_1)$ in $(C_1,\pi_1)$, by definition.
Then, $\mathrm{skt}(L_1,\pi)=\mathrm{skt}(L_1,\pi_l)$ or $\mathrm{skt}(L_1,\pi)=\mathrm{skt}(L_1,\pi_r)$, respectively.\qed \end{proof}
Next, we define the notion of descendants and descendant relations to connect lift-conflicts in ground conflicting cores with their corresponding ancestor clauses. The goal, hereby, is that if a ground clause $D$ is not a descendant of a clause in $N$, then it can never appear in a conflicting core of an approximation of $N$.
\begin{definition}[Descendants]\label{def:refine:descendant} Let $N \Rightarrow_{\operatorname{AP}}^* N'$, $[(C;\pi),N] \Rightarrow_{\operatorname{A}}^* [(C';\pi'),N']$ and $D$ be a ground instance of $(C';\pi')$. Then, we call $D$ a \emph{descendant} of $(C;\pi)$ and define the $[(C;\pi),N] \Rightarrow_{\operatorname{A}}^*[(C';\pi'),N']$-descendant relation $\des{}{D}{}$ that maps literals in $D$ to literal positions in $(C;\pi)$ using the following rule: $$ L'\delta \des{}{D}{} (L,r) \text{ if } L'\delta\in D \text{ and } [r,L,(C;\pi),N] \Rightarrow_{\operatorname{A}}^* [\varepsilon,L',(C';\pi'),N'] $$ \end{definition}
For the descendant relations it is of importance to note that while there are potentially infinite ways that a lift-conflict $C_c$ can be a descendant of an original clause $(C;\pi)$, there are only finitely many distinct descendant relations over $C_c$ and $(C;\pi)$. This means, if a refinement transformation can prevent one distinct descendant relation without generating new distinct descendant relations (Lemma~\ref{lem:refinement:descendants}), a finite number of refinement steps can remove the lift-conflict $C_c$ from the descendants of $(C;\pi)$ (Lemma~\ref{lem:refinement:refine}). Thereby, preventing any conflicting cores containing $C_c$ from being found again.
A clause $(C;\pi)$ can have two descendants that are the same except for the names of the $S$-predicates introduced by shallow transformations. Because the used approximation $N \Rightarrow_{\operatorname{AP}}^* N'$ is arbitrary and therefore also the choice of fresh $S$-predicates, if $D$ is a descendant of $(C;\pi)$, then any clause $D'$ equal to $D$ up to a renaming of $S$-predicates is also a descendant of $(C;\pi)$. On the other hand, the actual important information about an $S$-predicate is which term it extracts. Two descendants of $(C;\pi)$ might be identical but their $S$-predicate extract different terms in $(C;\pi)$. For example, $P(a)\rightarrow S(f(a))$ is a descendant of $P(x),P(y)\rightarrow Q(f(x),g(f(x)))$ but might extract either occurrence of $f(x)$. These cases are distinguished by their respective descendant relations. In the example, we have either $S(f(a)) \des{}{D}{} (Q(f(x),g(f(x))),1)$ or $S(f(a)) \des{}{D}{} (Q(f(x),g(f(x))),2.1)$.
\begin{lemma}\label{lem:refinement:descendants} Let $N_0=N \cup \{(C;\pi)\} \Rightarrow_{\operatorname{Ref}} N \cup \{(C;\pi \wedge x \neq t),(C;\pi)\{x \mapsto t\}\}=N_1$ be a refinement transformation and $D$ a ground clause. If there is a $[(C;\pi \wedge x \neq t),N_1] \Rightarrow_{\operatorname{A}}^*[(C';\pi'),N_2]$- or $[(C;\pi)\{x \mapsto t\},N_1] \Rightarrow_{\operatorname{A}}^*[(C';\pi'),N_2]$-descendant relation $\des{}{D}{1}$, then there is an equal $[(C;\pi),N_0] \Rightarrow_{\operatorname{A}}^*[(C';\pi'),N_2]$-descendant relation $\des{}{D}{0}$. \end{lemma}
\begin{proof}
Let $L_D $ be a literal of $D$ and $L' \des{}{D}{1} (L,r)$.
If $D$ is a descendant of $(C;\pi \wedge x \neq t)$, then $[r,L,(C;\pi \wedge x \neq t),N_1] \Rightarrow_{\operatorname{A}}^* [\varepsilon,L',(C';\pi'),N_2]$.
Because $[r,L,(C;\pi),N_0] \Rightarrow_{\operatorname{A}} [r,L,(C;\pi \wedge x \neq t),N_1]$, $L' \des{}{D}{0} (L,r)$.
If $D$ is a descendant of $(C;\pi)\{x \mapsto t\}$, the proof is analogous.\qed \end{proof}
\begin{lemma}[Refinement]\label{lem:refinement:refine} Let $N \Rightarrow_{\operatorname{AP}} N'$ and $N^\bot$ be a complete ground conflicting core of $N'$. If $C_c\in N^\bot$ is a lift-conflict, then there exists a finite refinement $N \Rightarrow_{\operatorname{Ref}}^* N_{R}$ such that for any approximation $N_{R} \Rightarrow_{\operatorname{AP}}^* N'_{R}$ and ground conflicting core $N^\bot_{R}$ of $N'_{R}$, $C_c$ is not a lift-conflict in $N^\bot_{R}$ modulo duplicate literal elimination. \end{lemma}
\begin{proof}
Let $(C_a,\pi_a)$ be the conflict clause of $C_c$ and $(C;\pi)\in N$ be the parent clause of $(C_a,\pi_a)$.
$C_c$ is a descendant of $(C;\pi)$ with the corresponding $[(C;\pi),N] \Rightarrow_{\operatorname{A}} [(C_a;\pi_a),N']$-descendant relation $\des{}{C_c}{0}$.
We apply induction on the number of distinct $[(C;\pi),N] \Rightarrow_{\operatorname{A}}^* [(C';\pi'),N'']$-descendant relations $\des{}{C_c}{}$ for arbitrary approximations $N \Rightarrow_{\operatorname{AP}}^* N''$.
Since only the shallow and linear transformations can produce lift-conflicts,
the clause $(C;\pi)$ is replaced by either a linearized clause $(C';\pi')$ or
two shallow clauses $(C_l;\pi)$ and $(C_r;\pi)$.
Then, the conflict clause $(C_a;\pi_a)$ of $C_c$ is either the linearized $(C';\pi')$ or the resolvent of $(C_l;\pi)$ and $(C_r;\pi)$.
In either case, $C_c=C_a\delta$ for some solution $\delta$ of $\pi_a$.
Furthermore, there exists a substitution $\tau=\{x'_1 \mapsto x_1,\ldots,x'_n \mapsto x_n\}$ such that $(C;\pi)$ and $(C_a;\pi_a)\tau$ are equal modulo duplicate literal elimination.
That is, $\tau= \{x'\mapsto x\}$ for a linear transformation and $\tau=\rho^{-1}$ for shallow transformation (Definition~\ref{def:approx:resolvent}).
Assume $C_c=C_a\tau\sigma$ for some grounding substitution $\sigma$, where $\tau\sigma$ is a solution of $\pi_a$.
Thus, $\sigma$ is a solution of $\pi_a\tau$, which is equivalent to $\pi$.
Then, $C_c$ is equal to $C\sigma$ modulo duplicate literal elimination and thus an instance of $(C;\pi)$,
which contradicts $C_c$ being a lift-conflict.
Hence, $C_c=C_a\delta$ is not an instance of $C_a\tau$ and thus, $x_i\delta \neq x'_i\delta$ for some $x_i$ in the domain of $\tau$.
Because $x_i\delta $ and $ x'_i\delta$ are ground, there is a position $p$ where $x_i\delta\vert_p $ and $x'_i\delta\vert_p$ have different function symbols.
We construct the straight term $t$ using the path from the root to $p$ on $x_i\delta$ with variables that are fresh in $(C,\pi)$.
Then, we can use $x_i$ and $t$ to segment $(C;\pi)$ into $(C;\pi \wedge x_i \neq t)$ and $(C;\pi)\{x_i \mapsto t\}$ for the refinement $N \Rightarrow_{\operatorname{Ref}} N_{R}$.
Note, that $x_i\delta$ is a ground instance of $t$, while $x'_i\delta$ is not.
Let $(L'_1,r'_1)$ and $(L'_2,r'_2)$ in $(C_a,\pi_a)$ be literal positions of the variables $x_i$ and $x'_i$ in $C_a$,
and $(L_1,r_1)$ and $(L_2,r_2)$ in $(C,\pi)$ be the parent literal positions of $(L'_1,r'_1)$ and $(L'_2,r'_2)$, respectively.
Because $(C_a,\pi_a)\tau$ is equal to $(C;\pi)$ modulo duplicate literal elimination, $L_1\vert_{r_1} = L_2\vert_{r_2}=x_i$.
Let $N \Rightarrow_{\operatorname{Ref}} N_1$ be the refinement where $(C;\pi)$ is segmented into $(C;\pi \wedge x_i \neq t)$ and $(C;\pi)\{x_i \mapsto t\}$.
By Lemma~\ref{lem:refinement:descendants}, all $[(C;\pi \wedge x_i \neq t),N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$- or $[(C;\pi)\{x_i \mapsto t\},N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$-descendant relations
correspond to an equal $[(C;\pi),N] \Rightarrow_{\operatorname{A}} [(C'_a;\pi'_a),N_2]$-descendant relation.
Assume there is a $[(C;\pi \wedge x_i \neq t),N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$-descendant relation $\des{}{C_c}{1}$ that is not distinct from $\des{}{C_c}{0}$.
Because $L'_1\delta \des{}{C_c}{0} (L_1,r)$ for some literal position $(L_1,r)$ in $(C;\pi)$, which is the parent literal position of $(L_1,r)$ in $(C;\pi \wedge x_i \neq t)$,
$L'_1\delta \des{}{C_c}{1} (L_1,r)$.
However, this contradicts Lemma~\ref{lem:refinement:ancestor_skel} because $x_i\delta $ is not an instance of $\mathrm{skt}(L_1\vert_{r_1},\pi \wedge x_i \neq t)=\mathrm{skt}(x_i,\pi \wedge x_i \neq t)$.
The case that there is a $[(C;\pi)\{x_i \mapsto t\},N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$-descendant relation that is not distinct from $\des{}{C_c}{0}$ is analogous using the argument that $x'_i\delta $ is not an instance of $\mathrm{skt}(L_2\{x_i \mapsto t\}\vert_{r_2},\pi)=\mathrm{skt}(t,\pi)$.
Hence, there are strictly fewer distinct descendant relations over $C_c$ and $(C;\pi \wedge x_i \neq t)$ or $(C;\pi)\{x_i \mapsto t\}$
than there are distinct descendant relations over $C_c$ and $(C,\pi)$.
If there are no descendant relations, then $C_c$ can no longer appear as a lift-conflict.
Otherwise, by the inductive hypothesis, there exists a finite refinement $N \Rightarrow_{\operatorname{Ref}} N_1 \Rightarrow_{\operatorname{Ref}}^* N_{R}$
such that for any approximation $N_{R} \Rightarrow_{\operatorname{AP}} N'_{R}$ and ground conflicting core $N^\bot_{R}$ of $N'_{R}$,
$C_c$ is not a lift-conflict in $N^\bot_{R}$ modulo duplicate literal elimination.\qed \end{proof}
\begin{theorem}[Soundness and Completeness of FO-AR]\label{theo:refinement:scfoar} Let $N$ be an unsatisfiable clause set and $N'$ its MSL(SDC) approximation: (i)~if $N$ is unsatisfiable then there exists a conflicting core of $N'$ that can be lifted to a refutation in $N$, (ii)~if $N'$ is satisfiable, then $N$ is satisfiable too. \end{theorem} \begin{proof}(Idea)
By Lemma~\ref{lem:approx:sound} and Lemma~\ref{lem:refinement:refine}, where the latter can be used to show that a core of $N'$ that cannot
be lifted also excludes the respective instance for unsatisfiability of $N$.
Let $(C_a,\pi_a)$ be the conflict clause of $C_c$ and $(C;\pi)\in N$ be the parent clause of $(C_a,\pi_a)$.
$C_c$ is a descendant of $(C;\pi)$ with the corresponding $[(C;\pi),N] \Rightarrow_{\operatorname{A}} [(C_a;\pi_a),N']$-descendant relation $\des{}{C_c}{0}$.
We apply induction on the number of distinct $[(C;\pi),N] \Rightarrow_{\operatorname{A}}^* [(C';\pi'),N'']$-descendant relations $\des{}{C_c}{}$ for arbitrary approximations $N \Rightarrow_{\operatorname{AP}}^* N''$.
Since only the shallow and linear transformations can produce lift-conflicts,
the clause $(C;\pi)$ is replaced by either a linearized clause $(C';\pi')$ or
two shallow clauses $(C_l;\pi)$ and $(C_r;\pi)$.
Then, the conflict clause $(C_a;\pi_a)$ of $C_c$ is either the linearized $(C';\pi')$ or the resolvent of $(C_l;\pi)$ and $(C_r;\pi)$.
In either case, $C_c=C_a\delta$ for some solution $\delta$ of $\pi_a$.
Furthermore, there exists a substitution $\tau=\{x'_1 \mapsto x_1,\ldots,x'_n \mapsto x_n\}$ such that $(C;\pi)$ and $(C_a;\pi_a)\tau$ are equal modulo duplicate literal elimination.
That is, $\tau= \{x'\mapsto x\}$ for a linear transformation and $\tau=\rho^{-1}$ for shallow transformation (Definition~\ref{def:approx:resolvent}).
Assume $C_c=C_a\tau\sigma$ for some grounding substitution $\sigma$, where $\tau\sigma$ is a solution of $\pi_a$.
Thus, $\sigma$ is a solution of $\pi_a\tau$, which is equivalent to $\pi$.
Then, $C_c$ is equal to $C\sigma$ modulo duplicate literal elimination and hence an instance of $(C;\pi)$,
which contradicts $C_c$ being a lift-conflict.
Hence, $C_c=C_a\delta$ is not an instance of $C_a\tau$ and thus, $x_i\delta \neq x'_i\delta$ for some $x_i$ in the domain of $\tau$.
Because $x_i\delta $ and $ x'_i\delta$ are ground, there is a position $p$ where $x_i\delta\vert_p $ and $x'_i\delta\vert_p$ have different function symbols.
We construct the straight term $t$ using the path from the root to $p$ on $x_i\delta$ with variables that are fresh in $(C,\pi)$.
Then, we can use $x_i$ and $t$ to segment $(C;\pi)$ into $(C;\pi \wedge x_i \neq t)$ and $(C;\pi)\{x_i \mapsto t\}$ for the refinement $N \Rightarrow_{\operatorname{Ref}} N_{R}$.
Note, that $x_i\delta$ is a ground instance of $t$, while $x'_i\delta$ is not.
Let $(L'_1,r'_1)$ and $(L'_2,r'_2)$ in $(C_a,\pi_a)$ be literal positions of the variables $x_i$ and $x'_i$ in $C_a$,
and $(L_1,r_1)$ and $(L_2,r_2)$ in $(C,\pi)$ be the parent literal positions of $(L'_1,r'_1)$ and $(L'_2,r'_2)$, respectively.
Because $(C_a,\pi_a)\tau$ is equal to $(C;\pi)$ modulo duplicate literal elimination, $L_1\vert_{r_1} = L_2\vert_{r_2}=x_i$.
Let $N \Rightarrow_{\operatorname{Ref}} N_1$ be the refinement where $(C;\pi)$ is segmented into $(C;\pi \wedge x_i \neq t)$ and $(C;\pi)\{x_i \mapsto t\}$.
By Lemma~\ref{lem:refinement:descendants}, all $[(C;\pi \wedge x_i \neq t),N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$- or $[(C;\pi)\{x_i \mapsto t\},N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$-descendant relations
correspond to an equal $[(C;\pi),N] \Rightarrow_{\operatorname{A}} [(C'_a;\pi'_a),N_2]$-descendant relation.
Assume there is a $[(C;\pi \wedge x_i \neq t),N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$-descendant relation $\des{}{C_c}{1}$ that is not distinct from $\des{}{C_c}{0}$.
Because $L'_1\delta \des{}{C_c}{0} (L_1,r)$ for some literal position $(L_1,r)$ in $(C;\pi)$, which is the parent literal position of $(L_1,r)$ in $(C;\pi \wedge x_i \neq t)$,
$L'_1\delta \des{}{C_c}{1} (L_1,r)$.
However, this contradicts Lemma~\ref{lem:refinement:ancestor_skel} because $x_i\delta $ is not an instance of $\mathrm{skt}(L_1\vert_{r_1},\pi \wedge x_i \neq t)=\mathrm{skt}(x_i,\pi \wedge x_i \neq t)$.
The case that there is a $[(C;\pi)\{x_i \mapsto t\},N_1] \Rightarrow_{\operatorname{A}}^* [(C'_a;\pi'_a),N_2]$-descendant relation that is not distinct from $\des{}{C_c}{0}$ is analogous using the argument that $x'_i\delta $ is not an instance of $\mathrm{skt}(L_2\{x_i \mapsto t\}\vert_{r_2},\pi)=\mathrm{skt}(t,\pi)$.
Hence, there are strictly fewer distinct descendant relations over $C_c$ and $(C;\pi \wedge x_i \neq t)$ or $(C;\pi)\{x_i \mapsto t\}$
than there are distinct descendant relations over $C_c$ and $(C,\pi)$.
If there are no descendant relations, then $C_c$ can no longer appear as a lift-conflict.
Otherwise, by the inductive hypothesis, there exists a finite refinement $N \Rightarrow_{\operatorname{Ref}} N_1 \Rightarrow_{\operatorname{Ref}}^* N_{R}$
such that for any approximation $N_{R} \Rightarrow_{\operatorname{AP}} N'_{R}$ and ground conflicting core $N^\bot_{R}$ of $N'_{R}$,
$C_c$ is not a lift-conflict in $N^\bot_{R}$ modulo duplicate literal elimination.\qed \end{proof}
Actually, Lemma~\ref{lem:refinement:refine} can be used to define a fair strategy on refutations in $N'$ in order to receive also a dynamically complete FO-AR calculus, following the ideas presented in \cite{Teucke2015}.
In Lemma~\ref{lem:refinement:refine}, we segment the conflict clause's immediate parent clause. If the lifting later successfully passes this point, the refinement is lost and will be possibly repeated. Instead, we can refine any ancestor of the conflict clause as long as it contains the ancestor of the variable used in the refinement. By Lemma~\ref{lem:refinement:ancestor_skel}-(iii), such an ancestor will contain the ancestor variable at the same positions. If we refine the ancestor in the original clause set, the refinement is permanent because lifting the refinement steps always succeeds. Only variables introduced by shallow transformation cannot be traced to the original clause set. However, these shallow variables are already linear and the partitioning in the shallow transformation can be chosen such that they are not shared variables. Assume a shallow, shared variable $y$, that is used to extract the term $t$, in the shallow transformation of $\Gamma\rightarrow E[s]_p,\Delta$ into $S(x),\Gamma_l\rightarrow E[p/x],\Delta_l$ and $\Gamma_r\rightarrow S(s),\Delta_r$. Since $\Delta_l$ $\dot{\cup}$ $\Delta_r=\Delta$ is a partitioning, $y$ can only appear in either $E[p/x],\Delta_l$ or $S(s),\Delta_r$. If $y \in \operatorname{vars}(E[p/x],\Delta_l)$ we instantiate $\Gamma_r$ with $\{y \mapsto t\}$ and $\Gamma_l$, otherwise. Now, $y$ is no longer a shared variable.\\
The refinement Lemmas only guarantee a refinement for a given ground conflicting core. In practice, however, conflicting cores contain free variables. We can always generate a ground conflicting core by instantiating the free variables with ground terms. However, if we only exclude a single ground case via refinement, next time the new conflicting core will likely have overlaps with the previous one. Instead, we can often remove all ground instances of a given conflict clause at once.
The simplest case is when unifying the conflict clause with the original clause fails because their instantiations differ at some equivalent positions. For example, consider $N= \{ P(x,x); P(f(x,a), f(y,b)) \rightarrow\}$. $N$ is satisfiable but the linear transformation is unsatisfiable with conflict clause $P(f(x,a), f(y,b))$ which is not unifiable with $P(x,x)$, because the two terms $f(x,a)$ and $f(y,b)$ have different constants at the second argument. A refinement of $ P(x,x)$ is \newline \centerline{$\begin{array}{r@{\,;\,}l}
( P(x,x) & x \neq f(v,a)) \\ (P(f(x,a),f(x,a)) & \top) \\ \end{array}$}
$ P(f(x,a), f(y,b))$ shares no ground instances with the approximations of the refined clauses.
Next, assume that again unification fails due to structural difference, but this time the differences lie at different positions. For example, consider $N= {\{ P(x,x); P(f(a,b), f(x,x)) \rightarrow\}}$. $N$ is satisfiable but the linear transformation of $N$ is unsatisfiable with conflict clause $ P(f(a,b), f(x,x))$
which is not unifiable with $ P(x,x)$ because in $f(a,b)$ the first and second arguments are different but equal in $f(x,x)$. A refinement of $ P(x,x)$ is \newline \centerline{$\begin{array}{r@{\,;\,}l}
( P(x,x) & x \neq f(a,v)) \\
(P(f(a,x), f(a,x)) & x \neq a) \\
( P(f(a,a), f(a,a)) & \top) \\
\end{array}$}
$ P(f(a,b), f(x,x))$ shares no ground instances with the approximations of the refined clauses.
It is also possible that the conflict clause and original clause are unifiable by themselves, but the resulting constraint has no solutions. For example, consider $N= {\{ P(x,x); (P(x, y) \rightarrow; x \neq a \wedge x \neq b \wedge y \neq c \wedge y \neq d )\}}$ with signature $\Sigma=\{a,b,c,d\}$. $N$ is satisfiable but the linear transformation of $N$ is unsatisfiable with conflict clause $ (\rightarrow P(x,y); x \neq a \wedge x \neq b \wedge y \neq c \wedge y \neq d)$. While $P(x,x)$ and $P(x,y)$ are unifiable, the resulting constraint $ x \neq a \wedge x \neq b \wedge x \neq c \wedge x \neq d$ has no solutions. A refinement of $P(x,x)$ is \newline \centerline{$\begin{array}{r@{\,;\,}l}
(P(x,x) & x \neq a \wedge x \neq b) \\
( P(a,a) & \top) \\
(P(b,b) & \top) \\
\end{array}$}
$ ( P(x,y); x \neq a \wedge x \neq b \wedge y \neq c \wedge y \neq d)$ shares no ground instances with the approximations of the refined clauses.\\
Lastly, we should mention that there are cases where the refinement process does not terminate. For example, consider the clause set $N= {\{ P(x,x) ; P(y,g(y)) \rightarrow \}}$. $N$ is satisfiable but the linear transformation of $N$ is unsatisfiable with conflict clause $ P(y,g(y))$, which is not unifiable with $ P(x,x)$. A refinement of $ P(x,x)$ based on the ground instance $P(a,g(a))$ is \newline \centerline{$\begin{array}{r@{\,;\,}l}
( P(x,x) & x \neq g(v)) \\ ( P(g(x),g(x)) & \top) \\ \end{array}$} While $ P(y,g(y))$ is not an instance of the refined approximation, it shares ground instances with $ P(g(x),g(x'))$. The new conflict clause is $ P(g(y),g(g(y)))$ and the refinement will continue to enumerate all $ P(g^i(x),g^i(x))$ instances of $ P(x,x)$ without ever reaching a satisfiable approximation. Satisfiability of first-order clause sets is undecidable, so termination cannot be expected by any calculus, in general.
\section{Experiments}\label{sec:experiments}
In the following we discuss several first-order clause classes for which FO-AR implemented in SPASS-AR immediately decides satisfiability but superposition and instantiation-based methods fail. We argue both according to the respective calculi and state-of-the-art implementations, in particular SPASS~3.9~\cite{DBLP:conf/cade/WeidenbachDFKSW09}, Vampire~4.1~\cite{KovacsVoronkov13,DBLP:conf/cav/Voronkov14}, for ordered-resolution/superposition, iProver~2.5~\cite{DBLP:conf/cade/Korovin08} an implementation of Inst-Gen \cite{Korovin13ganzinger}, and Darwin~v1.4.5 \cite{DBLP:journals/ijait/BaumgartnerFT06} an implementation of the model evolution calculus \cite{DBLP:conf/cade/BaumgartnerT03}. All experiments were run on a 64-Bit Linux computer (Xeon(R) E5-2680, 2.70GHz, 256GB main memory). For Vampire and Darwin we chose the CASC-sat and CASC settings, respectively. For iProver we set the schedule to ``sat'' and SPASS, SPASS-AR were used in default mode. Please note that Vampire and iProver are portfolio solvers including implementations of several different calculi including superposition (ordered resolution), instance generation, and finite model finding. SPASS, SPASS-AR, and Darwin only implement superposition, FO-AR, and model evolution, respectively.
For the first example\newline \centerline{$P(x,y) \rightarrow P(x,z) , P(z,y);\quad P(a,a)$} and second example,\newline \centerline{$Q(x,x);\quad
Q(v,w) , P(x,y) \rightarrow P(x,v) , P(w,y);\quad
P(a,a)$} the superposition calculus produces independently of the selection strategy and ordering an infinite number of clauses of form\newline \centerline{$\begin{array}{r@{\,\rightarrow\,}l} &P(a,z_1),\; P(z_1,z_2),\;\ldots,\;P(z_n,a).\\ \end{array}$}
Using linear approximation, however, FO-AR replaces $P(x,y) \rightarrow P(x,z) , P(z,y)$ and $\rightarrow Q(x,x)$ with $P(x,y) \rightarrow P(x,z) , P(z',y)$ and $\rightarrow Q(x,x')$, respectively. Consequently, ordered resolution derives $\rightarrow P(a,z_1) , P(z_2,a)$ which subsumes any further inferences $\rightarrow P(a,z_1) , P(z_2,z_3) , P(z_4,a)$. Hence, saturation of the approximation terminates immediately. Both examples belong to the Bernays-Sch\"onfinkel fragment, so model evolution (Darwin) and Inst-Gen (iProver) can decide them as well. Note that the concrete behavior of superposition is not limited to the above examples but potentially occurs whenever there are variable chains in clauses.
On the third problem\newline \centerline{$P(x,y) \rightarrow P(g(x),z);\quad P(a,a)$} superposition derives all clauses of the form $\rightarrow P(g(\ldots g(a)\ldots),z)$. With a shallow approximation of $ P(x,y) \rightarrow P(g(x),z)$ into $ S(v) \rightarrow P(v,z)$ and $ P(x,y) \rightarrow S(g(x))$, FO-AR (SPASS-AR) terminates after deriving $ \rightarrow S(g(a))$ and $S(x) \rightarrow S(g(x))$. Again, model evolution (Darwin) and Inst-Gen (iProver) can also solve this example.
The next example\newline \centerline{$P(a);\quad P(f(a))\rightarrow;\quad
P(f(f(x))) \rightarrow P(x);\quad
P(x) \rightarrow P(f(f(x)))$} is already saturated under superposition. For FO-AR, the clause $P(x) \rightarrow P(f(f(x)))$ is replaced by $S(x) \rightarrow P(f(x))$ and $P(x) \rightarrow S(f(x))$. Then ordered resolution terminates after inferring $S(a) \rightarrow$ and $S(f(x)) \rightarrow P(x)$.
The Inst-Gen and model evolution calculi, however, fail. In either case, a satisfying model is represented by a finite set of literals, i.e., a model of the propositional approximation for Inst-Gen and the trail of literals in the case of model evolution. Therefore, there necessarily exists a literal $P(f^n(x))$ or $\neg P(f^n(x))$ with a maximal $n$ in these models. This contradicts the actual model where either $P(f^n(a))$ or $P(f^n(f(a)))$ is true. However, iProver can solve this problem using its built-in ordered resolution solver whereas Darwin does not terminate on this problem.
Lastly consider an example of the form\newline \centerline{$f(x) \approx x \rightarrow;\,\; f(f(x)) \approx x \rightarrow;\,\, \ldots; f^n(x) \approx x \rightarrow$} which is trivially satisfiable, e.g., saturated by superposition, but any model has at least $n+1$ domain elements. Therefore, adding these clauses to any satisfiable clause set containing $f$ forces calculi that explicitly consider finite models to consider at least $n+1$ elements. The performance of finite model finders~\cite{SlaneySurendonk96} typically degrades in the number of different domain elements to be considered.
Combining each of these examples into one problem is then solvable by neither superposition, Inst-Gen, nor model evolution and not practically solvable with increasing $n$ via testing finite models. For example, we tested\newline \centerline{$\begin{array}{c}
P(x,y) \rightarrow P(x,z) , P(z,y); \quad P(a,a); \quad P(f(a),y) \rightarrow; \\
P(f(f(x)),y) \rightarrow P(x,y);\quad P(x,y) \rightarrow P(f(f(x)),y); \\
f(x) \approx x \rightarrow; \ldots; f^{n}(x) \approx x \rightarrow;\\
\end{array}$} for $n=20$ against SPASS, Vampire, iProver, and Darwin for more than one hour each without success. Only SPASS-AR solved it in less than one second.
For iProver we added an artificial positive equation $b\approx c$, because otherwise iProver discards all disequations during preprocessing. This is a satisfiability-preserving operation; however, the (finite) models found afterwards are not models of the above clause set due to the collapsing of ground terms.
\section{Conclusion} \label{sec:conclusion}
The previous section showed that FO-AR is superior to superposition and instantiation-based methods on certain classes of clause sets. Of course, there are also classes of clause sets where superposition and instantiation-based methods are superior to FO-AR, e.g., for unsatisfiable clause sets where the structure of the clause set forces FO-AR to enumerate failing ground instances due to the approximation in a bottom-up way.
Our prototypical implementation SPASS-AR cannot compete with systems such as iProver or Vampire on the respective CASC categories of the TPTP~\cite{Sut09}. This is already due to the fact that they are all meanwhile portfolio solvers. For example, iProver contains an implementation of ordered resolution and Vampire an implementation of Inst-Gen. Our results, Section~\ref{sec:experiments}, however, show that these systems may benefit from FO-AR by adding it to their portfolio.
The DEXPTIME-completeness result for MSLH strongly suggests that both the MSLH and also our MSL(SDC) fragment have the finite model property. However, we are not aware of any proof. If MSL(SDC) has the finite model property, the finite model finding approaches are complete on MSL(SDC). The models generated by FO-AR and superposition are typically infinite. It remains an open problem, even for fragments enjoying the finite model property, e.g., the first-order monadic fragment, to design a calculus that combines explicit finite model finding with a structural representation of infinite models. For classes that have no finite models this problem seems to become even more difficult. To the best of our knowledge, SPASS is currently the only prover that can show satisfiability of the clauses $R(x,x)\rightarrow$; $R(x,y), R(y,z)\rightarrow R(x,z)$; $R(x,g(x))$ due to an implementation of chaining~\cite{BachmairGanzinger98,SudaEtAl10}. Apart from the superposition calculus, it is unknown to us how the specific inferences for transitivity can be combined with any of the other discussed calculi, including the abstraction refinement calculus introduced in this paper.
Finally, there are not many results on calculi that operate with respect to models containing positive equations. Even for fragments that are decidable with equality, such as the Bernays-Schoenfinkel-Ramsey fragment or the monadic fragment with equality, there seem currently no convincing suggestions compared to the great amount of techniques for these fragments without equality. Adding positive equations to MSL(SDC) while keeping decidability is, to the best of our current knowledge, only possible for at most linear, shallow equations $f(x_1,\ldots,x_n) \approx h(y_1,\ldots,y_n)$~\cite{JacquemardMeyerEtAl98}. However, approximation into such equations from an equational theory with nested term occurrences typically results in an almost trivial equational theory. So this does not seem to be a very promising research direction.
\end{document} |
\begin{document}
\title{Description of the minimizers of least squares regularized~ with~ $\bm{\ell_0}$-norm}
\newcommand{\slugmaster}{ \slugger{siims}{xxxx}{xx}{x}{x--x}}
\vspace*{-4cm}
\renewcommand{\thefootnote}{\alph{footnote}} \hspace*{5cm}\textcolor[rgb]{0.1,0.1,0.1}{\sc (in press\footnote {Received by the editors November 10, 2011; accepted for publication (in revised form) January 14, 2013; published electronically DATE. })}
\vspace*{4cm}
\renewcommand{\arabic{footnote}}{\arabic{footnote}} \setcounter{footnote}{0} \begin{abstract} We have an $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real-valued arbitrary matrix $A$ (e.g. a dictionary) with $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n$ and data $d$ describing the sought-after object with the help of $A$. This work provides an in-depth analysis of the (local and global) minimizers of an objective function $\mathcal{F}_d$ combining a quadratic data-fidelity term and an $\ell_0$ penalty applied to each entry of the sought-after solution, weighted by a regularization parameter $\beta>0$. For several decades, this objective has attracted a ceaseless effort to conceive algorithms approaching a good minimizer. Our theoretical contributions, summarized below, shed new light on the existing algorithms and can help the conception of innovative numerical schemes. To solve the normal equation associated with any $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$-row submatrix of $A$ is equivalent to compute a local minimizer $\hat u$ of $\mathcal{F}_d$. (Local) minimizers $\hat u$ of $\mathcal{F}_d$ are strict if and only if the submatrix, composed of those columns of $A$ whose indexes form the support of $\hat u$, has full column rank. An outcome is that strict local minimizers of $\mathcal{F}_d$ are easily computed without knowing the value of $\beta$. Each strict local minimizer is linear in data. It is proved that $\mathcal{F}_d$ has global minimizers and that they are always strict. They are studied in more details under the (standard) assumption that $\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n$. The global minimizers with $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$-length support are seen to be impractical. 
Given $d$, critical values $\beta_\k$ for any $\k\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$ are exhibited such that if $\beta>\beta_\k$, all global minimizers of $\mathcal{F}_d$ are $\k$-sparse. An assumption on $A$ is adopted and proved to fail only on a closed negligible subset. Then for all data $d$ beyond a closed negligible subset, the objective $\mathcal{F}_d$ for $\beta>\beta_\k$, $\k\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$, has a unique global minimizer and this minimizer is $\k$-sparse. Instructive small-size ($5\times 10$) numerical illustrations confirm the main theoretical results. \end{abstract}
\begin{keywords}asymptotically level stable functions, global minimizers, local minimizers, $\ell_0$ regularization, nonconvex nonsmooth minimization, perturbation analysis, quadratic programming, solution analysis, sparse recovery, strict minimizers, underdetermined linear systems, uniqueness of the solution, variational methods \end{keywords}
\begin{AMS}
15A03,
15A29, 15A99, 26E35, 28A75, 46N99, 47J07, 49J99, 49K40,
90C26, 90C27, 94A08, 94A12 \end{AMS}
\paragraph{\hspace*{-6mm}\textbf{\small DOI}} {\small 10.1137/11085476X}
\begin{center}} \def\ec{\end{center}\textcolor[rgb]{0.1,0.1,0.1}{--------------------------------------------------------------------------------------------------------------------}\ec
\pagestyle{myheadings} \thispagestyle{plain} \markboth{MILA NIKOLOVA}{THE MINIMIZERS OF LEAST SQUARES REGULARIZED~ WITH~ $\bm{\ell_0}$-NORM}
\section{Introduction}
Let $A$ be an arbitrary matrix (e.g., a dictionary) such that \[~A\in\RR^{\m\x\n}~~~\mbox{for}~~~\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n~,\] where the positive integers $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $\n$ are fixed. Given a data vector $d\in\RR^{\m}$, we consider an objective function $\mathcal{F}_d:\RR^{\n}\rightarrow\mathbb R$ of the form
\begin{eqnarray}} \def\eeqn{\end{eqnarray}\mathcal{F}_d(u)&=&\|Au-d\|^2_2+\beta\|u\|_0~,~~\beta>0~,\label{fd}\\
\|u\|_0&=&\,\sharp\,\sigma(u)~, \nonumber\label{rj} \eeqn where $u\in\RR^{\n}$ contains the coefficients describing the sought-after object,
$\beta$ is a regularization parameter, $\,\sharp\,$ stands for cardinality and $\sigma(u)$ is the support of $u$ (i.e., the set of all $i\in\{1,\cdots,\n\}$ for which the $i$th entry of $u$ satisfies $u[i]\neq0$). By an abuse of language, the penalty in~\eqref{fd} is called the $\ell_0$-norm. Define $\phi:\mathbb R\rightarrow\mathbb R$ by \begin{equation}} \def\eeq{\end{equation}
\phi(t)\stackrel{\rm def}{=}\left\{\begin{array}} \def\earr{\end{array}{lll}0&\mbox{\rm if}&t=0~,\\1&\mbox{\rm if}&t\neq0~.\earr\right.\label{phi}\eeq Then $\disp{\|u\|_0=\sum_{i=1}^\n\phi(u[i])=\sum_{i\in\sigma(u)}\phi(u[i])}$, so $\mathcal{F}_d$ in~\eqref{fd} equivalently reads \begin{equation}} \def\eeq{\end{equation}
\mathcal{F}_d(u)=\|Au-d\|^2_2+\beta\sum_{i=1}^\n\phi(u[i]) =
\|Au-d\|^2_2+\beta\sum_{i\in\sigma(u)}\phi(u[i])~.\label{fds} \eeq We focus on all (local and global) minimizers $\hat u$ of an objective $\mathcal{F}_d$ of the form~\eqref{fd}: \begin{equation}} \def\eeq{\end{equation}\hat u\in\RR^{\n}~~\mbox{such that}~~ \mathcal{F}_d(\hat u)=\min_{u\in\O}\mathcal{F}_d(u)~,\label{P0}\eeq where $\O\,$ is an open neighborhood of $\hat u$~. We note that finding a global minimizer of $\mathcal{F}_d\,$ must be an {\em NP-hard} computational problem \cite{DavisMallatAvellaneda,Tropp06}.
The function $\phi$ in~\eqref{phi} served as a regularizer for a long time. In the context of Markov random fields it was used by Geman and Geman in 1984 \cite{Geman84} and Besag in 1986 \cite{Besag86}
as a prior in MAP energies to restore labeled images. The MAP objective reads as
\begin{equation}} \def\eeq{\end{equation}\mathcal{F}_d(u)=\|Au-d\|^2_2+\beta\sum_{k}\phi(D_ku)~,\label{map}\eeq where $D_k$ is a finite difference operator and $\phi$ is given by~\eqref{phi}. This label-designed form is known as the Potts prior model, or as the multi-level logistic model \cite{Besag89,Li95}. Various stochastic and deterministic algorithms have been considered to minimize~\eqref{map}. Leclerc \cite{Leclerc89} proposed in 1989 a deterministic continuation method to restore piecewise constant images. Robini, Lachal and Magnin \cite{Robini07} introduced the stochastic continuation approach and successfully used it to reconstruct 3D tomographic images. Robini and Magnin refined the method and the theory in \cite{Robini10}. Very recently, Robini and Reissman \cite{Robini12} gave theoretical results relating the probability for global convergence and the computation speed.
The problem stated in~\eqref{fd} and~\eqref{P0}---to (locally) minimize $\mathcal{F}_d$---arises when {\em sparse\,} solutions are desired. Typical application fields are signal and image processing, morphologic component analysis, compression, dictionary building, inverse problems, compressive sensing, machine learning, model selection, classification, and subset selection, among others. The original hard-thresholding method proposed by Donoho and Johnstone \cite{Donoho94} amounts to\footnote{As a reminder, if $d$ are some noisy coefficients, the restored coefficients $\hat u$
minimize $\|u-d\|^2+\beta\|u\|_0$ and read $\hat u[i]=0$ if $\left|d[i]\right|\leqslant\sqrt{\beta}$ and $\hat u[i]=d[i]$ otherwise. }
minimizing~$\mathcal{F}_d$, where $d$ contains the coefficients of a signal or an image expanded in a wavelet basis ($\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=\n$). When $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n$, various (usually strong) restrictions on $\|u\|_0$ (often $\|u\|_0$ is replaced by a less irregular function)
and on $A$ (e.g., RIP-like criteria, conditions on $\|A\|$, etc.) are needed to conceive numerical schemes approximating a minimizer of $\mathcal{F}_d$, to establish local convergence and derive the asymptotic of the obtained solution. In statistics the problem has been widely considered for subset selection, and numerous algorithms have been designed, with limited theoretical production, as explained in the book by Miller~\cite{Miller02}. More recently, Haupt and Nowak \cite{Haupt06} investigate the statistical performances of the global minimizer of~$\mathcal{F}_d$ and propose an iterative bound-optimization procedure. Fan and Li \cite{Fan06} discuss a variable splitting and penalty decomposition minimization technique for \eqref{fd}, along with other approximations of the $\ell_0$-norm. Liu and Wu \cite{Liu07} mix the $\ell_0$ and $\ell_1$ penalties, establish some asymptotic properties of the new estimator and use mixed integer programming aimed at global minimization. For model selection, Lv and Fan \cite{Lv09} approximate the $\ell_0$ penalty using functions that are concave on $\mathbb R_+$ and prove a nonasymptotic nearly oracle property of the resultant estimator. Thiao, Dinh, and Thi~\cite{Thiao08} reformulate the problem so that an approximate solution can be found using difference-of-convex-functions programming. Blumensath and Davies \cite{Davies08} propose an iterative thresholding scheme to approximate a solution and prove convergence to a local minimizer of $\mathcal{F}_d$. Lu and Zhang \cite{LuZhang10} suggest a penalty decomposition method to minimize $\mathcal{F}_d$. Fornasier and Ward \cite{Fornasier10} propose an iterative thresholding algorithm for minimizing
$\mathcal{F}_d$ where $\ell_0$ is replaced by a reasonable sparsity-promoting relaxation given by $\phi(t)=\min\{|t|,1\}$; then convergence to a local minimizer is established. In a recent paper by Chouzenoux et al.~\cite{Pesquet12}, a mixed $\ell_2-\ell_0$ regularization is considered: a slightly smoothed version of the objective is analyzed and
a majorize-minimize subspace approach, satisfying a finite length property, converges to a critical point. Since the submission of our paper, image reconstruction methods have been designed where $\ell_0$ regularization is applied to the coefficients of the expansion of the sought-after image in a wavelet frame~\cite{Zhang12,Dong12}: the provided numerical results outperform $\ell_1$ regularization for a reasonable computational cost achieved using penalty decomposition techniques. In a general study on the convergence of descent methods for nonconvex objectives, Attouch, Bolte, and Svaiter~\cite{Attouch11} apply an inexact forward-backward splitting scheme to find a critical point of $\mathcal{F}_d$. Several other references can be evoked, e.g., \cite{Schorr05,Gasso09}.
Even though overlooked for several decades, the objective $\mathcal{F}_d$ was essentially considered from a numerical standpoint. The motivation naturally comes from the promising applications and the intrinsic difficulty of minimizing $\mathcal{F}_d$.
{\em The goal of this work is to analyze the (local and global) minimizers $\hat u$ of objectives $\mathcal{F}_d$ of the form~\eqref{fd}. \begin{itemize}} \def\eit{\end{itemize} \item We provide detailed results on the minimization problem. \item The uniqueness of the global minimizer of $\mathcal{F}_d$ is examined as well. \eit} \noindent We do not propose an algorithm. However, our theoretical results raise salient questions about the existing algorithms and can help the conception of innovative numerical schemes.
The minimization of $\mathcal{F}_d$ in~\eqref{fd} might seem close to its constrained variants: \begin{equation}} \def\eeq{\end{equation} \label{ka}\begin{array}} \def\earr{\end{array}{lll}
\mbox{given } \varepsilon\geq0,~&\mbox{minimize}~~\|u\|_0&~\mbox{subject to}~~\|Au-d\|^2\leqslant \varepsilon~,\\
\mbox{given } K\in\II_{\m},~&\mbox{minimize}~\|Au-d\|^2&~\mbox{subject to}~~\|u\|_0\leqslant K~. \earr\eeq The latter problems are abundantly studied in the context of sparse recovery in different fields. An excellent account is given in \cite{DonBruckElad09}, see also the book \cite{Mallat08}. For recent achievements, we refer the reader to~\cite{CSR-URL}. It is worth emphasizing that in general, {\em there is no equivalence between the problems stated in~\eqref{ka} and the minimization of $\mathcal{F}_d$ in~\eqref{fd}} because all of these problems are nonconvex.
\subsection{Main notation and definitions}\label{ntd}
We recall that if $\hat u$ is a {\em (local) minimizer} of $\mathcal{F}_d$, the value $\mathcal{F}_d(\hat u)$ is a (local) minimum\footnote{These two terms are often confused in the literature.} of $\mathcal{F}_d$ reached at (possibly numerous) points $\hat u$. Saying that a (local) minimizer $\hat u$ is {\em strict} means that there is a neighborhood $\O\subset\RR^{\n}$, containing $\hat u$, such that $\mathcal{F}_d(\hat u)<\mathcal{F}_d(v)$ for any $v\in\O\,\mathbf{\setminus}\,\{\hat u\}$. So $\hat u$ is an isolated minimizer.
Let $\k$ be any positive integer. The expression $\big\{u\in\mathbb R^\k~:~u~~\mbox{satisfying property}~~\mathfrak{P}\big\}$ designates the subset of $\mathbb R^\k$ formed from all elements $u$ that meet $\mathfrak{P}$. The identity operator on $\mathbb R^\k$ is denoted by $I_\k$. The entries of a vector $u\in\mathbb R^\k$ read as $u[i]$, for any $i$. The $i$th vector of the canonical basis\footnote{More precisely, for any $i\in\II_{\k}$, the vector $e_i\in\mathbb R^\k$ is defined by $e_i[i]=1$ and $e_i[j]=0,~\forall\; j\in\II_{\k}\,\mathbf{\setminus}\,\{i\}$. }
of $\mathbb R^\k$ is denoted by $e_i\in\mathbb R^\k$. Given $u\in\mathbb R^\k$ and $\rho>0$, the {\em open} ball at $u$ of radius $\rho$ with respect to the $\ell_p$-norm for $1\leqslant p\leqslant\infty$ reads as
\[ \Bm_p(u,\rho)\stackrel{\rm def}{=}\{v\in\mathbb R^\k~:~\|v-u\|_p<\rho\}~.\] To simplify the notation, the $\ell_2$-norm is {\em systematically} denoted by
$$\|\cdot\|\stackrel{\rm def}{=}\|\cdot\|_2~.$$ We denote by $\II_{\k}$ the {\em totally and strictly ordered} index set\footnote{E.g. without strict order we have $\omega=\{1,2,3\}=\{2,1,1,3\}$ in which case the notation in \eqref{as}-\eqref{us} below is ambiguous.} \begin{equation}} \def\eeq{\end{equation}\II_{\k}\stackrel{\rm def}{=}\big(\{1,\cdots,\k\},<\big)~,\label{ik}\eeq where the symbol $<$ stands for the natural order of the positive integers. Accordingly, {\em any subset $\omega\subseteq\II_{\k}$ inherits the property of being totally and strictly ordered.}
We shall often consider the index set $\II_{\n}$. The complement of $\omega\subseteq\II_{\n}$ in $\II_{\n}$ is denoted by \[\omega^c=\II_{\n}\,\mathbf{\setminus}\,\omega\subseteq \II_{\n}~.\]
\begin{definition}} \def\ED{\end{definition}\label{sp} For any $u\in\mathbb R^\n$, the {\em support} $\sigma(u)$ of $u$ is defined by \begin{equation}} \def\eeq{\end{equation}\sigma(u)=\Big\{i\in\II_{\n}~:~ u[i]\neq 0\Big\}\subseteq \II_{\n}~.\nonumber\label{s}\eeq \ED If $u=0$, clearly $\sigma(u)=\varnothing$.
The $i$th column in a matrix $A\in\RR^{\m\x\n}$ is denoted by $a_i$. It is {\em systematically} assumed that \begin{equation}} \def\eeq{\end{equation}\mbox{\framebox{$\disp{~a_i\neq0~~~~\forall\; i\in\II_{\n}~.}$}}\label{ao}\eeq For a matrix $A\in\RR^{\m\x\n}$ and a vector $u\in\RR^{\n}$, with any $\omega\subseteq\II_{\n}$, we associate the {\em submatrix} $A_{\vt}} \def\Pvt{\Pi_{\vt}$ and the {\em subvector} $u_\omega$ given by \begin{eqnarray}} \def\eeqn{\end{eqnarray} A_{\vt}} \def\Pvt{\Pi_{\vt}&\stackrel{\rm def}{=}&\big(a_{\omega[1]},\cdots,a_{\omega[\,\sharp\,\omega]}\big)\in\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times \,\sharp\,\omega}~,\label{as}\\ u_\omega&\stackrel{\rm def}{=}&\Big(u\big[\omega[1]\big],\cdots,u\big[\omega[\,\sharp\,\omega]\big]\Big)\in\mathbb R^{\,\sharp\,\omega}\label{us}~, \eeqn respectively, as well as the zero padding operator $Z_\omega~:~\mathbb R^{\,\sharp\,\omega}\rightarrow\RR^{\n}$ that inverts~\eqref{us}: \begin{equation}} \def\eeq{\end{equation} u=Z_\omega\left(u_\omega\right)~,~~u[i]=\left\{ \begin{array}} \def\earr{\end{array}{ll}0&\mbox{\rm if}~~i\not\in\omega~, \\ u_\omega[k]&\mbox{for the unique $k$ such that}~~\omega[k]=i.\earr \right.\label{zo}\eeq Thus for $\omega=\varnothing$ one finds $u_{\varnothing}=\varnothing~~~\mbox{\rm and}~~~u=Z_{\varnothing}\left(u_{\varnothing}\right)=0\in\RR^{\n}~.$
Using Definition~\ref{sp} and the notation in \eqref{as}-\eqref{us}, for any $u\in\RR^{\n}\,\mathbf{\setminus}\,\{0\}$ we have \begin{equation}} \def\eeq{\end{equation}\label{sd} \omega\in\II_{\n}~~\mbox{\rm and}~~\omega\supseteq\sigma(u)~~~\Rightarrow~~~Au=A_{\vt}} \def\Pvt{\Pi_{\vt} u_{\omega}~.\eeq
To simplify the presentation, we adopt the following {\em definitions} \footnote{Note that $(a)$ corresponds to the zero mapping on $\mathbb R^0$ and that $(b)$ is the usual definition for the rank of an empty matrix.}: \begin{equation}} \def\eeq{\end{equation}\begin{array}} \def\earr{\end{array}{lll} (a)&& A_{\varnothing}=[\ ]\in\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times 0}~,\\ (b)&&\mathrm{rank}\left(A_{\varnothing}\right)=0~.\earr\label{cw}\eeq In order to avoid possible ambiguities\footnote{In the light of~\eqref{as}, $A_\omega^T$ could also mean $\left(A^T\right)_\omega$.}, we set \[A_{\vt}} \def\Pvt{\Pi_{\vt}^T\stackrel{\rm def}{=}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\right)^T~,\] where the superscript $^T$ stands for transposed. If $A_{\vt}} \def\Pvt{\Pi_{\vt}$ is invertible, similarly $A_{\vt}} \def\Pvt{\Pi_{\vt}^{-1}\stackrel{\rm def}{=}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}$.
In the course of this work, we shall frequently refer to the constrained quadratic optimization problem stated next.
{\em Given $d\in\RR^{\m}$ and $\omega\subseteq\II_{\n}$, problem {\rm{($\,\P_{\vt}\,$)}~} reads as: \begin{equation}} \def\eeq{\end{equation}~~~~~~~~~~~~~~~~~~~~~~~~~~~\framebox{\mbox{$\left\{\begin{array}} \def\earr{\end{array}{lll}
&&\disp{\min_{u\in\RR^{\n} }\|Au-d\|^2}~,\\~\\ \mbox{\rm subject to}&& u[i]=0,~~\forall\; i\in \omega^c~.\\ \earr\right.$}}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\mbox{{\rm{($\,\P_{\vt}\,$)}~}}\label{cpa}\eeq } {\em Clearly, problem {\rm{($\,\P_{\vt}\,$)}~} always admits a solution.}
The definition below will be used to evaluate the extent of some subsets and assumptions. \begin{definition}} \def\ED{\end{definition}\label{ps} A property (an assumption) is called {\em generic on} $\mathbb R^\k$ if it holds true on a {\em dense open} subset of $\mathbb R^\k$. \ED
As usual, a subset ${\mathcal{S}}\subset\mathbb R^\k$ is said to be {\em negligible} in $\mathbb R^\k$ if there exists $\Z\subset\mathbb R^\k$ whose Lebesgue measure in $\mathbb R^\k$ is $\mathbb{L}^\k(\Z)=0$ and ${\mathcal{S}}\subseteq\Z$~. If a property fails only on a negligible set, it is said to hold
{\em almost everywhere}, meaning ``with probability one''. Definition~\ref{ps} requires much more than {\em almost everywhere\,}. Let us explain.
{\em If a property holds true for all $v\in\mathbb R^\k\,\mathbf{\setminus}\,{\mathcal{S}}$, where ${\mathcal{S}}\subseteq\Z\subset\mathbb R^\k$, $\Z$ is {\em closed in $\mathbb R^\k$} and $\mathbb{L}^\k(\Z)=0$, then this property is {\em generic} on $\mathbb R^\k$.} Indeed, $\mathbb R^\k\,\mathbf{\setminus}\,\Z$ contains a {\em dense open} subset of $\mathbb R^\k$. So if a property is generic on $\mathbb R^\k$, then it holds true almost everywhere on $\mathbb R^\k$. But the converse is false: an almost everywhere true property is not generic if the closure of its negligible subset has a positive measure,\footnote{ There are many examples---e.g. $\Z=\{x\in[0,1]~:~x ~\mbox{is rational}\}$, then $\mathbb{L}^1(\Z)=0$ and $\mathbb{L}^1(\mbox{closure}(\Z))=1$.} because then $\mathbb R^\k\,\mathbf{\setminus}\,\Z$ does not contain a dense open subset of $\mathbb R^\k$. In this sense, a generic property is stable with respect to the objects to which it applies.
{\em The elements of a set ${\mathcal{S}}\subset\mathbb R^\k$ where a generic property fails are highly exceptional in~$\mathbb R^\k$.} The chance that a truly random $v\in\mathbb R^\k$---i.e., a $v$ following a nonsingular probability distribution on $\mathbb R^\k$---comes across such an ${\mathcal{S}}$ can be ignored in practice.
\subsection{Content of the paper} The main result in section~\ref{LM} tells us that finding a solution of {\rm{($\,\P_{\vt}\,$)}~} for $\omega\subset\II_{\n}$ is {\em equivalent} to computing a (local) minimizer of $\mathcal{F}_d$. In section~\ref{sSM} we prove that the (local) minimizers $\hat u$ of $\mathcal{F}_d$ are {\em strict} if and only if the submatrix $A_{\sigma(\hat u)}$ has full column rank. The strict minimizers of $\mathcal{F}_d$ are shown to be linear in data $d$. The importance of the $(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)$-sparse strict minimizers is emphasized. The global minimizers of $\mathcal{F}_d$ are studied in section~\ref{sgm}. Their existence is proved. They are shown to be strict for any $d$ and for any $\beta>0$. More details are provided under the standard assumption that $\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n$. Given $d\in\RR^{\m}$, critical values $\beta_\k$ for $\k\in\II_{\m-1}$ are exhibited such that all global minimizers of $\mathcal{F}_d$ are $\k$-sparse\footnote{
As usual, a vector $u$ is said to be $\k$-sparse if $\|u\|_0\leqslant\k$.}
if $\beta>\beta_\k$.
In section~\ref{kb}, a gentle assumption on $A$ is shown to be {\em generic} for all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real matrices. Under this assumption, for all data $d\in\RR^{\m}$ beyond a closed negligible subset, the objective $\mathcal{F}_d$ for $\beta>\beta_\k$, $\k\in\II_{\m-1}$, has a unique global minimizer and this minimizer is $\k$-sparse.
Small size ($A$ is $5\x10$) numerical tests in section~\ref{nm} illustrate the main theoretical results.
\section{All minimizers of $\bm{\mathcal{F}_d}$}\label{LM}
\subsection{Preliminary results}\label{pr}
First, we give some basic facts on problem {\rm{($\,\P_{\vt}\,$)}~} as defined in \eqref{cpa} that are needed for later use. If $\omega=\varnothing$, then $\omega^c=\II_{\n}$, so the unique solution of {\rm{($\,\P_{\vt}\,$)}~} is $\hat u=0$. For an arbitrary $\omega\subset\II_{\n}$ meeting $\,\sharp\,\omega\geq1$, {\rm{($\,\P_{\vt}\,$)}~} amounts to minimizing a quadratic term with respect to only $\,\sharp\,\omega$ components of $u$, the remaining entries being null. This {\em quadratic} problem {\rm{($\,\Q_{\vt}\,$)}~} reads as \begin{equation}} \def\eeq{\end{equation}\!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\disp{\min_{v\in\mathbb R^{\,\sharp\,\omega}}\big\|A_{\vt}} \def\Pvt{\Pi_{\vt} v-d\big\|^2},~~\,\sharp\,\omega\geq1~,~\label{cpr} ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\mbox{{\rm{($\,\Q_{\vt}\,$)}~}} \eeq and it always admits a solution. Using the zero-padding operator $Z_\omega$ in \eqref{zo}, we have \[\Big[~\hu_{\vt}\in\mathbb R^{\,\sharp\,\omega}~\mbox{\rm solves {\rm{($\,\Q_{\vt}\,$)}~} and}~~\hat u=Z_\omega\left(\hu_{\vt}\right)~\Big]~~~\Leftrightarrow~~~ \Big[~\mbox{$\hat u\in\RR^{\n}$ solves {\rm{($\,\P_{\vt}\,$)}~}},~~\,\sharp\,\omega\geq1~\Big]~.\] The optimality conditions for {\rm{($\,\Q_{\vt}\,$)}~}, combined with the definition in~\eqref{cw}(a), give rise to the following equivalence, which holds true for any $\omega\subseteq\II_{\n}$: \begin{equation}} \def\eeq{\end{equation} \Big[~\mbox{$\hat u\in\RR^{\n}$ solves {\rm{($\,\P_{\vt}\,$)}~}}\Big]~~\Leftrightarrow~~ \Big[~\hu_{\vt}\in\mathbb R^{\,\sharp\,\omega}~~\mbox{\rm solves $~\disp{A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\, v=A_{\vt}} \def\Pvt{\Pi_{\vt}^Td~}$ and}~~\hat u=Z_\omega\left(\hu_{\vt}\right)~\Big]. \label{eu}\eeq Note that $\disp{A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\, v=A_{\vt}} \def\Pvt{\Pi_{\vt}^Td}$ in~\eqref{eu} is the normal equation associated with $A_{\vt}} \def\Pvt{\Pi_{\vt}\,v=d$. The remark below shows that the optimal value of {\rm{($\,\P_{\vt}\,$)}~} in \eqref{cpa} can also be seen as an orthogonal projection problem.
\begin{remark} \label{ref} \rm Let $r\stackrel{\rm def}{=}\mathrm{rank}(A_{\vt}} \def\Pvt{\Pi_{\vt})$ and ${B_{\omega}}\in\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times r}$ be an orthonormal basis for $\ran(A_{\vt}} \def\Pvt{\Pi_{\vt})$. Then $A_{\vt}} \def\Pvt{\Pi_{\vt}=B_\omega H_\omega$ for a uniquely defined matrix ${H_{\omega}}\in\mathbb R^{r\times\,\sharp\,\omega}$ with $\mathrm{rank}({H_{\omega}})=r$. Using~\eqref{eu}, we have \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\hu_{\vt}=A_{\vt}} \def\Pvt{\Pi_{\vt}^Td~~\Leftrightarrow~~{H_{\omega}}^T{H_{\omega}}\hu_{\vt}={H_{\omega}}^T{B_{\omega}}^Td ~~\Leftrightarrow~~{H_{\omega}}\hu_{\vt}={B_{\omega}}^Td~~\Leftrightarrow~~A_{\vt}} \def\Pvt{\Pi_{\vt}\hu_{\vt}={B_{\omega}}{B_{\omega}}^Td~. \eeqnn In addition, $\Pi_{\ran(A_{\vt}} \def\Pvt{\Pi_{\vt})}={B_{\omega}}{B_{\omega}}^T$ is the orthogonal projector onto the subspace spanned by the columns of $A_{\vt}} \def\Pvt{\Pi_{\vt}$, see e.g.~\cite{Meyer00}. The expression above combined with~\eqref{eu} shows that \[\Big[\,\mbox{$\hat u\in\RR^{\n}$ solves }(\P_{\omega})\,\Big]~~\Leftrightarrow~~ \Big[~\hu_{\vt}\in\mathbb R^{\,\sharp\,\omega}~~\mbox{\rm meets}~~A_{\vt}} \def\Pvt{\Pi_{\vt}\hu_{\vt}=\Pi_{\ran(A_{\vt}} \def\Pvt{\Pi_{\vt})}d~~~\mbox{and}~~\hat u=Z_\omega(\hu_{\vt})\,\Big]. \label{pref}\] \em Obviously, $A\hat u=A_{\vt}} \def\Pvt{\Pi_{\vt}\hu_{\vt}$ is the orthogonal projection of $d$ onto the basis~$B_\omega$.\end{remark}
For $\omega\subseteq\II_{\n}$, let $\Km_\omega$ denote the vector subspace \begin{equation}} \def\eeq{\end{equation}\Km_\omega\stackrel{\rm def}{=}\big\{v\in\RR^{\n}~:~v[i]=0,~\forall\; i\in\omega^c\big\}~.\label{km}\eeq This notation enables problem {\rm{($\,\P_{\vt}\,$)}~} in \eqref{cpa} to be rewritten as
\begin{equation}} \def\eeq{\end{equation}\disp{\min_{u\in\Km_\omega}\|Au-d\|^2~.}\label{fa}\eeq
The technical lemma below will be used in what follows. We emphasize that its statement is {\em independent} of the vector $\hat u\in\RR^{\n}\,\mathbf{\setminus}\,\{0\}$.
\begin{lemma}} \def\EL{\end{lemma}\label{tl} Let $d\in\RR^{\m}$, $\beta>0$, and $\hat u\in\RR^{\n}\,\mathbf{\setminus}\,\{0\}$ be {\em arbitrary}. For ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)$, set \begin{equation}} \def\eeq{\end{equation}
\rho\stackrel{\rm def}{=}\min\left\{\min_{i\in{\hat\sigma}}\big|\,\hat u[i]\,\big|,~\frac{\beta}{2\Big(\|A^T(A\hat u-d)\|_1+1\Big)}\right\}. \label{rho}\eeq Then $\rho>0$. \begin{itemize}} \def\eit{\end{itemize} \item[\rm(i)] For $\phi$ as defined in~\eqref{phi}, we have \begin{equation}} \def\eeq{\end{equation}\nonumber\label{tlo} v\in \Bm_\infty(0,\rho)~~\Rightarrow~~\sum_{i\in\II_{\n}}\phi\big(\,\hat u[i]+v[i]\,\big)= \sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)+\sum_{i\in{\hat\sigma}^c}\phi\left(v[i]\right)~. \eeq \item[\rm(ii)] For $\Km_{{\hat\sigma}}$ defined according to \eqref{km}, $\mathcal{F}_d$ satisfies \begin{equation}} \def\eeq{\end{equation} v\in\Bm_\infty(0,\rho)\cap\big(\RR^{\n}\,\mathbf{\setminus}\,\Km_{{\hat\sigma}}\big)~~~\Rightarrow~~~\mathcal{F}_d(\hat u+v)\geqslant\mathcal{F}_d(\hat u) ~, \nonumber\label{pip}\eeq where the inequality is {\em strict} whenever ${\hat\sigma}^c\neq\varnothing$. \eit \EL
The proof is outlined in Appendix~\ref{dtl}.
\subsection{The (local) minimizers of $\bm{\mathcal{F}_d}$ solve quadratic problems}\label{fip}~~
{\em It is worth emphasizing that no special assumptions on the matrix $A$ are adopted.}
We begin with an easy but cautionary result.
\begin{lemma}} \def\EL{\end{lemma}\label{uz} For any $d\in\RR^{\m}$ and for all $\beta>0$, $\mathcal{F}_d$ has a {\em strict} (local) minimum at $\hat u=0\in\RR^{\n}$. \EL
\par\paragraph{Proof} \ignorespaces Using the fact that $\mathcal{F}_d(0)=\|d\|^2\geq0$, we have \begin{eqnarray}} \def\eeqn{\end{eqnarray}
\mathcal{F}_d(v)&=&\|Av-d\|^2+\beta\|v\|_0=\mathcal{F}_d(0)+\mathcal{R}_d(v)~,\label{wq}\\ \mbox{where}\quad
\mathcal{R}_d(v)&=&\|Av\|^2-2\<v,A^Td\>+\beta\|v\|_0~.\eeqn Noticing that $\beta\|v\|_0\geqslant\beta>0$ for $v\neq0$ leads to
\[v\in \Bm_2\left(0,\frac{\beta}{2\|A^Td\|+1}\right)\setminus\{0\}~~~\Rightarrow~~~
\mathcal{R}_d(v)\geqslant-2\|v\|\;\|A^Td\|+\beta >0~.\] Inserting this implication into~\eqref{wq} proves the lemma.
$\Box$\newline
{\em For any $\beta>0$ and $d\in\RR^{\m}$, the sparsest strict local minimizer of $\mathcal{F}_d$ reads $\hat u=0$. Initializing a suboptimal algorithm with zero is thus generally a bad choice.} Indeed, experiments have shown that such an initialization can be harmful; see, e.g., \cite{Miller02,Davies08}.
The next proposition states a result that is often evoked in this work.
\begin{proposition}} \def\EP{\end{proposition}\label{cp} Let $d\in\RR^{\m}$. Given an $\omega\subseteq\II_{\n}$, let $\hat u$ solve problem {\rm{($\,\P_{\vt}\,$)}~} as formulated in~\eqref{cpa}. Then for any $\beta>0$, the objective $\mathcal{F}_d$ in~\eqref{fd} reaches a (local) minimum at $\hat u$ and \begin{equation}} \def\eeq{\end{equation}\sigma(\hat u)\subseteq\omega~,\label{cop}\eeq where $\sigma(\hat u)$ is given in Definition~\ref{sp}. \EP
\par\paragraph{Proof} \ignorespaces Let $\hat u$ solve problem {\rm{($\,\P_{\vt}\,$)}~}, and let $\beta>0$. The constraint in {\rm{($\,\P_{\vt}\,$)}~} entails~\eqref{cop}.
Consider that $\hat u\neq0$, in which case for ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)$ we have $1\leqslant\,\sharp\,{\hat\sigma}\leqslant\,\sharp\,\omega$. Using the equivalent formulation of {\rm{($\,\P_{\vt}\,$)}~} given in~\eqref{km}-\eqref{fa}, we obtain
\begin{equation}} \def\eeq{\end{equation} v\in\Km_\omega~~~~\Rightarrow~~~\|A(\hat u+v)-d\|^2\geqslant\|A\hat u-d\|^2~.\label{sqi}\eeq The inclusion in \eqref{cop} is equivalent to $\omega^c\subseteq{\hat\sigma}^c~.$ Let $\Km_{{\hat\sigma}}$ be defined according to~\eqref{km} as well. Then \[\hat u\in\Km_{{\hat\sigma}}\subseteq\Km_\omega.\] Combining~the latter relation with~\eqref{sqi} leads to \begin{equation}} \def\eeq{\end{equation}\label{yui}
v\in\Km_{{\hat\sigma}}~~~\Rightarrow~~~\|A(\hat u+v)-d\|^2\geqslant\|A\hat u-d\|^2~. \eeq Let $\rho$ be defined as in~\eqref{rho} of Lemma~\ref{tl}. Noticing that by \eqref{phi} and \eqref{km} \begin{equation}} \def\eeq{\end{equation} v\in\Km_{{\hat\sigma}}~~~\Rightarrow~~~\phi\left(v[i]\right)=0~~~\forall\; i\in{\hat\sigma}^c~, \label{rer}\eeq the following inequality chain is derived: \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} v\in \Bm_\infty(0,\rho)\cap\Km_{{\hat\sigma}}~~~\Rightarrow~~~
\mathcal{F}_d(\hat u+v)&=&\|A(\hat u+v)-d\|^2+\beta\sum_{i\in\II_{\n}}\phi\left(\hat u[i]+v[i]\right)\nonumber\\ \Big[\mbox{\rm by Lemma~\ref{tl}(i)}\Big]~~~~
&=&\|A(\hat u+v)-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)+\beta\sum_{i\in{\hat\sigma}^c}\phi\left(v[i]\right)\nonumber\\ \Big[\mbox{\rm by~\eqref{rer}}\Big]~~~~
&=&\|A(\hat u+v)-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)\nonumber\\
\Big[\mbox{\rm by~\eqref{yui}}\Big]~~~~&\geqslant&\|A\hat u-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)\nonumber\\ \Big[\mbox{\rm by~\eqref{fds}}\Big]~~~~&=&\mathcal{F}_d(\hat u) ~.\label{4v}\eeqnn Combining the obtained implication with~Lemma~\ref{tl}(ii) shows that \[\mathcal{F}_d(\hat u+v)\geqslant\mathcal{F}_d(\hat u)~~~\forall\; v\in \Bm_\infty(0,\rho)~.\]
If $\hat u=0$, it is a (local) minimizer of $\mathcal{F}_d$ by Lemma~\ref{uz}.
$\Box$\newline
Many authors mention that initialization is paramount for the success of approximate algorithms minimizing $\mathcal{F}_d$. In view of Proposition~\ref{cp}, if one already has a well-elaborated initialization, it could be enough to solve the relevant problem {\rm{($\,\P_{\vt}\,$)}~}.
The statement reciprocal to Proposition~\ref{cp} is obvious but it helps the presentation.
\begin{lemma}} \def\EL{\end{lemma}\label{ob} For $d\in\RR^{\m}$ and $\beta>0$, let $\mathcal{F}_d$ have a (local) minimum at $\hat u$. Then $\hat u$ solves {\rm{($\,\P_{\hsi}\,$)}~} for ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)$. \EL
\par\paragraph{Proof} \ignorespaces Let $\hat u$ be a (local) minimizer of $\mathcal{F}_d$. Denote ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)$. Then $\hat u$ solves the problem
\[\min_{u\in\RR^{\n}}\Big\{\|Au-d\|^2+\beta\,\sharp\,{\hat\sigma}\Big\}~~\mbox{subject to } u[i]=0~~\forall\; i\in {\hat\sigma}^c.\] Since $\,\sharp\,{\hat\sigma}$ is a constant, $\hat u$ solves {\rm{($\,\P_{\hsi}\,$)}~}.
$\Box$\newline
\begin{remark}} \def\ER{\end{remark} ~~ By Proposition~\ref{cp} and Lemma~\ref{ob}, solving {\rm{($\,\P_{\vt}\,$)}~} for some $\omega\subseteq\II_{\n}$ \\ is {\em equivalent} to finding a (local) minimizer of~$\mathcal{F}_d$. \label{x1}\ER
This equivalence underlies most of the theory developed in this work.
\begin{corollary}} \def\EC{\end{corollary}\label{nor} For $d\in\RR^{\m}$ and $\beta>0$, let $\hat u$ be a (local) minimizer of $\mathcal{F}_d$. Set ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)$. Then \begin{equation}} \def\eeq{\end{equation}\label{ee}\hat u=Z_{{\hat\sigma}}(\hu_{\hsi})~,~~~\mbox{\rm where $~\hu_{\hsi}~$ satisfies}~~~A_{\hsi}} \def\Phs{\Pi_{\hsi}^TA_{\hsi}} \def\Phs{\Pi_{\hsi}\hu_{\hsi}=A_{\hsi}} \def\Phs{\Pi_{\hsi}^Td~. \eeq Conversely, if $\hat u\in\RR^{\n}$ satisfies \eqref{ee} for ${\hat\sigma}=\sigma(\hat u)$, then $\hat u$ is a (local) minimizer of $\mathcal{F}_d$. \EC \par\paragraph{Proof} \ignorespaces By Lemma~\ref{ob}, $\hat u$ solves {\rm{($\,\P_{\hsi}\,$)}~}. The equation in~\eqref{ee} follows directly from \eqref{eu}. The last claim is a straightforward consequence of \eqref{eu} and Proposition~\ref{cp}.
$\Box$\newline
\begin{remark}} \def\ER{\end{remark} \label{x2} \rm Equation \eqref{ee} shows that a (local) minimizer $\hat u$ of $\mathcal{F}_d$ follows a {\em pseudo}-hard thresholding scheme\footnote{In a Bayesian setting, the quadratic data fidelity term in~$\mathcal{F}_d$ models data corrupted with Gaussian i.i.d. noise.}: the nonzero part $\hu_{\hsi}$ of $\hat u$ is the least squares solution with respect to the submatrix $A_{\hsi}} \def\Phs{\Pi_{\hsi}$ and the whole data vector $d$ is involved in its computation. Unlike the hard thresholding scheme in \cite{Donoho94}, insignificant or purely noisy data entries can hardly be discarded from $\hat u$ and they threaten to pollute its nonzero part $\hu_{\hsi}$. See also Remark~\ref{ifn}. \ER
Noisy data $d$ should degrade $\hu_{\hsi}$ and this effect is stronger if $A_{\hsi}} \def\Phs{\Pi_{\hsi}^TA_{\hsi}} \def\Phs{\Pi_{\hsi}$ is ill-conditioned \cite{Demoment89}. The quality of the outcome critically depends on the selected (local) minimizer and on the pertinence of $A$.
It may be interesting to evoke another consequence of Proposition~\ref{cp}: \begin{remark}} \def\ER{\end{remark} \label{x3} Given $d\in\RR^{\m}$, for any $\omega\subseteq\II_{\n}$, $\mathcal{F}_d$ has a (local) minimizer $\hat u$ defined by \eqref{ee} and obeying $\sigma(\hat u)\subseteq\omega$. \ER
\section{The strict minimizers of $\bm{\mathcal{F}_d}$}\label{sSM}~~
{\em We emphasize, yet again, that no special assumptions on $A\in\RR^{\m\x\n}$ are adopted.}
Strict minimizers of an objective function enable unambiguous solutions of inverse problems.
The definition below is useful in characterizing the strict minimizers of $\mathcal{F}_d$.
\begin{definition}} \def\ED{\end{definition}\label{om} Given a matrix $A\in\RR^{\m\x\n}$, for any $r\in\II_{\m}$ we define $\Omega_r$ as the subset of {\em all} $r$-length supports that correspond to full column rank $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times r$ submatrices of $A$, i.e., \begin{equation}} \def\eeq{\end{equation}\mbox{\framebox{$\disp{~ \Omega_r=\Big\{~\omega\subset\II_{\n}~:~~\,\sharp\,\omega=r=\mathrm{rank}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\right)~\Big\}~.}$}} \nonumber\label{sm}\eeq Set $\Omega_0=\varnothing$ and define as well \begin{equation}} \def\eeq{\end{equation}\mbox{\framebox{$\disp{~ \Omega\stackrel{\rm def}{=}\bigcup_{r=0}^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}\Omega_r~~\mbox{\rm and}~~\Om_{\max}\stackrel{\rm def}{=}\Omega\cup\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}~.}$}}\nonumber\label{si}\eeq \ED Definition~\ref{om} shows that for any $r\in\II_{\m}$, \[\mathrm{rank}(A)=r\geq1~~~\Leftrightarrow~~~\Omega_r\neq\varnothing~~\mbox{\rm and}~~\Omega_t=\varnothing~~\forall\; t\geqslant r+1~.\]
\subsection{How to recognize a strict minimizer of $\bm{\mathcal{F}_d}$?}\label{rec}
The theorem below gives an exhaustive answer to this question. \begin{theorem}} \def\ET{\end{theorem}\label{ra} Given $d\in\RR^{\m}$ and $\beta>0$, let $\hat u$ be a (local) minimizer of $\mathcal{F}_d$. Define \[{\hat\sigma}\stackrel{\rm def}{=} \sigma(\hat u)~.\] The following statements are {\em equivalent}: \begin{itemize}} \def\eit{\end{itemize} \item[\rm (i)] The (local) minimum that $\mathcal{F}_d$ has at $\hat u$ is {\em strict}; \item[\rm (ii)] \framebox{$\disp{\mathrm{rank}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)=\,\sharp\,{\hat\sigma}}$}~; \item[\rm(iii)] ${\hat\sigma}\in\Om_{\max}$~. \eit If $\hat u$ is a strict (local) minimizer of $\mathcal{F}_d$, then it reads \begin{equation}} \def\eeq{\end{equation} \hat u=Z_{{\hat\sigma}}\left(\hu_{\hsi}\right)~~~\mbox{for}~~~ \hu_{\hsi}=\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}^TA_{\hsi}} \def\Phs{\Pi_{\hsi}\right)^{-1}A_{\hsi}} \def\Phs{\Pi_{\hsi}^Td~\label{hu}
\eeq and satisfies $\,\sharp\,{\hat\sigma}= \|\hat u\|_0\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}~.$ \ET
\par\paragraph{Proof} \ignorespaces We break the proof into four parts.
\paragraph{\rm[(i)$\Rightarrow$(ii)]}~ We recall that by the rank-nullity theorem \cite{Golub96,Meyer00} \begin{equation}} \def\eeq{\end{equation}\mathrm{dim}\,\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)=\,\sharp\,{\hat\sigma}-\mathrm{rank}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)~.\label{rn}\eeq Let\footnote{This part can alternatively be proven using Remark~\ref{ref}.}
$\hat u\neq0$ be a {\em strict} (local) minimizer of $\mathcal{F}_d$. Assume that (ii) fails. Then \eqref{rn} implies that \begin{equation}} \def\eeq{\end{equation} \mathrm{dim}\,\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)\geq1~.\label{qa}\eeq By Lemma~\ref{ob}, $\hat u$ solves {\rm{($\,\P_{\hsi}\,$)}~}. Let $\rho$ read as in~\eqref{rho} and let $\Km_{{\hat\sigma}}$ be defined according to~\eqref{km}. Noticing that \begin{equation}} \def\eeq{\end{equation} v\in\Km_{{\hat\sigma}},~~{\hat\sigma}\neq\varnothing~~~\Rightarrow~~~Av=A_{\hsi}} \def\Phs{\Pi_{\hsi} v_{{\hat\sigma}}~,\label{ui} \eeq Lemma~\ref{tl}(i) shows that \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} \left\{\begin{array}} \def\earr{\end{array}{c}v\in \Bm_\infty(0,\rho)\cap\Km_{{\hat\sigma}}~,\\~~\\ v_{{\hat\sigma}}\in\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)\earr\right.~~~\Rightarrow~~~
\mathcal{F}_d(\hat u+v)&=&\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\left(\hu_{\hsi}+v_{{\hat\sigma}}\right)-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]+v[i]\right)\\ \Big[\mbox{\rm by Lemma~\ref{tl}(i)}\Big]~~~~
&=&\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\hu_{\hsi}-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)+\beta\sum_{i\in{\hat\sigma}^c}\phi\left(v[i]\right)\\
\Big[\mbox{\rm by \eqref{rer}~}\Big]~~~~&=&\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\hu_{\hsi}-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)\\ \Big[\mbox{\rm by \eqref{fds}~}\Big]~~~~&=&\mathcal{F}_d(\hat u)~, \eeqnn i.e., that $\hat u$ is not a strict minimizer, which contradicts (i). Hence the assumption in~\eqref{qa} is false. Therefore (ii) holds true.
If $\hat u=0$, then ${\hat\sigma}=\varnothing$; hence $A_{\hsi}} \def\Phs{\Pi_{\hsi}\in\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\x0}$ and $\mathrm{rank}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)=0=\,\sharp\,{\hat\sigma}$ according to \eqref{cw}.
\paragraph{\rm$\mathrm{[(ii)\Rightarrow(i)]}$} Let $\hat u$ be a minimizer of $\mathcal{F}_d$ that satisfies (ii). To have $\,\sharp\,{\hat\sigma}=0$ is equivalent to $\hat u=0$. By Lemma~\ref{uz}, $\hat u$ is a strict minimizer. Focus on $\,\sharp\,{\hat\sigma}\geq1$. Since $\mathrm{rank}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)=\,\sharp\,{\hat\sigma}\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and problem~{\rm{($\,\Q_{\vt}\,$)}~} in~\eqref{cpr} is strictly convex for $\omega={\hat\sigma}$, its unique solution $\hu_{\hsi}$ satisfies
\[v\in\mathbb R^{\,\sharp\,{\hat\sigma}}\,\mathbf{\setminus}\,\{0\}~~~\Rightarrow~~~\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\left(\hu_{\hsi}+v\right)-d\|^2>\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\hu_{\hsi}-d\|^2~.\] Using \eqref{ui}, this is equivalent to
\begin{equation}} \def\eeq{\end{equation} v\in\Km_{{\hat\sigma}}\,\mathbf{\setminus}\,\{0\}~~~\Rightarrow~~~\|A(\hat u+v)-d\|^2=\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\left(\hu_{\hsi}+v_{{\hat\sigma}}\right)-d\|^2>\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\hu_{\hsi}-d\|^2
=\|A\hat u-d\|^2~.\label{$}\eeq
Lemma~\ref{tl}(i), along with \eqref{rer}, yields \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} v\in\Bm_\infty(0,\rho)\cap\Km_{{\hat\sigma}}\,\mathbf{\setminus}\,\{0\}~~~\Rightarrow~~~\mathcal{F}_d(\hat u+v)
&=&\|A(\hat u+v)-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)\\
\Big[\mbox{\rm by~\eqref{$}}\Big]~~~~~~~~~&>&\|A\hat u-d\|^2+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)\\ &=&\mathcal{F}_d(\hat u)~. \eeqnn Since $\,\sharp\,{\hat\sigma}\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\leqslant\n-1$, we have ${\hat\sigma}^c\neq\varnothing$. So Lemma~\ref{tl}(ii) tells us that \[v\in\Bm_\infty(0,\rho)\,\mathbf{\setminus}\,\Km_{{\hat\sigma}}~~~\Rightarrow~~~\mathcal{F}_d(\hat u+v)>\mathcal{F}_d(\hat u)~.\] Combining the last two implications proves (i).
\paragraph{\rm$\mathrm{[(ii)\Rightarrow(iii)]}$} Comparing (iii) with Definitions \ref{sp} and \ref{om} proves the claim.
\paragraph{\rm[Equation~\eqref{hu}]} The proof follows from equation~\eqref{ee} in Corollary~\ref{nor} where\footnote{For ${\hat\sigma}=\varnothing$, \eqref{zo} and~\eqref{cw}$(a)$ show that~\eqref{hu} yields $\hat u=0$.} $A_{\hsi}} \def\Phs{\Pi_{\hsi}^TA_{\hsi}} \def\Phs{\Pi_{\hsi}$ is invertible.
$\Box$\newline
{\em Theorem~\ref{ra} provides a simple rule enabling one to verify whether or not a numerical scheme has reached a strict (local) minimizer of $\mathcal{F}_d$.}
The notations $\Omega_r$, $\Omega$ and $\Om_{\max}$ are frequently used in this paper. Their interpretation is obvious in the light of Theorem~\ref{ra}. For any $d\in\RR^{\m}$ and for all $\beta>0$, the set $\Om_{\max}$ is composed of the supports of all possible strict (local) minimizers of $\mathcal{F}_d$, while $\Omega$ is the subset of those that are $(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)$-sparse.
An easy and useful corollary is presented next.
\begin{corollary}} \def\EC{\end{corollary}\label{blg} Let $d\in\RR^{\m}$. Given an arbitrary $\omega\in\Om_{\max}$, let $\hat u$ solve {\rm{($\,\P_{\vt}\,$)}~}. Then \begin{itemize}} \def\eit{\end{itemize} \item[\rm(i)] $\hat u$ reads as \begin{equation}} \def\eeq{\end{equation}\hat u=Z_\omega\left(\hu_{\vt}\right)~,~~\mbox{where}~~~\hu_{\vt}=\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^Td~,\label{bla}\eeq and obeys ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)\subseteq\omega$ and ${\hat\sigma}\in\Om_{\max}$~; \item[\rm(ii)] for any $\beta>0$, $\hat u$ is a {\em strict} (local) minimizer of $\mathcal{F}_d$; \item[\rm(iii)] $\hat u$ solves {\rm{($\,\P_{\hsi}\,$)}~}. \eit \EC
\par\paragraph{Proof} \ignorespaces Using~\eqref{eu}, $\hat u$ fulfills (i) since $A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}$ is invertible and $\sigma(\hat u)\subseteq\omega$ by the constraint in {\rm{($\,\P_{\vt}\,$)}~}. If ${\hat\sigma}=\varnothing$, (ii) follows from~Lemma~\ref{uz}. For $\,\sharp\,{\hat\sigma}\geq1$, $A_{\hsi}} \def\Phs{\Pi_{\hsi}$ is an $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\,\sharp\,{\hat\sigma}$ submatrix of $A_{\vt}} \def\Pvt{\Pi_{\vt}$. Since $\mathrm{rank}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\right)=\,\sharp\,\omega$, we have $\mathrm{rank}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)=\,\sharp\,{\hat\sigma}$ and so ${\hat\sigma}\in\Om_{\max}$. By~Proposition~\ref{cp} $\hat u$ is a (local) minimizer of $\mathcal{F}_d$, and~Theorem~\ref{ra} leads to (ii).
Lemma~\ref{ob} and Corollary~\ref{blg}(ii) yield (iii).
$\Box$\newline
\begin{remark}} \def\ER{\end{remark} \label{x4} One can easily compute a strict (local) minimizer $\hat u$ of $\mathcal{F}_d$ without knowing the value of the regularization parameter $\beta$. Just data $d$ and an $\omega\in\Om_{\max}$ are needed. \ER
This consequence of~Corollary~\ref{blg} might be striking.
% NOTE(review): two garbled macro-definition fragments (}\def\bs{ and }\def\ss{) removed here -- they were extraction artifacts with unbalanced braces; confirm against the original source that no content was attached to them.
Clearly, the support $\sigma(\bar u)$ of a nonstrict local minimizer $\bar{u}$ of $\mathcal{F}_d$ contains some subsupports yielding strict (local) minimizers of $\mathcal{F}_d$. It is easy to see that among them, there is ${\hat\sigma}\subsetneqq\sigma(\bar{u})$ such that the corresponding $\hat u$ given by \eqref{hu} {\em strictly} decreases the value of $\mathcal{F}_d$; i.e., $\mathcal{F}_d(\hat u)<\mathcal{F}_d(\bar{u}).$
\subsection{Every strict (local) minimizer of $\bm{\mathcal{F}_d}$ is linear in $\bm{d}$}\label{els} Here we explore the behavior of the strict (local) minimizers of $\mathcal{F}_d$ with respect to variations of~$d$. An interesting sequel of Theorem~\ref{ra} is presented in the following corollary.
\begin{corollary}} \def\EC{\end{corollary}\label{sk} For $d\in\RR^{\m}$ and $\beta>0$, let $\hat u$ be a (local) minimizer of $\mathcal{F}_d$ satisfying ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)\in\Omega~.$ Define \[\Nm_{{\hat\sigma}}\stackrel{\rm def}{=}\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}^T\right)\subset\RR^{\m}~.\] We have $\,\mathrm{dim}\,\Nm_{{\hat\sigma}}=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-\,\sharp\,{\hat\sigma}\geq1$ and \[{d^{\,\prime}}\in\Nm_{{\hat\sigma}}~~~\Rightarrow~~~\mathcal{F}}\def\G{\mathcal{G}}\def\H{{\mathcal H}}\def\I{{\mathcal I}}\def\J{{\mathcal J}}\def\K{\mathcal{K}_{d+{d^{\,\prime}}}~~\mbox{has a strict (local) minimum at}~~\hat u~.\] \EC
\par\paragraph{Proof} \ignorespaces Since ${\hat\sigma}\in\Omega$, the minimizer $\hat u$ is strict by Theorem~\ref{ra}. By ${d^{\,\prime}}\in\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}^T\right)$ we find $A_{\hsi}} \def\Phs{\Pi_{\hsi}^T(d+{d^{\,\prime}})=A_{\hsi}} \def\Phs{\Pi_{\hsi}^Td$ for any $\,{d^{\,\prime}}\in\Nm_{{\hat\sigma}}$. Inserting this into~\eqref{hu} in Theorem~\ref{ra} yields the result.
$\Box$\newline
All data located in the vector subspace $\Nm_{{\hat\sigma}}\supsetneqq\{0\}$ yield the same strict (local) minimizer $\hat u$.
\begin{remark} \label{ifn} {\rm If data contain noise $n$, it can be decomposed in a unique way as $n=n_{\Nm_{{\hat\sigma}}}+n_{\Nm_{{\hat\sigma}}^\bot}$ where $n_{\Nm_{{\hat\sigma}}}\in\Nm_{{\hat\sigma}}$ and $n_{\Nm_{{\hat\sigma}}^\bot}\in\Nm_{{\hat\sigma}}^\bot$. The component $n_{\Nm_{{\hat\sigma}}}$ is removed (Corollary~\ref{sk}), while $n_{\Nm_{{\hat\sigma}}^\bot}$ is transformed according to~\eqref{hu} and added to $\hu_{\hsi}$. } \end{remark}
We shall use the following definition.
\begin{definition}} \def\ED{\end{definition} Let ${\O}\subseteq\RR^{\m}$ be an open domain. We say that $\U:{\O}\rightarrow\RR^{\n}$ is a {\em local minimizer function} for the family of objectives $\mathfrak{F}_{\O}\stackrel{\rm def}{=}\{\mathcal{F}_d~:~d\in {\O}\}$ if for any $d\in {\O}$, the function $\mathcal{F}_d$ reaches a {\em strict} (local) minimum at $\U(d)$. \label{mf} \ED
Corollary~\ref{blg} shows that for any $d\in\RR^{\m}$, each strict (local) minimizer of $\mathcal{F}_d$ is entirely described by an $\omega\in\Om_{\max}$ via equation~\eqref{bla} in the same corollary. Consequently, a local minimizer function $\U$ is associated with every $\omega\in\Om_{\max}$.
\begin{lemma}} \def\EL{\end{lemma}\label{ct} For some arbitrarily fixed $\omega\in\Om_{\max}$ and $\beta>0$, the family of functions $\mathfrak{F}_{\RR^{\m}}$ has a {\em linear} (local) minimizer function $\U:\RR^{\m}\rightarrow\RR^{\n}$ that reads as \begin{equation}} \def\eeq{\end{equation} \forall\; d\in\RR^{\m},~~~\U(d)=Z_\omega\left(U_\omega \,d\right)~,~~ \mbox{where}~~
U_\omega=\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^T ~\in\mathbb R^{\,\sharp\,\omega\times\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}}~. \label{bu}\eeq \EL
\par\paragraph{Proof} \ignorespaces The function $\U$ in~\eqref{bu} is linear in $d$. From~Corollary~\ref{blg}, for any $\beta>0$ and for any $d\in\RR^{\m}$, $\mathcal{F}_d$ has a strict (local) minimum at $\hat u=\U(d)$. Hence $\U$ fits Definition~\ref{mf}.
$\Box$\newline
{\em Thus, even if $\mathcal{F}_d$ has many strict (local) minimizers, each is linear in $d$.}
Next we exhibit a {\em closed negligible} subset of $\RR^{\m}$, associated with a nonempty $\omega\in\Om_{\max}$, whose elements are data $d$ leading to $\|\U(d)\|_0<\,\sharp\,\omega$.
\begin{lemma}} \def\EL{\end{lemma}\label{dom} For any $\omega\in\Om_{\max}$, define the subset $\Dm_\omega\subset\RR^{\m}$ by \begin{equation}} \def\eeq{\end{equation}\Dm_\omega\stackrel{\rm def}{=}\bigcup_{i=1}^{\,\sharp\,\omega} \Big\{~g\in\RR^{\m}~:~\left\<e_i\,,\,\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^T \;g\right\>=0 ~\Big\}~.\label{can}\eeq Then $\Dm_\omega$ is closed in $\RR^{\m}$ and $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\Dm_\omega\right)=0$. \EL
\par\paragraph{Proof} \ignorespaces If $\omega=\varnothing$ then $\Dm_\omega=\varnothing$, hence the claim. Let $\,\sharp\,\omega\geq1$. For some $i\in\mathbb{I}_{\,\sharp\,\omega}$, set \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*}\Dm&\stackrel{\rm def}{=}&\Big\{~g\in\RR^{\m}~:~\left\<e_i\,,\,\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^T \;g\right\>=0 ~\Big\}\\&=& \Big\{~g\in\RR^{\m}~:~\Big\langle} \def\>{\rangleA_{\vt}} \def\Pvt{\Pi_{\vt}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^T A_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}e_i,\;g\,\Big\>=0\Big\}~. \eeqnn Since $\mathrm{rank}\!\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^T A_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}\right)\!=\!\,\sharp\,\omega$, $\mathrm{ker}} \def\ran{\mathrm{range}\!\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^T A_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}\right)\!=\!\{0\}$. Hence $A_\omega\left(A_\omega^T A_\omega\right)^{-1}\!e_i\neq0$. Therefore $\Dm$ is a vector subspace of $\RR^{\m}$ of dimension $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$ and so $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\Dm\right)=0$. The conclusion follows from the fact that $\Dm_\omega$ in~\eqref{can} is the union of $\,\sharp\,\omega$ subsets like $\Dm$ (see, e.g., \cite{Rudin76,Evans92}).
$\Box$\newline
\begin{proposition}} \def\EP{\end{proposition}\label{ctp} For some arbitrarily fixed $\omega\in\Om_{\max}$ and $\beta>0$, let $\,\U:\RR^{\m}\rightarrow\RR^{\n}$ be the relevant (local) minimizer function for $\mathfrak{F}_{\RR^{\m}}$ as given in~\eqref{bu} (Lemma~\ref{ct}). Let $\Dm_\omega$ read as in \eqref{can}. Then the function $d\mapsto\mathcal{F}_d\big(\U(d)\big)$ is $\C^\infty$ on $\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega$ and \begin{equation}} \def\eeq{\end{equation} d\in\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega~~~\Rightarrow~~~ \sigma\left(\U(d)\right)=\omega~,\nonumber\label{mp}\eeq where the set $\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega$ contains an open and dense subset of $\RR^{\m}$. \EP
\par\paragraph{Proof} \ignorespaces The statement about $\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega$ is a direct consequence of Lemma~\ref{dom}.
If $\omega=\varnothing$, then $\U(d)=0$ for all $d\in\RR^{\m}$, so all claims in the proposition are trivial. Consider that $\,\sharp\,\omega\geq1$. For any $i\in\mathbb{I}_{\,\sharp\,\omega}$, the $\omega[i]$th component of $\,\U(d)$ reads as (see Lemma~\ref{ct}) \[\U_{\omega[i]}(d)=\left\<e_i\,,\,\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^T \;d\right\>~.\] The definition of $\Dm_\omega$ shows that \[d\in\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega~~\mbox{\rm and}~~i\in\mathbb{I}_{\,\sharp\,\omega}~~~\Rightarrow~~~ \U_{\omega[i]}(d)\neq0~,\] whereas $\U_i(d)=0$ for all $i\in\omega^c$. Consequently, \[\omega\in\Om_{\max}~~\mbox{\rm and}~~d\in\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega~~~\Rightarrow~~~\sigma\left(\U(d)\right)=\omega~.\]
Then $\|\U(d)\|_0$ is constant on $\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega$ and
\[\omega\in\Om_{\max}~~\mbox{\rm and}~~d\in\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega~~~\Rightarrow~~~\mathcal{F}_d(\U(d))=\big\|A\U(d)-d\big\|^2+\beta\,\sharp\,\omega~.\]
We infer from \eqref{bu} that $d\mapsto\big\|A\U(d)-d\big\|^2$ is $\C^\infty$ on $\RR^{\m}$, so $d\mapsto\mathcal{F}_d\big(\U(d)\big)$ is $\C^\infty$ on $\RR^{\m}\,\mathbf{\setminus}\,\Dm_\omega$.
$\Box$\newline
{\em A generic property is that a local minimizer function corresponding to $\mathcal{F}_d$ produces solutions sharing the same support.} The application $d\mapsto\mathcal{F}_d\big(\U(d)\big)$ is discontinuous on the {\em closed negligible} subset $\Dm_\omega$, where the support of $\U(d)$ is shrunk.
\subsection{Strict minimizers with an $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$-length support} \label{smm}
Here we explain why minimizers with an $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$-length support are useless in general.
\begin{proposition}} \def\EP{\end{proposition}\label{bad} Let $\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$, $\beta>0$ and for $d\in\RR^{\m}$ set
\[\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\stackrel{\rm def}{=}\big\{\hat u\in\RR^{\n}~:~ \hat u~~\mbox{is a strict (local) minimizer of ~$\mathcal{F}_d$~ meeting}~~\|\hat u\|_0=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}~\big\}~.\] Put \begin{equation}} \def\eeq{\end{equation}\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\stackrel{\rm def}{=}\bigcup_{\omega\in\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}}~\bigcup_{i\in\II_{\m}} \big\{g\in\RR^{\m}~:~\left\<e_i\,,\,A_{\vt}} \def\Pvt{\Pi_{\vt}^{-1}g\right\>=0\,\big\}~. \label{qm}\eeq Then $\RR^{\m}\,\mathbf{\setminus}\,\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ contains a dense open subset of $\RR^{\m}$ and \[d\in\RR^{\m}\,\mathbf{\setminus}\,\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\quad\Rightarrow\quad\,\sharp\,\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=\,\sharp\,\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\quad\mbox{\rm and}\quad\mathcal{F}_d(\hat u)=\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}~~\forall\;\hat u\in\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}~.\] \EP
\par\paragraph{Proof} \ignorespaces Using the notation in~\eqref{can}, $\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ reads as \[\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=\bigcup_{\omega\in\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}}\Dm_\omega~.\] The claim on $\RR^{\m}\,\mathbf{\setminus}\,\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ follows from~Lemma~\ref{dom}. Since $\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$, we have $\,\sharp\,\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\geq1$.
Consider that $d\in\RR^{\m}\,\mathbf{\setminus}\,\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. By Proposition~\ref{ctp} \[d\in\RR^{\m}\,\mathbf{\setminus}\,\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\quad\mbox{\rm and}\quad\omega\in\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\quad\Rightarrow\quad\mathcal{F}_d~ \mbox{has a strict (local) minimizer ~$\hat u$~ obeying}~\sigma(\hat u)=\omega~.\] Hence $\hat u\in\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. Therefore, we have a mapping $b:\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\rightarrow\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ such that $\hat u=b(\omega)\in\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. Using Lemma~\ref{ob} and Corollary~\ref{blg}, it reads as \[b(\omega)=Z_\omega(A_{\vt}} \def\Pvt{\Pi_{\vt}^{-1}d)~.\] For $(\omega,\varpi)\in\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ with $\varpi\neq\omega$ one obtains $\hat u=b(\omega)\in\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$, $\bar{u}=b(\varpi)\in\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $\hat u\neq\bar{u}$, hence $b$ is one-to-one. Conversely, for any $\hat u\in\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ there is $\omega\in\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ such that $\hat u=b(\omega)$ and $\sigma(\hat u)=\omega$ (because $d\not\in\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$). It follows that $b$ maps $\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ onto $\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. 
Therefore, $\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ are in one-to-one correspondence, i.e., $\,\sharp\,\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=\,\sharp\,\Um_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$.
Last, it is clear that $\omega\in\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $d\not\in\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ lead to
$\|A\hat u-d\|^2=0$ and $\mathcal{F}_d(\hat u)=\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$.
$\Box$\newline
{\em For any $\beta>0$, a generic property of $\mathcal{F}_d$ is that it has $\,\sharp\,\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$
strict minimizers $\hat u$ obeying $\|\hat u\|_0=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $\mathcal{F}_d(\hat u)=\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. It is hard to discriminate between all these minimizers. Hence the interest in minimizers with supports located in $\Omega$, i.e., strict $(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)$-sparse minimizers of $\mathcal{F}_d$.}
\section{On the global minimizers of $\bm{\mathcal{F}_d}$}\label{sgm}
The next proposition gives a {\em necessary condition} for a global minimizer of $\mathcal{F}_d$. It follows directly from~\cite[Proposition 3.4]{Nikolova04a} where\footnote{ Just set $g_i=e_i$, $P=I_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $H=I_\n$ in \cite[Proposition 3.4]{Nikolova04a}.}
the regularization term is $\|Du\|_0$ for a full row rank matrix $D$. For $\mathcal{F}_d$ in~\eqref{fd} with $\|a_i\|_2=1$, $\forall\; i\in\II_{\n}$, a simpler condition was derived later in \cite[Theorem 12]{Tropp06}, using different tools. For completeness, the proof for a general $A$ is outlined in Appendix~\ref{pou}.
\begin{proposition}} \def\EP{\end{proposition}\label{ou} For $d\in\RR^{\m}$ and $\beta>0$, let $\mathcal{F}_d$ have a global minimum at $\hat u$. Then\footnote{Recall that
$a_i\neq0$ for all $i\in\II_{\n}$ by \eqref{ao} and that $\|\cdot\|=\|\cdot\|_2$.}
\begin{equation}} \def\eeq{\end{equation} i\in\sigma(\hat u)~~~\Rightarrow~~~\big|\,\hat u[i]\,\big|\geqslant \frac{\sqrt{\beta}}{\|a_i\|}~. \label{rt}\eeq \EP
Observe that the lower bound on
$\disp{\left\{\big|\,\hat u[i]\,\big|~:~i\in\sigma(\hat u)\right\}}$ given in \eqref{rt} is independent of~$d$. This means that in general, \eqref{rt} provides a pessimistic bound.
The proof of the statement shows that~\eqref{rt} is met also by all (local) minimizers of $\mathcal{F}_d$~satisfying $$\mathcal{F}_d(\hat u)\leqslant\mathcal{F}_d(\hat u+\rho e_i)~~~\forall\;\rho\in\mathbb R,~~\forall\; i\in\II_{\n}~.$$
\subsection{The global minimizers of $\bm{\mathcal{F}_d}$ are strict}\label{sGM}~~
\begin{remark} \label{nu} \rm Let $d\in\RR^{\m}$ and $\beta>\|d\|^2$. Then $\mathcal{F}_d$ has a strict global minimum at $\hat u=0$. {\rm Indeed,}
\[v\neq 0~~\Rightarrow~~\|v\|_0\geq1~~\Rightarrow~~
\mathcal{F}_d(0)=\|d\|^2<\beta\leqslant\|Av-d\|^2+\beta\|v\|_0~.\] \end{remark}
For least-squares regularized with a more regular $\phi$, one usually gets $\hat u=0$ asymptotically as $\beta\rightarrow+\infty$ but $\hat u\neq0$ for finite values of $\beta$. This does not hold for $\mathcal{F}_d$ by Remark~\ref{nu}.
Some theoretical results on the global minimizers of $\mathcal{F}_d$ have been obtained \cite{Nikolova04a,Haupt06,Tropp06,Davies08}. Surprisingly, the question about the {\em existence of global minimizers of $\mathcal{F}_d$} has never been raised. We answer this question using the notion of {\em asymptotically level stable functions} introduced by Auslender~\cite{Auslender00} in 2000.
As usual, \[\mathrm{lev}\,(\mathcal{F}_d,\lambda)\stackrel{\rm def}{=}\{v\in\RR^{\n}~:~\mathcal{F}_d(v)\leqslant\lambda\}\quad\mbox{\rm for}\quad \lambda>\inf \mathcal{F}_d~.\] The following definition is taken from \cite[p.~94]{Auslender03}.
\begin{definition}} \def\ED{\end{definition}\label{als} Let $\mathcal{F}_d:\RR^{\n}\rightarrow\mathbb R\cup\{+\infty\}$ be lower semicontinuous and proper. Then $\mathcal{F}_d$ is said to be {\em asymptotically level stable} if for each $\rho>0$, each bounded sequence $\{\lambda_k\}\in\mathbb R$ and each sequence $\{v_k\}\in\RR^{\n}$ satisfying
\begin{equation}} \def\eeq{\end{equation} v_k\in\mathrm{lev}\,(\mathcal{F}_d,\lambda_k),\quad\|v_k\|\rightarrow+\infty, \quad v_k\;\|v_k\|^{-1}\rightarrow \bar v\in\mathrm{ker}} \def\ran{\mathrm{range}\left((\mathcal{F}_d)_\infty\right)~, \label{aal}\eeq where $(\mathcal{F}_d)_\infty$ denotes the asymptotic (or recession) function of $\mathcal{F}_d$, there exists $k_0$ such that \begin{equation}} \def\eeq{\end{equation} v_k-\rho\bar v\in\mathrm{lev}\,(\mathcal{F}_d,\lambda_k)\quad\forall\; k\geqslant k_0~.\label{lsa}\eeq \ED
One can note that a coercive function is asymptotically level stable, since no sequence can satisfy \eqref{aal}. We prove that our discontinuous noncoercive objective $\mathcal{F}_d$ is asymptotically level stable as well.
\begin{proposition}} \def\EP{\end{proposition}\label{pal} Let $\mathcal{F}_d:\RR^{\n}\rightarrow\mathbb R$ be of the form~\eqref{fd}. Then $\mathrm{ker}} \def\ran{\mathrm{range}\left((\mathcal{F}_d)_\infty\right)=\mathrm{ker}} \def\ran{\mathrm{range}(A)$ and $\mathcal{F}_d$ is asymptotically level stable. \EP
The proof is outlined in Appendix~\ref{palp}.
\begin{theorem}} \def\ET{\end{theorem}\label{gs}Let $d\in\RR^{\m}$ and $\beta>0$. Then \begin{itemize}} \def\eit{\end{itemize} \item[\rm(i)] the set of the global minimizers of $\mathcal{F}_d$ \begin{equation}} \def\eeq{\end{equation}\disp{\hat U\stackrel{\rm def}{=} \left\{\hat u\in\RR^{\n}~:~\mathcal{F}_d(\hat u)=\min_{u\in\RR^{\n}}\mathcal{F}_d(u)\right\}}\label{ops}\eeq is nonempty; \item[\rm(ii)] every $\hat u\in \hat U$ is a {\em strict} minimizer of $\mathcal{F}_d$, i.e., \[\sigma(\hat u)\in\Om_{\max}~,\]
hence $\|\hat u\|_0\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. \eit \ET
\par\paragraph{Proof} \ignorespaces For claim (i), we use the following statement\footnote{This result was originally exhibited in \cite{Baiocchi98} (without the notion of asymptotically level stable functions).}, whose proof can be found in the monograph by Auslender and Teboulle~\cite{Auslender03}:
\noindent{\cite[Corollary 3.4.2]{Auslender03}} {\it Let $\mathcal{F}_d:\RR^{\n}\rightarrow\mathbb R\cup\{+\infty\}$ be asymptotically level stable with $\inf\mathcal{F}_d>-\infty$. Then the optimal set $\hat U$---as given in \eqref{ops}---is nonempty.}
\noindent From Proposition~\ref{pal}, $\mathcal{F}_d$ is asymptotically level stable and $\inf\mathcal{F}_d\geq0$ from~\eqref{fd}. Hence $\hat U\neq\varnothing$.
(ii).~ Let $\hat u$ be a global minimizer of $\mathcal{F}_d$. Set ${\hat\sigma}=\sigma(\hat u)$.
If $\hat u=0$, (ii) follows from Lemma~\ref{uz}. Suppose that the global minimizer $\hat u\neq0$ of $\mathcal{F}_d$ is {\em nonstrict}. Then Theorem~\ref{ra}(ii) fails to hold; i.e., \begin{equation}} \def\eeq{\end{equation}\mathrm{dim}\,\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)\geq1~. \label{sit}\eeq Choose $v_{{\hat\sigma}}\in \mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)\,\mathbf{\setminus}\,\{0\}$ and set $v=Z_{{\hat\sigma}}\left(v_{{\hat\sigma}}\right)$. Select an $i\in{\hat\sigma}$ obeying $v[i]\neq0$. Define $\widetilde u$ by \begin{equation}} \def\eeq{\end{equation} \widetilde u\stackrel{\rm def}{=} \hat u -\hat u [i]\frac{v}{v[i]}~.\eeq We have $\widetilde u[i]=0$ and $\hat u [i]\neq0~.$
Set $\widetilde\sigma\stackrel{\rm def}{=}\sigma\left(\widetilde u\right)$. Then \begin{equation}} \def\eeq{\end{equation}\widetilde\sigma\subsetneqq{\hat\sigma}~~~\mbox{hence}~~~\,\sharp\,\widetilde\sigma\leqslant\,\sharp\,{\hat\sigma}-1~.\eeq From $\disp{v_{{\hat\sigma}}\frac{\hat u [i]}{v[i]}\in\mathrm{ker}} \def\ran{\mathrm{range}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi} \right)}$, using~\eqref{sd} and Remark~\ref{ref} shows that\footnote{In detail we have $A\hat u =A_{\hsi}} \def\Phs{\Pi_{\hsi} \hu_{\hsi} =A_{\hsi}} \def\Phs{\Pi_{\hsi} \left(\hu_{\hsi} -v_{{\hat\sigma}}\frac{\hat u [i]}{v[i]}\right)=A_{\hsi}} \def\Phs{\Pi_{\hsi} \widetilde u_{{\hat\sigma}}= A_{\widetilde\sigma}\widetilde u_{\widetilde\sigma}=A\widetilde u~.$} $A\hat u =A_{\hsi}} \def\Phs{\Pi_{\hsi} \hu_{\hsi} =A_{\widetilde\sigma}\widetilde u_{\widetilde\sigma}=A\widetilde u$. Then
\begin{eqnarray}} \def\eeqn{\end{eqnarray}\mathcal{F}_d\left(\widetilde u\right)&=&\|A\widetilde u-d\|^2+\beta\,\sharp\,\widetilde\sigma\nonumber\\
&\leqslant&\mathcal{F}_d\left(\hat u \right)-\beta=\|A\hat u -d\|^2+\beta\left(\,\sharp\,{\hat\sigma}-1\right)~.\nonumber\label{tr}\eeqn It follows that $\hat u$ is not a global minimizer, hence \eqref{sit} is false. Therefore $\mathrm{rank}\left(A_{\hsi}} \def\Phs{\Pi_{\hsi}\right)=\,\sharp\,{\hat\sigma}$ and $\hat u$ is a strict minimizer of $\mathcal{F}_d$ (Theorem~\ref{ra}).
$\Box$\newline
One can note that if $\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$, any global minimizer $\hat u$ of $\mathcal{F}_d$ obeys $\,\mathcal{F}_d(\hat u)\leqslant\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}~.$
According to Theorem~\ref{gs}, {\em the global minimizers of $\mathcal{F}_d$ are strict and their number is finite: this is a nice property that fails for many convex nonsmooth optimization problems.}
\subsection{$\bm\k$-sparse global minimizers for $\bm{\k\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}$}
In order to simplify the presentation, in what follows we consider that \[\mbox{\framebox{~$\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n~.$}}\]
Since $\mathcal{F}_d$ has a large number (typically equal to $\,\sharp\,\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$)
of strict minimizers with $\|\hat u\|_0=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ yielding the same value $\mathcal{F}_d(\hat u)=\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ (see Proposition~\ref{bad} and the comments given after its proof), it is important to be sure that the global minimizers of $\mathcal{F}_d$ are $(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)$-sparse.
We introduce a notation which is used in the rest of this paper. For any $\k\in\II_{\m-1}$, put \begin{equation}} \def\eeq{\end{equation} \mbox{\framebox{$\disp{~~\overline{\Omega}_\k\stackrel{\rm def}{=}\bigcup_{r=0}^\k\Omega_r~}$~}}\label{ga}\eeq where $\Omega_r$ was set up in~Definition~\ref{om}. Theorem~\ref{ra} gives a clear meaning to the sets\footnote{Clearly, $\overline{\Omega}_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}=\Omega~.$}~$\overline{\Omega}_\k$. {\em For any $d\in\RR^{\m}$ and any $\beta>0$, for any fixed $\k\in\II_{\m-1}$, the set $\overline{\Omega}_\k$ is composed of the supports of all possible $\k$-sparse strict (local) minimizers of $\mathcal{F}_d$.}
The next proposition checks the existence of $\beta>0$ ensuring that all the global minimizers of $\mathcal{F}_d$ are $\k$-sparse, for some $\k\in\II_{\m-1}$.
\begin{proposition}} \def\EP{\end{proposition}\label{ie} Let $d\in\RR^{\m}$.
For any $\k\in\II_{\m-1}$, there exists $\beta_\k\geq0$ such that if $\beta>\beta_\k$, then each global minimizer $\hat u$ of $\mathcal{F}_d$ satisfies
\begin{equation}} \def\eeq{\end{equation}\|\hat u\|_0\leqslant\k~~~\mbox{\rm and}~~~\sigma(\hat u)\in\overline{\Omega}_\k~ .\label{spo}\eeq One can choose $\beta_\k=\|A\widetilde u-d\|^2$ where $\widetilde u$ solves {\rm{($\,\P_{\vt}\,$)}~} for some $\omega\in\Omega_{\k}$. \EP
The proof is given in Appendix~\ref{pie}. The value of $\beta_\k$ in the statement is easy to compute, but in general it is not sharp\footnote{ For $\beta\gtrapprox\beta_\k$ the global minimizers of $\mathcal{F}_d$ might be $k$-sparse for $k\ll\k$. A sharper $\beta_\k$ can be obtained~as
$\beta_\k=\min_{\omega\in\Omega_\k}\left\{\|A\widetilde u-d\|^2~:~\widetilde u~~\mbox{solves {\rm{($\,\P_{\vt}\,$)}~} for}~~ \omega\in\Omega_\k\right\}~.$ }.
\section{Uniqueness of the global minimizer of $\bm{\mathcal{F}_d}$}\label{kb}
The presentation is simplified using the notation introduced next. Given a matrix $A\in\RR^{\m\x\n}$, with any $\omega\in\Omega$ (see Definition~\ref{om}), we associate the $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ matrix $\Pvt$ that yields the orthogonal projection\footnote{ If $\omega=\varnothing$, we have $A_{\vt}} \def\Pvt{\Pi_{\vt}\in\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\x0}$ and so $\Pi_\omega$ is an $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ null matrix.} onto the subspace spanned by the columns of $A_{\vt}} \def\Pvt{\Pi_{\vt}$: \begin{equation}} \def\eeq{\end{equation}\mbox{\framebox{$\disp{~\Pvt\stackrel{\rm def}{=}A_{\vt}} \def\Pvt{\Pi_{\vt}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^T~.}$}}\label{pro}\eeq \noindent For $\omega\in\Omega$, the projector in Remark~\ref{ref} reads $\Pi_{\ran(A_{\vt}} \def\Pvt{\Pi_{\vt})}= \Pvt$.
Checking whether a global minimizer $\hat u$ of $\mathcal{F}_d$ is unique requires us to compare its value $\mathcal{F}_d(\hat u)$ with the values $\mathcal{F}_d(\bar{u})$ of the concurrent strict minimizers~$\bar{u}$. Let $\hat u$ be an $(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)$-sparse strict (local) minimizer of $\mathcal{F}_d$. Then ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)\in\Omega$. Using~Remark~\ref{ref} shows that
\begin{eqnarray}} \def\eeqn{\end{eqnarray} \mathcal{F}_d(\hat u)&=&\|A_{\hsi}} \def\Phs{\Pi_{\hsi}\hu_{\hsi}-d\|^2+\beta\,\sharp\,{\hat\sigma}
=\|\Phs d-d\|^2+\beta\,\sharp\,{\hat\sigma}\nonumber\\&=&d^T\left(I_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-\Phs\right)d+\beta\,\sharp\,{\hat\sigma}~.\label{sx}\eeqn Let $\bar{u}$ be another $(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)$-sparse strict minimizer of $\mathcal{F}_d$; set $\bar{\sigma}\stackrel{\rm def}{=}\sigma(\bar{u})$. Then \[\mathcal{F}_d(\hat u)-\mathcal{F}_d(\bar{u})=d^T\left(\Pbs-\Phs\right)d+\beta(\,\sharp\,{\hat\sigma}-\,\sharp\,\bar{\sigma})~.\] If both $\hat u$ and $\bar{u}\neq\hat u$ are global minimizers of $\mathcal{F}_d$, the previous equality yields \begin{equation}} \def\eeq{\end{equation} \mathcal{F}_d(\hat u)=\mathcal{F}_d(\bar{u})~~~\Leftrightarrow~~~ d^T\left(\Pbs-\Phs\right)d=-\beta(\,\sharp\,{\hat\sigma}-\,\sharp\,\bar{\sigma})~.\label{hq}\eeq {\em Equation~\eqref{hq} reveals that the uniqueness of the global minimizer of $\mathcal{F}_d$ cannot be guaranteed without suitable assumptions on $A$ and on~$d$.}
\subsection{A generic assumption on $\bm{A}$}\label{ru}
We adopt an assumption on the matrix $A$ in $\mathcal{F}_d$ in order to restrict the cases when~\eqref{hq} takes place for some supports ${\hat\sigma}\neq\bar{\sigma}$ obeying $\,\sharp\,{\hat\sigma}=\,\sharp\,\bar{\sigma}$. \begin{hyp}} \def\EH{\end{hyp}\label{aa} The matrix $A\in\RR^{\m\x\n}$, where $\mathsf{M}<\mathsf{N}$, is such that for some given $\k\in\II_{\m-1}$, \begin{equation}} \def\eeq{\end{equation} \mbox{\framebox{$ ~ r\in\mathbb{I}_{\mathsf{K}}~~\mbox{\rm and}~~ (\omega,\varpi)\in(\Omega_r\times \Omega_r),~~\omega\neq\varpi~~~\Rightarrow~~~\Pi_{\omega}\neq \Pi_{\varpi}~.$}}\eeq \EH
Assumption H\ref{aa} means that the angle (or the gap) between the equidimensional subspaces $\ran\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\right)$ and $\ran\left(A_{\vs}} \def\Pvs{\Pi_{\vs}\right)$ must be nonzero~\cite{Meyer00}. For instance, if $(i,j)\in\II_{\n}\times\II_{\n}$ satisfy $i\neq j$, H\ref{aa} implies that $a_i\neq\kappa\,a_j$ for any $\kappa\in\mathbb R\,\mathbf{\setminus}\,\{0\}$
since $\Pi_{\{i\}}=a_ia_i^T/\|a_i\|^2~.$
Checking whether H\ref{aa} holds for a given matrix $A$ requires a combinatorial search over all possible couples $(\varpi,\omega)\in(\Omega_r\times \Omega_r)$ satisfying $\varpi\neq\omega$, $\forall\; r\in\II_{\k}$. This is hard to do. Instead, we wish to know whether or not H\ref{aa} is a {\em practical} limitation. Using some auxiliary claims, we shall show that H\ref{aa} {\em fails only for a closed negligible subset of matrices} in the space of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real matrices.
\begin{lemma}} \def\EL{\end{lemma}\label{bb} Given $r\in\II_{\m-1}$ and $\varpi\in\Omega_r$, define the following set of submatrices of $A$: \[\H_\varpi=\Big\{A_{\vt}} \def\Pvt{\Pi_{\vt}~:~\omega\in\Omega_r~~\mbox{\rm and}~~\Pvt=\Pvs\Big\}~.\] Then $\H_\varpi$ belongs to an $(r\times r)$-dimensional subspace of the space of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times r$ matrices. \EL
\par\paragraph{Proof} \ignorespaces Using the fact that $\varpi\in\Omega_r$ and $\omega\in\Omega_r$, we have\footnote{Using \eqref{pro}, as well as the fact that $A_{\vt}} \def\Pvt{\Pi_{\vt}=\PvtA_{\vt}} \def\Pvt{\Pi_{\vt}$, $\forall\;\omega\in\Omega_r$, one easily derives \eqref{fo} since \[\Pvt=\Pvs~~\Leftrightarrow\left\{\begin{array}} \def\earr{\end{array}{l} A_{\vt}} \def\Pvt{\Pi_{\vt}\left(A_{\vt}} \def\Pvt{\Pi_{\vt}^TA_{\vt}} \def\Pvt{\Pi_{\vt}\right)^{-1}A_{\vt}} \def\Pvt{\Pi_{\vt}^T=\Pvs\\A_{\vs}} \def\Pvs{\Pi_{\vs}\left(A_{\vs}} \def\Pvs{\Pi_{\vs}^TA_{\vs}} \def\Pvs{\Pi_{\vs}\right)^{-1}A_{\vs}} \def\Pvs{\Pi_{\vs}^T=\Pvt\earr\right.\Rightarrow~ \left\{\begin{array}} \def\earr{\end{array}{l}A_{\vt}} \def\Pvt{\Pi_{\vt}=\PvsA_{\vt}} \def\Pvt{\Pi_{\vt}\\A_{\vs}} \def\Pvs{\Pi_{\vs}=\PvtA_{\vs}} \def\Pvs{\Pi_{\vs}\earr\right.\Rightarrow~ \left\{\begin{array}} \def\earr{\end{array}{l}\Pvt=\Pvs\Pvt\\\Pvs=\Pvt\Pvs\earr\right.\Rightarrow~\Pvt=\Pvs.\]} \begin{equation}} \def\eeq{\end{equation}\Pvt=\Pvs~~~\Leftrightarrow~~~A_{\vt}} \def\Pvt{\Pi_{\vt}=\PvsA_{\vt}} \def\Pvt{\Pi_{\vt}~~.\label{fo}\eeq Therefore $\H_\varpi$ equivalently reads \begin{equation}} \def\eeq{\end{equation}\H_\varpi=\Big\{A_{\vt}} \def\Pvt{\Pi_{\vt}~:~\omega\in\Omega_r~~\mbox{\rm and}~~A_{\vt}} \def\Pvt{\Pi_{\vt}=\PvsA_{\vt}} \def\Pvt{\Pi_{\vt}\Big\}~.\label{gr}\eeq Let $A_{\vt}} \def\Pvt{\Pi_{\vt}\in\H_\varpi$. Denote the columns of $A_{\vt}} \def\Pvt{\Pi_{\vt}$ by $\tilde a_i$ for $i\in\II_r$. Then \eqref{gr} yields \[ \Pvs\tilde a_i=\tilde a_i,~~\forall\; i\in\II_r\quad\Rightarrow\quad\tilde a_i\in\ran(A_{\vs}} \def\Pvs{\Pi_{\vs}), ~~\forall\; i\in\II_r~.\] Hence all $\tilde a_i$, $i\in\II_r$, live in the $r$-dimensional vector subspace $\ran(A_{\vs}} \def\Pvs{\Pi_{\vs})$. All the columns of each matrix $A_{\vt}} \def\Pvt{\Pi_{\vt}\in\H_\varpi$ belong to $\ran(A_{\vs}} \def\Pvs{\Pi_{\vs})$ as well. 
It follows that $\H_\varpi$ belongs to a (closed) subspace of dimension $r\times r$ in the space of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times r$ matrices, where $r\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$.
$\Box$\newline
More details on the submatrices of $A$ living in $\H_\varpi$ are given next. \begin{remark} \label{P} {\rm The {\em closed negligible} subset $\H_\varpi$ in~Lemma~\ref{bb} is formed from all the submatrices of $A$ that are column equivalent to $A_{\vs}} \def\Pvs{\Pi_{\vs}$ (see \cite[p.~171]{Meyer00}), that is, \begin{equation}} \def\eeq{\end{equation}A_{\vt}} \def\Pvt{\Pi_{\vt}\in\H_\varpi~~~\Leftrightarrow~~~\exists\,P\in\mathbb R^{r\times r}~~\mbox{such that}~~ \mathrm{rank}(P)=r~~\mbox{\rm and}~~A_{\vt}} \def\Pvt{\Pi_{\vt}=A_{\vs}} \def\Pvs{\Pi_{\vs} P~.\label{yr} \eeq Observe that $P$ has $r^2$ unknowns that must satisfy $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K} r$ equations and that $P$ must be invertible. It is therefore quite unlikely that such a matrix $P$ exists.} \end{remark}
This remark can help to discern whether or not structured dictionaries satisfy H\ref{aa}.
Next we inspect the set of all matrices $A$ failing assumption H\ref{aa}. \begin{lemma}} \def\EL{\end{lemma}\label{ug} Consider the set $\H$ formed from $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real matrices described next: \[\H\stackrel{\rm def}{=}\left\{A\in\RR^{\m\x\n}~:\exists\; r\in\II_{\m-1},~ \exists\;(\varpi,\omega)\in\Omega_r\times\Omega_r,~\varpi\neq\omega~~\mbox{\rm and}~~\Pvs=\Pvt\right\}~.\] Then $\H$ belongs to a finite union of vector subspaces in $\RR^{\m\x\n}$ whose Lebesgue measure in $\mathbb{R}^{\mathsf{M}\times\mathsf{N}}$ is null. \EL
\par\paragraph{Proof} \ignorespaces Let $A\in\H$. Then there exist at least one integer $r\in\II_{\m-1}$ and at least one pair $(\varpi,\omega)\in\Omega_r\times\Omega_r$ such that $\varpi\neq\omega$ and $\Pvs=\Pvt$. Using Lemma~\ref{bb}, $A$ contains (at least) one $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times r$ submatrix $A_{\vs}} \def\Pvs{\Pi_{\vs}$ belonging to an $r\times r$ vector subspace in the space of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times r$ real matrices. Identifying $A$ with an $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\n$-length vector, its entries are included in a vector subspace of $\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\n}$ of dimension no larger than $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\n-1$. The claim of the lemma is straightforward.
$\Box$\newline
We can now clarify assumption H\ref{aa} and show that it is satisfied generically.
\begin{theorem}} \def\ET{\end{theorem}\label{usg} Given an arbitrary $\k\in\II_{\m-1}$, consider the set of $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real matrices below \[\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k\stackrel{\rm def}{=}\Big\{A\in\RR^{\m\x\n}~:~~A~~\mbox{\rm satisfies~~ H\ref{aa}~~for~~}\k~\Big\}~.\] Then $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k$ contains an open and dense subset in the space of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real-valued matrices. \ET
\par\paragraph{Proof} \ignorespaces The complement of $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k$ in the space of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real matrices reads as \[\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k^c=\Big\{A\in\RR^{\m\x\n}~:~\mbox{\rm H\ref{aa} fails for }~A~~\mbox{\rm and}~~\k~\Big\}~.\] It is clear that $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k^c\subset\H~,$ where $\H$ is described in Lemma~\ref{ug}. By the same lemma, $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k^c$ is included in a closed subset of vector subspaces in $\RR^{\m\x\n}$ whose Lebesgue measure in $\mathbb{R}^{\mathsf{M}\times\mathsf{N}}$ is null. Consequently, $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k$ satisfies the statement of the theorem.
$\Box$\newline
{\em For any $\k\in\II_{\m-1}$,
H\ref{aa} is a {\em generic} property of all $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\times\n$ real matrices satisfying $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n$.}
This is the meaning of Theorem~\ref{usg} in terms of Definition~\ref{ps}.
We can note that \[\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_{\k+1}\subseteq\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_\k,~~~\forall\;\k\in\mathbb{I}_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-2}~.\] One can hence presume that H\ref{aa} is weakened as $\k$ decreases. This issue is illustrated in section~\ref{nm}.
\subsection{A generic assumption on $\bm{d}$}\label{rs}
A preliminary result is stated next. \begin{lemma}} \def\EL{\end{lemma}\label{H} Let $(\omega,\varpi)\in\overline{\Omega}_\k\times \overline{\Omega}_\k$ for $\omega\neq\varpi$ and let H\ref{aa} hold for $\k\in\II_{\m-1}$. Given $\kappa\in\mathbb R$, define \begin{equation}} \def\eeq{\end{equation}\Tm_\kappa\stackrel{\rm def}{=}\{g\in\RR^{\m}~:~g^T\left(\Pvt-\Pvs\right)g=\kappa\}~.\nonumber\label{setS}\eeq Then $\Tm_\kappa$ is a closed subset of $\RR^{\m}$ and $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\Tm_\kappa\right)=0$. \EL
\par\paragraph{Proof} \ignorespaces Define $f:\RR^{\m}\rightarrow\mathbb R$ by $f(g)=g^T\left(\Pvt-\Pvs\right)g~.$ Then \begin{equation}} \def\eeq{\end{equation}\Tm_\kappa=\{g\in\RR^{\m}~:~f(g)=\kappa\}~.\label{tyk}\eeq Using H\ref{aa}, $\Tm_\kappa$ is closed in $\RR^{\m}$. Set \[\Qm=\{g\in\RR^{\m}~:~\nabla f(g)\neq0\}\quad\mbox{and}\quad\Qm^c=\RR^{\m}\,\mathbf{\setminus}\,\Qm.\]
Consider an arbitrary $g\in\Tm_\kappa\cap\Qm$. From H\ref{aa}, $\mathrm{rank} (\nabla f(g))=1$. For simplicity, assume that \[\nabla f(g)[\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}]=\frac{df(g)}{dg[\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}]}\neq0~.\label{asm}\] By the implicit function theorem, there are open neighborhoods $\O_g\subset\Qm\subset\RR^{\m}$ and $\V\subset\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}$ of $g$ and $g_{\II_{\m-1}}$, respectively,
and a unique $\C^1$-function $h_g:\V\rightarrow \mathbb R$ with $\nabla h_g$ bounded, such that \begin{equation}} \def\eeq{\end{equation} \gamma=(\gamma_{\II_{\m-1}},\gamma[\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}])\in\O_g~~\mbox{and}~~f(\gamma)=\kappa\quad\Leftrightarrow\quad \gamma_{\II_{\m-1}}\in\V ~~\mbox{and}~~\gamma[\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}]=h_g(\gamma_{\II_{\m-1}})~.\label{imp} \eeq From~\eqref{tyk} and \eqref{imp} it follows that\footnote{From~\eqref{imp}, $\V$ is the restriction of $\O_g$ to $\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}$.} \[\O_g\cap\Tm_k=\psi^g\left(\O_g\cap(\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}\times\{0\})\right),~\] where $\psi^g$ is a diffeomorphism on $\O_g $ given by \[\psi^g_i(\gamma)=\gamma[i],\quad 1\leqslant i\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1\quad\mbox{and}\quad\psi^g_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(\gamma)=h_g(\gamma_{\II_{\m-1}})+\gamma[\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}]~.\label{nap}\] Since $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\O_g\cap(\mathbb R^{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}\times\{0\})\right)=0$ and $\nabla\psi^g$ is bounded on $\O_g$, it follows from \cite[Lemma 7.25]{Rudin87} that\footnote{The same result follows from the change-of-variables theorem for the Lebesgue integral, see e.g. \cite{Rudin87}.}
$\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\O_g\cap\Tm_\kappa\right)=0$. We have thus obtained that \begin{equation}} \def\eeq{\end{equation} S\subset Q~~\mbox{\rm and}~~S~\mbox{bounded}\quad\Rightarrow\quad\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(S\cap\Tm_\kappa)=0~.\label{sq} \eeq Using that every open subset of $\RR^{\m}$ can be written as a countable union\footnote{From \eqref{sq}, adjacent cubes can also intersect in our case.}
of cubes in $\RR^{\m}$ \cite{Rudin76,Evans92,Stein05}, the result in \eqref{sq} entails that $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(\Tm_\kappa\cap Q)=0$.
Next, $Q^c\!=\mathrm{ker}} \def\ran{\mathrm{range}\left(\Pvt-\Pvs\right)$. By H\ref{aa}, $\mathrm{dim}\,\mathrm{ker}} \def\ran{\mathrm{range}\left(\Pvt\!-\Pvs\right)\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$. Hence $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(\Tm_\kappa\cap Q^c)=0$.
The proof follows from the equality $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(\Tm_\kappa)=\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(\Tm_\kappa\cap Q)+\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}(\Tm_\kappa\cap Q^c)$.
$\Box$\newline
We exhibit a {\em closed negligible} subset of data in $\RR^{\m}$ that can still meet the equality in \eqref{hq}.
\begin{proposition}} \def\EP{\end{proposition}\label{cor} For $\beta>0$ and $\k\in\II_{\m-1}$, put \begin{equation}} \def\eeq{\end{equation}\Sigma_\k\stackrel{\rm def}{=}\bigcup_{n=-\k}^{\k}~\bigcup_{\omega\in\overline{\Omega}_\k}~ \bigcup_{\varpi\in\overline{\Omega}_\k}\left\{g\in\RR^{\m}~:~ \omega\neq\varpi~~\mbox{\rm and}~~g^T\Big(\Pvt-\Pvs\Big)g=n\beta\right\}~,\label{S} \eeq where $\overline{\Omega}_\k$ is given in~\eqref{ga}. Let H\ref{aa} hold for $\k$. Then $\Sigma_\k$ is closed in $\RR^{\m}$ and $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\Sigma_\k\right)=0$. \EP
\par\paragraph{Proof} \ignorespaces For some $n\in\{-\k,\cdots,\k\}$ and $(\omega,\varpi)\in (\overline{\Omega}_\k\times\overline{\Omega}_\k)$ such that $\omega\neq\varpi$, put \[\Sigma\stackrel{\rm def}{=}\left\{g\in\RR^{\m}:g^T\Big(\Pvt-\Pvs\Big)g=n\beta\right\}~. \] If $\,\sharp\,\omega\neq\,\sharp\,\varpi$, then $\mathrm{rank}\big(\Pvt-\Pvs\big)\geq1$. If $\,\sharp\,\omega=\,\sharp\,\varpi$, H\ref{aa} guarantees that $\mathrm{rank}\big(\Pvt-\Pvs\big)\geq1$, yet again. The number $n\beta\in\mathbb R$ is given. According to Lemma~\ref{H}, $\Sigma$ is a closed subset of $\RR^{\m}$ and $\mathbb{L}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\Sigma\right)=0$. The conclusion follows from the fact that $\Sigma_\k$ is a finite union of subsets like~$\Sigma$.
$\Box$\newline
{\em We assume hereafter that if H\ref{aa} holds for some $\k\in\II_{\m-1}$, data $d$ satisfy \[d\in \{g\in\RR^{\m}~:~g\not\in\Sigma_\k\}=\RR^{\m}\,\mathbf{\setminus}\,\Sigma_\k~.\]}
\subsection{The unique global minimizer of $\bm{\mathcal{F}_d}$ is $\bm\k$-sparse for $\bm{\k\leqslant(\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1)}$}\label{kr}
We are looking for guarantees that $\mathcal{F}_d$ has a {\em unique} global minimizer $\hat u$ obeying
\[ \|\hat u\|_0\leqslant\k~~\mbox{for some fixed}~~ \k\in\II_{\m-1}~.\] This is the aim of the next theorem. \begin{theorem}} \def\ET{\end{theorem} \label{Uglob} Given $\k\in\II_{\m-1}$, let H\ref{aa} hold for $\k$, $\beta>\beta_\k$ where $\beta_\k$ meets Proposition~\ref{ie} and $\Sigma_\k\subset\RR^{\m}$ reads as in \eqref{S}. Consider that \[d\in\RR^{\m}\,\mathbf{\setminus}\,\Sigma_\k~.\] Then \begin{itemize}} \def\eit{\end{itemize}\item[\rm(i)] the set $\RR^{\m}\,\mathbf{\setminus}\,\Sigma_\k$ is open and dense in $\RR^{\m}$;
\item[\rm(ii)] $\mathcal{F}}\def\G{\mathcal{G}}\def\H{{\mathcal H}}\def\I{{\mathcal I}}\def\J{{\mathcal J}}\def\K{\mathcal{K}_d$ has a {\em unique global minimizer} $\hat u$, and $\|\hat u\|_0\leqslant\k$. \eit \ET
\par\paragraph{Proof} \ignorespaces Statement (i) follows from Proposition~\ref{cor}.
Since $\beta>\beta_\k$, all global minimizers of $\mathcal{F}_d$ have their support in $\overline{\Omega}_\k$ (Proposition~\ref{ie}). Using the fact that $d\in\RR^{\m}\,\mathbf{\setminus}\,\Sigma_\k~$, the definition of $\Sigma_\k$ in \eqref{S} shows that \begin{equation}} \def\eeq{\end{equation}-\k\leqslant n\leqslant\k~~\mbox{\rm and}~~(\omega,\varpi)\in (\overline{\Omega}_\k\times\overline{\Omega}_\k),~\omega\neq\varpi~~~\Rightarrow~~~ d^T\Big(\Pvt-\Pvs\Big)d\neq n\beta~.\label{gc}\eeq The proof is conducted by contradiction. Let $\hat u$ and $\bar{u}\neq\hat u$ be two global minimizers of $\mathcal{F}_d$. Then \[{\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)\in\overline{\Omega}_\k~~~\mbox{\rm and}~~~\bar{\sigma}\stackrel{\rm def}{=}\sigma(\bar{u})\in\overline{\Omega}_\k~,\] and ${\hat\sigma}~\neq~\bar{\sigma}$. By $\mathcal{F}}\def\G{\mathcal{G}}\def\H{{\mathcal H}}\def\I{{\mathcal I}}\def\J{{\mathcal J}}\def\K{\mathcal{K}_d(\hat u)=\mathcal{F}}\def\G{\mathcal{G}}\def\H{{\mathcal H}}\def\I{{\mathcal I}}\def\J{{\mathcal J}}\def\K{\mathcal{K}_d(\bar{u})$,~\eqref{hq} yields \begin{equation}} \def\eeq{\end{equation} d^T\left(\Phs-\Pbs\right)d=\beta(\,\sharp\,{\hat\sigma}-\,\sharp\,\bar{\sigma})~.\label{qw}\eeq An enumeration of all possible values of $\,\sharp\,{\hat\sigma}-\,\sharp\,\bar{\sigma}$ shows that \[\beta\left(\,\sharp\,{\hat\sigma}-\,\sharp\,\bar{\sigma}\right)=n\beta~~~\mbox{for some}~~~n\in\{-\k,\cdots,\k\}~.\] Inserting this equation into~\eqref{qw} leads to \[d^T\left(\Phs-\Pbs\right)d=n\beta~~~\mbox{for some}~~~n\in\{-\k,\cdots,\k\}~.\] The last result contradicts~\eqref{gc}, which holds under the assumptions H\ref{aa} and $d\in\RR^{\m}\,\mathbf{\setminus}\,\Sigma_\k$. Consequently, $\mathcal{F}_d$ cannot have two global minimizers. Since $\mathcal{F}_d$ always has global minimizers (Theorem~\ref{gs}(i)), it follows that
$\mathcal{F}_d$ has a unique global minimizer, say $\hat u$. Moreover, $\|\hat u\|_0\leqslant \mathsf{K}$ because $\sigma(\hat u)\in\overline{\Omega}_\mathsf{K}$.
$\Box$\newline
{\em For $\beta>\beta_\k$, the objective $\mathcal{F}_d$ in~\eqref{fd} has a unique global minimizer and it is $\k$-sparse for $\k\leqslant\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$. For all $\;\k\in\II_{\m-1}$, the claim holds true in a generic sense.} This is the message of Theorem~\ref{Uglob} using Definition~\ref{ps}.
\section{Numerical illustrations}\label{nm} \subsection{On assumption H\ref{aa}} Assumption H\ref{aa} requires that $\Pvt\neq\Pvs$ when $(\omega,\varpi)\in\Omega_r\times\Omega_r$, $\omega\neq\varpi$ for all $r\leqslant\k\in\II_{\m-1}$. From a practical viewpoint, the magnitude of $\left(\Pvt-\Pvs\right)$ should be discernible. One way to assess the viability of H\ref{aa} for a matrix $A$ and $\k\in\II_{\m-1}$ is to calculate \begin{eqnarray}} \def\eeqn{\end{eqnarray}\xi_\k(A)&\stackrel{\rm def}{=}&\min_{r\in\II_{\k}}~~\mu_r(A)~,\label{xi}\\ \mbox{where}~~~~~
\mu_r(A)&=&\disp{\min_{\small\begin{array}} \def\earr{\end{array}{c}(\omega,\varpi)\in\Omega_r\times\Omega_r\\\omega\neq\varpi\earr}
\left\|\Pvt-\Pvs\right\|_2,~~~\forall\; r\in\II_{\k}~.}\nonumber\eeqn In fact, $\left\|\Pvt-\Pvs\right\|_2=\sin(\theta)$, where $\theta\in[0,\pi/2]$ is the maximum angle between $\ran\left(A_{\vt}} \def\Pvt{\Pi_{\vt}\right)$ and $\ran\left(A_{\vs}} \def\Pvs{\Pi_{\vs}\right)$; see \cite[p. 456]{Meyer00}. These subspaces have the same dimension and $\Pvt\neq\Pvs$ when $(\omega,\varpi)\in\Omega_r\times\Omega_r$, $\omega\neq\varpi$ and $r\in\II_{\k}$, hence $\theta\in(0,\pi/2]$. Consequently, \[\mbox{H\ref{aa} holds for}~~\k\in\II_{\m-1}~~~\Rightarrow~~~\mu_r(A)\in(0,1]~~\forall\; r\in\II_{\k}~~~\Rightarrow~~~\xi_\k(A)\in(0,1]~.\] According to \eqref{xi}, we have $\xi_\k\geqslant\xi_{\k+1}$, $\forall\; \k\in\mathbb{I}_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-2}$. Our guess that {\em assumption~H\ref{aa} is lightened when $\k$ decreases} (see the comments following the proof of Theorem~\ref{usg}) means that \begin{equation}} \def\eeq{\end{equation}\xi_1(A)>\cdots>\xi_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}(A)~.\label{tya}\eeq
We provide numerical tests on two subsets of real-valued random matrices for $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=5$ and $\n=10$, denoted by $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$ and $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$. The values of $\xi_\k(\cdot)$, $\k\in\II_{\m-1}=\mathbb{I}_4$, for every matrix in $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$ and in $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$, were calculated using an {\em exhaustive combinatorial search}. {\em All tested matrices satisfy assumption H\ref{aa}}, which confirms Theorem~\ref{usg} and its consequences. In order to evaluate the extent of H\ref{aa}, we computed the {\em worst} and the {\em best} values of $\xi_\k(\cdot)$ over these sets: \begin{equation}} \def\eeq{\end{equation}\left\{\begin{array}} \def\earr{\end{array}{rcl}\xi_\k^{\mbox{\footnotesize\it worst}}&=&\disp{\min_{A\in\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}}\xi_\k(A)}~\\~\\ \xi_\k^{\mbox{\footnotesize\it best}}&=&\disp{\max_{A\in\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}}\xi_\k(A)}~\earr\right.~~~~~~\forall\;\k\in\II_{\m-1}~,~~~ \mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}\in\left\{\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20},~\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}\right\}~.\label{be}\eeq
\paragraph{Set $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$} This set was formed from 20 matrices $A^n$, ~$n\in\mathbb{I}_{20}$ of size $5\times 10$. The components of each matrix $A^n$ were drawn independently
from the standard normal distribution with mean zero and variance one. The values of $\xi_\k(\cdot)$ are depicted in Fig.~\ref{TH}. We have\footnote{ This is why on the figure, in columns 10 and 17, the green ``$\circ$'' and the red ``$\lozenge$'' overlap.} $\xi_1(A^{10})=\xi_2(A^{10})$ and $\xi_1(A^{17})=\xi_2(A^{17})$. In all other cases \eqref{tya} is satisfied. Fig.~\ref{TH} clearly shows that $\xi_\k(\cdot)$ increases as $\k$ decreases (from $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$ to $1$).
\setlength{\unitlength}{1mm} [h!] \begin{center}} \def\ec{\end{center} \begin{picture}} \def\epic{\end{picture}(143,81) \put(0,1){\epsfig{figure=testH_20_N.eps,width=10.0cm,height=8cm}} \put(20,-1.4){(a)~ $\xi_\k(\!A^n\!)$ for the matrices in $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}_{20}$} \put(89,0.5){$n$} \put(-5.6,61){\small$\xi_\k(\!A^n\!)$} \put(101,1.2){\epsfig{figure=testH_20_NZ.eps,width=4.7cm,height=5cm}} \put(101.5,30){z} \put(101.5,28){o} \put(101.5,26){o} \put(100.8,24){m} \put(107,-1.3){\small (b) Zoom of (a) -- $y$-axis} \put(107,70){$\k=1$: \textcolor[rgb]{0,0.5,0}{\bf green $\bm{\circ}$}} \put(107,65){$\k=2$: \textcolor[rgb]{0.75,0,0}{\bf red $\bm{\lozenge}$}} \put(107,60){$\k=3$: \textcolor{blue}{\bf blue $\bm{\square}$}} \put(107,55){$\k=4$: \textcolor[rgb]{0.8,0,0.6}{\bf magenta $\bm{\vartriangle}$}} \epic \ec \caption{$x$-axis: the list of the $20$ random matrices in $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$. (a) $y$-axis: the value $\xi_\k(A^n)$ according to~\eqref{xi} for all $\k\in\II_{\m-1}$ and for all $n\in\mathbb{I}_{20}$. The plot in (b) is a zoom of~(a) along the $y$-axis.} \label{TH} \efig
The worst and the best values of $\xi_\k(\cdot)$ over the whole set $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$ are displayed in Table~\ref{mmu}. \begin{table}} \def\etabe{\end{table}[h!] \caption{The worst and the best values of $\xi_\k(A)$, for $\k\in\II_{\m-1}$, over the set $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$, see \eqref{be}.}
\centering\mbox{\begin{tabular}{l||c|c|c|c|} \hline & $\k=1$ & $\k=2$ & $\k=3$ & $\k=4$\\ \hline
$\disp{\xi_\k^{\mbox{\footnotesize\it worst}}}$ & 0.3519 & 0.1467 & 0.0676 & 0.0072\\ \hline
$\disp{\xi_\k^{\mbox{\footnotesize\it best}}}$ &0.8666 & 0.5881 & 0.3966 & 0.0785\\
\hline \end{tabular}}\label{mmu} \etabe
\paragraph{Set $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$} The set $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$ was composed of one thousand $5\times 10$ matrices $A^n$, ~$n\in\mathbb{I}_{1000}$. The entries of each matrix $A^n$ were independent and uniformly sampled on $[-1,1]$. The obtained values for $\xi_\k^{\mbox{\footnotesize\it worst}}$ and $\xi_\k^{\mbox{\footnotesize\it best}}$, calculated according to~\eqref{be}, are shown in Table~\ref{xmu}.
\begin{table}} \def\etabe{\end{table}[h!] \caption{The worst and the best values of $\xi_\k(A)$, for $\k\in\II_{\m-1}$, over the set $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$.}
\centering\mbox{\begin{tabular}{l||c|c|c|c|} \hline & $\k=1$ & $\k=2$ & $\k=3$ & $\k=4$\\ \hline
$\disp{\xi_\k^{\mbox{\footnotesize\it worst}}}$ & 0.1085 & 0.0235 & 0.0045 & 0.0001\\ \hline
$\disp{\xi_\k^{\mbox{\footnotesize\it best}}}$ &0.9526 & 0.8625 & 0.5379 & 0.1152\\ \hline \end{tabular}}\label{xmu} \etabe For $\k\in\mathbb{I}_3$, the {\em best} values of $\xi_\k(\cdot)$ were obtained for the same matrix, $A^{964}$. Note that $\xi_4(A^{964})= 0.0425\gg\disp{\xi_4^{\mbox{\footnotesize\it worst}}}$. The {\em worst} values in Table~\ref{xmu} are smaller than those in Table~\ref{mmu}, while the {\em best} values in Table~\ref{xmu} are larger than those in Table~\ref{mmu}; one credible reason is that $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$ is much larger than $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^N_{20}$.
\begin{table}} \def\etabe{\end{table}[h!] \caption{Percentage of the cases in $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$ when \eqref{tya} fails to hold.}
\centering\mbox{\begin{tabular}{c||c|c|c|} \hline & $\xi_1(A^n)=\xi_2(A^n)$&$\xi_2(A^n)=\xi_3(A^n)$&$\xi_3(A^n)=\xi_4(A^n)$\\ \hline occurrences $\{n\}$ ~ & 5 $\%$& 1.6 $\%$& 0.1 $\%$\\ \hline \end{tabular}} \label{kx} \etabe
Overall, \eqref{tya} is satisfied on $\mathcal{A}}\def\B{\mathcal{B}}\def\C{{\mathcal C}}\def\D{\mathcal{D}}\def\E{{\mathcal E}^U_{1000}$---the percentages in Table~\ref{kx} are small. All three tables and Figure~\ref{TH} agree with our expectation that~H\ref{aa} is more viable for smaller values of~$\k$.
{\em Based on the magnitudes for $\xi_\k^{\mbox{\footnotesize\it best}}$ in Tables~\ref{mmu} and~\ref{xmu}, one can expect that there are some classes of matrices (random or not) that fit~H\ref{aa} for larger values of $\xi_\k(\cdot)$.}
\subsection{On the global minimizers of $\bm{\mathcal{F}_d}$}\label{ong}
Here we summarize the outcome of a series of experiments corresponding to several matrices $A\in\RR^{\m\x\n}$ where $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=5$ and $\n=10$, satisfying H\ref{aa} for $\k=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$, different original vectors ${\ddot{u}}\in\RR^{\n}$ and data samples $d=A{\ddot{u}}+\mathrm{noise}$, for various values of $\beta>0$. In each experiment, we computed the complete list of all different strict (local) minimizers of $\mathcal{F}_d$, say $\left(\hat u^i\right)_{i=1}^n$. Then the sequence of values $\left(\mathcal{F}_d(\hat u^i)\right)_{i=1}^n$ was sorted in increasing order, $\mathcal{F}_d\left(\hat u^{i_1}\right)\leqslant\mathcal{F}_d\left(\hat u^{i_2}\right)\leqslant\cdots\leqslant\mathcal{F}_d\left(\hat u^{i_n}\right)~.$ A global minimizer $\hat u^{i_1}$ is unique provided that
$\mathcal{F}_d\left(\hat u^{i_1}\right)<\mathcal{F}_d\left(\hat u^{i_2}\right)$. In order to discard numerical errors, we also checked whether $\left|\mathcal{F}_d\left(\hat u^{i_1}\right)-\mathcal{F}_d\left(\hat u^{i_2}\right)\right|$ is easy to detect.
{\em In all experiments we carried out, the following facts were observed: \begin{itemize}} \def\eit{\end{itemize} \item The global minimizer of $\mathcal{F}_d$ was unique---manifestly data $d$ never did belong to the {\em closed negligible} subset $\Sigma_\k$ in~Proposition~\ref{cor}. This confirms Theorem~\ref{Uglob}. \item The global minimizers of $\mathcal{F}_d$ remained unchanged under large variations of~$\beta$. \item The necessary condition for a global minimizer in Proposition~\ref{ou} was met. \eit}
Next we present in detail two of these experiments where $\mathcal{F}_d$ is defined using \begin{equation} A=\left[\begin{array}{c} 7~~~2~~~4~~~9~~~0~~~3~~~3~~~6~~~6~~~7\\ 3~~~4~~~9~~~3~~~3~~~9~~~1~~~3~~~1~~~5\\ 5~~~4~~~2~~~4~~~0~~~7~~~1~~~9~~~2~~~9\\ 8~~~4~~~0~~~9~~~6~~~0~~~4~~~2~~~3~~~7\\ 6~~~3~~~6~~~5~~~0~~~9~~~0~~~0~~~3~~~8 \end{array}\right] ~~\begin{array}{l}d=A{\ddot{u}}+n~,\\~\\ \mbox{~~~where $n$ is noise and}\\~\\ {\ddot{u}}=\big(\,0\,,~1\,,~8\,,~0\,,~3\,,~0\,,~0\,,~0\,,~0\,,~9\,\big)^T.\end{array} \label{num}\end{equation} Only integers appear in~\eqref{num} for better readability. We have $\mathrm{rank}(A)=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}=5$. An exhaustive combinatorial test shows that the arbitrary matrix $A$ in~\eqref{num} satisfies~H\ref{aa} for $\k=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1$. The values of $\xi_\k(A)$ are seen in Table~\ref{D}. One notes that $\mu_2(A)>\mu_1(A)$; hence $\xi_1(A)=\xi_2(A)$.
\begin{table}} \def\etabe{\end{table}[h!] \caption{The values of $\xi_\k(A)$ and $\mu_\k(A)$, $\forall\;\k\in\II_{\m-1}$, for the matrix $A$ in~\eqref{num}.}
\centering\mbox{\begin{tabular}{l||c|c|c|c|} \hline & $\k=1$ & $\k=2$ & $\k=3$ & $\k=4$\\ \hline
$\xi_\k(A)$ & 0.2737 & 0.2737 & 0.2008 & 0.0564\\ \hline \hline
$\mu_\k(A)$ & 0.2737 & 0.2799 & 0.2008 & 0.0564\\ \hline \end{tabular}} \label{D} \etabe
One {\em expects} (at least when data are noise-free) that the global minimizer $\hat u$ of $\mathcal{F}_d$ obeys $\hat\sigma\subseteq\sigma({\ddot{u}})$, where ${\ddot{u}}$ is the original in~\eqref{num}, and that the vanished entries of $\hat u$ correspond to the least entries of ${\ddot{u}}$. This inclusion provides a partial way to rate the quality of the solution provided by a global minimizer $\hat u$ of $\mathcal{F}_d$.
The experiments described hereafter correspond to two data samples relevant to \eqref{num}---without and with noise---and to several values of $\beta>0$.
\paragraph{Noise-free data} The noise-free data in \eqref{num} read as: \begin{equation}} \def\eeq{\end{equation} d=A{\ddot{u}}=\big(~97\,,~~130\,,~~101\,,~~85\,,~~123~\big)^T.\label{kli}\eeq For different values of $\beta$, the global minimizer $\hat u$ is given in Table~\ref{bem}. \begin{table}[h!] \caption{The global minimizer $\hat u$ of $\mathcal{F}_d$ and its value $\mathcal{F}_d(\hat u)$ for the noise-free data $d$ in~\eqref{kli} for different values of $\beta$. Last row: the original ${\ddot{u}}$ in \eqref{num}.} \begin{tabular}{l} \hline
\begin{tabular}{c|c|c|c|c}
$\beta$& The global minimizer $\hat u$ of $\mathcal{F}_d$ {\it ~~(row vector)}&$\|\hat u\|_0$ & $\mathcal{F}_d(\hat u)$ \\ \hline $\begin{array}{c} 1\\ 10^2\\ 10^3\\ 10^4\\ 7\!\cdot\! 10^4 \end{array}$ & $\begin{array}{cccccccccc} 0 & \mathbf{ 1} & \mathbf{8} & 0 & \mathbf{3} & 0 & 0 & 0 & 0 & \mathbf{9}\\
0 & 0 & \mathbf{ 8.12} & 0 & \mathbf{3.31} & 0 & 0 & 0 & 0 & \mathbf{9.33}\\
0 & 0 & 0 & 0 & 0 & \mathbf{12.58} & \mathbf{20.28} & 0 & 0 & 0\\ 0 & \mathbf{29.95} & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ 0&0&0&0&0&0&0&0&0&0 \end{array}$& $\begin{array}{c}4\\3\\2\\1\\0\end{array}$ & $\begin{array}{c} 4\\ 301.52\\ 2179.3\\ 14144\\ 58864 \end{array}$ \\ \hline \hline \end{tabular}\\ $~~~~~~{\ddot{u}}~~~=~\,0~~~~~\;\mathbf{1}~~~~~~~~ \mathbf{8}~~~~~ 0~~~~\,\mathbf{3}~~~~~~~~ 0~~~~~~~~~ 0~~~~~~ 0~~~ 0~~~~~ \mathbf{9}$\\
\hline \end{tabular} \label{bem} \end{table} Since $\sigma({\ddot{u}})\in\Omega$ and the data are noise-free,
$\mathcal{F}_d$ does not have global minimizers with $\|\hat u\|_0=5$. Actually, applying Proposition~\ref{ie} for $\widetilde u={\ddot{u}}$ yields $\beta_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}=0$, hence for any $\beta>0$ all global minimizers of $\mathcal{F}_d$ have a support in $\Omega=\overline{\Omega}_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}$ (see Definition~\ref{om} and~\eqref{ga}). The global minimizer $\hat u$ for $\beta=1$ meets $\hat u={\ddot{u}}$. For $\beta=100$, the global minimizer $\hat u$ obeys ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)=\{3,5,10\}\subsetneqq\sigma({\ddot{u}})$ and
$\|\hat u\|_0=3$---the least nonzero entry of the original ${\ddot{u}}$ is canceled, which is reasonable. The global minimizers corresponding to $\beta\geqslant 300$ are meaningless. We could not find any positive value of $\beta$ giving better $2$-sparse global minimizers. Recalling that data are noise-free, this confirms Remark~\ref{x2}:
the global minimizers of $\mathcal{F}_d$ realize only a {\em pseudo}-hard thresholding. For $\beta\geqslant 7\cdot 10^4>\|d\|^2$, the global minimizer of $\mathcal{F}_d$ is $\hat u=0$, which confirms~Remark~\ref{nu}.
\paragraph{Noisy data} Now we consider {\em noisy data} in \eqref{num} for \begin{equation}} \def\eeq{\end{equation} n=\big(~4\,,~~ -1\,,~~ 2\,,~~ -3\,,~~ 5~\big)^T~.\label{noy}\eeq This arbitrary noise yields a signal-to-noise ratio\footnote{ Let us denote $\ddot{d}=A{\ddot{u}}$ and $d=\ddot{d}+n$. The SNR reads \cite{Vetterli95} $\mathrm{SNR}(\ddot{d},d)=10\,\log_{10}\frac{\sum_{i=1}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(\ddot{d}[i]-\frac{1}{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}}\sum_{i=1}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K} \ddot{d}[i]\right)^2} {\sum_{i=1}^\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}\left(d[i]-\ddot{d}[i]\right)^2}~.$ } (SNR) equal to $14.07$ dB. If $\beta\leq0.04$, $\mathcal{F}_d$ has $252$ different strict global minimizers $\hat u$
obeying $\|\hat u\|_0=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $\mathcal{F}_d(\hat u)=\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ (recall Proposition~\ref{bad}). For $\beta\geq0.05$, the global minimizer $\hat u$ of $\mathcal{F}_d$ is unique and satisfies $\sigma(\hat u)\in\Omega$. It is given in Table~\ref{ben} for several values of $\beta\geq0.05$. \begin{table}[h!] \caption{The global minimizer $\hat u$ of $\mathcal{F}_d$ and its value $\mathcal{F}_d(\hat u)$ for noisy data given by \eqref{num} and \eqref{noy}, for different values of $\beta$. Last row: the original ${\ddot{u}}$.} \begin{tabular}{l} \hline
\begin{tabular}{c|c|c|c}
$\beta$& The global minimizer $\hat u$ of $\mathcal{F}_d$ {\it ~~(row vector)}&$\|\hat u\|_0$ & $\mathcal{F}_d(\hat u)$\\ \hline $\begin{array}{c} 1\\ 10^2\\ 10^3\\ 10^4\\ 7\!\cdot\!10^4 \end{array}$ & $\begin{array}{cccccccccc} 0 & \mathbf{6.02} & \mathbf{2.66} & \mathbf{6.43} & 0 & \mathbf{6.85} & 0 & 0 & 0 & 0\\ 0 & 0 & \mathbf{8.23} & 0 & \mathbf{2.3} & 0 & 0 & 0 & 0 & \mathbf{9.71}\\
0 & 0 & \mathbf{8.14} & 0 & 0 & 0 & 0 & 0 & 0 & \mathbf{10.25}\\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & \mathbf{14.47}\\ 0&0&0&0&0&0&0&0&0&0 \end{array}$ &$\begin{array}{c}4\\3\\2\\1\\0\end{array}$ & $\begin{array}{c} 4.0436\\301.94\\2174.8\\ 14473\\ 60559 \end{array}$ \\ \hline \hline \end{tabular}\\ $~~~~~~{\ddot{u}}~~~=\,~0~~~~~\mathbf{1}~~~~~~~ \mathbf{8}~~~~~~~\, 0~~~~~~ \mathbf{3}~~~~~~
0~~~~~ 0~~~ 0~~\, 0~~~~~~ \mathbf{9}$\\ \hline \end{tabular} \label{ben} \end{table} For $\beta=1$, the global minimizer is meaningless. We could not find any positive value of $\beta$ yielding a better global minimizer with a $4$-length support. For the other values of $\beta$, the
global minimizer $\hat u$ satisfies ${\hat\sigma}\stackrel{\rm def}{=}\sigma(\hat u)\varsubsetneqq\sigma({\ddot{u}})$, and its zero entries correspond to the smallest entries in the original ${\ddot{u}}$. For $\beta=100$, the global minimizer seems to furnish a good approximation to ${\ddot{u}}$. Observe that the last entry of the global minimizer $\hat u[10]$, corresponding to the largest magnitude in ${\ddot{u}}$, freely increases when
$\beta$ increases from $10^2$ to $10^4$. We tested a tight sequence of intermediate values of $\beta$ without finding better results. Yet again, $\beta\geqslant 7\cdot 10^4>\|d\|^2$ leads to a unique null global minimizer (see Remark~\ref{nu}).
\setlength{\unitlength}{1mm} \begin{figure}[h!] \begin{center}} \def\ec{\end{center} \begin{picture}} \def\epic{\end{picture}(160,83) \put(5,7){\epsfig{figure=allmin.eps,width=7.4cm,height=7.6cm}} \put(85,8){\epsfig{figure=allminZ.eps,width=6.9cm,height=7.5cm}} \put(6,0){\footnotesize (a) The values of all strict (local) minimizers of $\mathcal{F}_d$} \put(94,1){\footnotesize (b) Zoom of (a) along the $y$-axis} \put(-2,79){\footnotesize $4\;10^4$} \put(0.5,4.5){\footnotesize $(0,0)$} \put(75.3,4.5){\footnotesize $637$} \put(65.3,4.5){\footnotesize $\{\hat u\}$} \put(0.0,7.5){\footnotesize $302$} \put(-3,68){\footnotesize $\mathcal{F}_d(\hat u)$} \put(50,75){\footnotesize $\mathcal{F}_d(\hat u)$ for}
\put(50,70){\footnotesize $\|\hat u\|_0\!=\!1$: \textcolor[rgb]{0,0.5,0}{\bf green}}
\put(50,65){\footnotesize $\|\hat u\|_0\!=\!2$: \textcolor[rgb]{0.75,0,0}{\bf red}}
\put(50,60){\footnotesize $\|\hat u\|_0\!=\!3$: \textcolor{blue}{\bf blue}}
\put(50,55){\footnotesize $\|\hat u\|_0\!=\!4$: \textcolor[rgb]{0.8,0,0.6}{\bf magenta}}
\put(50,50){\footnotesize $\|\hat u\|_0\!=\!5$: {\bf black}} \put(79.5,68){\footnotesize $\mathcal{F}_d(\hat u)$} \put(80.0,12){\footnotesize $302$} \put(80.0,77.1){\footnotesize $500$} \put(79.8,68){\footnotesize $\mathcal{F}_d(\hat u)$} \put(149.2,5){\footnotesize $637$} \put(140.5,5){\footnotesize $\{\hat u\}$} \put(87.5,5){\footnotesize $0$}
\put(91.9,18){\footnotesize $\|\hat u\|_0\!=\!3$}
\put(109.7,18){\footnotesize $\|\hat u\|_0=4$}
\put(133.3,18){\footnotesize $\|\hat u\|_0=5$} \epic \ec
\caption{All $\bm{638}$ strict (local) minima of $\mathcal{F}_d$ in \eqref{num} for $\beta=100$ and data $d$ corrupted with the arbitrary noise in \eqref{noy}. The $x$-axis lists all strict (local) minimizers $\{\hat u\}$ of $\mathcal{F}_d$ sorted according to their $\ell_0$-norm $\|\hat u\|_0$ in increasing order.
(a) The $y$-axis shows the value $\mathcal{F}_d(\hat u)$ of these minimizers marked with a star. The value of $\mathcal{F}_d$ for $\hat u=0$ is not shown because it is too large ($\mathcal{F}_d(0)=60559=\|d\|^2$). (b) A zoom of (a) along the $y$-axis. It clearly shows that $\mathcal{F}_d$ has a very recognizable unique global minimizer} \label{all} \efig
Figure~\ref{all} shows the value $\mathcal{F}_d(\hat u)$ of all the strict local minimizers of $\mathcal{F}_d$ for $\beta=100$. In the zoom in Figure~\ref{all}(b) it is easily seen that the global minimizer is unique (remember Theorem~\ref{Uglob}). It obeys $\|\hat u\|_0=3$ and $\mathcal{F}_d(\hat u)=301.94$. One observes that $\mathcal{F}_d$ has $252=\,\sharp\,\Omega_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ different strict local minimizers $\hat u$
with $\|\hat u\|_0=5=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ and $\mathcal{F}_d(\hat u)=500=\beta\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$. This confirms Proposition~\ref{bad}---obviously $d$ does not belong to the closed negligible subset $\Qm_\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}$ described in the proposition.
\section{Conclusions and perspectives}\label{Con}
We provided a detailed analysis of the (local and global) minimizers of a regularized objective $\mathcal{F}_d$ composed of a quadratic data fidelity term and an $\ell_0$ penalty weighted by a parameter $\beta>0$, as given in \eqref{fd}. We exhibited easy necessary and sufficient conditions ensuring that a (local) minimizer $\hat u$ of $\mathcal{F}_d$ is strict (Theorem~\ref{ra}). The global minimizers of $\mathcal{F}_d$ (whose existence was proved) were shown to be strict as well (Theorem~\ref{gs}). Under very mild conditions, $\mathcal{F}_d$ was shown to have a unique global minimizer (Theorem~\ref{Uglob}). Other interesting results were listed in the abstract. Below we pose some perspectives and open questions raised by this work. \begin{itemize}} \def\eit{\end{itemize} \item The relationship between the value of the regularization parameter $\beta$ and the sparsity of the global minimizers of $\mathcal{F}_d$ (Proposition~\ref{ie}) can be improved.
\item The {\em generic} linearity in data $d$ of each strict (local) minimizer of $\mathcal{F}_d$ (subsection~\ref{els}) should be exploited to better characterize the global minimizers of $\mathcal{F}_d$.
\item Is there a simple way to check whether assumption H\ref{aa} is satisfied by a given matrix $A\in\RR^{\m\x\n}$ when $\mathsf{N}$ and $\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}<\n$ are large? Remark~\ref{P} and in particular~\eqref{yr} could help to discard some nonrandom matrices. Conversely, one can ask whether there is a systematic way to construct matrices $A$ that satisfy H\ref{aa}.
An alternative would be to exhibit families of matrices that satisfy H\ref{aa} for
large values of $\xi_\k(\cdot)$, where the latter quantifiers are defined in equation~\eqref{xi}.
\item A proper adaptation of the results to matrices $A$ and data $d$ with complex entries should not present inherent difficulties.
\item The theory developed here can be extended to MAP energies of the form evoked in~\eqref{map}. This is important for the imaging applications mentioned there.
\item Based on Corollary~\ref{nor}, Remarks~\ref{x2} and \ref{ifn}, and the numerical tests in subsection~\ref{ong}, one is justified in asking for conditions ensuring that the global minimizers of $\mathcal{F}_d$ perform meaningful work. Given the high quality of the numerical results provided in many papers (see e.g.,~\cite{Robini07,Robini10}), the question deserves attention. \eit
There exist numerous algorithms aimed at approximating a (local) minimizer of $\mathcal{F}_d$. As a by-product of our research, we obtained simple rules to verify whether or not an algorithm could find \begin{itemize}} \def\eit{\end{itemize}\item[-] a (local) minimizer $\hat u$ of $\mathcal{F}_d$---by checking whether $\hat u$ satisfies \eqref{ee} in Corollary~\ref{nor};
\item[-] and whether this local minimizer is strict by testing whether the submatrix whose columns are indexed by the support of $\hat u$ (i.e., $A_{\sigma(\hat u)}$) has full column rank (Theorem~\ref{ra}). \eit Some properties of the minimizers of $\mathcal{F}_d$ given in this work can be inserted in numerical schemes in order to quickly escape from shallow local minimizers.
Many existing numerical methods involve a studious choice of the regularization parameter $\beta$, and some of them are proved to converge to a local minimizer of $\mathcal{F}_d$. {\em We have seen that finding a (strict or nonstrict) local minimizer of $\mathcal{F}_d$ is easy and that it is independent of the value of $\beta$ (Corollaries~\ref{nor} and \ref{blg}). It is therefore obscure what meaning to attach to ``choosing a good $\beta$ and proving (local) convergence''.}
Other successful algorithms are not guaranteed to converge to a local minimizer of $\mathcal{F}_d$. Whenever algorithms do a good job, the choice of $\beta$, the assumptions on $A$ and on $\|\hat u\|_0$, and the iterative scheme and its initialization {\em obviously} provide a tool for selecting a meaningful solution by minimizing $\mathcal{F}_d$. {\em There is a theoretical gap that needs clarification.}
{\em The connection between the existing algorithms and the description of the minimizers exposed in this paper deserves deep exploration. What conditions ensure that an algorithm minimizing $\mathcal{F}_d$ yields meaningful solutions? Clearly, showing local convergence does not answer this important question.}
One can expect such research to give rise to innovative and more efficient algorithms enabling one to compute relevant solutions by minimizing the tricky objective $\mathcal{F}_d$.
\section{Appendix}
\subsection{Proof of Lemma~\ref{tl}}\label{dtl}
Since $\hat u\neq0$, the definition of ${\hat\sigma}$ shows that $\disp{\min_{i\in{\hat\sigma}}\big|\,\hat u[i]\,\big|>0}$. Then $\rho$ in~\eqref{rho} fulfills $\rho>0$.
\paragraph{\rm(i)}~ Since $\,\sharp\,{\hat\sigma}\geq1$, we have
\begin{eqnarray}} \def\eeqn{\end{eqnarray} i\in{\hat\sigma}~,~~v\in\Bm_\infty(0,\rho)~~&\Rightarrow&~~\max_{j\in{\hat\sigma}}\big|\,v[j]\,\big|<\rho\nonumber\\
&\Rightarrow&~~\max_{j\in{\hat\sigma}}\big|\,v[j]\,\big|<\min_{j\in{\hat\sigma}}\big|\,\hat u[j]\,\big|\nonumber\\
&\Rightarrow&~~|\hat u[i]+v[i]|\geqslant |\hat u[i]|-|v[i]|\nonumber\\
&&~~\geqslant\min_{j\in{\hat\sigma}} |\hat u[j]|-\max_{j\in{\hat\sigma}}|v[j]|\geqslant\rho-\max_{j\in{\hat\sigma}}|v[j]|>0\nonumber\\ &\Rightarrow&~~ \hat u[i]+v[i]\neq0~\nonumber\\ \Big[\mbox{\rm by~\eqref{phi}~}\Big]~~~~~~~~&\Rightarrow&~~\phi\left(\hat u[i]+v[i]\right)=\phi\left(\hat u[i]\right)=1~. \label{ry}\eeqn If ${\hat\sigma}^c=\varnothing$ the result is proved. Let ${\hat\sigma}^c\neq\varnothing$. Then $\hat u[i]=0=\phi\left(\hat u[i]\right)$, $\forall\; i\in{\hat\sigma}^c$. Inserting this and~\eqref{ry} into \[\sum_{i\in\II_{\n}}\phi\big(\,\hat u[i]+v[i]\,\big)= \sum_{i\in{\hat\sigma}}\phi\big(\,\hat u[i]+v[i]\,\big)+\sum_{i\in{\hat\sigma}^c}\phi\big(\,\hat u[i]+v[i]\,\big)~\] proves claim (i).
\paragraph{\rm(ii)}~
Using the fact that $\|A(\hat u+v)-d\|^2=\|A\hat u-d\|^2+\|Av\|^2+2\<Av,\,A\hat u-d\>$, one obtains \begin{eqnarray}} \def\eeqn{\end{eqnarray} v\in\Bm_\infty(0,\rho)\,\mathbf{\setminus}\,\Km_{{\hat\sigma}}~~~\Rightarrow~~~
\mathcal{F}_d(\hat u+v)&=&\|A\hat u-d\|^2+\|Av\|^2+2\<Av,\,A\hat u-d\>\nonumber\\ \Big[\mbox{\rm by Lemma~\ref{tl}(i)}\Big]~~~~&&+\beta\sum_{i\in{\hat\sigma}}\phi\left(\hat u[i]\right)+\beta\sum_{i\in{\hat\sigma}^c}\phi\left(v[i]\right)\nonumber\\
\Big[\mbox{\rm using~\eqref{fds}~}\Big]~~~~&=&\mathcal{F}_d(\hat u)+\|Av\|^2+2\<Av,\,A\hat u-d\>+\beta\sum_{i\in{\hat\sigma}^c}\phi\left(v[i]\right)\nonumber\\
&\geqslant&\mathcal{F}_d(\hat u)-\big|2\<v,\,A^T(A\hat u-d)\>\big|+\beta\|v_{{\hat\sigma}^c}\|_0\nonumber\\
\Big[\mbox{\rm by H\"{o}lder's inequality}\Big]~~~~&\geqslant&\mathcal{F}_d(\hat u)-2\|v\|_\infty\,\|A^T(A\hat u-d)\|_1+\beta \|v_{{\hat\sigma}^c}\|_0~.\label{pop}
\eeqn If $\,\sharp\,{\hat\sigma}^c=0$, then $\Km_{{\hat\sigma}}=\RR^{\n}$, so $v\in\mathbb R^0$ and $\|v\|_\infty=0$; hence we have the inequality.
Let $\,\sharp\,{\hat\sigma}^c\geq1$. For $v\not\in\Km_{{\hat\sigma}}$, there is at least one index $i\in{\hat\sigma}^c$ such that $v[i]\neq0$; hence $\|v_{{\hat\sigma}^c}\|_0\geq1$. The definition of $\rho$ in~\eqref{rho} shows that \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} v\in\Bm_\infty(0,\rho)\,\mathbf{\setminus}\,\Km_{{\hat\sigma}}~&\Rightarrow&~
-\|v\|_\infty>-\rho\geqslant-\,\frac{\beta}{2\Big(\|A^T(A\hat u-d)\|_1+1\Big)}\\
~&\Rightarrow&~-2\|v\|_\infty\,\|A^T(A\hat u-d)\|_1+\beta\|v_{{\hat\sigma}^c}\|_0
>-\,\frac{2\beta\|A^T(A\hat u-d)\|_1}{2\Big(\|A^T(A\hat u-d)\|_1+1\Big)}+\beta>0~. \eeqnn Introducing the last inequality into~\eqref{pop} shows that for $\,\sharp\,{\hat\sigma}^c\geq1$, the inequality in (ii) is strict.
\subsection{Proof of Proposition~\ref{ou}}\label{pou} If $\hat u=0$, the statement is obvious. We focus on $\hat u\neq0$. For an arbitrary $i\in\II_{\n}$, define \[\hat u^{(i)}\stackrel{\rm def}{=}\big(\hat u[1],\cdots,\hat u[i-1],\,0,\,\hat u[i+1],\cdots,\hat u[\n]\big)\in\RR^{\n}~.\] We shall use the equivalent formulation of $\mathcal{F}_d$ given in~\eqref{fds}. Clearly\footnote{Using the definition of $\hat u^{(i)}$, we have $\hat u^{(i)}=A_{(\II_{\n}\,\mathbf{\setminus}\,\{i\})}\hat u_{(\II_{\n}\,\mathbf{\setminus}\,\{i\})}~,$ hence $A\hat u^{(i)}$ is independent of $\hat u[i]$. },
\[\mathcal{F}_d(\hat u)=\mathcal{F}_d\big(\hat u^{(i)}+e_i\hat u[i]\big)=\|A\hat u^{(i)}+a_i\hat u[i]-d\|^2+
\beta \sum_{j\in\II_{\n}}\phi\left(\hat u^{(i)}[j]\right)+\beta\phi\left(\hat u[i]\right)~.\] Consider $f:\mathbb R\rightarrow\mathbb R$ as given below \begin{equation}} \def\eeq{\end{equation} f(t)\stackrel{\rm def}{=}\mathcal{F}_d\left(\hat u^{(i)}+e_it\right). \label{fi}\eeq Since $\hat u$ is a global minimizer of $\mathcal{F}_d$, for any $i\in\II_{\n}$, we have \begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} f\left(\hat u[i]\right)&=&\mathcal{F}_d\big(\hat u^{(i)}+e_i\hat u[i]\big)\\
&\leqslant&\mathcal{F}_d\big(\hat u^{(i)}+e_i\,t\big)=f(t)~~~\forall\; t\in\mathbb R~.\eeqnn Equivalently, for any $i\in\II_{\n}\,$, $\,f\left(\hat u[i]\right)\,$ is the global minimum of $f(t)$ on $\mathbb R$. Below we will determine the global minimizer(s) $\hat t=\hat u[i]$ of $f$ as given in~\eqref{fi}, i.e., \[\hat t=\hat u[i]=\arg\min_{t\in\mathbb R}f(t)~.\] In detail, the function $f$ reads as
\begin{eqnarray}} \def\eeqn{\end{eqnarray} f(t)&=&\|A\hat u^{(i)}+a_it-d\|^2+
\beta \sum_{j\in\II_{\n}}\phi\left(\hat u^{(i)}[j]\right)+\beta\phi(t)\nonumber\\
&=&\|A\hat u^{(i)}-d\|^2+\|a_i\|^2t^2+2t\<a_i,A\hat u^{(i)}-d\> +\beta \sum_{j\in\II_{\n}}\phi\left(\hat u^{(i)}[j]\right)+\beta\phi(t)\nonumber\\
&=&\|a_i\|^2t^2+2t\<a_i,A\hat u^{(i)}-d\>+\beta\phi(t)+C\label{fin0}~, \eeqn where
\[C=\|A\hat u^{(i)}-d\|^2+\beta \sum_{j\in\II_{\n}}\phi\left(\hat u^{(i)}[j]\right)~.\] Note that $C$ does not depend on $t$. The function $f$ has two local minimizers denoted by $\hat t_0$ and $\hat t_1$. The first is \begin{equation}} \def\eeq{\end{equation}\hat t_0=0~~~\Rightarrow~~~f(\hat t_0)=C~.\label{min1}\eeq The other one, $\hat t_1\neq0$, corresponds to $\phi(t)=1$. From~\eqref{fin0}, $\hat t_1$ solves
\[2\|a_i\|^2t+2\<a_i,A\hat u^{(i)}-d\>=0~.\] Recalling that $a_i\neq0$, $\forall\; i\in\II_{\n}$ (see \eqref{ao}), it follows that
\begin{equation}} \def\eeq{\end{equation}\hat t_1=-\frac{\<a_i,A\hat u^{(i)}-d\>}{\|a_i\|^2}~~~\Rightarrow~~~
f(\hat t_1)=-\frac{\<a_i,A\hat u^{(i)}-d\>^2}{\|a_i\|^2}+\beta+C~. \label{min2}\eeq Next we check whether $\hat t_0$ or $\hat t_1$ is a global minimizer of $f$. From~\eqref{min1} and~\eqref{min2} we get
\[ f(\hat t_0)-f(\hat t_1)=\frac{\<a_i,A\hat u^{(i)}-d\>^2}{\|a_i\|^2}-\beta~. \] Furthermore, \begin{eqnarray}} \def\eeqn{\end{eqnarray} f(\hat t_0)<f(\hat t_1)~~&\Rightarrow&~~\hat u[i]=\hat t_0=0~,\nonumber\\
f(\hat t_1)<f(\hat t_0)~~&\Rightarrow&~~\hat u[i]=\hat t_1=-\frac{\<a_i,A\hat u^{(i)}-d\>}{\|a_i\|^2}~,\label{min3}\\ f(\hat t_0)=f(\hat t_1)~~&\Rightarrow&~~\mbox{$\hat t_0$ and $\hat t_1$ are global minimizers of $f$.}\nonumber \eeqn In particular, we have
\begin{eqnarray}} \def\eeqn{\end{eqnarray} f(\hat t_1)\leqslant f(\hat t_0)~~&\Leftrightarrow&~~\<a_i,A\hat u^{(i)}-d\>^2\geqslant\beta\|a_i\|^2\label{Delta1}\\ \Big[\mbox{\rm by~\eqref{min3}}\Big]~~~~~~~~&\Rightarrow&~
|\hat u[i]|=\frac{\left|\<a_i,A\hat u^{(i)}-d\>\right|}{\|a_i\|^2}\nonumber\\ \Big[\mbox{\rm by~\eqref{Delta1}}\Big]~~~~~~~~
&&~~~~~~~~\geqslant\frac{\sqrt{\beta}\|a_i\|}{\|a_i\|^2}=\frac{\sqrt{\beta}}{\|a_i\|}~.\nonumber \eeqn It is clear that the conclusion holds true for any $i\in\II_{\n}$.
\subsection{ Proof of Proposition~\ref{pal}}\label{palp} The asymptotic function $\left(\mathcal{F}_d\right)_\infty(v)$ of $\mathcal{F}_d$ can be calculated according to\footnote{In the nonconvex case, the notion of asymptotic functions and the representation formula were first given by J.P. Dedieu \cite{Dedieu77}.} ~\cite[Theorem 2.5.1]{Auslender03} \[ \left(\mathcal{F}_d\right)_\infty(v)=\liminf_{\begin{array}} \def\earr{\end{array}{c}v'\rightarrow v\\ t\rightarrow\infty\earr}\frac{\mathcal{F}_d(tv')}{t}~.\] Then
\begin{eqnarray*}} \def\eeqnn{\end{eqnarray*} \left(\mathcal{F}_d\right)_\infty(v)&=&\liminf_{\begin{array}} \def\earr{\end{array}{c}v'\rightarrow v\\ t\rightarrow\infty\earr}\frac{\|Av'-d\|^2+\beta\|v'\|_0}{t}\nonumber\\
&=&\liminf_{\begin{array}} \def\earr{\end{array}{c}v'\rightarrow v\\ t\rightarrow\infty\earr}\left(t\|Av'\|^2-2\<d,Av'\>+\frac{\|d\|^2+\beta\|v'\|_0}{t}\right)\nonumber\\~\nonumber\\ &=&\left\{\begin{array}} \def\earr{\end{array}{lll} 0 & \mbox{if} & v\in \mathrm{ker}} \def\ran{\mathrm{range}(A)~,\\+\infty & \mbox{if} & v\not\in \mathrm{ker}} \def\ran{\mathrm{range}(A)~.\earr \right.\label{auv} \eeqnn Hence \begin{equation}} \def\eeq{\end{equation}\mathrm{ker}} \def\ran{\mathrm{range}\left((\mathcal{F}_d)_\infty\right)=\mathrm{ker}} \def\ran{\mathrm{range}(A)~,\label{wd}\eeq where $\mathrm{ker}} \def\ran{\mathrm{range}\left((\mathcal{F}_d)_\infty\right)=\{v\in\RR^{\n}~:~(\mathcal{F}_d)_\infty(v)=0\}$.
Let $\{v_k\}$ satisfy~\eqref{aal} with $v_k\,\|v_k\|^{-1}\rightarrow \bar v\in\mathrm{ker}} \def\ran{\mathrm{range}(A)$. Below we compare the numbers $\|v_k\|_0$ and $\|v_k-\rho \bar v\|_0$ where $\rho>0$. There are two options. \begin{enumerate}} \def\een{\end{enumerate}
\item Consider that $i\in\sigma(\bar v)$, that is, $\disp{\bar v[i]=\lim_{k\rightarrow\infty} v_k[i] \, \|v_k\|^{-1}\neq 0}$. Then $|\,v_k[i]\,|>0$ for all but finitely many $k$, since otherwise $v_k[i] \, \|v_k\|^{-1}$ would converge to $0$. Therefore, there exists $k_i$ such that
\begin{equation}} \def\eeq{\end{equation} \left|\,v_k[i]-\rho\,\bar v[i]\,\right|\geq0\quad\mbox{and}\quad |\,v_k[i]\,|>0 \quad\forall\; k\geqslant k_i~.\label{wb}\eeq
\item If $i\in(\sigma(\bar v))^c$, i.e. $\bar v[i]=0$, then clearly \begin{equation}} \def\eeq{\end{equation} v_k[i]-\rho\, \bar v[i]=v_k[i] ~.\label{wc}\eeq
\een Combining \eqref{wb} and \eqref{wc}, the definition of $\|\cdot\|_0$ using $\phi$ in \eqref{phi} shows that
\begin{equation}} \def\eeq{\end{equation}\|v_k-\rho\,\bar v\|_0\leqslant\|v_k\|_0 \quad\forall\; k\geqslant k_0\stackrel{\rm def}{=}\max_{i\in\sigma(\bar v)}k_i~. \label{we}\eeq
By \eqref{wd}, $A\bar v=0$. This fact, jointly with~\eqref{we}, entails that
\begin{eqnarray}} \def\eeqn{\end{eqnarray}\mathcal{F}_d(v_k-\rho\bar v)&=&\|A(v_k-\rho\bar v)-d\|^2+\beta\|v_k-\rho\bar v\|_0\nonumber\\
&=&\|Av_k-d\|^2+\beta\|v_k-\rho\bar v\|_0\nonumber\\
&\leqslant& \|Av_k-d\|^2+\beta\|v_k\|_0=\mathcal{F}_d(v_k)\quad\forall\; k\geqslant k_0~.\nonumber\label{hb}\eeqn It follows that for any $k\geqslant k_0$ we have \[v_k\in\mathrm{lev}\,(\mathcal{F}_d,\lambda_k)\quad \Rightarrow\quad v_k-\rho\bar v\in\mathrm{lev}\,(\mathcal{F}_d,\lambda_k)~,\] and thus $\mathcal{F}_d$ satisfies Definition~\ref{als}.
\subsection{Proof of Proposition~\ref{ie}}\label{pie} Given $\k\in\mathbb{I}_{\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1}$, set \begin{equation}} \def\eeq{\end{equation}\Um_{\k+1}\stackrel{\rm def}{=}\bigcup_{\omega\subset\II_{\n}}\left\{\bar{u}~:~\bar{u}~~\mbox{solves {\rm{($\,\P_{\vt}\,$)}~}}~~\mbox{\rm and}~~
\|\bar{u}\|_0\geqslant\k+1\right\}~.\label{ws}\eeq \begin{itemize}} \def\eit{\end{itemize}\item Let $\Um_{\k+1}\neq\varnothing$. By Proposition~\ref{cp}, for any $\beta>0$, $\mathcal{F}_d$ has a (local) minimum at each $\bar{u}\in\Um_{\k+1}$. Thus
\begin{equation}} \def\eeq{\end{equation}\bar{u}~~\mbox{is a (local) minimizer of $\mathcal{F}_d$ and}~~\|\bar{u}\|_0\geqslant\k+1 ~~~\Leftrightarrow~~~\bar{u}\in\Um_{\k+1}~.\label{koi}\eeq Then for any $\beta>0$ \begin{equation}} \def\eeq{\end{equation}\mathcal{F}_d(\bar{u})\geqslant\beta(\k+1)\quad\forall\;\bar{u}\in\Um_{\k+1}~.\label{kf}\eeq
Let $\widetilde u$ be defined by\footnote{Such a $\widetilde u$ always exists; see subsection~\ref{ntd}. By Proposition~\ref{cp} and Theorem~\ref{ra}, it is uniquely defined.}: \[\widetilde u~~\mbox{solves~{\rm{($\,\P_{\vt}\,$)}~}~for~some}~~\omega\in\Omega_\k~.\] Then
\begin{equation}} \def\eeq{\end{equation} \|\widetilde u\|_0\leqslant\k~\label{ku}~.\eeq Set $\beta$ and $\beta_\k$ according to
\begin{equation}} \def\eeq{\end{equation}\beta>\beta_\k\stackrel{\rm def}{=}\|A\widetilde u-d\|^2~.\label{kn}\eeq For such a $\beta$ we have
\begin{eqnarray*}} \def\eeqnn{\end{eqnarray*}\mathcal{F}_d(\widetilde u)&=&\|A\widetilde u-d\|^2+\beta\|\widetilde u\|_0\\ \Big[\mbox{\rm by~\eqref{ku}~and~\eqref{kn}~}\Big]&<&\beta+\beta\k=\beta(\k+1)\\ \Big[\mbox{\rm by~\eqref{kf}~}\Big]&\leqslant&\mathcal{F}_d(\bar{u})~~~~\forall\;\bar{u}\in\Um_{\k+1}~. \eeqnn Let $\hat u$ be a global minimizer of $\mathcal{F}_d$. Then \[\mathcal{F}_d(\hat u)\leqslant\mathcal{F}_d(\widetilde u)<\mathcal{F}_d(\bar{u})~~~~\forall\;\bar{u}\in\Um_{\k+1}~.\]
Using \eqref{ws}-\eqref{koi}, we find $$\|\hat u\|_0\leqslant\k~.$$ \item $\Um_{\k+1}=\varnothing$ entails that\footnote{Let $A=(e_1,e_2,e_3,e_4,e_1)\in\mathbb R^{4\times 5}$ and $d=e_1\in\mathbb R^4$. For $\k=\mathsf{M}} \def\n{\mathsf{N}} \def\l{\mathsf{L}} \def\k{\mathsf{K}-1=3$ one can check that $\Um_{\k+1}=\varnothing$.}
\begin{equation}} \def\eeq{\end{equation}\bar{u}~~\mbox{solves~{\rm{($\,\P_{\vt}\,$)}~}~for}~~\omega\subset\II_{\n},~~\,\sharp\,\omega\geqslant\k+1~~~\Rightarrow~~~\|\bar{u}\|_0\leqslant\k~.\label{ha}\eeq Let $\hat u$ be a global minimizer of $\mathcal{F}_d$. By \eqref{ha} we have
\[\|\hat u\|_0\leqslant\k~.\] \eit According to Theorem~\ref{gs}(ii), any global minimizer of $\mathcal{F}_d$ is strict, hence $\disp{\sigma(\hat u)\in\overline{\Omega}_\k}~.$ \section*{Acknowledgments} The author would like to thank the anonymous Reviewer~2 for very helpful remarks and suggestions.
\defIEEE Transactions on Acoustics Speech and Signal Processing{IEEE Transactions on Acoustics Speech and Signal Processing} \defIEEE Transactions on Image Processing{IEEE Transactions on Image Processing} \defIEEE Transactions on Information Theory{IEEE Transactions on Information Theory} \defIEEE Transactions on Pattern Analysis and Machine Intelligence{IEEE Transactions on Pattern Analysis and Machine Intelligence} \defIEEE Transactions on Signal Processing{IEEE Transactions on Signal Processing} \defJournal of the Royal Statistical Society B{Journal of the Royal Statistical Society B} \defCompte-rendus de l'acad\'emie des sciences{Compte-rendus de l'acad\'emie des sciences} \defBiometrika{Biometrika} \defInternational Journal of Computer Vision{International Journal of Computer Vision} \defSIAM Journal on Multiscale Modeling and Simulation{SIAM Journal on Multiscale Modeling and Simulation} \defJournal of Applied Statistics{Journal of Applied Statistics} \defSIAM Review{SIAM Review} \defSIAM Journal on Imaging Sciences{SIAM Journal on Imaging Sciences} \defJournal of Global Optimization{Journal of Global Optimization} \defJournal of Scientific Computing{Journal of Scientific Computing} \defMathematics of Computation{Mathematics of Computation} \defMathematical Programming{Mathematical Programming} \defJournal of Fourier Analysis and Applications{Journal of Fourier Analysis and Applications}
\def\uppercase{\uppercase}
\end{document} |
\begin{document}
\title{Sampling of globally depolarized random quantum circuit} \begin{flushright} YITP-19-90 \end{flushright} \author{Tomoyuki Morimae} \email{[email protected]} \affiliation{Yukawa Institute for Theoretical Physics, Kyoto University, Kitashirakawa Oiwakecho, Sakyoku, Kyoto 606-8502, Japan} \affiliation{JST, PRESTO, 4-1-8 Honcho, Kawaguchi, Saitama, 332-0012, Japan} \author{Yuki Takeuchi} \email{[email protected]} \affiliation{NTT Communication Science Laboratories, NTT Corporation, 3-1 Morinosato Wakamiya, Atsugi, Kanagawa 243-0198, Japan} \author{Seiichiro Tani} \email{[email protected]} \affiliation{NTT Communication Science Laboratories, NTT Corporation, 3-1 Morinosato Wakamiya, Atsugi, Kanagawa 243-0198, Japan}
\date{\today} \begin{abstract} The recent paper [F. Arute et al. Nature {\bf 574}, 505 (2019)] considered exact classical sampling of the output probability distribution of the globally depolarized random quantum circuit. In this paper, we show three results. First, we consider the case when the fidelity $F$ is constant. We show that if the distribution is classically sampled in polynomial time within a constant multiplicative error, then ${\rm BQP}\subseteq{\rm SBP}$, which means that BQP is in the second level of the polynomial-time hierarchy. We next show that for any $F\le1/2$, the distribution is classically trivially sampled by the uniform distribution within the multiplicative error $F2^{n+2}$, where $n$ is the number of qubits. We finally show that for any $F$, the distribution is classically trivially sampled by the uniform distribution within the additive error $2F$. These last two results show that if we consider realistic cases, both $F\sim2^{-m}$ and $m\gg n$, or at least $F\sim2^{-m}$, where $m$ is the number of gates, quantum supremacy does not exist for approximate sampling even with the exponentially-small errors. We also argue that if $F\sim2^{-m}$ and $m\gg n$, the standard approach will not work to show quantum supremacy even for exact sampling. \end{abstract}
\maketitle
\section{Introduction} Several sub-universal quantum computing models are shown to be hard to classically simulate. For example, output probability distributions of the depth-four model~\cite{TD}, the Boson Sampling model~\cite{BS}, the IQP model~\cite{IQP1,IQP2}, the one-clean qubit model~\cite{KL,MFF,M,Kobayashi,KobayashiICALP}, and the random circuit model~\cite{random1,random2} cannot be classically sampled in polynomial time unless some conjectures in classical complexity theory (such as the infiniteness of the polynomial-time hierarchy) are refuted. Impossibilities of exponential-time classical simulations of sub-universal quantum computing models have also been shown recently based on classical fine-grained complexity theory~\cite{Dalzell,DalzellPhD,Huang,Huang2,MorimaeTamaki}.
Let $p_z$ be the probability that an $n$-qubit ideal random quantum circuit outputs the $n$-bit string $z\in\{0,1\}^n$. Ref.~\cite{Google} considered the globally depolarized version where the probability $p_z'$ that the output is $z\in\{0,1\}^n$ is written as \begin{eqnarray*} p_z'=Fp_z+\frac{1-F}{2^n}, \end{eqnarray*} where $0< F<1$ is the fidelity.
In this paper, we show the following three results:
\begin{theorem} \label{theorem:result} Assume that $F$ is constant. Then, if the probability distribution $\{p_z'\}_{z\in\{0,1\}^n}$ is sampled in classical $poly(n)$ time within a constant multiplicative error $\epsilon<1$, then ${\rm BQP}\subseteq{\rm SBP}$. \end{theorem}
\begin{theorem} \label{theorem:result2} For any $F\le\frac{1}{2}$, $\{p_z'\}_{z\in\{0,1\}^n}$ is classically sampled by the uniform distribution within the multiplicative error $F2^{n+2}$. \end{theorem}
\begin{theorem} \label{theorem:result3} For any $F$, $\{p_z'\}_{z\in\{0,1\}^n}$ is classically sampled by the uniform distribution within the additive error $2F$. \end{theorem}
Proofs are given in the later sections. In the rest of this section, we provide several remarks.
First, the class SBP~\cite{SBP} is defined as follows.
\begin{definition} A language $L$ is in SBP if and only if there exist a polynomial $s$ and a classical polynomial-time probabilistic algorithm such that if
$x\in L$ then $p_{acc}\ge 2^{-s(|x|)}$, and if $x\notin L$ then $p_{acc}\le 2^{-s(|x|)-1}$. Here, $p_{acc}$ is the acceptance probability. \end{definition}
Note that the class SBP remains unchanged even when the two thresholds, $2^{-s(|x|)}$ and $2^{-s(|x|)-1}$, are replaced with
$\alpha 2^{-s(|x|)}$ and $\beta2^{-s(|x|)}$, respectively, for any constants $\alpha$ and $\beta$ satisfying $0\le\beta<\alpha\le1$. It is known that SBP is in AM, and therefore ${\rm BQP}\subseteq{\rm SBP}$ means that BQP is in the second level of the polynomial-time hierarchy. The containment of BQP in the polynomial-time hierarchy is not believed to hold. (For example, there is an oracle separation~\cite{Raz}.)
Second, note that quantum supremacy for the globally depolarized circuits was previously studied in Ref.~\cite{nonclean} for the one-clean qubit model.
Third, Theorem~\ref{theorem:result} holds for a broader class of quantum circuits than the globally depolarized random circuit. In particular, we can replace our random gate application with the coherent one. In this paper, however, we concentrate on the globally depolarized random circuit for simplicity. Theorem~\ref{theorem:result2} and Theorem~\ref{theorem:result3} hold for the output probability distribution $\{p_z\}_z$ of any quantum circuit.
Finally, in Ref.~\cite{Google}, it was claimed that if the exact polynomial-time classical sampling of $\{p_z'\}_z$ is possible, then estimating
$|\langle0^n|U|0^n\rangle|^2$ for an $n$-qubit unitary $U$ can be done by an Arthur-Merlin protocol with the $F^{-1}$-time Arthur. However, if we consider the realistic case, $F\sim2^{-m}$ and $m\gg n$, where $m$ is the number of gates, the time-complexity of Arthur is $\sim2^m$.
(On the other hand, the exact computation of $|\langle0^n|U|0^n\rangle|^2$ can be done in time $\sim2^n$.) Moreover, although Ref.~\cite{Google} considered exact sampling of $\{p_z'\}_z$, what a realistic quantum computer can do is approximately sampling $\{p_z'\}_z$. Theorem~\ref{theorem:result2} shows that if we consider the realistic case, $F\sim2^{-m}$ and $m\gg n$, quantum supremacy does not exist for approximate sampling of $\{p_z'\}_z$ even with the exponentially-small multiplicative error $\sim 2^{-(m-n)}$. Theorem~\ref{theorem:result3} shows that if we consider the realistic case, $F\sim2^{-m}$, quantum supremacy does not exist for approximate sampling of $\{p_z'\}_z$ even with the exponentially-small additive error $\sim2^{-m}$.
\section{Discussion}
Our theorems show that if $F\sim2^{-m}$ and $m\gg n$, or at least $F\sim2^{-m}$, quantum supremacy does not exist for approximate sampling of $\{p_z'\}_z$. In this section, we argue that if $F\sim2^{-m}$ and $m\gg n$, the standard approach will not work to show quantum supremacy for exact sampling of $\{p_z'\}_z$.
In the standard proof of quantum supremacy~\cite{TD,BS,IQP1,MFF,Kobayashi,KobayashiICALP}, we first consider the following promise problem: given the classical description of an $n$-qubit $m$-size quantum circuit $U$, and parameters $a$ and $b$, decide $p_{acc}\ge a$ or $p_{acc}\le b$, where $p_{acc}$ is the acceptance probability. In the standard proof of quantum supremacy, we take the promise problem as the complete problem of a ``strong'' quantum class, such as postBQP, SBQP, or NQP.
We next assume that $p_{acc}'\equiv Fp_{acc}+\frac{1-F}{2^n}$ is exactly classically sampled. It means that there exists a polynomial-time classical probabilistic algorithm that accepts with probability $q_{acc}$ such that $q_{acc}=p_{acc}'$.
If the answer of the promise problem is yes, then $q_{acc}\ge Fa+\frac{1-F}{2^n}\equiv\alpha$. If the answer of the promise problem is no, then $q_{acc}\le Fb+\frac{1-F}{2^n}\equiv\beta$. In the standard proof of quantum supremacy, we then conclude that the promise problem is in a ``weaker'' class (such as postBPP, SBP, or NP) that leads to an unlikely consequence in complexity theory, such as ${\rm postBQP}\subseteq{\rm postBPP}$, ${\rm SBQP}\subseteq{\rm SBP}$, or ${\rm NQP}\subseteq{\rm NP}$. However, deciding $q_{acc}\ge \alpha$ or $q_{acc}\le \beta$ seems to be ``more difficult'' than the original promise problem: the original promise problem can be solved in time $\sim2^n$, while deciding $q_{acc}\ge\alpha$ or $q_{acc}\le\beta$ will not be solved in that time because $\alpha-\beta=F(a-b)=O(2^{-m})$, and $m\gg n$. Therefore we will not have any unlikely consequence in this approach.
Although the above argument does not exclude the existence of a completely new supremacy proof for the exact sampling of $\{p_z'\}_z$ that works even when $F\sim2^{-m}$ and $m\gg n$, we can also argue that even if the realistic quantum computer exactly samples $\{p_z'\}_z$, it is ``effectively" classically samplable by the uniform distribution when $F\sim2^{-m}$ unless we can access exponentially many samples.
To see this, let us consider the task of distinguishing $\rho_0\equiv\frac{I^{\otimes n}}{2^n}$ and $\rho_1\equiv F\rho+(1-F)\frac{I^{\otimes n}}{2^n}$, where $\rho$ is any $n$-qubit state. Assume that we can measure $k$ copies of $\rho_0$ or $\rho_1$. Let $\Pi_0$ $(\Pi_1)$ be the POVM element that we conclude that the actual state is $\rho_0^{\otimes k}$ ($\rho_1^{\otimes k}$), where $\Pi_0+\Pi_1=I^{\otimes nk}$. The probability $p_{correct}$ that we make the correct decision is \begin{eqnarray*} p_{correct}&\equiv& \frac{1}{2}{\rm Tr}(\Pi_0\rho_0^{\otimes k}) +\frac{1}{2}{\rm Tr}(\Pi_1\rho_1^{\otimes k})\\ &=& \frac{1}{2}+\frac{1}{2}\Big[ {\rm Tr}(\Pi_0\rho_0^{\otimes k}) -{\rm Tr}(\Pi_0\rho_1^{\otimes k})\Big]\\ &\le& \frac{1}{2}+
\frac{1}{4}\big\|\rho_0^{\otimes k}
-\rho_1^{\otimes k}\big\|_1\\ &\le& \frac{1}{2} +\frac{k}{4}
\big\|\rho_0-\rho_1\big\|_1\\ &=& \frac{1}{2}+
\frac{k}{4}\Big\|\frac{I^{\otimes n}}{2^n}
-\Big[F\rho+(1-F)\frac{I^{\otimes n}}{2^n}\Big]\Big\|_1\\ &=& \frac{1}{2}+
\frac{kF}{4}\Big\|
\rho-\frac{I^{\otimes n}}{2^n}\Big\|_1\\ &\le&\frac{1}{2}+\frac{kF}{2}. \end{eqnarray*} If $F\sim2^{-m}$ and $k=o(2^m)$, $p_{correct}\to \frac{1}{2}$.
\section{Proof of Theorem~\ref{theorem:result}}
Assume that a language $L$ is in BQP. Then for any polynomial $r$, there exists a polynomial-time uniformly generated family $\{V_x\}_x$ of quantum circuits such that if $x\in L$ then $p_{acc}\ge 1-2^{-r(|x|)}$, and if $x\notin L$ then $p_{acc}\le 2^{-r(|x|)}$. Here \begin{eqnarray*}
p_{acc}\equiv\langle0^w|V_x|0^w\rangle \end{eqnarray*}
with $w=poly(|x|)$ is the acceptance probability.
Let $m$ be the number of elementary gates in $V_x$, i.e., $V_x=w_mw_{m-1}...w_2w_1$, where each $w_j$ is an elementary gate (such as $H$, $CNOT$, and $T$, etc.). Let us consider the following random quantum circuit on $n\equiv w+m$ qubits: \begin{itemize} \item[1.]
The initial state is $|0^w\rangle\otimes|0^m\rangle$, where we call the first $w$-qubit register the main register, and the second $m$-qubit register the ancilla register. \item[2.] For each $j=1,2,...,m$, apply $w_j\otimes I$ or $\eta_j\otimes X$ with probability 1/2, where $\eta_j$ is any elementary gate, $w_j$ and $\eta_j$ act on the main register, and $I$ and $X$ act on the $j$th qubit of the ancilla register. Thus obtained the final state is \begin{eqnarray} \frac{1}{2^m}\sum_{\alpha\in\{0,1\}^m}
\xi_m^{\alpha_m}...\xi_1^{\alpha_1}|0^w\rangle\langle0^w| (\xi_1^{\alpha_1})^\dagger...(\xi_m^{\alpha_m})^\dagger
\otimes|\alpha\rangle\langle\alpha|, \label{final} \end{eqnarray} where $\alpha\equiv(\alpha_1,...,\alpha_m)\in\{0,1\}^m$ is an $m$-bit string, $\xi_j^0=w_j$, and $\xi_j^1=\eta_j$. \item[3.] Measure all $n$ qubits in the computational basis. If all results are 0, accept. Otherwise, reject. \end{itemize}
If we consider the globally depolarized version, the state of Eq.~(\ref{final}) is replaced with \begin{eqnarray*} \frac{F}{2^m}\sum_{\alpha\in\{0,1\}^m}
\xi_m^{\alpha_m}...\xi_1^{\alpha_1}|0^w\rangle\langle0^w| (\xi_1^{\alpha_1})^\dagger...(\xi_m^{\alpha_m})^\dagger
\otimes|\alpha\rangle\langle\alpha| +(1-F)\frac{I^{\otimes n}}{2^n}. \end{eqnarray*} The acceptance probability $p_{acc}'$ is \begin{eqnarray*} p_{acc}'=\frac{Fp_{acc}^2}{2^m}+\frac{1-F}{2^n}. \end{eqnarray*}
Assume that there exists a classical $poly(n)$-time probabilistic algorithm that accepts with probability $q_{acc}$ such that
$|p_{acc}'-q_{acc}|\le \epsilon p_{acc}'$, where $\epsilon<1$ is a constant. Then, if $x\in L$, \begin{eqnarray*} q_{acc}&\ge&(1-\epsilon)p_{acc}'\\ &=&(1-\epsilon)\Big(\frac{Fp_{acc}^2}{2^m}+\frac{1-F}{2^n}\Big)\\ &\ge&(1-\epsilon)F2^{-m}(1-2^{-r})^2, \end{eqnarray*} and if $x\notin L$, \begin{eqnarray*} q_{acc}&\le&(1+\epsilon)p_{acc}'\\ &=&(1+\epsilon)\Big(\frac{Fp_{acc}^2}{2^m}+\frac{1-F}{2^n}\Big)\\ &\le&(1+\epsilon)\Big(F2^{-2r-m}+\frac{1-F}{2^n}\Big)\\ &=&2^{-m}(1+\epsilon)F\Big(2^{-2r}+\frac{1-F}{F2^w}\Big). \end{eqnarray*} If $r$ and $w$ are sufficiently large, $L$ is in SBP. $\blacksquare$
Note that although here we have considered constant $F$, the same result also holds for other ``not so small" $F$ such as $F=\frac{1}{poly(m)}$.
\section{Proof of Theorem~\ref{theorem:result2}} Let us take $\epsilon=F2^{n+2}$. For any $z\in\{0,1\}^n$, \begin{eqnarray*}
\Big|p_z'-\frac{1}{2^n}\Big|
=\Big|\Big(Fp_z+\frac{1-F}{2^n}\Big)
-\frac{1}{2^n}\Big|\le F\Big(1+\frac{1}{2^n}\Big)< \epsilon p_z', \end{eqnarray*} where in the last inequality, we have used \begin{eqnarray*} \epsilon p_z'-F\Big(1+\frac{1}{2^n}\Big) &=& \epsilon\Big(Fp_z+\frac{1-F}{2^n}\Big)-F\Big(1+\frac{1}{2^n}\Big)\\ &\ge&\frac{\epsilon(1-F)}{2^n}-F\Big(1+\frac{1}{2^n}\Big)\\ &=&\frac{F2^{n+2}(1-F)}{2^n}-F\Big(1+\frac{1}{2^n}\Big)\\ &=&4F(1-F)-F\Big(1+\frac{1}{2^n}\Big)\\ &>&0. \end{eqnarray*} $\blacksquare$
\section{Proof of Theorem~\ref{theorem:result3}} \begin{eqnarray*}
\sum_{z\in\{0,1\}^n}\Big|p_z'-\frac{1}{2^n}\Big|
= F\sum_{z\in\{0,1\}^n}\Big|p_z-\frac{1}{2^n}\Big| \le 2F. \end{eqnarray*} $\blacksquare$
\acknowledgements
TM is supported by MEXT Quantum Leap Flagship Program (MEXT Q-LEAP) Grant Number JPMXS0118067394, JST PRESTO No.JPMJPR176A, and the Grant-in-Aid for Young Scientists (B) No.JP17K12637 of JSPS. YT is supported by MEXT Quantum Leap Flagship Program (MEXT Q-LEAP) Grant Number JPMXS0118067394.
\end{document} |
\begin{document}
\title{Hopf bifurcation and heteroclinic cycles in a class of $\mathbb{D}_2$-equivariant systems}
\begin{abstract} In this paper we analyze a generic dynamical system with $\mathbb{D}_2$ symmetry constructed via a Cayley graph. We study the Hopf bifurcation and find conditions for obtaining a unique branch of periodic solutions. Our main result comes from analyzing the system under weak coupling, where we identify the conditions for the existence of a heteroclinic cycle between four equilibria in the two-dimensional fixed point subspace of some of the isotropy subgroups of $\mathbb{D}_2\times\mathbb{S}^1.$ We also analyze the stability of the heteroclinic cycle. \end{abstract}
\begin{keywords} equivariant dynamical system, Cayley graph, Hopf bifurcation, heteroclinic cycle. \end{keywords}
\begin{AMS} 37C80, 37G40, 34C15, 34D06, 34C15 \end{AMS}
\pagestyle{myheadings} \thispagestyle{plain} \markboth{Adrian C. Murza}{Hopf bifurcation and heteroclinic cycles in a class of $\mathbb{D}_2-$equivariant systems} \section{Introduction} The global dynamics of networks of $n$ coupled oscillators with different types of coupling has been studied in \cite{Ashwin_Swift}. In their formalism it is shown that the symmetry group of the network can be considered a subgroup of $\mathbb{S}_n$, as long as the oscillators taken individually have no internal symmetries. These ideas have been investigated in the above cited paper, for general $\mathbb{D}_n$ and $\mathbb{Z}_n$ cases. However, it is a routine considering $n\geqslant3$ so the case $\mathbb{D}_2$ is sometimes not explicitly taken into account. Another reason for carrying out our study is the fact that among the dihedral groups, $\mathbb{D}_2$ is the only abelian group, which makes its action and therefore its analysis slightly different.
In this paper we are concerned with two properties of networks with $\mathbb{D}_2$ symmetry: Hopf bifurcation and low coupling case leading to heteroclinic cycles. Firstly we use the methodology developed by Ashwin and Stork \cite{Stork} to construct a network of differential systems with $\mathbb{D}_2$ symmetry. Our case is a particular situation of a network of $n$ coupled oscillators with symmetry to be a subgroup of $\mathbb{S}_n,$ that analyzes different types of coupling between the oscillators, as shown in \cite{Stork}. While this approach has been successfully used for the subgroups $\mathbb{Z}_n$ and $\mathbb{D}_n,$ our approach is interesting not only because of the particularities already mentioned of $\mathbb{D}_2,$ but also because it offers the possibility of analyzing the weak coupling limit, where dynamics of the network is governed only by the phases of the oscillators.\\
A large variety of subgroups of $\mathbb{S}_n$ can be easily generated by the method described by Ashwin and Stork \cite{Stork} based on graph theory. The automorphism group of the colored graph can be defined to be its symmetry group. Therefore, it is possible, via Cayley graphs, to design oscillatory networks with the prescribed symmetry of a subgroup of $\mathbb{S}_n.$ While the Hopf bifurcation is in fact a simple adaptation of the theory developed by Golubitsky, Stewart and Schaeffer in \cite{GS85},\cite{GS86} and \cite{GS88}, the low coupling case is much more interesting since it allows the existence of heteroclinic cycles in systems with $\mathbb{D}_2$ symmetry. In this case it is possible to reduce the asymptotic dynamics to a flow on an four-dimensional torus $\mathbb{T}^4;$ by assuming a weak coupling we average the whole network and introduce an extra $\mathbb{S}^1$ symmetry.
We determine the two-dimensional invariant subspaces on this torus and show that a heteroclinic cycle consisting of four one-dimensional routes among the four zero-dimensional equilibria can appear. We then apply the general stability theory for heteroclinic cycles developed by Krupa and Melbourne in \cite{Krupa}, to analyze the stability of the heteroclinic cycles.
\section{The Cayley graph of the $\mathbb{D}_2$ group}\label{section Cayley} Our aim is to construct an oscillatory system with the $\mathbb{D}_2$ symmetry. To achieve this goal we need first to represent the group by a Cayley diagram. A Cayley diagram is a graph (that is, a set of nodes and the arrows between them) to represent a group. Vertex or nodes of the graph are the group elements and the arrows show how the generators act on the elements of the group. At any vertex there are arrows pointing towards it, others pointing away. Proceeding as in \cite{ADSW} or \cite{Stork}, let $J\subset\mathbb{D}_2$ be the generating set of $\mathbb{D}_2.$ This implies that \begin{itemize} \item[(a)] $J$ generates $\mathbb{D}_2,$ \item[(b)] $J$ is finite and \item[(c)] $J=J^{-1}.$ \end{itemize} Since $\mathbb{D}_2$ is finite we can forget assumptions $(b)$ and $(c).$ Then the Cayley graph of $\mathbb{D}_2$, is a directed colored graph whose arrows indicate the action the group elements have on each other and the vertices are the group elements. As shown in \cite{ADSW} the generating set $J$ is a set of "colors" of the directed edges and the group elements $\sigma_i$ and $\sigma_j$ are connected through an edge from $\sigma_i$ to $\sigma_j$ of color $c\in J$ if and only if $\sigma_i=c\sigma_j.$
\begin{figure}
\caption{A Cayley graph of the $\mathbb{D}_2$ group. Solid arrows represent left-multiplication with $\kappa,$ dot-and-dashed arrows left multiplication with $\zeta,$ the two generators of this group.}
\label{second_figure}
\end{figure}
The Cayley graphs for $\mathbb{D}_2$ is shown in Figure \eqref{second_figure}. Following the approach by Stork et al. \cite{Stork}, we may identify the vertices as cells with a certain dynamics and the edges as couplings between the cells. In this way we can construct an ODE system which has the symmetry of $\mathbb{D}_2$. The action of the group $\mathbb{D}_2$ on the cells can be written as \begin{equation}\label{elements} \begin{array}{l} \kappa=(1~2)(3~4)\in\mathbb{S}_4,\\ \zeta=(1~3)(2~4)\in\mathbb{S}_4, \end{array} \end{equation} where the two generators and their commuting product $\kappa\zeta=\zeta\kappa$ act on $(x_1,x_2,x_3,x_4)\in\mathbb{R}^4$ as \begin{equation}\label{commutators} \begin{array}{l} \kappa:(x_1,x_2,x_3,x_4)\rightarrow(x_2,x_1,x_4,x_3)\\ \zeta:(x_1,x_2,x_3,x_4)\rightarrow(x_3,x_4,x_1,x_2)\\ \kappa\zeta:(x_1,x_2,x_3,x_4)\rightarrow(x_4,x_3,x_2,x_1) \end{array} \end{equation}
If we assign coupling between cells related by the permutations in \eqref{elements}, we can build the following pairwise system in with the $\mathbb{D}_2$ symmetry.
\begin{equation}\label{array 4 eq} \begin{array}{l} \dot{x}_1=f(x_1)+g(x_2,x_1)+h(x_3,x_1)\\ \dot{x}_2=f(x_2)+g(x_1,x_2)+h(x_4,x_2)\\ \dot{x}_3=f(x_3)+g(x_4,x_3)+h(x_1,x_3)\\ \dot{x}_4=f(x_4)+g(x_3,x_4)+h(x_2,x_4)\\ \end{array} \end{equation} where $f:\mathbb{R}\rightarrow\mathbb{R}$ and $g,~h:\mathbb{R}^2\rightarrow\mathbb{R}$. As shown by Ashwin and Stork \cite{Stork} we can think of $f,~g,~h$ as being generic functions that assure that the isotropy of this vector field under the action of $\mathbb{O}_4$ is generically $\mathbb{D}_2$.
\section{Hopf bifurcation}\label{section Hopf bifurcation}
The group $\mathbb{D}_2$ has order $4$, the generators being $(\kappa,~\zeta).$ Since all irreducible representations of the $\mathbb{D}_2$ group are one dimensional, we restrict our study to the actions of the generators on $z\in\mathbb{C}\equiv\mathbb{R}^2.$ The orthogonal representation of $\mathbb{D}_2$ $$R_{\mathbb{D}_2}:\mathbb{D}_2\times W\rightarrow W$$ on the complex vector space $W$ is irreducible if and only if the only $\mathbb{D}_2-$invariant subspaces of $W$ are the trivial ones. In fact it can be shown that $$\mathbb{D}_2\times\mathbb{S}^1/\mathrm{ker}R_{\mathbb{D}_2\times\mathbb{S}^1}\equiv\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{S}^1/\mathrm{ker}R_{\mathbb{Z}_2\times\mathbb{Z}_2\times\mathbb{S}^1}.$$ The group element $(\kappa,~\zeta)$ acts on the point $z\in\mathbb{C}$ by \begin{equation}\label{action on C} \begin{array}{l} \kappa\cdot z=\bar{z}\\ \zeta\cdot z=\pm z. \end{array} \end{equation} The orbit of a point $(x,y)\in\mathbb{R}^2$ under the action of $\mathbb{D}_2$ is
$$\{(\kappa,\zeta)\cdot z|(\kappa,\zeta)\in\mathbb{D}_2\}.$$ Therefore the orbits are \begin{itemize} \item[(a)] The origin, $(0,0),$ \item[(b)] Points on the $x$-axis, $\pm x,0$ with $(x\neq0),$ \item[(c)] Points on the $y$-axis, $0,\pm y$ with $(y\neq0),$ \item[(d)] Points off the axes $\pm x,\pm y$ with $(x\neq0,~y\neq0).$ \end{itemize}
The isotropy subgroup of a point $(x,y)\in\mathbb{R}^2$ under the action of $\mathbb{D}_2$ is
$$\{(\kappa,~\zeta)\in\mathbb{D}_2|(\kappa,~\zeta)\cdot(x,y)=(x,y)\}.$$ $\mathbb{D}_2$ has four isotropy subgroups: \begin{itemize} \item[(a)] $\mathbb{D}_2$ corresponding to the origin, \item[(b)] $\mathbb{Z}_2=\{(1,\zeta)\}$ corresponding to $(x,0)$ with $x\neq0,$ \item[(c)] $\mathbb{Z}_2=\{(\kappa,1)\}$ corresponding to $(0,y)$ with $y\neq0,$ \item[(d)] $\mathbbm{1}$ corresponding to $(x,y)$ with $x\neq0,~y\neq0.$ \end{itemize}
In this section we basically recall the theory of Golubitsky and Stewart \cite{GS85} and \cite{GS88} on one parameter Hopf bifurcations with symmetry.
\subsection{General considerations on Hopf bifurcation with $\mathbb{D}_2\times\mathbb{S}^1-$symmetry}
The group $\mathbb{D}_2$ only has four $1-$dimensional real irreducible representations and so they are absolutely irreducible. One of them is the trivial representation and the other three are non-isomorphic, each with a nontrivial $\mathbb{Z}_2$ action. Therefore, the only possible way to have a Hopf bifurcation is for the linearization at the equilibrium to be from a $\Gamma-$simple subspace of the form $V\oplus V$ where $V$ is an absolutely irreducible representation which has either a trivial or a $\mathbb{Z}_2$ action. Generically, as a parameter is varied, the critical eigenspace with purely imaginary eigenvalues is two-dimensional of the form $V\oplus V$ described above. The periodic solution bifurcating is unique and has the same symmetry as the $\mathbb{Z}_2$ action on $V$, or has no symmetry.
In fact, the $\mathbb{D}_2$ symmetric coupled system does not support any periodic solution. One can easily show that the isotypic decomposition at a trivial steady-state is made up of the trivial representation $(1,1,1,1)$ and the complement decomposes into the remaining three non-isomorphic irreducible representations. Therefore, the $V\oplus V$ subspace cannot exist in the tangent space at the trivial equilibrium. Each cell in the network needs to be two-dimensional to hope to have a Hopf bifurcation with $\mathbb{D}_2$ symmetry in such a $4-$cell system.
\begin{priteo} For the $\mathbb{D}_2-$equivariant Hopf bifurcation in $\mathbb{C}$, there is a unique branch of periodic solutions consisting of rotating waves with $\mathbb{Z}_2$ spatial symmetry. \end{priteo} \begin{proof} First, unicity of a branch of periodic solutions is based on the fact that $\mathbb{D}_2$ is abelian, and we recall the Abelian Hopf Theorem in \cite{AbelianHopf}. Therefore, the periodic solution predicted by the Equivariant Hopf Theorem \cite{GS88} with the symmetry of the unique isotropy subgroup with a two-dimensional fixed point subspace is the only one. \end{proof}\\
\section{Normal form and generic linearized stability} We begin with recalling results on the normal form for the $\mathbb{D}_2-$equivariant bifurcation from \cite{GS85} on the $\mathbb{Z}_2\times\mathbb{Z}_2$ group, which is isomorphic to $\mathbb{D}_2.$ This means that we can adapt the theory of $\mathbb{Z}_2\times\mathbb{Z}_2$ to our case, withe merely a re-interpretation of the branches. We say that the bifurcation problem $g$ commutes with $\mathbb{D}_2$ if \begin{equation}\label{commut} g((\kappa,\zeta)\cdot(x,y),\lambda)=(\kappa,\zeta)\cdot g(x,y,\lambda). \end{equation} \begin{lema}\label{lema normal form} Let us consider the $g:\mathbb{C}\times\mathbb{R}\rightarrow\mathbb{R}$ bifurcation problem with $z=x+iy$ commuting with the action of $\mathbb{D}_2$. Then there exist smooth functions $p(u,v,\lambda),~q(u,v,\lambda)$ such that\\ \begin{equation}\label{eq lema normal form} \begin{array}{l} g(x,y,\lambda)=(p(x^2,y^2,\lambda)x,~q(x^2,y^2,\lambda)y),~~\mathrm{where}\\ \\ \hspace{2cm}p(0,0,0)=0,~~~q(0,0,0)=0. \end{array} \end{equation} \end{lema}
\begin{proof} The proof is almost identical to the one for $\mathbb{Z}_2\times\mathbb{Z}_2$ in \cite{GS85} but we prefer to give it adapted to the action of the group generators of $\mathbb{D}_2.$ We write $g$ as \begin{equation}\label{eq lema g} g(x,y,\lambda)=(a(x,y,\lambda),~b(x,y,\lambda)). \end{equation} Commutativity with equation \eqref{commut} implies \begin{equation}\label{eq lema normal form2} \begin{array}{l} a(\kappa x,\zeta y,\lambda)=\kappa a(x,y,\lambda)\\ \\ b(\kappa x,\zeta y,\lambda)=\zeta b(x,y,\lambda) \end{array} \end{equation} Now $\kappa$ transforms $z$ into $\bar{z}$, i.e. $(x,y)\rightarrow(x,-y)$ and if the action of $\zeta$ is $+1$ then we get that $a$ is odd in $x$ while $b$ is even in $x.$ When $\kappa$ acts as identity and $\zeta$ acts as $-1$ then $a$ is even in $y$ and $b$ is odd in $y.$ Now the rest of the proof is exactly the same as in \cite{GS85}. \end{proof}\\ \\ In the following we will follow again the route and results in \cite{GS85} to obtain the Jacobian matrix of equation \eqref{eq lema normal form} \begin{equation}\label{equation dg} dg= \begin{array}{l} \begin{bmatrix} p+2up_u&2p_vxy\\ 2q_uxy&q+2vq_v \end{bmatrix} \end{array}. \end{equation} What is more, we can reformulate or adapt to our case Lemma $3.1$ (page $427$ in \cite{GS85}) and add the proof again corresponding to our case, which in the cited reference has been left as an exercise for the reader. \begin{lema}\label{lema trivial} Let $(x,y,\lambda)$ be a solution to $g=0.$ \begin{itemize} \item[(a)] If $f(x,y,\lambda)$ is a trivial or a pure mode solution, then $dg$ in \eqref{equation dg} is diagonal and its eigenvalues which are real have the signs listed as follows:
\begin{itemize}
\item [(i)] Trivial solution: $\mathrm{sgn}~p(0,0,\lambda),~\mathrm{sgn}~q(0,0,\lambda);$
\item [(ii)] $x-$mode solution: $\mathrm{sgn}~p_u(x,0,\lambda),~\mathrm{sgn}~q(x,0,\lambda);$
\item [(iii)] $y-$mode solution: $\mathrm{sgn}~p(0,y,\lambda),~\mathrm{sgn}~q_v(0,y,\lambda);$
\end{itemize} \item[(b)] If $f(x,y,\lambda)$ is a mixed mode solution, then \begin{itemize} \item[(a)] $\mathrm{sgn~det}(dg)=\mathrm{sgn}(p_uq_v-p_vq_u)$ at $(x,y,\lambda);$ \item[(b)] $\mathrm{sgn~tr}(dg)=\mathrm{sgn}(up_u+vq_u)$ at $(x,y,\lambda).$ \end{itemize} \end{itemize} \end{lema} \begin{proof} By using the terminology in \cite{GS85}, we have \begin{itemize} \item[(a)] trivial solution when $x=y=0,$ \item[(b)] $x-$mode solution when $p(u,0,\lambda)=y=0,~x\neq0,$ \item[(c)] $y-$mode solution when $q(0,v,\lambda)=x=0,~y\neq0,$ \item[(d)] mixed-mode solution when $p(u,v,\lambda)=q(u,v,\lambda)=0,~x\neq0,~y\neq0.$ \end{itemize} Accordingly, if $x=y=0$ equation \eqref{equation dg} reduces to \begin{equation}\label{equation dg1} dg= \begin{array}{l} \begin{bmatrix} p&0\\ 0&q \end{bmatrix} \end{array} \end{equation} in the first case. Moreover, since $y=0$ for the $x-$mode solution, then $v=0,~x\neq0$ and therefore the Jacobian matrix becomes \begin{equation}\label{equation dg2} dg= \begin{array}{l} \begin{bmatrix} 2up_u&0\\ 0&q \end{bmatrix} \end{array}. \end{equation} Similarly, since $x=0$ for the $y-$mode solution, then $u=0,~y\neq0$ and therefore the Jacobian matrix becomes \begin{equation}\label{equation dg4} dg= \begin{array}{l} \begin{bmatrix} p&0\\ 0&2vq_v \end{bmatrix} \end{array}. \end{equation}
Finally, when we deal with the mixed-mode solutions then the Jacobian matrix becomes \begin{equation}\label{equation dg5} dg= \begin{array}{l} \begin{bmatrix} 2up_u&2p_vxy\\ 2q_uxy&2vq_v \end{bmatrix} \end{array}, \end{equation}
and the eigenvalues are \begin{equation}\label{eigenvalues normal form} \begin{array}{l} \lambda_{1,2}=\displaystyle{\frac{2up_u+2vq_v\pm2\sqrt{u^2p_u^2+6uvp_uq_v+v^2q_v^2}}{2}}=\\ \\ \hspace{3cm}up_u+vq_v\pm\sqrt{\left(up_u+vq_v\right)^2+4uvp_uq_v}. \end{array} \end{equation} Now since $u=x^2$ and $v=y^2$ we explicitly derive the conditions for stability/unstability of the linearized system, in Table \eqref{table sign jacobian}. \begin{table} \centering \begin{center} \caption{Mixed-mode solutions of the $\mathbb{D}_2-$equivariant normal of equation \eqref{eq lema normal form}.}\label{table sign jacobian}
\end{center} \begin{tabular}{cccc} \toprule
$(p_u,q_v)$ & $up_u+vq_v$& $\left(up_u+vq_v\right)^2+4uvp_uq_v$&Stability \\ \midrule \centering $(+,+)$ &+&+&unstable\\ $(-,-)$ &-&+&stable\\ $(+,-)$& +&+&unstable\\ $(+,-)$& +&-&unstable\\ $(+,-)$& -&+&stable\\ $(+,-)$& -&-&stable\\ $(-,+)$& +&+&unstable\\ $(-,+)$& +&-&unstable\\ $(-,+)$& -&+&stable\\ $(-,+)$& -&-&stable\\ \bottomrule \end{tabular} \end{table} \end{proof}
\section{Weak Coupling}\label{section Weak Coupling}
We can think on ideal physical systems as interactions between identical or nearly identical subsystems. Moreover, as in \cite{Ashwin_Swift}, the whole system can be described as being a perturbation of an uncoupled system. This can be thought of as continuous path between the system and a system that is a product of several dynamical systems. In the following we will assume that our system of oscillators has a weak coupling limit. In fact, as it has been shown in Ashwin and Swift \cite{Ashwin_Swift}, even strongly coupled oscillator systems must have a weakly coupled limit. Moreover, we will assume that our system is formed by dissipative oscillators, so that the periodic orbit is attracting and unique in some neighborhood. In the weak coupling case, we focus on the dynamics of the relative phases of the oscillators. This situation can be better understood when in the no coupling case there is an attracting $N-$torus with one angle for each oscillator. An apparent problem can arise when considering phase differences; however we can choose coordinates so that the dynamics is a linear flow in the direction and there is no change in the phase differences. The theory developed in \cite{Ashwin_Swift} shows that the torus is normally hyperbolic, so with small coupling, the torus persists and there is a slow evolution of the phase differences. Moreover, it has been pointed out that the weak coupling limit offers different information about the dynamics rather than analyzing only the small amplitude periodic solutions near the Hopf bifurcation point.
Our system \eqref{array 4 eq} can be rewritten under weak coupling case as an ODE of the form: \begin{equation}\label{generic weak coupling equation} \dot{x}_i=f(x_i)+\epsilon g_i(x_1,\ldots,x_4) \end{equation} for $i=1,\ldots,4,~x_i\in \mathcal{X}$ and commuting with the permutation action of $\mathbb{D}_2$ on $\mathcal{X}^4,$ both $f$ and $g_i$ being of the class $\mathcal{C}^{\infty}.$ The constant $\epsilon$ represents the coupling strength and it is assumed to have low values. As in \cite{Ashwin_Swift}, \cite{ADSW} or \cite{Stork} we assume $\dot{x}$ has an hyperbolic stable limit cycle.\\
As shown in \cite{Ashwin_Swift} in the case of weak coupling, there is a natural reason why we should not just look at irreducible representations of $\mathbb{D}_2$. In our case there are $4$ stable hyperbolic limit cycles in the limit of $\epsilon=0,$ which means that the asymptotic dynamics of the system factors into the asymptotic dynamics of four limit cycles. This way as it we show later, we can for example embed the flow of a $2-$dimensional torus on a four-dimensional torus $\mathbb{T}^4.$ Moreover we assume hyperbolicity of the individual limit cycles for small enough values of the coupling parameter, and this justifies expressing the dynamics of the system as an ODE in terms of four phases, i.e. an ODE on $\mathbb{T}^4$ which is $\mathbb{D}_2-$equivariant. \begin{table} \centering \begin{center} \caption{Isotropy subgroups and fixed point subspaces for the $\mathbb{D}_2\times\mathbb{S}^1$ action on $\mathbb{T}^4$. The generators are $\kappa=\{(0~1)(2~3)\}$ and $\zeta=\{(0~2)(1~3)\}.$ The isotropy subgroups act by reflection about the zero, one and two-dimensional manifolds on $\mathbb{T}^4.$ For example, by notation $\mathbb{Z}^{\phi}_2(\kappa_{0,0})$ we mean the group $\mathbb{Z}_2$ acting through reflection about the circle $x_0=x_1=0,x_2=x_3=\phi,$ while $\mathbb{Z}^{\phi_i}_2(\kappa_{0,0})$ whith $i=\{1,2\}$ acts by reflection about the disk $x_0=x_1=0,x_2=\phi_1,x_3=\phi_2$.$~(\phi,~\phi_1,~\phi_2\in\left[0,\pi\right])$.}\label{table grande}
\end{center} \begin{tabular}{ccccc} \toprule
$\Sigma$ & $\mathrm{Fix}(\Sigma)$ & Generators & $\mathrm{dim~Fix}(\Sigma)$\\ \midrule \centering $\mathbb{D}_2(0)$ & $(0,0,0,0)$ & $\{\kappa,~\zeta\}_{(0)}$ &0\\ $\mathbb{D}_2(\kappa,\mathrm{Id})$ & $(0,0,\pi,\pi)$ & $\{\kappa,~\mathrm{Id}\}_{(0,\pi)}$ &0\\ $\mathbb{D}_2(\kappa\zeta,\kappa)$ & $(0,\pi,0,\pi)$ & $\{\kappa\zeta,~\kappa\}_{(0,\pi)}$ &0\\ $\mathbb{D}_2(\pi)$& $(\pi,0,0,\pi)$ & $\{\kappa,~\zeta\}_{(\pi)}$ &0\\ $\mathbb{Z}^{\phi}_2(\kappa_{(0,0)})$ & $(0,0,\phi,\phi)$ & $\{(0~1)\}_{(0,0)}$ &1\\ $\mathbb{Z}^{\phi}_2(\kappa_{(\pi,\pi)})$ & $(\pi,\pi,\phi,\phi)$ & $\{(0~1)\}_{(\pi,\pi)}$ &1\\ $\mathbb{Z}^{\phi}_2(\zeta_{(\pi,\pi)})$ & $(\phi,\phi,\pi,\pi)$ & $\{(2~3)\}_{(\pi,\pi)}$ &1\\ $\mathbb{Z}^{\phi}_2(\zeta_{(0,0)})$ & $(\phi,\phi,0,0)$ & $\{(2~3)\}_{(0,0)}$ &1\\ $\mathbb{Z}^{\phi_i}_2(\kappa_{(0,0)})$ & $(0,0,\phi_1,\phi_2)$ & $\{(0~1)\}_{(0,0)}$ &2\\ $\mathbb{Z}^{\phi_i}_2(\kappa_{(\pi,\pi)})$ & $(\pi,\pi,\phi_1,\phi_2)$ & $\{(0~1)\}_{(\pi,\pi)}$ &2\\ $\mathbb{Z}^{\phi_i}_2(\zeta_{(\pi,\pi)})$ & $(\phi_1,\phi_2,\pi,\pi)$ & $\{(2~3)\}_{(\pi,\pi)}$ &2\\ $\mathbb{Z}^{\phi_i}_2(\zeta_{(0,0)})$ & $(\phi_1,\phi_2,0,0)$ & $\{(2~3)\}_{(0,0)}$ &2\\ \bottomrule \end{tabular} \end{table}
In addition, Ashwin and Swift showed in \cite{Ashwin_Swift} that for small enough values of the coupling parameter it is possible to average the equations and introduce an approximate decoupling between the fast variation of the phases and the slow variation of the phase differences. This can be seen as introducing a phase-shift symmetry which acts on $\mathbb{T}^4$ by translation along the diagonal; $$R_{\theta}(\phi_1,\ldots,\phi_4):=(\phi_1+\theta,\ldots,\phi_4+\theta),$$ for $\theta\in\mathbb{S}^1.$
We now have an ODE on $\mathbb{T}^4$ that is equivariant under the action of $\mathbb{D}_2\times\mathbb{S}^1;$ next we have to classify the isotropy types of points under this action.
\begin{priteo} The isotropy subgroups for the action of $\mathbb{D}_2\times\mathbb{S}^1$ on $\mathbb{T}^4$ together with their generators and dimension of their fixed-point subspaces are those listed in Table~\ref{table grande}. \end{priteo} \begin{proof} We will explicitly calculate two examples, for the zero and one-dimensional fixed-point subspaces, respectively, the other cases being treated similarly. $(a)$ Let's take the action of $\mathbb{D}_2(\kappa,\mathrm{Id})$ on $\mathbb{T}^4.$ We have \begin{equation}\label{teorema tabla example} \begin{array}{l} \begin{bmatrix} \cos\phi_1&-\sin\phi_1&0&0\\ \sin\phi_1&\cos\phi_1&0&0\\ 0&0&\cos\phi_2&-\sin\phi_2\\ 0&0&\sin\phi_2&\cos\phi_2\\ \end{bmatrix} \begin{bmatrix} a\\b\\c\\d \end{bmatrix}= \begin{bmatrix} a\cos\phi_1-b\sin\phi_1\\ b\cos\phi_1+a\sin\phi_1\\ c\cos\phi_2-d\sin\phi_2\\ d\cos\phi_2+c\sin\phi_2\\ \end{bmatrix}= \begin{bmatrix} a\cos\phi\\ b\cos\phi\\ c\cos\phi\\ d\cos\phi\\ \end{bmatrix}= \begin{bmatrix} \pm a\\ \pm b\\ \pm c\\ \pm d\\ \end{bmatrix}. \end{array} \end{equation} because $\phi_1,\phi_2=\{0,\pi\}.$ Therefore the only possible values for any arbitrary point on $\mathbb{T}^4$ to be fixed by the group are $0$ and $\pi.$ The four choices for the first four lines are deduced from the action of the elements of $\mathbb{D}_2.$\\
$(b)$ Let's take for example the action of $\mathbb{Z}^{\phi}_2(\zeta_{(\pi,\pi)})$ on one-dimensional manifolds on $\mathbb{T}^4.$ \begin{equation}\label{teorema tabla example2} \begin{array}{l} \begin{bmatrix} 1&0&0&0\\ 0&1&0&0\\ 0&0&\cos\phi&-\sin\phi\\ 0&0&\sin\phi&\cos\phi\\ \end{bmatrix} \begin{bmatrix} \phi\\ \phi\\ \pi\\ \pi\\ \end{bmatrix}= \begin{bmatrix} \phi\\ \phi\\ \pi\cos\phi\\ \pi\cos\phi\\ \end{bmatrix}= \begin{bmatrix} \phi\\ \phi\\ \pm\pi\\ \pm\pi \end{bmatrix}= \begin{bmatrix} \phi\\ \phi\\ \pi\\ \pi\\ \end{bmatrix}, \end{array} \end{equation} because the only options for $\phi$ are $0$ or $\pi$ radians.
\end{proof}
\subsection{Analysis of a family of vector fields in $\mathrm{Fix}(\mathbb{Z}_2)$}\label{onetorus_theor} We can define coordinates in $\mathrm{Fix}(\mathbb{Z}_2) $ by taking a basis \begin{equation}\label{basis} \begin{array}{l} e_1=\frac{1}{2}(-1,1,-1,-1)\\ e_2=\frac{1}{2}(-1,-1,1,-1) \end{array} \end{equation} and consider the space spanned by $\{e_1,e_2\}$ parameterized by $\{\phi_1,\phi_2\}:$ \begin{equation}\label{coordinates} \sum_{n=1}^2\phi_ne_n \end{equation} By using these coordinates, we construct the following family of two-dimensional differential systems which satisfies the symmetry of $\mathrm{Fix}(\mathbb{Z}_2)$. \begin{equation}\label{systema ejemplo} \left\{ \begin{array}{l} \dot{\phi_1}=a\sin{\phi_1}\cos{\phi_2}+\epsilon\sin{2\phi_1}\cos{2\phi_2}\\ \\ \dot{\phi_2}=-b\sin{\phi_2}\cos{\phi_1}+\epsilon\sin{2\phi_2}\cos{2\phi_1}+q(1+\cos\phi_1)\sin2\phi_2,\\ \end{array} \right. \end{equation} where $a,~b>0.$ We argue that the family of vector fields \eqref{systema ejemplo} exhibits structurally stable, attracting heteroclinic cycles, which are structurally stable or completely unstable, depending on parameters $a,~b,~\epsilon$ and $q,$ as we shall prove in Theorem \ref{teorema estabilidad heteroclinas}. In the following we will show that the planes $\phi_1=0~(\mathrm{mod}~\pi),~\phi_2=0~(\mathrm{mod}~\pi)$ are invariant under the flow of \eqref{systema ejemplo}.
Let $\mathcal{X}$ be the vector field of system \eqref{systema ejemplo}.\\ \begin{defi} We call a trigonometric invariant algebraic curve $h(\phi_1,\phi_2)=0,$ if it is invariant by the flow of \eqref{systema ejemplo}, i.e. there exists a function $K(\phi_1,\phi_2)$ such that \begin{equation}\label{campo} \mathcal{X}h=\frac{\partial h}{\partial\phi_1}\dot{\phi_1}+\frac{\partial h}{\partial\phi_2}\dot{\phi_2}=Kh. \end{equation} \end{defi}
\begin{lema}\label{lema invariant2} Functions $\sin\phi_1$ and $\sin\phi_2$ are trigonometric invariant algebraic curves for system \eqref{systema ejemplo}. \end{lema} \begin{proof} We can write the system \eqref{systema ejemplo} in the form \begin{equation}\label{systema ejemplo1} \left\{ \begin{array}{l} \dot{\phi_1}=\sin{\phi_1}\left(a\cos{\phi_2}+2\epsilon\cos{\phi_1}\cos{2\phi_2}\right)\\ \\ \dot{\phi_2}=\sin{\phi_2}\left(-b\cos{\phi_1}+2\epsilon\cos{\phi_2}\cos{2\phi_1}+2q(1+\cos\phi_1)\cos\phi_2\right)\\ \end{array} \right. \end{equation}
Now if we choose $h_1=\sin\phi_1,$ then $\mathcal{X}h_1=\cos{\phi_1}\sin{\phi_1}\left(a\cos{\phi_2}+2\epsilon\cos{\phi_1}\cos{2\phi_2}\right),$ so $K_1=\cos{\phi_1}\left(a\cos{\phi_2}+2\epsilon\cos{\phi_1}\cos{2\phi_2}\right).$ The second case follows similarly. \end{proof}
Since the planes $\phi_i=0~(\mathrm{mod}~\pi),~i=1,2$ are invariant under the flow of \eqref{systema ejemplo}, it is clear that $(0,0),~(\pi,0),~(\pi,\pi),~(0,\pi)$ are equilibria for \eqref{systema ejemplo}. To check the possibility of heteroclinic cycles in system \eqref{systema ejemplo}, we linearize first about the equilibria (i.e. the zero-dimensional fixed points) and then about the one-dimensional manifolds connecting these equilibria. We can assume without loss of generality that $\mathrm{Fix}\left(\mathbb{Z}_2\right)$ is attracting for the dynamics. The idea is that the analysis of the dynamics within the fixed point space $\mathrm{Fix}\left(\mathbb{Z}_2\right)$ is crucial in determining the stabilities of the full system. In particular we will prove that the eigenvalues of the linearization in each case are of opposite signs, allowing the existence of such a heteroclinic network between the equilibria.
In the following we will show that there exists the possibility of a heteroclinic cycle in any of the two-dimensional fixed-point spaces in Table \eqref{table grande} and the connections between these zero dimensional fixed point spaces are possible within specific routes within the one-dimensional fixed points of \eqref{systema ejemplo}. For the proof we will need \begin{lema}\label{lema saddles}
Assume $|\epsilon|<\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}$ and $|\epsilon+2q|<\frac{b}{2}.$ Then the four fixed points in Table \ref{table fixed points} are saddles. \end{lema} \begin{proof} By inspection of Table \ref{table fixed points} it is clear that the eigenvalues of these fixed points are of opposite signs in each case.
\begin{table} \centering \begin{center} \caption{Eigenvalues of the flow of equation \eqref{systema ejemplo}, at the four non-conjugate zero-dimensional fixed points.}\label{table fixed points}
\end{center} \begin{tabular}{cccc} \toprule
$\mathrm{Fix}(\Sigma)$ & $(\phi_1,\phi_2)$ & $\lambda_1$ & $\lambda_2$\\ \midrule \centering $\mathbb{D}_2(0)$& $(0,0)$ & $a+2\epsilon$ &$-b+2(\epsilon+2q)$\\ $\mathbb{D}_2(\kappa,\mathrm{Id})$ & $(0,\pi)$ & $-a+2\epsilon$ &$b+2(\epsilon+2q)$\\ $\mathbb{D}_2(\kappa\zeta,\kappa)$ & $(\pi,0)$ & $-a+2\epsilon$ &$b+2\epsilon$\\ $\mathbb{D}_2(\pi)$ & $(\pi,\pi)$ & $a+2\epsilon$ &$-b+2\epsilon$\\ \bottomrule \end{tabular} \end{table}
\end{proof} \begin{priteo} A heteroclinic cycle is possible in any of the two-dimensional fixed point subspaces in Table \ref{table grande}, in the following way: \begin{equation}\label{flechas} \begin{array}{l} \cdots\xrightarrow{\mathbb{Z}^{\phi}_2(\zeta_{(0,0)})}\mathbb{D}_2(0)\xrightarrow{\mathbb{Z}^{\phi}_2(\kappa_{(0,0)})} \mathbb{D}_2(\kappa,\mathrm{Id})\xrightarrow{\mathbb{Z}^{\phi}_2(\zeta_{(\pi,\pi)})}\\ \\ \hspace{3cm}\mathbb{D}_2(\pi) \xrightarrow{\mathbb{Z}^{\phi}_2(\kappa_{(\pi,\pi)})}\mathbb{D}_2(\kappa\zeta,\kappa)\xrightarrow{\mathbb{Z}^{\phi}_2(\zeta_{(0,0)})}\cdots \end{array} \end{equation} where the connection between the four equilibria in the plane $\phi_1,\phi_2$ is carried out along the indicated one-dimensional manifolds. \end{priteo}
\begin{proof} From Lemma \ref{lema saddles} we know that the four fixed points are saddles, so a heteroclinic connection between them is possible. We linearize the system \eqref{systema ejemplo} at every point in the one-dimensional manifolds of the fixed point spaces in Table \ref{table grande}. The Jacobian matrix of the system evaluated at these points has the eigenvalues shown in Table \ref{table fixed points repetition}. This way we obtain the four paths shown in Table \ref{table fixed points}. Moreover, using the conditions for $\epsilon$ and $q$ in Lemma \ref{lema saddles}, these eigenvalues are clearly of opposite signs in each case. A schematic view of the heteroclinic cycle is offered in Figure \ref{flechas repetition}. \begin{table} \centering \begin{center} \caption{Eigenvalues of the flow of equation \eqref{systema ejemplo}, at the four non-conjugate paths on the one-dimensional fixed-point subspaces on $\mathbb{T}^4$.}\label{table fixed points repetition} \end{center} \begin{tabular}{cccc} \toprule
$\mathrm{Fix}(\Sigma)$ & $(\phi_1,\phi_2)$ & $\lambda_1$ & $\lambda_2$\\ \midrule \centering $\mathbb{Z}^{\phi_1}_2(\kappa_{(0,0)})$ &$(\phi_1,0)$& $a\cos\phi_1+2\epsilon\cos2\phi_1$ &$-b\cos\phi_1+2q(1+\cos\phi_1)+2\epsilon\cos2\phi_1$\\ $\mathbb{Z}^{\phi}_2(\zeta_{(\pi,\pi)})$& $(\pi,\phi_2)$& $-a\cos\phi_2+2\epsilon\cos2\phi_2$ &$b\cos\phi_2+2\epsilon\cos2\phi_2$\\ $\mathbb{Z}^{\phi}_2(\kappa_{(\pi,\pi)})$ & $(\phi_1,\pi)$ &$-a\cos\phi_1+2\epsilon\cos2\phi_1$&$b\cos\phi_1+2q(1+\cos\phi_1)+2\epsilon\cos2\phi_1$\\ $\mathbb{Z}^{\phi}_2(\zeta_{(0,0)})$& $(0,\phi_2)$ & $a\cos\phi_2+2\epsilon\cos2\phi_2$ &$-b\cos\phi_2+2\epsilon\cos2\phi_2+4q\cos2\phi_2$\\ \bottomrule \end{tabular} \end{table}
\end{proof}
\begin{figure}
\caption{Schematic representation of a structurally stable heteroclinic cycle for the weak coupled oscillators within the two-dimensional space of the $\mathrm{Fix}(\mathbb{Z}_2)$ group.}
\label{flechas repetition}
\end{figure}
Under the conditions in Lemma \eqref{lema saddles}, the stability of the heteroclinic cycle is guaranteed by the fact that it is restricted to the dynamics of system \eqref{systema ejemplo} within the fixed point spaces. We use the criteria of Krupa and Melbourne \cite{Krupa} to study the stability of the heteroclinic cycle. We now recall these stability criteria, which are based on four hypotheses.\\ The first hypothesis guarantees that the heteroclinic cycle is robust. \begin{itemize} \item ($S_1$) There is an isotropy subgroup $\Sigma_j$ with the fixed-point subspace $P_j=\mathrm{Fix}(\Sigma_j)$ such that $W^u(\xi_j)\cap P_j\subset W^s(\xi_{j+1})$ and $\xi_{j+1}$ is a sink in $P_j.$\\ Corresponding to each isotropy subgroup $\Sigma_j$ in ($S_1$) is the isotypic decomposition $\mathbb{R}^n=W_0\oplus\ldots\oplus W_q$ of $\mathbb{R}^n$ into isotypic components. Let $W_0=P_j$ and $N(\Sigma_j)$ denote the normalizer of $\Sigma_j$ in $\Gamma.$\\ \item ($S_2$) The eigenspaces corresponding to $c_j,~t_j,~e_{j+1}$ and $t_{j+1}$ lie in the same $\Sigma_j-$isotypic component;\\ \item ($S_3$) $\mathrm{dim}W^u(\xi_j)\cap P_j=\mathrm{dim}(N(\Sigma_j)/\Sigma_j)+1;$\\ \item ($S_4$) All transverse eigenvalues of $\xi_j$ with positive real part lie in the same $\Sigma_j-$isotypic component. \end{itemize}
Set $\rho_j=\mathrm{min}(c_j/e_j,1-t_j/e_j)$ and define $\rho=\rho_1\cdot\cdot\cdot\rho_m.$
\begin{priteo} [Krupa, Melbourne] Let $\Gamma$ be a finite group acting on $\mathbb{R}^n$ and $f:\mathbb{R}^n\rightarrow\mathbb{R}^n$ be a $\Gamma-$equivariant vector field. Suppose that $X$ is a heteroclinic cycle for $f$ satisfying hypotheses $(S_1)-(S_4).$ Then generically the stability of $X$ is described by precisely one of the following possibilities. \begin{itemize} \item [(a)] asymptotically stable ($\rho>1$ and $t_j<0$ for each $j$), \item [(b)] unstable but essentially asymptotically stable ($\rho>1$ and $t_j<e_j$ for each $j$ and $t_j>0$ for some $j$), \item [(c)] almost completely unstable ($\rho<1$ or $t_j>e_j$ for some $j$), \item [(d)] completely unstable if $\rho<1.$ \end{itemize} \end{priteo}
Applying these criteria to our case, we have
\begin{priteo}\label{teorema estabilidad heteroclinas} Heteroclinic cycle \eqref{flechas} is: \begin{itemize} \item [(a)] asymptotically stable if \begin{equation}\label{eq1 teorema estabilidad} \begin{array}{l} -\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}<\epsilon<0~~\mathrm{and}~~-\frac{b}{2}<\epsilon+2q<0,~~\mathrm{or}\\ \\ -\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}<\epsilon<0~~\mathrm{and}~~0<\epsilon+2q<\frac{b}{2}~~\mathrm{and}\\ \\ \hspace{5cm}\displaystyle{\frac{b-2(\epsilon+2q)}{b+2(\epsilon+2q)}>\frac{(a+2\epsilon )^2(b+2\epsilon)}{(a-2\epsilon )^2(b-2\epsilon)}},~~\mathrm{or}\\ \\ 0<\epsilon<\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}~~\mathrm{and}~~-\frac{b}{2}<\epsilon+2q<0~~\mathrm{and}\\ \\ \hspace{5cm}\displaystyle{\frac{b-2(\epsilon+2q)}{b+2(\epsilon+2q)}<\frac{(a+2\epsilon )^2(b+2\epsilon)}{(a-2\epsilon )^2(b-2\epsilon)}}; \end{array} \end{equation} \item [(b)] completely unstable if \begin{equation}\label{eq1 teorema estabilidad repetition} \begin{array}{l} 0<\epsilon<\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}~~\mathrm{and}~~0<\epsilon+2q<\frac{b}{2},~~\mathrm{or}\\ \\ -\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}<\epsilon<0~~\mathrm{and}~~0<\epsilon+2q<\frac{b}{2}~~\mathrm{and}\\ \\ \hspace{5cm}\displaystyle{\frac{b-2(\epsilon+2q)}{b+2(\epsilon+2q)}<\frac{(a+2\epsilon )^2(b+2\epsilon)}{(a-2\epsilon )^2(b-2\epsilon)}},~~\mathrm{or}\\ \\ 0<\epsilon<\mathrm{min}\{\frac{a}{2},\frac{b}{2}\}~~\mathrm{and}~~-\frac{b}{2}<\epsilon+2q<0~~\mathrm{and}\\ \\ \hspace{5cm}\displaystyle{\frac{b-2(\epsilon+2q)}{b+2(\epsilon+2q)}>\frac{(a+2\epsilon )^2(b+2\epsilon)}{(a-2\epsilon )^2(b-2\epsilon)}}. \end{array} \end{equation} \end{itemize} \end{priteo} \begin{proof}
The stability is expressed by \begin{equation}\label{stability krupa1} \rho=\prod_{j=1}^4\rho_j \end{equation} where \begin{equation}\label{stability krupa2} \rho_j=\mathrm{min}\{c_j/e_j,1-t_j/e_j\}. \end{equation} In equation \eqref{stability krupa2}, $e_i$ is the expanding eigenvalue at the $i$th point of the cycle, $-c_i$ is the contracting eigenvalue and $t_i$ is the tangential eigenvalue of the linearization. For the heteroclinic cycle we have \begin{equation}\label{rho values1} \begin{array}{l} \rho_1=\displaystyle{\frac{b-2(\epsilon+2q)}{a+2\epsilon}}~~ \rho_2=\displaystyle{\frac{a-2\epsilon}{b+2(\epsilon+2q)}}~~ \rho_3=\displaystyle{\frac{a-2\epsilon}{b+2\epsilon}}~~ \rho_4=\displaystyle{\frac{b-2\epsilon}{a+2\epsilon}}, \end{array} \end{equation} so from equation \eqref{stability krupa1} we obtain \begin{equation}\label{rho values2} \begin{array}{l} \rho=\displaystyle{\frac{\left[b-2(\epsilon+2q)\right](a-2\epsilon )^2(b-2\epsilon)}{\left[b+2(\epsilon+2q)\right](a+2\epsilon )^2(b+2\epsilon)}}. \end{array} \end{equation} Then the proof follows by applying Theorem~2.4 in \cite{Krupa}. \end{proof} \paragraph{Acknowledgements} The author would like to thank the referee for suggestions which improved the presentation of this paper. He also acknowledges financial support from FCT grant SFRH/BD/64374/2009.
\end{document} |
\begin{document}
\title{Quantum simulation of quantum field theory using continuous variables}
\author{Kevin Marshall}
\affiliation{Department of Physics, University of Toronto, Toronto, M5S 1A7, Canada} \author{Raphael Pooser}
\affiliation{Quantum Information Science Group, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37831, U.S.A} \affiliation{Department of Physics and Astronomy, The University of Tennessee, Knoxville, Tennessee 37996-1200, U.S.A.} \author{George Siopsis} \email{[email protected]} \affiliation{Department of Physics and Astronomy, The University of Tennessee, Knoxville, Tennessee 37996-1200, U.S.A.} \author{Christian Weedbrook}
\affiliation{QKD Corp, 60 St.~George St., Toronto, M5S 1A7, Canada}
\date{\today} \pacs{42.50.Ex, 03.70.+k, 42.50.Dv, 03.67.Lx}
\begin{abstract}
Much progress has been made in the field of quantum computing using continuous variables over the last couple of years. This includes the generation of extremely large entangled cluster states (10,000 modes, in fact) as well as a fault tolerant architecture. This has led to the point that continuous-variable quantum computing can indeed be thought of as a viable alternative for universal quantum computing. With that in mind, we present a new algorithm for continuous-variable quantum computers which gives an exponential speedup over the best known classical methods. Specifically, this relates to efficiently calculating the scattering amplitudes in scalar bosonic quantum field theory, a problem that is believed to be hard using a classical computer. Building on this, we give an experimental implementation based on cluster states that is feasible with today's technology. \end{abstract}
\maketitle
\section{Introduction}
For more than a decade now, continuous-variable (CV) quantum information~\cite{Braunstein2005,Weedbrook2011} has been a prominent substrate in implementing quantum technologies. Primarily this can be attributed to its largely Gaussian nature which invites simple and convenient mathematical calculations, as well as accessible experimental demonstrations, which are often deterministic in nature. Furthermore, one can also use CVs as a key element in another promising architecture, known as hybrid quantum information~\cite{Furusawa2011}.
The field of quantum computing~\cite{Ladd2010} using CVs~\cite{Lloyd1999,Weedbrook2011} has also progressed significantly in the last few years. From its original conception in 1999~\cite{Lloyd1999}, progress began to accelerate after a cluster state~\cite{Raussendorf2001} version was established in 2006~\cite{Zhang2006,Menicucci2006}, leading to something significantly more tangible for experimentalists. This resulted in numerous proof-of-principle demonstrations~\cite{Yokoyama2014,Miyata2014,Pysher2011,Takeda2013}, currently culminating in a 10,000 node cluster~\cite{Yokoyama2013} created `on-the-go' along with a 60 node cluster created simultaneously~\cite{Chen2014}. From a theoretical perspective, much progress has been made \cite{Marshall2014,Lau2013,Loock2007,Gu2009,Alexander2014,Demarie2014,Menicucci2015,Wang2014,Menicucci2011}, including recently, an important fault tolerant architecture~\cite{Menicucci2014}, achieved by leveraging the Gottesman-Kitaev-Preskill (GKP) encoding~\cite{Gottesman2001}. However, one area that is significantly underdeveloped is that of algorithms for a CV quantum computer. Thus far there only exists CV versions of quantum searching~\cite{Pati2000} and the Deutsch-Jozsa algorithm~\cite{Pati2003,Adcock2009,Zwierz2010,Adcock2013}.
In this paper, we present an algorithm that simulates~\cite{Georgescu2014} the scattering amplitudes in scalar bosonic quantum field theory (QFT) using a continuous-variable quantum computer. In fact, we show one can obtain an exponential speedup over the best known classical algorithms. A discrete version of the algorithm was originally shown in Refs.~\cite{Jordan2012,Jordan2014a} for a quantum computer based on qubits. Further work extended this result to fermionic QFTs~\cite{Jordan2014}, as well as using wavelets for multi-scale simulations~\cite{Brennen2014}.
Typically, $q$ and $p$ are the CVs ranging over all real numbers. To encode them in qubits, one needs a whole register of qubits at each point in space. However, with CVs, there is a 1-to-1 mapping to qumodes (the CV equivalent of a qubit). In fact it is arguable that a CV quantum computer is the natural choice for such a QFT problem given that the fields are continuous variables. Thus, the value of the field at a given point in space can be mapped onto a qumode naturally. If qubits are used, instead, the qumode needs to be replaced by a register of $M$ qubits which only allows the field to take on $2^M$ discrete values. Brennen \textit{et al.} describe both possibilities in Ref.~\cite{Brennen2014}, although they do not explain how to implement the quartic phase gate with CVs, which we do here. Furthermore, the quartic vertex in wavelets becomes very complicated. Implementing it would require gates acting on more than two modes (resulting in logarithmic overhead in complexity). Another benefit to our approach is in the development of the initial cluster state. Here we show how to create the initial CV cluster state as well as suggesting an experimental implementation based on standard linear optics. Furthermore, we also note that in the preparation of the initial state we see a slight improvement over the original qubit approach of Ref.~\cite{Jordan2012}. There they require $O(N^{2.376})$ gates to engineer the ground cluster state; whereas in our scheme, we require slightly less than that, specifically, $O(N^2)$ gates.
Our paper is structured in the following way. In Sec.~\ref{sec2}, we discretize space for a one-dimensional scalar bosonic QFT while leaving the field and time as continuous parameters. Next, we show how to generate the initial cluster state using only Gaussian operations in Sec.~\ref{prep}. In Sec.~\ref{compute} we outline the steps necessary to compute a scattering amplitude including the required measurement. We provide an explicit experimental implementation in Sec.~\ref{experiment}. Finally, the benefits of our approach over classical methods are discussed in Sec.~\ref{conclusion}. \section{Discretization in one-dimension} \label{sec2}
We consider a relativistic scalar field $\phi$ in one spatial dimension including a quartic self-interaction. We shall outline the discretization specifically in the one-dimensional case so as not to clutter the notation unnecessarily, but generalization to higher dimensions is straightforward and is discussed in the supplementary material. We note that the field $\phi$ is a function of $x$ and $t$ (time), $\phi(x,t)$. All three parameters are continuous. In our approach, we discretize $x$, but not $\phi$ or $t$. In the case of qubits, one would discretize $x$ and $\phi$, but not $t$. In classical lattice calculations, one discretizes all three $\phi$, $x$, and $t$.
In the continuum, the one-dimensional free scalar QFT is given by the Hamiltonian \begin{equation} H_0 = \frac{1}{2} \int_0^L dx \left[ \pi^2 + \left( \frac{\partial\phi}{\partial x} \right)^2 + m^2 \phi^2 \right] \end{equation} where $\phi$ is the scalar field and $\pi$ the conjugate momentum field. They obey commutation relations $[ \phi (x),\pi (x') ] = i\delta (x-x')$ where we choose units in which $\hbar =1$.
We discretize space by letting $x = na$, $n=0,1,\dots, N-1$, where $a$ is the lattice spacing and $L=Na$ is the finite length of the spatial dimension ($L\gg a$). We choose units in which $a=1$, for simplicity, and denote $Q_n = \phi (x)$, $P_n = \pi(x)$. The discretized variables obey standard commutator relations, $[ Q_n,P_m ] = i \delta_{nm}$. The Hamiltonian becomes \begin{equation}\label{eq18} H_0 = \sum_{n=0}^{N-1} \frac{ P_n^2 + m^2 Q_n^2}{2} + \frac{1}{2} \sum_{n=0}^{N-1} (Q_n- Q_{n+1})^2 \end{equation} where we employed periodic boundary conditions and defined $Q_{N} \equiv Q_0$.
It is useful to define creation and annihilation operators, $A_n^\dagger$ and $A_n$, respectively, by ${A}_n = (Q_n + iP_n)/\sqrt{2}$. They obey the commutation relations $[ A_n,A_m^\dagger ] = \delta_{nm}$ and the Hamiltonian can then be written as \begin{equation} \label{eq8} H_0 = \frac{1}{2} \mathbf{P}^T \mathbf{P} + \frac{1}{2} \mathbf{Q}^T \mathbf{V} \mathbf{Q} \end{equation} where $\mathbf{P} \equiv [ P_0,P_1, \dots, P_{N-1}]^T$ and $\mathbf{Q} \equiv [ Q_0, Q_1, \dots, Q_{N-1}]^T$ . The eigenvalues of the matrix $\mathbf{V}$ and the components of the corresponding normalized eigenvectors $\mathbf{e}^n$ are, respectively, $\omega_n^2 = m^2 + 4\sin^2 \frac{n\pi}{N}$, and $\mathbf{e}_{k}^{n} = \frac{1}{\sqrt{N}} e^{2\pi i kn/N},\ k=0,\dots, N-1$. Notice that the massless case is special because it contains a zero mode (for $m=0$, $\omega_0 =0$), so the matrix $\mathbf{V}$ is not invertible. To avoid the problems that arise, we can shift the mass by a small amount $\sim 1/N$, which vanishes in the continuum limit ($N\to\infty$).
We also wish to add a quartic interaction, $ H_{int} = \frac{\lambda}{4!} \int_0^L dx \phi^4 \to \frac{\lambda}{4!} \sum_n Q_n^4 $ which necessitates the addition of a mass counter term $ H_{c.t.} = \frac{\delta_m}{2} \int_0^L dx \phi^2 \to \frac{\delta_m}{2} \sum_n Q_n^2 $ due to renormalization, as explained in the supplementary material. We find that for weak coupling, the physically interesting case is stable for $\lambda>0$.
To diagonalize the Hamiltonian, we introduce new creation and annihilation operators, $a_k^\dagger$ and $a_k$, respectively, defined by ${a}_k = \sqrt{\frac{\omega_k}{2}} (\mathbf{e}^\dagger \mathbf{Q})_k + \frac{i}{\sqrt{2\omega_k}} (\mathbf{e}^\dagger \mathbf{P})_k$ where $\mathbf{e}$ is the matrix of the eigenvectors. Notice that $ \mathbf{e}$ is unitary, $\mathbf{e}^\dagger \mathbf{e} = \mathbf{I}$. These operators obey standard commutation relations, $[ a_k,a_l^\dagger ] = \delta_{kl}$ and the free Hamiltonian reads \begin{equation} H_0 = \sum_{k=0}^{N-1} \omega_k \left( a_k^\dagger a_k + \frac{1}{2} \right) .\end{equation} In this form, it is straightforward to construct the states in the Hilbert space.
\section{Initial cluster state preparation}\label{prep}
For the initial cluster state, in Refs.~\cite{Jordan2012,Brennen2014} the excited state was created \textit{after} creating the ground state. This is difficult because it involves manipulating a large number of qubits. In our approach, we create a single photon state in a single mode \textit{before} creating the cluster state. This is more accessible, as it involves creating the state $\ket{1}$ for a single mode. It can be done in a variety of ways, via a heralded single photon source, for instance. At the end of the computation, the field modes are all measured and the distribution of single photons across them determines the result.
To begin with, we build the system with $N$ oscillators representing the variables $(Q_n, P_n)$, $n=0,1,2,\dots$. The $n$th oscillator has a Hilbert space constructed by successive application of the creation operator $A_n^\dagger$ on the vacuum $|0\rangle_n$, which is annihilated by $A_n$. Here $|0\rangle_n$ is shorthand for a product state of vacuum fields
\begin{equation}\label{eq16} |0\rangle = |0\rangle_0 \otimes |0\rangle_1 \otimes \cdots \otimes |0\rangle_{N-1} \ , \end{equation} with $ A_n |0\rangle = 0$. For a scattering process, we are given an initial state typically consisting of a fixed number of particles, usually two, which undergoes evolution and then a measurement is performed (detection of particles) on the final state. Both initial and final states asymptote to eigenstates of the free Hamiltonian $H_0$. Thus quantum computation starts with preparation of an eigenstate of $H_0$.
\label{sec:groundstate}
First, we consider the ground state of $H_0$. It is the cluster state $|\Omega\rangle$ annihilated by all $a_k$, i.e., $ a_k |\Omega\rangle =0$ for $k=0,1,\dots, N-1 $. It can be constructed from the vacuum state \eqref{eq16} by acting with the Gaussian unitary $U^\dagger$, where $a_n = U^\dagger A_n U $. Noticing the relationship between the operators $a_k$ and $A_k$ we can use the Bloch-Messiah reduction \cite{Braunstein2005decomp} to determine $U=VSW^\dagger$ as a decomposition involving a multiport interferometer ($V$) followed by single mode squeezing ($S$) followed by a final multiport interferometer ($W$). These unitary operators can be realized with $O(N^2)$ gates \cite{Reck1994}. This is in contrast to the qubit version \cite{Jordan2012} where they require $O(N^{2.376})$ gates.
To implement $U$ we first perform the rotation \begin{eqnarray}\label{eq23} A_0 &\to & A_0' = \sum_{k=0}^{N-1} A_k \nonumber\\ A_n &\to& A_n' = \sum_{k=0}^{N-1} \cos \frac{2\pi nk}{N} A_k \nonumber\\ A_{N-n} &\to& A_{N-n}' = \sum_{k=0}^{N-1} \sin \frac{2\pi nk}{N} A_k \end{eqnarray} where $1\le n\le N/2$, which can be expressed in terms of rotations each involving only a couple of oscillators. Notice that if $N$ is even, $A_{N/2}$ does not have a partner; we obtain $A_{N/2} \to \sum_k (-)^k A_k$. Next, we squeeze each mode as $A_n' \to A_n'' = \cosh r_n A_n' + \sinh r_n {A_n'}^\dagger$ where $e^{2r_n} = \omega_n$ for $n\le N/2$, and $e^{-2r_n} = \omega_n$, for $n>N/2$. Finally, we untangle the pairs by rotating them, $A''_k\to a_k$ where $a_0=A_0''$, $a_n =( A_n'' + iA_{N-n}'' )/\sqrt 2$, and $a_{N-n} = ( iA_n'' + A_{N-n}'' )/\sqrt 2$.
Excited states can be constructed with the same number of gates, e.g., the single-particle state $ |k\rangle \equiv a_k^\dagger |\Omega\rangle$ can be constructed by acting upon the vacuum with $A_k^\dagger$. This turns the initial state of the $k$th mode into a one-photon state, $A_k^\dagger |0\rangle_k$, which can be accomplished in a variety of ways; see supplementary material. Having engineered $ A_k^\dagger |0\rangle_k$, we then apply the Gaussian unitary $U^\dagger$, to obtain the one-particle state
\begin{equation} {a_k}^\dagger |\Omega\rangle = U^\dagger A_k^\dagger |0\rangle \end{equation} Extending the above to the engineering of multi-particle states, $|k_1,k_2,\dots \rangle \propto {a_{k_1}}^\dagger {a_{k_2}}^\dagger \cdots |0\rangle$, is straightforward.
\section{Quantum Computation}\label{compute}
We wish to calculate a general scattering amplitude, which can be written as
\begin{equation} \mathcal{A} = \langle out | T \exp\left\{ i \int_{-T}^T dt (H_{int} (t) + H_{c.t.} (t) ) \right\} | in \rangle \end{equation} in the limit $T\to\infty$, where time evolution is defined with respect to the non-interacting Hamiltonian.
We start by preparing the initial state $|in\rangle$ as in the previous section and define initial time as $t=-T$. Then we act successively with evolution operators of the form \begin{equation}\label{eq33} U(t) = \exp\left\{ i \delta t (H_{int} (t) + H_{c.t.} (t) ) \right\} \end{equation} Time dependence is obtained via the free Hamiltonian, \begin{equation} Q_i (t) = e^{it H_0} Q_i (0) e^{-it H_0} \end{equation}
Therefore, the evolution \eqref{eq33} can be implemented as \begin{equation}\label{eq33a} U(t) = e^{it H_0}e^{ i \delta t (H_{int} + H_{c.t.} ) } e^{-it H_0} \end{equation} We deduce
\begin{equation} \mathcal{A} = \langle out | \left[ e^{i \delta t H_0} e^{ i \delta t (H_{int} + H_{c.t.} ) } \right]^N |in\rangle \end{equation} where we divided the time interval into $N = \frac{2T}{\delta t}$ segments.
The coupling constants in \eqref{eq33} are turned on and off adiabatically. This is achieved by splitting the time interval $[-T,T]$ into three segments, $[-T, -T_1]$, $[-T_1, T_1]$, and $[T_1,T]$. For $t\in [-T,-T_1]$, we turn the coupling constants on by replacing $\lambda \to \lambda(t)$, $\delta m \to \delta m (t)$, so that $\lambda (-T) = \delta m (-T) =0$, and $\lambda (-T_1) = \lambda$, $\delta m (-T_1) = \delta m$. Then for $t\in [-T_1,T_1]$ the coupling constants are held fixed. Finally, for $t\in [T_1,T]$, they are turned off adiabatically by reversing the process in the first time interval. In the case of small $\lambda$, the time dependence of the coupling constants can be chosen efficiently by making use of perturbative renormalization. Eqs.\ \eqref{eqA13} and \eqref{eqA14} inform the choice $\lambda(t) = \frac{T+t}{T-T_1} \lambda$, $\delta m (t) = \frac{\lambda(t)}{8\pi} \log \frac{64}{m^2}$, for $-T \le t \le -T_1$.
The unitary operators $e^{i\delta t H_0}$ and $e^{i\delta t H_{c.t.}}$ are Gaussian and can be implemented with second order nonlinear optical interactions and linear optics beam splitter networks. The interaction is implemented through a \emph{quartic} phase gate for each mode, \begin{equation}\label{eq41} e^{i\delta t H_{int} } = \prod_n e^{i\gamma Q_n^4 } \ \ , \ \ \ \ \gamma = \delta t \frac{\lambda}{4!} \end{equation} The quartic phase gate may be implemented in a similar manner to the cubic phase gate previously proposed~\cite{Marshall2014}.
\begin{figure*}
\caption{(Color Online) Sketch of an experimental setup for electromagnetic field modes used as qudits in a QFT calculation involving four field modes. The modes are encoded into electric field modes (colored red, blue, yellow, green), which are then prepared via beam splitters, swap gates, and squeezers for the compute stage. The compute stage consists of an interferometer, a quartic phase gate (black box, see Ref.~\cite{Marshall2014}), and free propagation. An uncompute stage, which is the inverse of the preparation stage, and a detection stage in the Fock basis, yield the scattering amplitudes into the four QFT field modes.}
\label{setup}
\end{figure*}
After evolution, we must project onto the state $|out\rangle$. This is similar to the state $|in\rangle$, and its construction depends on the number of desired particles. The latter are excitations created with $a_n^\dagger$, so in general,
\begin{equation}\label{eq95} |out\rangle = a_{n_1}^\dagger a_{n_2}^\dagger \cdots |\Omega\rangle = U^\dagger A_{n_1}^\dagger A_{n_2}^\dagger \cdots |0\rangle\end{equation} It follows that the next step is to \emph{uncompute} by applying the Gaussian unitary $U$ (which is the inverse operation to the preparation of the initial state), and then measure the number of photons in each mode. The final uncompute step projects the set of output modes onto the Fock basis. Thus, the scattering amplitude calculation is a mapping from one set of field modes on the input to a separate set of field modes on the output, as expected. That is, for each click on the photodetector for mode $n$, there is an operator $a_n^\dagger$ present in the final state \eqref{eq95}. If the QFT calculation involved an initial input state with two excitations spread across 100 field modes, say, then the entire calculation would involve two photons, for instance. We note that the calculation has made use of a quartic phase gate up to this point, and thus technically speaking a non-Gaussian operation would not be necessary during this measurement step in order to achieve an exponential speedup over the classical QFT algorithm. However, in order to achieve high accuracy in the final result, photon number resolving detectors with high efficiency~\cite{Nam2008} would be desirable for the measurment phase.
\section{Experimental implementation}\label{experiment}
An example of the experimental implementation is given in Fig.~\ref{setup}. For brevity the setup for calculating four space time points is given. For the electromagnetic field, the initial unitary rotation involves weighted beam splitters with the appropriate splitting to achieve the desired sums over the field operators (see appendix A.2). A swap gate is involved in the input state preparation stage. We note that a swap gate contains essentially the CV version of the CNOT operator along with parity operators~\cite{Wang2001}, but in some cases the gate can be simplified to a beam splitter interaction~\cite{Braunstein2005} such as for the electromagnetic field. Here we use a mode label swap operator, which is possible in systems with movable qubits, such as CV optical fields. Next, $H_{c.t.}$ is quadratic in position quadrature operators, which can be implemented with a series of phase shifts~\cite{Braunstein2005}. The non-Gaussian piece of the computation is then the quartic phase gate contained in $H_{int}$, which can be implemented via repeated application of the photon number-dependent phase gate~\cite{Marshall2014}. Lastly, the free propagation $H_0$ can be implemented by a calibrated free propagation before the uncompute stage. We note that the QFT field modes are encoded into the qudits which are themselves electromagnetic field modes, meaning that the free propagation contained in $H_0$ is not arbitrary. It must conform to the calculated QFT free propagation distance, and phase stability must be maintained throughout.
\section{Conclusion}\label{conclusion}
In conclusion, we developed a new algorithm for a continuous-variable quantum computer which gave an exponential speedup over the best known classical algorithms. This algorithm was the calculation of the scattering amplitudes in scalar bosonic quantum field theory, and as previously mentioned, arguably a natural choice for a continuous variable quantum computer to solve. At weak coupling, analytic calculations are possible, however, at strong coupling no such calculations are generally available, and one has to rely on numerical techniques. A widely used framework is lattice field theory which is based on the discretization of space into a finite set of points. The complexity of classical computations on a lattice increases exponentially with the number of lattice sites \cite{Creutz1985}.
Quantum computations offer a distinct advantage (first shown in Ref.~\cite{Jordan2014a} for qubits, and here for qumodes), since complexity only grows polynomially. Using continuous variables we also see an advantage over the original qubit proposal; specifically, in the preparation of the initial cluster state. There they required $O(N^{2.376})$ gates to synthesize the ground state. However, in our scheme, we required slightly less, $O(N^2)$ gates. Finally, we also gave an example of an experimental implementation on a continuous-variable cluster state quantum computer that calculated four space time points. We noted that such a scheme is feasible with current linear optical technology and consisted of a set of Gaussian operations along with the non-Gaussian quartic phase gate.
\acknowledgments We are grateful to Peter Rohde for valuable feedback. R. C. P. performed portions of this work at Oak Ridge National Laboratory, operated by UT-Battelle for the US Department of Energy under Contract No. DE-AC05-00OR22725. Work performed by the US government is not subject to copyright restrictions.
\appendix
\section{Renormalization}
Define the Green function $\mathbf{G} (t_1,t_2)$ as
\begin{equation} G_{ij}(t_1,t_2) = \langle 0 | \mathcal T(Q_i (t_1) Q_j(t_2)) |0\rangle, \end{equation} where $\mathcal T$ denotes the time-ordering operator. It obeys \begin{equation}\left[ \partial_{t_1}^2 + \mathbf{V} \right] \mathbf{G} (t_1,t_2) = -i \mathbf{I} \delta (t_1-t_2).\end{equation} Using the Fourier transform, \begin{equation} \mathbf{G} (t_1,t_2) = \int \frac{d\omega}{2\pi} e^{i\omega (t_1-t_2)} \tilde{\mathbf{G}} (\omega)\end{equation} we obtain \begin{equation} \tilde{\mathbf{G}} (\omega) = i\left[ -\omega^2 \mathbf{I} + \mathbf{V} \right]^{-1} = \sum_n \frac{-i}{\omega^2 - \omega_n^2} \mathbf{e}_n \mathbf{e}_n^\dagger, \end{equation} exhibiting poles at $\omega^2 = \omega_{n}^2$.
When we switch on the interaction term, \begin{equation} H_{int} = \frac{\lambda}{4!} \int_0^L dx \phi^4 \to \frac{\lambda}{4!} \sum_n Q_n^4,\end{equation} we have that at $\mathcal{O} (\lambda)$ the Green function is corrected by
\begin{equation}\delta G_{ij}(t_1,t_2) = \langle 0 | \mathcal T\left[ Q_i (t_1) Q_j(t_2) \int dt H_{int} (t) \right] |0\rangle.\end{equation} For the Fourier transform, we obtain \begin{equation}\delta \tilde{\mathbf{G}} (\omega) = \lambda [\tilde{\mathbf{G}} (\omega)]^2 \int \frac{d\omega'}{2\pi} \mathrm{Tr}\, \tilde{\mathbf{G}} (\omega')\end{equation} which leads to a shift of the poles, \begin{equation} \tilde{\mathbf{G}} (\omega) + \delta \tilde{\mathbf{G}} (\omega) = \sum_n \frac{-i}{\omega^2 - \omega_n^2 - \Sigma} \mathbf{e}_n \mathbf{e}_n^\dagger + \mathcal{O} (\lambda^2), \end{equation} where \begin{equation} \Sigma = \frac{\lambda}{2N} \int \frac{d\omega'}{2\pi} \sum_n \frac{-i}{{\omega'}^2 -\omega_n^2} = \frac{\lambda}{4N} \sum_n \frac{1}{\omega_n} \end{equation} The shift can be corrected by the addition of the counter term \begin{equation} H_{c.t.} = \frac{\delta_m}{2} \int_0^L dx \phi^2 \to \frac{\delta_m}{2} \sum_n Q_n^2,\end{equation} with $\delta_m = -\Sigma + \mathcal{O} (\lambda^2)$, i.e., the mass parameter in the Hamiltonian is not physical, but bare, \begin{equation} m_0^2 = m^2 + \delta_m = m^2 - \frac{\lambda}{4N} \sum_n \frac{1}{\omega_n} + \mathcal{O} (\lambda^2). \end{equation} For large $N$, the sum can be approximated by an integral, \begin{equation} \Sigma = \frac{\lambda}{4} \int_0^1 \frac{dk}{\sqrt{m^2 + 4\sin^2 k\pi}} \end{equation} which has a logarithmic divergence at small $m^2$ (i.e., length scale $1/m$ large in units of lattice spacing, which is the physically interesting limit). 
We easily obtain \begin{equation} \label{eqA13}\Sigma = \frac{\lambda}{8\pi} \log \frac{64}{m^2} + \mathcal{O} (m^2) \end{equation} The bare mass is \begin{equation} \label{eqA14} m_0^2 = m^2 -\Sigma + \mathcal{O} (\lambda^2) = m^2 - \frac{\lambda}{8\pi} \log \frac{64}{m^2} + \mathcal{O} (\lambda^2, m^2) \end{equation} Notice that for weak coupling (small $\lambda$), the physically interesting case has $m_0^2 < 0$ (a stable system, as long as $\lambda >0$).
\subsection{Ground State Construction} To find the required transformation $U$, we work as follows. Notice that for $n=0$, \begin{equation} a_0 = \frac{1}{2\sqrt{N}} \sum_{k=0}^{N-1}\left[ \left( \sqrt{m} + \frac{1}{\sqrt{m}} \right) A_k + \left( \sqrt{m} - \frac{1}{\sqrt{m}} \right) A_k^\dagger \right] \end{equation} where we used $\omega_0 =m$. For $n\ne 0$, we consider pairs $(a_n , a_{N-n})$. We have \begin{eqnarray} a_n + a_{N-n} &=& \frac{1}{2\sqrt{N}} \sum_{k=0}^{N-1} \cos \frac{2\pi nk}{N} \left[ \left( \sqrt{\omega_n} + \frac{1}{\sqrt{\omega_n}} \right) A_k \right. \nonumber\\ && \left. + \left( \sqrt{\omega_n} - \frac{1}{\sqrt{\omega_n}} \right) A_k^\dagger \right] \nonumber\\ a_n - a_{N-n} &=& \frac{i}{2\sqrt{N}} \sum_{k=0}^{N-1} \sin \frac{2\pi nk}{N} \left[ \left( \sqrt{\omega_n} + \frac{1}{\sqrt{\omega_n}} \right) A_k \right. \nonumber\\ && \left. - \left( \sqrt{\omega_n} - \frac{1}{\sqrt{\omega_n}} \right) A_k^\dagger \right] \end{eqnarray} where we used $\omega_n = \omega_{N-n}$.
The above expressions suggest that we transform $A_n$ into $a_n$ in three steps as detailed in Sec.~\ref{sec:groundstate}.
\subsection{Example: $N=4$}
To illustrate the above algorithm, we consider the case in which space has been discretized to four points. The rotation ($\mathbf{A}' = \mathbf{O} \mathbf{A}$) is described by the orthogonal matrix \begin{equation} \mathbf{O} = \frac{1}{2} \left[ \begin{array}{cccc} 1 & 1 & 1 & 1 \\ \sqrt{2} & 0 & -\sqrt{2} & 0 \\ 1 & -1 & 1 & -1 \\ 0 & \sqrt{2} & 0 & -\sqrt{2} \end{array} \right] \end{equation} We have \begin{equation} \mathbf{O} = R_{02} \left( \frac{\pi}{4} \right) S_{01} R_{13} \left( \frac{\pi}{4} \right) R_{02} \left( \frac{\pi}{4} \right) \end{equation} where $R_{ij} (\theta)$ is a rotation in the $ij$-plane of angle $\theta$ and $S_{ij}$ is the swap $i\leftrightarrow j$. Therefore the rotation $\mathbf{O}$ can be implemented with four two-mode unitaries.
Next, we squeeze each mode as $ A_n' \to A_n'' = \cosh r_n A_n' + \sinh r_n {A_n'}^\dagger$, where $e^{2r_0} = \omega_0$, $e^{2r_1} = \omega_1$, $e^{2r_2} = \omega_2$, and $e^{-2r_3} = \omega_3$. Notice that $r_3 = - r_1$, because $\omega_3 = \omega_1$.
Finally, we perform the rotation, $A_1'' \to \frac{1}{\sqrt{2}} ( A_1'' +iA_3'')$, $A_3'' \to \frac{1}{\sqrt{2}} (iA_1'' + A_3'')$, to arrive at the desired modes, \begin{eqnarray} a_0 &=& \frac{1}{2} \sum_n \left[ \cosh r_0 A_n + \sinh r_0 A_n^\dagger \right] \nonumber\\
a_1 &=& \frac{1}{2} \sum_n i^n \left[ \cosh r_1 A_n + \sinh r_1 A_n^\dagger \right] \nonumber\\
a_2 &=& \frac{1}{2} \sum_n (-1)^n \left[ \cosh r_2 A_n + \sinh r_2 A_n^\dagger \right] \nonumber\\
a_3 &=& \frac{1}{2} \sum_n (-i)^n \left[ \cosh r_3 A_n + \sinh r_3 A_n^\dagger \right] \end{eqnarray}
Each of the above steps is implemented with a Gaussian unitary involving at most two modes.
\section{Excited States} To generate the required one-photon state, two methods can be used. One can first squeeze the vacuum of the $k$th mode with an optical parametric amplifier to
\begin{equation} S_k(s) |0\rangle_k \ \ , \ \ \ \ S_k(s) = e^{\frac{s}{2} ({A_k^\dagger}^2 - A_k^2 )} \end{equation} Then pass the squeezed state through a (highly transmitting) beam splitter of transmittance $T$, and place a photodetector on the auxiliary output port. A click of the detector heralds a successful photon subtraction, which is described by the non-unitary operator \begin{equation} \sqrt{1-T}\, T^{A_k^\dagger A_k/2} A_k \end{equation} The transmittance has to be high so that the probability of detecting two or more photons is negligible. If no photon is detected, the process is repeated until a photon is detected. Finally, apply anti-squeezing $S_k^\dagger (s')$.
We obtain the state (unnormalized)
\begin{equation}\label{eq72} S_k^\dagger (s') T^{A_k^\dagger A_k/2} A_k S_k(s) |0\rangle_k \end{equation} If the squeezing parameters are chosen so that \begin{equation} T = \frac{\tanh s'}{\tanh s} \end{equation} then it is straightforward to show that \eqref{eq72} is the desired state,
\begin{equation} S_k^\dagger (s') T^{A_k^\dagger A_k/2} A_k S_k(s) |0\rangle_k \propto A_k^\dagger |0\rangle_k. \end{equation}
Optionally, one may also use a heralded single photon source. Such a source would consist of a parametric downconverter with a high efficiency heralding detector. To obtain exactly one photon when operating the source with high brightness (but on average less than one pair per pulse), the heralding detector would consist of a high efficiency photon number resolving detector, such as a transition edge sensor.
\section{Generalization to Arbitrary Dimensions}
Generalization to arbitrary spatial dimension $d$ is straightforward. The free-scalar Hamiltonian in the continuum reads \begin{equation}\label{eq61} H_0 = \frac{1}{2} \int d^dx \left[ \pi^2 + (\mathbf{\nabla} \phi )^2 + m^2 \phi^2 \right] \end{equation} where $\mathbf{x} \in [0,L]^d$, with the fields obeying standard commutation relations, \begin{equation} [ \phi ( \mathbf{x} ) \ , \ \pi(\mathbf{x}') ] = i \delta^d (\mathbf{x} - \mathbf{x}' ) \end{equation} Each coordinate $x_i$ ($i=1,\dots, d$) is discretized as before, $x_i = n_i a$, $n_i = 0,1,\dots, N-1$, and we define $Q_{\mathbf{n}} \equiv \phi (\mathbf{x})$, $P_{\mathbf{n}} \equiv \pi (\mathbf{x})$, $A_{\mathbf{n}} = \frac{1}{\sqrt{2}} (Q_{\mathbf{n}} + iP_{\mathbf{n}})$, where $\mathbf{n} \in \mathbb{Z}_N^d$.
The Hamiltonian \eqref{eq61} can then be rendered in the form \eqref{eq8}, where $\mathbf{V}$ has eigenvalues and corresponding normalized eigenvectors, \begin{eqnarray} \omega_{\mathbf{k}}^2 &=& m^2 + 4 \sum_{i=1}^d \sin^2 \frac{k_i}{2} \ , \nonumber\\ \mathbf{e}_{\mathbf{k}}^{\mathbf{n}} &=& \frac{1}{N^{d/2}} e^{i\mathbf{k} \cdot \mathbf{n}} \end{eqnarray} where $\mathbf{k} \in \frac{2\pi}{N} \mathbb{Z}_N^d$ (the dual lattice). The eigenvectors form a unitary matrix.
The discretized Hamiltonian is diagonalized as \begin{equation} H_0 = \sum_{\mathbf{k} \in \Gamma} \omega_{\mathbf{k}} \left( a_{\mathbf{k}}^\dagger a_{\mathbf{k}} + \frac{1}{2} \right) \end{equation} where $a_{\mathbf{k}}$ is the annihilation operator defined in Sec.~\ref{sec2} (extended to $d$ dimensions in an obvious way).
Introducing an interaction term, $H_{int} = \frac{\lambda}{4!} \sum_{\mathbf{n}} Q_{\mathbf{n}}^4$, and the attendant counter term, $H_{c.t.} = \frac{\delta_m}{2} \sum_{\mathbf{n}} Q_{\mathbf{n}}^2$, and working as in the one-dimensional case, we obtain a shift in the poles of the Green function, \begin{equation} \Sigma = \frac{\lambda}{4} \sum_{\mathbf{k} \in \Gamma} \frac{1}{\omega_{\mathbf{k}}} + \mathcal{O} (\lambda^2) \end{equation} which is related to the counter-term parameter $\delta_m$ via $\delta_m = -\Sigma + \mathcal{O} (\lambda^2)$. For large $N$, the sum is approximated by an integral over the hypercube $[0,2\pi]^d$. For $d=1$, it reduces to the previous result, whereas for $d>1$, we obtain at lowest order in $m$ and $\lambda$, \begin{equation} \Sigma = C_d \lambda + \dots \end{equation} Numerically, $C_2 \approx 0.16$, and $C_3 \approx 0.11$.
\end{document}
\begin{document}
\title{Some inequalities for interval-valued functions on time scales}
\author{Dafang Zhao$^{1,2}$ \and Guoju Ye$^{1}$ \and Wei Liu$^{1}$ \and Delfim F. M. Torres$^{3}$}
\authorrunning{D. F. Zhao et al.}
\institute{Dafang Zhao \at \email{[email protected]} \and Guoju Ye \at \email{[email protected]} \and Wei Liu \at \email{[email protected]} \and Delfim F. M. Torres \at \email{[email protected]}\\ \and $^{1}$College of Science, Hohai University, Nanjing, Jiangsu 210098, P. R. China.\\ $^{2}$School of Mathematics and Statistics, Hubei Normal University, Huangshi, Hubei 435002, P. R. China.\\ $^{3}$Center for Research and Development in Mathematics and Applications (CIDMA),\\ Department of Mathematics, University of Aveiro, 3810-193 Aveiro, Portugal.\\}
\date{Submitted: 20-Dec-2017 / Revised: 31-Aug-2018 / Accepted: 11-Sept-2018}
\maketitle
\begin{abstract} We introduce the interval Darboux delta integral (shortly, the $ID$ $\Delta$-integral) and the interval Riemann delta integral (shortly, the $IR$ $\Delta$-integral) for interval-valued functions on time scales. Fundamental properties of $ID$ and $IR$ $\Delta$-integrals and examples are given. Finally, we prove Jensen's, H\"{o}lder's and Minkowski's inequalities for the $IR$ $\Delta$-integral. Also, some examples are given to illustrate our theorems.
\keywords{interval-valued functions \and time scales \and Jensen's inequality \and H\"{o}lder's inequality \and Minkowski's inequality} \end{abstract}
\section{Introduction} \label{intro}
Interval analysis was initiated by Moore for providing reliable computations \cite{M66}. Since then, interval analysis and interval-valued functions have been extensively studied both in mathematics and its applications: see, e.g., \cite{B13,C13,C15,CB,CC,G17,J01,L15,M79,M09,O15,S09,WG00,Z17}. Recently, several classical integral inequalities have been extended to the context of interval-valued functions by Chalco-Cano et al. \cite{C12,CL15}, Costa \cite{C17}, Costa and Rom\'{a}n-Flores \cite{CF}, Flores-Franuli\v{c} et al. \cite{FC}, Rom\'{a}n-Flores et al. \cite{R16,R13}.
Motivated by \cite{C81,C17,R16}, we introduce the $ID$ and $IR$ $\Delta$-integrals, and present some integral inequalities on time scales. A time scale $\mathbb{T}$ is an arbitrary nonempty closed subset of the real numbers $\mathbb{R}$ with the subspace topology inherited from the standard topology of $\mathbb{R}$. The theory of time scales was born in 1988 with the Ph.D. thesis of Hilger \cite{H4}. The aim is to unify various definitions and results from the theories of discrete and continuous dynamical systems, and to extend them to more general classes of dynamical systems. It has undergone tremendous expansion and development on various aspects by several authors over the past three decades: see, e.g., \cite{BCT1,BCT2,BP1,BP2,FB15,FT16,V16,W05,YZ,YZT,ZL16}.
In 2013, Lupulescu introduced the Riemann $\Delta$-integral for interval-valued functions on time scales and presented some of its basic properties \cite{L13}. Nonetheless, to our best knowledge, there is no systematic theory of integration for interval-valued functions on time scales. In this work, in order to complete the theory of $IR$ $\Delta$-integration and improve recent results given in \cite{C81,C17,R16}, we introduce the $ID$ $\Delta$-integral and the $IR$ $\Delta$-integral on time scales. We show that the $ID$ $\Delta$-integral is a generalization of the $IR$ $\Delta$-integral. Also, some basic properties for the $ID$ and $IR$ $\Delta$-integrals, and some examples, are given. Finally, we present Jensen's inequality, H\"{o}lder's inequality and Minkowski's inequality for the $IR$ $\Delta$-integral. Some celebrated inequalities are derived as consequences of our results.
The paper is organized as follows. After a Section~\ref{sec:2} of preliminaries, in Section~\ref{sec:3} the $ID$ and $IR$ $\Delta$-integrals for interval-valued functions are introduced. Moreover, some basic properties and examples are given. In Section~\ref{sec:4}, we prove Jensen's, H\"{o}lder's and Minkowski's inequalities for the general $IR$ $\Delta$-integral. We end with Section~\ref{sec:5} of conclusions.
\section{Preliminaries} \label{sec:2}
In this section, we recall some basic definitions, notations, properties and results on interval analysis and the time scale calculus, which are used throughout the paper. A real interval $[u]$ is the bounded, closed subset of $\mathbb{R}$ defined by $$
[u]=[\underline{u},\overline{u}]=\{x\in\mathbb{R}|\ \underline{u}\leq x\leq\overline{u}\}, $$ where $\underline{u}, \overline{u}\in \mathbb{R}$ and $\underline{u}\leq\overline{u}$. The numbers $\underline{u}$ and $\overline{u}$ are called the left and the right endpoints of $[\underline{u},\overline{u}]$, respectively. When $\underline{u}$ and $\overline{u}$ are equal, the interval $[u]$ is said to be degenerate. In this paper, the term interval will mean a nonempty interval. We call $[u]$ positive if $\underline{u}>0$ or negative if $\overline{u}<0$. The partial order ``$\leq$" is defined by $$ [\underline{u},\overline{u}]\leq[\underline{v},\overline{v}]\Longleftrightarrow \underline{u}\leq\underline{v},\overline{u}\leq\overline{v}. $$ The inclusion ``$\subseteq$" is defined by $$ [\underline{u},\overline{u}]\subseteq[\underline{v},\overline{v}]\Longleftrightarrow \underline{v}\leq\underline{u},\overline{u}\leq\overline{v}. $$ For an arbitrary real number $\lambda$ and $[u]$, the interval $\lambda [u]$ is given by \begin{equation*} \lambda[\underline{u},\overline{u}]= \begin{cases} [\lambda\underline{u},\lambda\overline{u}]& \text{if $\lambda>0$},\\ \{0\}& \text{if $\lambda=0$},\\ [\lambda\overline{u},\lambda\underline{u}]& \text{if $\lambda<0$}. 
\end{cases} \end{equation*} For $[u]=[\underline{u},\overline{u}]$ and $[v]=[\underline{v},\overline{v}]$, the four arithmetic operators (+,-,$\cdot$,/) are defined by $$ [u]+[v]=[\underline{u}+\underline{v},\overline{u}+\overline{v}], $$ $$ [u]-[v]=[\underline{u}-\overline{v},\overline{u}-\underline{v}], $$ $$ [u]\cdot[v]=\big[\min\{\underline{u}\underline{v},\underline{u}\overline{v}, \overline{u}\underline{v},\overline{u}\overline{v}\}, \max\{\underline{u}\underline{v},\underline{u}\overline{v}, \overline{u}\underline{v},\overline{u}\overline{v}\}\big], $$ \begin{equation*} \begin{split} [u]/[v]=\big[&\min\{\underline{u}/\underline{v},\underline{u}/\overline{v}, \overline{u}/\underline{v},\overline{u}/\overline{v}\},\\ &\max\{\underline{u}/\underline{v},\underline{u}/\overline{v}, \overline{u}/\underline{v},\overline{u}/\overline{v}\}\big], {\rm where}\ \ 0\notin[\underline{v},\overline{v}]. \end{split} \end{equation*} We denote by $\mathbb{R}_{\mathcal{I}}$ the set of all intervals of $\mathbb{R}$, and by $\mathbb{R}^{+}_{\mathcal{I}}$ and $\mathbb{R}^{-}_{\mathcal{I}}$ the set of all positive intervals and negative intervals of $\mathbb{R}$, respectively. The Hausdorff--Pompeiu distance between intervals $[\underline{u},\overline{u}]$ and $[\underline{v},\overline{v}]$ is defined by $$ d\big([\underline{u},\overline{u}],[\underline{v},\overline{v}]\big)
=\max\Big\{|\underline{u}-\underline{v}|,|\overline{u}-\overline{v}|\Big\}. $$ It is well known that $(\mathbb{R}_{\mathcal{I}}, d)$ is a complete metric space.
Let $\mathbb{T}$ be a time scale. We define the half-open interval $[a,b)_{\mathbb{T}}$ by $$ [a,b)_{\mathbb{T}} =\left\{t\in \mathbb{T}: a \leq t < b\right\}. $$ The open and closed intervals are defined similarly. For $t\in \mathbb{T}$, we denote by $\sigma$ the forward jump operator, i.e., $\sigma(t):=\inf\{s>t: s\in \mathbb{T}\}$, and by $\rho$ the backward jump operator, i.e., $\rho(t):=\sup\{s<t: s\in \mathbb{T}\}$. Here, we put $\sigma(\sup\mathbb{T})=\sup\mathbb{T}$ and $\rho(\inf\mathbb{T})=\inf\mathbb{T}$, where $\sup\mathbb{T}$ and $\inf\mathbb{T}$ are finite. In this situation, $\mathbb{T}^{\kappa}:=\mathbb{T}\backslash \{\sup\mathbb{T}\}$ and $\mathbb{T}_{\kappa}:=\mathbb{T}\backslash\{\inf\mathbb{T}\}$, otherwise, $\mathbb{T}^{\kappa}:=\mathbb{T}$ and $\mathbb{T}_{\kappa}:=\mathbb{T}$. If $\sigma(t)>t$, then we say that $t$ is right-scattered, while if $\rho(t)<t$, then we say that $t$ is left-scattered. If $\sigma(t)=t$ and $t< \sup\mathbb{T}$, then $t$ is called right-dense, and if $\rho(t)=t$ and $t> \inf\mathbb{T}$, then $t$ is left-dense. The graininess functions $\mu$ and $\eta$ are defined by $\mu(t):=\sigma(t)-t$ and $\eta(t):=t-\rho(t)$, respectively.
A function $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}$ is called right-dense continuous ($rd$-continuous) if it is right continuous at each right-dense point and there exists a finite left limit at all left-dense points. The set of $rd$-continuous functions $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}$ is denoted by $C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$.
A function $f$ is said to be an interval function of $t$ on $[a,b]_{\mathbb{T}}$ if it assigns a nonempty interval $$ f(t)=\big[\underline{f}(t), \overline{f}(t)\big] $$ to each $t\in[a,b]_{\mathbb{T}}$. We say that $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ is continuous at $t_{0}\in[a,b]_{\mathbb{T}}$ if for each $\epsilon>0$ there exists a $\delta>0$ such that $$ d(f(t),f(t_{0}))<\epsilon $$
whenever $|t-t_{0}|<\delta$. The set of continuous functions $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ is denoted by $C([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$. It is clear that $f$ is continuous at $t_{0}$ if and only if $\underline{f}$ and $\overline{f}$ are continuous at $t_{0}$.
A division of $[a,b]_{\mathbb{T}}$ is any finite ordered subset $D$ having the form $$ D=\{a=t_{0}<t_{1}<\cdots <t_{n}=b\}. $$ We denote the set of all divisions of $[a,b]_{\mathbb{T}}$ by $\mathcal{D}=\mathcal{D}([a,b]_{\mathbb{T}})$.
\begin{lemma}[Bohner and Peterson \cite{BP2}] \label{lem1} For every $\delta>0$ there exists some division $D\in\mathcal{D}([a,b]_{\mathbb{T}})$ given by $$ a=t_{0}<t_{1}<\cdots <t_{n-1}<t_{n}=b $$ such that for each $i\in\{1,2,\ldots,n\}$ either $t_{i}-t_{i-1}\leq\delta$ or $$ t_{i}-t_{i-1}>\delta \ \ {\rm and}\ \ \rho(t_{i})=t_{i-1}. $$ \end{lemma}
Let $\mathcal{D}(\delta,[a,b]_{\mathbb{T}})$ be the set of all $D\in\mathcal{D}([a,b]_{\mathbb{T}})$ that possess the property indicated in Lemma \ref{lem1}. In each interval $[t_{i-1},t_{i})_{\mathbb{T}}$, where $1\leq i\leq n$, choose an arbitrary point $\xi_{i}$ and form the sum $$ S(f,D,\delta)=\displaystyle \sum^{n}_{i=1}f(\xi_{i})(t_{i}-t_{i-1}), $$ where $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}$ (or $\mathbb{R}_{\mathcal{I}}$). We call $S(f,D,\delta)$ a Riemann $\Delta$-sum of $f$ corresponding to $D\in \mathcal{D}(\delta,[a,b]_{\mathbb{T}})$.
\begin{definition}[Bohner and Peterson \cite{BP2}] \label{defn2.1} A function $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}$ is called Riemann $\Delta$-integrable on $[a,b]_{\mathbb{T}}$ if there exists an $A\in \mathbb{R}$ such that for each $\epsilon>0$ there exists a $\delta>0$ for which
$$\big|S(f,D,\delta)- A\big|<\epsilon$$ for all $D\in \mathcal{D}(\delta,[a,b]_{\mathbb{T}})$. In this case, $A$ is called the Riemann $\Delta$-integral of $f$ on $[a,b]_{\mathbb{T}}$ and is denoted by $A=(R)\int_{a}^{b}f(t)\Delta t$ or $A=\int_{a}^{b}f(t)\Delta t$. The family of all Riemann $\Delta$-integrable functions on $[a,b]_{\mathbb{T}}$ is denoted by $\mathcal{R}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. \end{definition}
\section{The Interval Darboux and Riemann delta integrals} \label{sec:3}
Let $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ be such that $f(t)=\big[\underline{f}(t), \overline{f}(t)\big]$ for all $t\in [a,b]_{\mathbb{T}}$. We denote $$ M=\sup\{\overline{f}(t):t\in [a,b)_{\mathbb{T}}\},\ \ m =\inf\{\underline{f}(t):t\in [a,b)_{\mathbb{T}}\}, $$ and for $1\leq i\leq n$, $$ M_{i}=\sup\{\overline{f}(t):t\in [t_{i-1},t_{i})_{\mathbb{T}}\}, $$ $$ m_{i}=\inf\{\underline{f}(t):t\in [t_{i-1},t_{i})_{\mathbb{T}}\}. $$ The lower Darboux $\Delta$-sum $L(\underline{f},D)$ of $\underline{f}$ with respect to $D\in\mathcal{D}([a,b]_{\mathbb{T}})$ is the sum $$ L(\underline{f},D)=\sum_{i=1}^{n}m_{i}(t_{i}-t_{i-1}), $$ and the upper Darboux $\Delta$-sum $U(\overline{f},D)$ is $$ U(\overline{f},D)=\sum_{i=1}^{n}M_{i}(t_{i}-t_{i-1}). $$
\begin{definition}[The Interval Darboux delta integral] \label{defn3} Let $I=[a,b]_{\mathbb{T}}$, where $a,b \in \mathbb{T}$. The lower Darboux $\Delta$-integral of $\underline{f}$ on $[a,b]_{\mathbb{T}}$ is defined by $$ (D)\infint_{a}^{b}\underline{f}(t)\Delta t =\sup_{D\in \mathcal{D}([a,b]_{\mathbb{T}})}\Big\{L(\underline{f},D)\Big\} $$ and the upper Darboux $\Delta$-integral of $\overline{f}$ on $[a,b]_{\mathbb{T}}$ is defined by $$ (D)\supint_{a}^{b}\overline{f}(t)\Delta t =\inf_{D\in \mathcal{D}([a,b]_{\mathbb{T}})}\Big\{U(\overline{f},D)\Big\}. $$ Then, we define the $ID$ $\Delta$-integral of $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ on $[a,b]_{\mathbb{T}}$ as the interval $$ (ID)\int_{a}^{b}f(t)\Delta t=\Bigg[(D)\infint_{a}^{b}\underline{f}(t)\Delta t, (D)\supint_{a}^{b}\overline{f}(t)\Delta t\Bigg]. $$ The family of all $ID$ $\Delta$-integrable functions on $[a,b]_{\mathbb{T}}$ is denoted by $\mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. \end{definition}
\begin{theorem} \label{thm2} Let $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ be such that $$ f(t)=\big[\underline{f}(t), \overline{f}(t)\big] $$ for all $t\in [a,b]_{\mathbb{T}}$. Then $f\in \mathcal{ID}_{(\Delta,\ [t,\sigma(t)]_{\mathbb{T}})}$ and $$ (ID)\int_{t}^{\sigma(t)}f(s)\Delta s=\big[\mu(t)\underline{f}(t),\mu(t)\overline{f}(t)\big]. $$ \end{theorem}
\begin{proof} If $\sigma(t)=t$, then the result is obvious. If $\sigma(t)>t$, then $\mathcal{D}([t,\sigma(t)]_{\mathbb{T}})$ only contains one single element given by $$ t=s_{0}<s_{1}=\sigma(t). $$ Since $[s_{0},s_{1})=[t,\sigma(t))=\{t\}$, we have $$ L(f,D)=\underline{f}(t)(\sigma(t)-t)=\mu(t)\underline{f}(t), $$ $$ U(f,D)=\overline{f}(t)(\sigma(t)-t)=\mu(t)\overline{f}(t). $$ Consequently, we obtain $$ (ID)\int_{t}^{\sigma(t)} f(s)\Delta s =\big[\mu(t)\underline{f}(t),\mu(t)\overline{f}(t)\big]. $$ The result is proved. \end{proof}
\begin{remark} \label{rmk3.1} It is clear that if $f$ is a real-valued function, then our Definition~\ref{defn3} implies the definition of Darboux $\Delta$-integral introduced by \cite{BP2}. We also have the following:
\noindent(1) If $\mathbb{T}=\mathbb{R}$, then Definition~\ref{defn3} implies the definition of Darboux interval integral introduced by Caprani et al. \cite{C81,R82}.
\noindent(2) If $\mathbb{T}=\mathbb{Z}$, then each function $f:\mathbb{Z}\rightarrow \mathbb{R}_{\mathcal{I}}$ is $ID$ $\Delta$-integrable on $[a,b]_{\mathbb{T}}$. Moreover, $$ (ID)\int_{a}^{b}f(t)\Delta t=\Bigg[\sum_{t=a}^{b-1} \underline{f}(t), \sum_{t=a}^{b-1}\overline{f}(t)\Bigg]. $$
\noindent(3) If $\mathbb{T}=h\mathbb{Z}$, then each function $f:h\mathbb{Z}\rightarrow \mathbb{R}_{\mathcal{I}}$ is $ID$ $\Delta$-integrable on $[a,b]_{\mathbb{T}}$. Moreover, $$ (ID)\int_{a}^{b}f(t)\Delta t=\Bigg[ \sum_{k=\frac{a}{h}}^{\frac{b}{h}-1}\underline{f}(kh)h, \sum_{k=\frac{a}{h}}^{\frac{b}{h}-1}\overline{f}(kh)h\Bigg]. $$ \end{remark}
\begin{example} \label{ex1} Suppose that $[a,b]_{\mathbb{T}}=[0,1]$, $\mathbb{Q}$ is the set of rational numbers in $[0,1]$, and $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ is defined by \begin{equation*} f(t)= \begin{cases} [-1,0], & \text{if $t\in \mathbb{Q}$},\\ [1,2], & \text{if $t\in[0,1]\backslash \mathbb{Q}$}. \end{cases} \end{equation*} Then, \begin{equation*} \begin{split} (ID)\int_{0}^{1}f(t)\Delta t&=\Bigg[(D)\infint_{0}^{1} \underline{f}(t)dt, (D)\supint_{0}^{1}\overline{f}(t)dt\Bigg]\\ &=[-1,2]. \end{split} \end{equation*} \end{example}
\begin{example} \label{ex2} Suppose that $[a,b]_{\mathbb{T}}=\big\{0,\frac{1}{3},\frac{1}{2},1\big\}$ and \\ $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ is defined by \begin{equation*} f(t)= \begin{cases} [-1,0], & \text{if $t=0$},\\ [-\frac{1}{3},\frac{1}{3}], & \text{if $t=\frac{1}{3}$},\\ [-\frac{1}{2},\frac{1}{2}], & \text{if $t=\frac{1}{2}$},\\ [1,2], & \text{if $t=1$}. \end{cases} \end{equation*} Then, \begin{equation*} \begin{split} (D)\infint_{0}^{1}\underline{f}(t)\Delta t &=(-1)\cdot\frac{1}{3}+\left(-\frac{1}{3}\right)\cdot\frac{1}{6} +\left(-\frac{1}{2}\right)\cdot\frac{1}{2}\\ &=-\frac{23}{36}, \end{split} \end{equation*} \begin{equation*} (D)\supint_{0}^{1}\overline{f}(t)\Delta t=0\cdot\frac{1}{3} +\frac{1}{3}\cdot\frac{1}{6}+\frac{1}{2}\cdot\frac{1}{2}=\frac{11}{36}, \end{equation*} and therefore $$ (ID)\int_{0}^{1}f(t)\Delta t=\Bigg[-\frac{23}{36}, \frac{11}{36}\Bigg]. $$ \end{example}
\begin{theorem} \label{thm3} Let $f,\ g\in \mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$, and $\lambda$ be \\an arbitrary real number. Then,
\noindent(1) $\lambda f \in \mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ and $$ (ID)\int_{a}^{b}\lambda f(t)\Delta t=\lambda (ID)\int_{a}^{b}f(t)\Delta t; $$
\noindent(2) $f+g \in \mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ and \begin{equation*} \begin{split} &(ID)\int_{a}^{b}(f(t)+g(t))\Delta t\\ &\subseteq(ID)\int_{a}^{b}f(t)\Delta t+(ID)\int_{a}^{b}g(t)\Delta t; \end{split} \end{equation*}
\noindent(3) for $c\in [a,b]_{\mathbb{T}}$ and $a<c<b$, $$(ID)\int_{a}^{c}f(t)\Delta t +(ID)\int_{c}^{b} f(t)\Delta t=(ID)\int_{a}^{b} f(t)\Delta t; $$
\noindent(4) if $f\subseteq g$ on $[a,b]_{\mathbb{T}}$, then $$ (ID)\int_{a}^{b} f(t)\Delta t\subseteq (ID)\int_{a}^{b} g(t)\Delta t. $$ \end{theorem}
\begin{proof} We only prove that part (2) of Theorem~\ref{thm3} holds. The other relations are obvious. Suppose that $$ f(t)=\big[\underline{f}(t),\overline{f}(t)\big], \ g(t) =\big[\underline{g}(t),\overline{g}(t)\big]. $$ Select any division $D\in\mathcal{D}([a,b]_{\mathbb{T}})$ having the form $$ D=\{a=t_{0}<t_{1}<\cdots <t_{n}=b\}. $$ Then, \begin{equation*} \begin{split} &\inf_{t\in [t_{i-1},t_{i})_{\mathbb{T}}}\{\underline{f}(t)\} +\inf_{t\in [t_{i-1},t_{i})_{\mathbb{T}}}\{\underline{g}(t)\}\\ &\leq \inf_{t\in [t_{i-1},t_{i})_{\mathbb{T}}}\{\underline{f}(t) +\underline{g}(t)\}, \end{split} \end{equation*} \begin{equation*} \begin{split} &\sup_{t\in [t_{i-1},t_{i})_{\mathbb{T}}}\{\overline{f}(t)+\overline{g}(t)\}\\ &\leq\sup_{t\in [t_{i-1},t_{i})_{\mathbb{T}}}\{\overline{f}(t)\} +\sup_{t\in [t_{i-1},t_{i})_{\mathbb{T}}}\{\overline{g}(t)\},
\end{split} \end{equation*} and it follows that $$ L(\underline{f},D)+L(\underline{g},D)\leq L(\underline{f}+\underline{g},D), $$ $$ U(\overline{f},D)+U(\overline{g},D)\geq U(\overline{f}+\overline{g},D). $$ The intended result follows. \end{proof}
\begin{example} \label{ex3} Suppose that $[a,b]_{\mathbb{T}}=[0,1]$, $\mathbb{Q}$ is the set of rational numbers in $[0,1]$, and $f,g:[a,b]_{\mathbb{T}} \rightarrow \mathbb{R}_{\mathcal{I}}$ are defined by \begin{equation*} f(t)= \begin{cases} [-1,0], & \text{if $t\in \mathbb{Q}$},\\ [1,2], & \text{if $t\in[0,1]\backslash \mathbb{Q}$}, \end{cases} \end{equation*} \begin{equation*} g(t)= \begin{cases} [0,1], & \text{if $t\in \mathbb{Q}$},\\ [-2,-1], & \text{if $t\in[0,1]\backslash \mathbb{Q}$}. \end{cases} \end{equation*} Then $$ f(t)+g(t)=[-1,1] $$ for all $t\in[0,1]$. It follows that \begin{equation*} \begin{split} &(ID)\int_{0}^{1}f(t)\Delta t+(ID)\int_{0}^{1}g(t)\Delta t\\ &=[-1,2]+[-2,1]\\ &=[-3,3], \end{split} \end{equation*} \begin{equation*}
(ID)\int_{0}^{1}(f(t)+g(t))\Delta t =[-1,1]. \end{equation*} Therefore, we have \begin{equation*} \begin{split} &(ID)\int_{a}^{b}(f(t)+g(t))\Delta t\\ &\subseteq (ID)\int_{a}^{b}f(t)\Delta t+(ID)\int_{a}^{b}g(t)\Delta t. \end{split} \end{equation*} \end{example}
We now give Riemann's definition of integrability, which is equivalent to the Riemann $\Delta$-integral given in \cite[Definition 13]{L13}.
\begin{definition}[The Interval Riemann delta integral] \label{defn4} A function $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ is called $IR$ $\Delta$-integrable on $[a,b]_{\mathbb{T}}$ if there exists an $A\in \mathbb{R}_{\mathcal{I}}$ such that for each $\epsilon>0$ there exists a $\delta>0$ for which $$d\big(S(f,\mathcal{D},\delta), A\big)<\epsilon$$ for all $D\in \mathcal{D}(\delta,[a,b]_{\mathbb{T}})$. In this case, $A$ is called the $IR$ $\Delta$-integral of $f$ on $[a,b]_{\mathbb{T}}$ and is denoted by $A=(IR)\int_{a}^{b}f(t)\Delta t$. The family of all $IR$ $\Delta$-integrable functions on $[a,b]_{\mathbb{T}}$ is denoted by $\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. \end{definition}
\begin{remark} \label{rmk3.2} Definitions \ref{defn3} and \ref{defn4} are not equivalent. If $f\in\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$, then $f\in\mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. However, the converse is not always true: for the function $f$ of Example \ref{ex1}, it is clear that $f\in\mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$, but $f\notin\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. In fact, all bounded interval functions are $ID$ $\Delta$-integrable, but boundedness of $f$ is not a sufficient condition for $IR$ $\Delta$-integrability. If $f$ is a continuous function, then $f\in\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ if and only if $f\in\mathcal{ID}_{(\Delta,\ [a,b]_{\mathbb{T}})}$, in which case the value of the integrals agree. \end{remark}
The following two theorems can be easily verified and so the proofs are omitted.
\begin{theorem} \label{thm4} If $f\in C([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$, then $f\in\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ and $$ (IR)\int_{a}^{b}f(t)\Delta t=\Bigg[\int_{a}^{b} \underline{f}(t)\Delta t,\int_{a}^{b}\overline{f}(t)\Delta t\Bigg]. $$ \end{theorem}
\begin{theorem} \label{thm5} Let $f,\ g\in \mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$, and $\lambda$ be an arbitrary real number. Then,
\noindent(1) $\lambda f \in \mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ and $$ (IR)\int_{a}^{b}\lambda f(t)\Delta t=\lambda (IR)\int_{a}^{b}f(t)\Delta t; $$
\noindent(2) $f+g \in \mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ and \begin{equation*} \begin{split} &(IR)\int_{a}^{b}(f(t)+g(t))\Delta t\\ &=(IR)\int_{a}^{b}f(t)\Delta t+(IR)\int_{a}^{b}g(t)\Delta t; \end{split} \end{equation*}
\noindent(3) for $c\in [a,b]_{\mathbb{T}}$ and $a<c<b$, $$ (IR)\int_{a}^{c}f(t)\Delta t+(IR)\int_{c}^{b} f(t)\Delta t =(IR)\int_{a}^{b} f(t)\Delta t; $$
\noindent(4) if $f\subseteq g$ on $[a,b]_{\mathbb{T}}$, then $$ (IR)\int_{a}^{b} f(t)\Delta t\subseteq (IR)\int_{a}^{b} g(t)\Delta t. $$ \end{theorem}
\begin{example} \label{ex4} Suppose that $\mathbb{T}=[-1,0]\cup 3^{\mathbb{N}_{0}}$, where $[-1,0]$ is a real-valued interval and $\mathbb{N}_{0}$ is the set of nonnegative integers. Let $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ be defined by \begin{equation*} f(t)= \begin{cases} [t,t+1], & \text{if $t\in [-1,0)$},\\ [1,2], & \text{if $t=0$},\\ [t,t^{2}+1], & \text{if $t\in 3^{\mathbb{N}_{0}}$}. \end{cases} \end{equation*} If $[a,b]_{\mathbb{T}}=[-1,3]_{\mathbb{T}}$, then \begin{equation*} \begin{split} &(IR)\int_{-1}^{3}f(t)\Delta t\\ &=\Bigg[\int_{-1}^{3}\underline{f}(t)\Delta t,\int_{-1}^{3}\overline{f}(t)\Delta t\Bigg]\\ &=\Bigg[\int_{-1}^{0}tdt+(R)\int_{0}^{1}\Delta t+\int_{1}^{3}t\Delta t,\\ &\ \ \ \ \ \ \int_{-1}^{0}(t+1)dt+\int_{0}^{1}2\Delta t+\int_{1}^{3}(t^{2}+1)\Delta t\Bigg]\\
&=\Bigg[\frac{1}{2}t^{2}\Big|_{-1}^{0}+1+2t^{2}\Big|_{1},\\
&\ \ \ \ \ \ \ \Big(\frac{1}{2}t^{2}+t\Big)\Big|_{-1}^{0}+2+2t(t^{2}+1)\Big|_{1}\Bigg]\\ &=\Big[2\frac{1}{2},6\frac{1}{2}\Big]. \end{split} \end{equation*} \end{example}
\section{Some inequalities for the interval Riemann delta integral} \label{sec:4}
We begin by recalling the notions of convexity on time scales.
\begin{definition}[Dinu \cite{D08}] \label{defn4.1} We say that $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}$ is a convex function if for all $x,y\in[a,b]_{\mathbb{T}}$ and $\alpha\in[0,1]$ we have \begin{equation} \label{1} f(\alpha x+(1-\alpha)y)\leq \alpha f(x)+(1-\alpha)f(y) \end{equation} for which $\alpha x+(1-\alpha)y\in[a,b]_{\mathbb{T}}$. If inequality \eqref{1} is reversed, then $f$ is said to be concave. If $f$ is both convex and concave, then $f$ is said to be affine. The sets of all convex, concave, and affine functions are denoted by $SX([a,b]_{\mathbb{T}},\mathbb{R})$, $SV([a,b]_{\mathbb{T}},\mathbb{R})$, and $SA([a,b]_{\mathbb{T}},\mathbb{R})$, respectively. \end{definition}
We can now introduce the concept of interval-valued convexity.
\begin{definition} \label{defn4.2} We say that $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ is a convex interval-valued function if for all $x,y\in[a,b]_{\mathbb{T}}$ and $\alpha\in(0,1)$ we have \begin{equation} \label{4} \alpha f(x)+(1-\alpha)f(y) \subseteq f(\alpha x+(1-\alpha)y) \end{equation} for which $\alpha x+(1-\alpha)y\in[a,b]_{\mathbb{T}}$. If the set inclusion \eqref{4} is reversed, then $f$ is said to be concave. If $f$ is both convex and concave, then $f$ is said to be affine. The set of all convex, concave and affine interval-valued functions are denoted by $SX([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$, $SV([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$ and $SA([a,b]_{\mathbb{T}}, \mathbb{R}_{\mathcal{I}})$, respectively. \end{definition}
\begin{remark} \label{rmk4.0} It is clear that if $\mathbb{T}=\mathbb{R}$, then Definition~\ref{defn4.2} reduces to the definition of convexity introduced by Breckner \cite{B}. \end{remark}
\begin{theorem} \label{thm4.1} Let $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ be such that $$ f(t)=[\underline{f}(t),\overline{f}(t)] $$ for all $t\in [a,b]_{\mathbb{T}}$. Then,
\noindent(1) $f\in SX([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$ if and only if $\underline{f}\in SX([a,b]_{\mathbb{T}},\mathbb{R})$ and $\overline{f}\in SV([a,b]_{\mathbb{T}},\mathbb{R})$,
\noindent(2) $f\in SV([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$ if and only if $\underline{f}\in SV([a,b]_{\mathbb{T}},\mathbb{R})$ and $\overline{f}\in SX([a,b]_{\mathbb{T}},\mathbb{R})$,
\noindent(3) $f\in SA([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$ if and only if $\underline{f}, \overline{f} \in SA([a,b]_{\mathbb{T}},\mathbb{R})$. \end{theorem}
\begin{proof} We only prove that part (1) of Theorem~\ref{thm4.1} holds. Suppose that $f\in SX([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$ and consider $x,y\in[a,b]_{\mathbb{T}}$, $\alpha\in[0,1]$. Then, $$ \alpha f(x)+(1-\alpha)f(y) \subseteq f(\alpha x+(1-\alpha)y), $$ that is, \begin{equation} \label{5} \begin{split} &\big[\alpha\underline{f}(x)+(1-\alpha)\underline{f}(y), \alpha\overline{f}(x)+(1-\alpha)\overline{f}(y)\big]\\ &\subseteq \big[\underline{f}(\alpha x+(1-\alpha)y), \overline{f}(\alpha x+(1-\alpha)y)\big]. \end{split} \end{equation} It follows that $$ \alpha\underline{f}(x)+(1-\alpha)\underline{f}(y) \geq \underline{f}(\alpha x+(1-\alpha)y) $$ and $$ \alpha\overline{f}(x)+(1-\alpha)\overline{f}(y) \leq \overline{f}(\alpha x+(1-\alpha)y). $$ This shows that $$ \underline{f}\in SX([a,b]_{\mathbb{T}},\mathbb{R})\ {\rm and }\ \overline{f}\in SV([a,b]_{\mathbb{T}},\mathbb{R}). $$ Conversely, if $$ \underline{f}\in SX([a,b]_{\mathbb{T}},\mathbb{R})\ {\rm and }\ \overline{f}\in SV([a,b]_{\mathbb{T}},\mathbb{R}), $$ by Definition~\ref{defn4.2} and the set inclusion \eqref{5}, we have \\$f\in SX([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}})$. \end{proof}
\begin{theorem}[Dinu \cite{D08}] \label{thm4.2} A convex function on \\$[a,b]_{\mathbb{T}}$ is continuous on $(a,b)_{\mathbb{T}}$. \end{theorem}
\begin{theorem} \label{thm4.4} Let $f:[a,b]_{\mathbb{T}}\rightarrow \mathbb{R}_{\mathcal{I}}$ be such that $$ f(t)=[\underline{f}(t),\overline{f}(t)] $$ for all $t\in [a,b]_{\mathbb{T}}$. If $$ f\in SX([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}) \cup SV([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}) \cup SA([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}), $$ then $f\in\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. \end{theorem}
\begin{proof} Suppose that $$ f\in SX([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}) \cup SV([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}) \cup SA([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}). $$ Due to Theorems~\ref{thm4.1} and \ref{thm4.2}, it follows that $\underline{f}$ and $\overline{f}$ are continuous. Then, from Theorem~5.19 of \cite{BP2}, we have that $$ \overline{f}(t), \underline{f}(t)\in\mathcal{R}_{(\Delta,\ [a,b]_{\mathbb{T}})}. $$ Hence, $f\in\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$. \end{proof}
\begin{theorem}[Wong et al. \cite{W06}] \label{thm4.5} Let $a,b\in[a,b]_{\mathbb{T}}$ and $c,d\in\mathbb{R}$. Suppose that $g\in C_{rd}([a,b]_{\mathbb{T}},(c,d))$ and $h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$ with $$
\int_{a}^{b}|h(s)|\Delta s>0. $$ If $f\in C((c,d),\mathbb{R})$ is convex, then \begin{equation}\label{6} \begin{split}
&f\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg)
\leq \frac{\int_{a}^{b}|h(s)|f(g(s))\Delta s}{\int_{a}^{b}|h(s)|\Delta s}. \end{split} \end{equation} If $f$ is concave, then inequality \eqref{6} is reversed. \end{theorem}
\begin{theorem}[Jensen's inequality] \label{thm4.6} \\Let $g\in C_{rd}([a,b]_{\mathbb{T}},(c,d))$ and $h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$ with $$
\int_{a}^{b}|h(s)|\Delta s>0. $$ If $f\in C((c,d),\mathbb{R}^{+}_{\mathcal{I}})$ is a convex function, then \begin{equation*} \begin{split}
&\frac{(IR)\int_{a}^{b}|h(s)|f(g(s))\Delta s}{\int_{a}^{b}|h(s)|\Delta s}
\subseteq f\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg). \end{split} \end{equation*} \end{theorem}
\begin{proof} By hypothesis, we have $$
|h|\overline{f(g)},\ \
|h|\underline{f(g)}\in\mathcal{R}_{(\Delta,\ [a,b]_{\mathbb{T}})}. $$
Hence, $|h|f(g)\in\mathcal{IR}_{(\Delta,\ [a,b]_{\mathbb{T}})}$ and \begin{equation*} \begin{split}
&(IR)\int_{a}^{b}|h(s)|f(g(s))\Delta s\\
&=\Bigg[\int_{a}^{b}|h(s)|\underline{f(g)}(s)\Delta s,\int_{a}^{b}|h(s)|\overline{f(g)}(s)\Delta s\Bigg]. \end{split} \end{equation*} From Theorem~\ref{thm4.5}, it follows that $$
\underline{f}\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg)\leq\frac{\int_{a}^{b}|h(s)|\underline{f(g)}(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s} $$ and $$
\overline{f}\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg)\geq\frac{\int_{a}^{b}|h(s)|\overline{f(g)}(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}, $$ which implies \begin{equation*} \begin{split}
&\Bigg[\frac{\int_{a}^{b}|h(s)|\underline{f(g)}(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}, \frac{\int_{a}^{b}|h(s)|\overline{f(g)}(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg]\\
&\ \ \subseteq \Bigg[\underline{f}\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg),\overline{f}\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg)\Bigg], \end{split} \end{equation*} that is, \begin{equation*} \begin{split}
&\frac{\Bigg[\int_{a}^{b}|h(s)|\underline{f(g)}(s)\Delta s,
\int_{a}^{b}|h(s)|\overline{f(g)}(s)\Delta s\Bigg]}{\int_{a}^{b}|h(s)|\Delta s}\\
&\ \ \subseteq \Bigg[\underline{f}\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg),\overline{f}\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg)\Bigg]. \end{split} \end{equation*} Finally, we obtain \begin{equation*}
\frac{(IR)\int_{a}^{b}|h(s)|f(g(s))\Delta s}{\int_{a}^{b}|h(s)|\Delta s}
\subseteq f\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg). \end{equation*} The proof is complete. \end{proof}
\begin{example} \label{ex5} Suppose that $[a,b]_{\mathbb{T}}=[0,1]\cup \{\frac{3}{2}\}$, where $[0,1]$ is a real-valued interval. Let $g(s)=s^{2}$, $h(s)=e^{s}$, and $f(s)=[s^{2},4\sqrt{s}]$. Then \begin{equation*} \begin{split}
&\frac{(IR)\int_{a}^{b}|h(s)|f(g(s))\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\\ &=\frac{(IR)\int_{0}^{\frac{3}{2}}\big[s^{4}e^{s},4se^{s}\big] \Delta s}{\int_{0}^{\frac{3}{2}}e^{s}\Delta s}\\ &=\frac{\bigg[\int_{0}^{\frac{3}{2}}s^{4}e^{s}\Delta s, \int_{0}^{\frac{3}{2}}4se^{s}\Delta s\bigg]}{\int_{0}^{\frac{3}{2}}e^{s}\Delta s}\\ &=\frac{\bigg[\int_{0}^{1}s^{4}e^{s}ds+\int_{1}^{\frac{3}{2}}s^{4}e^{s}\Delta s,\int_{0}^{1}4se^{s}ds+\int_{1}^{\frac{3}{2}}4se^{s}\Delta s\bigg]}{ \int_{0}^{1}e^{s}ds+\int_{1}^{\frac{3}{2}}e^{s}\Delta s}\\ &=\frac{\bigg[9\frac{1}{2}e-24,4+2e\bigg]}{\frac{3}{2}e-1}\\ &=\Bigg[\frac{19e-48}{3e-2},\frac{8+4e}{3e-2}\Bigg], \end{split} \end{equation*} and \begin{equation*} \begin{split}
&f\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg)\\ &=f\Bigg(\frac{\int_{0}^{\frac{3}{2}}s^{2}e^{s}\Delta s}{\int_{0}^{\frac{3}{2}}e^{s}\Delta s}\Bigg)\\ &=f\Bigg(\frac{\int_{0}^{1}s^{2}e^{s}ds+\int_{1}^{\frac{3}{2}}s^{2}e^{s}\Delta s}{\frac{3}{2}e-1}\Bigg)\\ &=f\Bigg(\frac{\frac{3}{2}e-2}{\frac{3}{2}e-1}\Bigg)\\ &=\Bigg[\bigg(\frac{3e-4}{3e-2}\bigg)^{2},4\sqrt{\frac{3e-4}{3e-2}}\Bigg]. \end{split} \end{equation*} It follows that \begin{equation*} \begin{split} &\Bigg[\frac{19e-48}{3e-2},\frac{8+4e}{3e-2}\Bigg] \subseteq \Bigg[\bigg(\frac{3e-4}{3e-2}\bigg)^{2},4\sqrt{\frac{3e-4}{3e-2}}\Bigg]. \end{split} \end{equation*} \end{example}
It is clear that if $[a,b]_{\mathbb{T}}=[0,1]$ and $h(s)\equiv1$, then we recover a result similar to the one given in \cite[Theorem~3.5]{C17} by T. M. Costa. Similarly, we can obtain the following results, which generalize \cite[Theorem~3.4]{C17} and \cite[Corollary 3.3]{C17}.
\begin{theorem} \label{thm4.7} Let $g\in C_{rd}([a,b]_{\mathbb{T}},(c,d))$ and \\ $h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$ with $$
\int_{a}^{b}|h(s)|\Delta s>0. $$ If $f\in C((c,d),\mathbb{R}^{+}_{\mathcal{I}})$ is a concave function, then \begin{equation*} \begin{split}
&\frac{(IR)\int_{a}^{b}|h(s)|f(g(s))\Delta s}{\int_{a}^{b}|h(s)|\Delta s}
&\supseteq f\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{
\int_{a}^{b}|h(s)|\Delta s}\Bigg). \end{split} \end{equation*} \end{theorem}
\begin{theorem} \label{thm4.8} Let $g\in C_{rd}([a,b]_{\mathbb{T}},(c,d))$ and \\ $h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$ with $$
\int_{a}^{b}|h(s)|\Delta s>0. $$ If $f\in C((c,d),\mathbb{R}^{+}_{\mathcal{I}})$ is an affine function, then \begin{equation*} \begin{split}
&\frac{(IR)\int_{a}^{b}|h(s)|f(g(s))\Delta s}{\int_{a}^{b}|h(s)|\Delta s}
= f\Bigg(\frac{\int_{a}^{b}|h(s)|g(s)\Delta s}{\int_{a}^{b}|h(s)|\Delta s}\Bigg). \end{split} \end{equation*} \end{theorem}
\begin{theorem}[Agarwal et al. \cite{A14}] \label{thm4.9} \\Let $f,g,h\in C_{rd}([a,b]_{\mathbb{T}},(0,\infty))$. If $\frac{1}{p}+\frac{1}{q}=1$, with $p>1$, then \begin{equation*} \begin{split} &\int_{a}^{b}h(s)f(s)g(s)\Delta s\\ &\leq \Bigg(\int_{a}^{b}h(s)f^{p}(s)\Delta s\Bigg)^{\frac{1}{p}}\Bigg( \int_{a}^{b}h(s)g^{q}(s)\Delta s\Bigg)^{\frac{1}{q}}. \end{split} \end{equation*} \end{theorem}
Next we present a H\"{o}lder type inequality for interval-valued functions on time scales.
\begin{theorem}[H\"{o}lder's inequality] \label{thm4.10} \\Let $h\in C_{rd}([a,b]_{\mathbb{T}},(0,\infty))$, $f,g\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}^{+})$.\\ If $\frac{1}{p}+\frac{1}{q}=1$, with $p>1$, then \begin{equation*} \begin{split} &\int_{a}^{b}h(s)f(s)g(s)\Delta s\\ &\leq \Bigg(\int_{a}^{b}h(s)f^{p}(s)\Delta s\Bigg)^{\frac{1}{p}}\Bigg( \int_{a}^{b}h(s)g^{q}(s)\Delta s\Bigg)^{\frac{1}{q}}. \end{split} \end{equation*} \end{theorem}
\begin{proof} By hypothesis, we have \begin{equation*} \begin{split} &\int_{a}^{b}h(s)f(s)g(s)\Delta s\\ &=\int_{a}^{b}h(s)\big[\underline{f}(s)\underline{g}(s), \overline{f}(s)\overline{g}(s)\big]\Delta s\\ &=\Bigg[\int_{a}^{b}h(s)\underline{f}(s)\underline{g}(s)\Delta s, \int_{a}^{b}h(s)\overline{f}(s)\overline{g}(s)\Delta s\Bigg]\\ &\leq \Bigg[\bigg(\int_{a}^{b}h(s)\underline{f}^{p}(s)\Delta s \bigg)^{\frac{1}{p}}\bigg(\int_{a}^{b}h(s)\underline{g}^{q}(s)\Delta s\bigg)^{\frac{1}{q}},\\ &\ \ \ \ \bigg(\int_{a}^{b}h(s)\overline{f}^{p}(s) \Delta s\bigg)^{\frac{1}{p}}\bigg(\int_{a}^{b}h(s) \overline{g}^{q}(s)\Delta s\bigg)^{\frac{1}{q}}\Bigg]\\ &= \Bigg[\bigg(\int_{a}^{b}h(s)\underline{f}^{p}(s)\Delta s \bigg)^{\frac{1}{p}},\bigg(\int_{a}^{b}h(s)\overline{f}^{p}(s)\Delta s\bigg)^{\frac{1}{p}}\Bigg]\\ &\ \ \ \ \cdot\Bigg[\bigg(\int_{a}^{b}h(s)\underline{g}^{q}(s)\Delta s\bigg)^{\frac{1}{q}},\bigg(\int_{a}^{b}h(s)\overline{g}^{q}(s)\Delta s\bigg)^{\frac{1}{q}}\Bigg]\\ &= \Bigg[\int_{a}^{b}h(s)\underline{f}^{p}(s)\Delta s,\int_{a}^{b}h(s) \overline{f}^{p}(s)\Delta s\Bigg]^{\frac{1}{p}}\\ &\ \ \ \ \cdot\Bigg[\int_{a}^{b}h(s)\underline{g}^{q}(s)\Delta s, \int_{a}^{b}h(s)\overline{g}^{q}(s)\Delta s\Bigg]^{\frac{1}{q}}\\ &=\Bigg(\int_{a}^{b}h(s)\Big[\underline{f}(s),\overline{f}(s)\Big]^{p}\Delta s\Bigg)^{\frac{1}{p}}\Bigg(\int_{a}^{b}h(s)\Big[\underline{g}(s), \overline{g}(s)\Big]^{q}\Delta s\Bigg)^{\frac{1}{q}}\\ &=\Bigg(\int_{a}^{b}h(s)f^{p}(s)\Delta s\Bigg)^{\frac{1}{p}}\Bigg( \int_{a}^{b}h(s)g^{q}(s)\Delta s\Bigg)^{\frac{1}{q}}. \end{split} \end{equation*} This concludes the proof. \end{proof}
For the particular case $p=q=2$ in Theorem~\ref{thm4.10}, we obtain the following Cauchy--Schwarz inequality.
\begin{theorem}[Cauchy--Schwarz inequality] \label{thm4.11} \\Let $h\in C_{rd}([a,b]_{\mathbb{T}},(0,\infty))$, $f,g\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}^{+})$. Then, \begin{equation*} \begin{split} &\int_{a}^{b}h(s)f(s)g(s)\Delta s\\ &\leq \sqrt{\Bigg(\int_{a}^{b}h(s)f^{2}(s)\Delta s\Bigg)\Bigg( \int_{a}^{b}h(s)g^{2}(s)\Delta s\Bigg)}. \end{split} \end{equation*} \end{theorem}
\begin{example} \label{ex6} Suppose that $[a,b]_{\mathbb{T}}=[0,\frac{\pi}{2}]$. Let $h(s)=s$, $f(s)=[s,s+1]$, and $g(s)=[\sin s,s]$ for $s\in[0,\frac{\pi}{2}]$. Then \begin{equation*} \begin{split} \int_{a}^{b}&h(s)f(s)g(s)\Delta s\\ &=\int_{0}^{\frac{\pi}{2}}\big[s^{2}\sin s,s^{3}+s^{2}\big]\Delta s\\ &=\bigg[\int_{0}^{\frac{\pi}{2}}s^{2}\sin s\Delta s, \int_{0}^{\frac{\pi}{2}}(s^{3}+s^{2})\Delta s\bigg]\\ &=\bigg[\pi-2,\frac{\pi^{4}}{64}+\frac{\pi^{3}}{24}\bigg], \end{split} \end{equation*} and \begin{equation*} \begin{split} &\sqrt{\Bigg(\int_{a}^{b}h(s)f^{2}(s)\Delta s\Bigg)\Bigg(\int_{a}^{b}h(s)g^{2}(s)\Delta s\Bigg)}\\ &=\sqrt{\bigg(\int_{0}^{\frac{\pi}{2}}\big[s^{3},s^{3}+2s^{2}+s\big]\Delta s \bigg)\bigg(\int_{0}^{\frac{\pi}{2}}\big[s\sin ^{2}s,s^{3}\big]\Delta s\bigg)}\\ &=\sqrt{\bigg[\int_{0}^{\frac{\pi}{2}}s^{3}ds,\int_{0}^{\frac{\pi}{2}}(s^{3}+2s^{2} +s)ds\bigg]\cdot \bigg[\int_{0}^{\frac{\pi}{2}}s\sin ^{2}sds,\int_{0}^{\frac{\pi}{2}}s^{3}ds\bigg]}\\ &=\sqrt{\bigg[\frac{\pi^{4}}{64},\frac{\pi^{4}}{64}+\frac{\pi^{3}}{12} +\frac{\pi^{2}}{8}\bigg]\cdot\bigg[\frac{\pi^{2}}{16}+\frac{1}{4},\frac{\pi^{4}}{64}\bigg]}\\ &=\sqrt{\bigg[\frac{\pi^{6}}{1024}+\frac{\pi^{4}}{256},\frac{\pi^{8}}{4096} +\frac{\pi^{7}}{768}+\frac{\pi^{6}}{512}\bigg]}\\ &=\Bigg[\sqrt{\frac{\pi^{6}}{1024}+\frac{\pi^{4}}{256}},\sqrt{\frac{\pi^{8}}{4096} +\frac{\pi^{7}}{768}+\frac{\pi^{6}}{512}}\Bigg]. \end{split} \end{equation*}
Consequently, we obtain \begin{equation*} \begin{split} &\bigg[\pi-2,\frac{\pi^{4}}{64}+\frac{\pi^{3}}{24}\bigg]\\ &\leq \Bigg[\sqrt{\frac{\pi^{6}}{1024}+\frac{\pi^{4}}{256}}, \sqrt{\frac{\pi^{8}}{4096}+\frac{\pi^{7}}{768}+\frac{\pi^{6}}{512}}\Bigg]. \end{split} \end{equation*} \end{example}
\begin{example} \label{ex7} Suppose that $[a,b]_{\mathbb{T}}=\{0,1,2,3\}$. Let $h(s)=s$, $f(s)=[s,s+1]$, and $g(s)=[\frac{s}{2},s]$ for $s\in\{0,1,2,3\}$. Then \begin{equation*} \begin{split} \int_{a}^{b}&h(s)f(s)g(s)\Delta s\\ &=\int_{0}^{3}\Big[\frac{s^{3}}{2},s^{3}+s^{2}\Big]\Delta s\\ &=\bigg[\int_{0}^{3}\frac{s^{3}}{2}\Delta s,\int_{0}^{3}s^{3}+s^{2}\Delta s\bigg]\\ &=\bigg[\frac{9}{2},14\bigg], \end{split} \end{equation*} and \begin{equation*} \begin{split} &\sqrt{\Bigg(\int_{a}^{b}h(s)f^{2}(s)\Delta s\Bigg)\Bigg( \int_{a}^{b}h(s)g^{2}(s)\Delta s\Bigg)}\\ &=\sqrt{\bigg(\int_{0}^{3}\big[s^{3},s^{3}+2s^{2}+s\big]\Delta s\bigg) \bigg(\int_{0}^{3}\Big[\frac{s^{3}}{4},s^{3}\Big]\Delta s\bigg)}\\ &=\sqrt{[9,22]\cdot\Big[\frac{9}{4},9\Big]}\\ &=\bigg[\frac{9}{2},3\sqrt{22}\bigg]. \end{split} \end{equation*} Consequently, we obtain $$ \bigg[\frac{9}{2},14\bigg]\leq \bigg[\frac{9}{2},3\sqrt{22}\bigg]. $$ \end{example}
\begin{theorem}[Agarwal et al.\cite{A14}; Wong et al.\cite{W05}] \label{thm4.12} Let $f,g,h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$ and $p>1$. Then, \begin{equation*} \begin{split}
&\bigg(\int_{a}^{b}|h(s)||f(s)+g(s)|^{p}\Delta s\bigg)^{\frac{1}{p}}\\
&\leq \bigg(\int_{a}^{b}|h(s)||f(s)|^{p}\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)||g(s)|^{p}\Delta s\bigg)^{\frac{1}{p}}. \end{split} \end{equation*} \end{theorem}
By the same technique used in the proof of Theorem~4 in \cite{R16}, we get a more general result.
\begin{theorem}[Minkowski's inequality] \label{thm4.13} \\Let $h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$, $f,g\in C([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}^{+})$ and \\$p>1$. Then, \begin{equation*} \begin{split}
&\bigg(\int_{a}^{b}|h(s)|(f(s)+g(s))^{p}\Delta s\bigg)^{\frac{1}{p}}\\
&\leq \bigg(\int_{a}^{b}|h(s)|f^{p}(s)\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)|g^{p}(s)\Delta s\bigg)^{\frac{1}{p}}. \end{split} \end{equation*} \end{theorem}
\begin{proof} By hypothesis, we have \begin{equation*} \begin{split}
&\bigg(\int_{a}^{b}|h(s)|(f(s)+g(s))^{p}\Delta s\bigg)^{\frac{1}{p}}\\
&=\bigg(\int_{a}^{b}|h(s)|\big[\underline{f}(s)+\underline{g}(s), \overline{f}(s)+\overline{g}(s)\big]^{p}\Delta s\bigg)^{\frac{1}{p}}\\
&=\bigg(\int_{a}^{b}|h(s)|\big[(\underline{f}(s) +\underline{g}(s))^{p},(\overline{f}(s)+\overline{g}(s))^{p}\big]\Delta s\bigg)^{\frac{1}{p}}\\
&=\Bigg[\bigg(\int_{a}^{b}|h(s)|(\underline{f}(s)+\underline{g}(s))^{p}\Delta s\bigg)^{\frac{1}{p}},\\
&\ \ \ \ \ \ \ \ \ \bigg(\int_{a}^{b}|h(s)|(\overline{f}(s) +\overline{g}(s))^{p}\Delta s\bigg)^{\frac{1}{p}}\Bigg]\\
&\leq \Bigg[\bigg(\int_{a}^{b}|h(s)|\underline{f}^{p}(s)\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)|\underline{g}^{p}(s)\Delta s\bigg)^{\frac{1}{p}},\\
&\ \ \ \bigg(\int_{a}^{b}|h(s)|\overline{f}^{p}(s)\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)|\overline{g}^{p}(s)\Delta s\bigg)^{\frac{1}{p}}\Bigg]\\
&= \Bigg[\bigg(\int_{a}^{b}|h(s)|\underline{f}^{p}(s)\Delta s\bigg)^{\frac{1}{p}},
\bigg(\int_{a}^{b}|h(s)|\overline{f}^{p}(s)\Delta s\bigg)^{\frac{1}{p}}\Bigg]\\
&\ \ +\Bigg[\bigg(\int_{a}^{b}|h(s)|\underline{g}^{p}(s)\Delta s\bigg)^{\frac{1}{p}},
\bigg(\int_{a}^{b}|h(s)|\overline{g}^{p}(s)\Delta s\bigg)^{\frac{1}{p}}\Bigg]\\
&= \Bigg[\int_{a}^{b}|h(s)|\underline{f}^{p}(s)\Delta s,\int_{a}^{b}|h(s)| \overline{f}^{p}(s)\Delta s\Bigg]^{\frac{1}{p}}\\
&\ \ \ \ \ \ +\Bigg[\int_{a}^{b}|h(s)|\underline{g}^{p}(s)\Delta s,
\int_{a}^{b}|h(s)|\overline{g}^{p}(s)\Delta s\Bigg]^{\frac{1}{p}}\\
&=\bigg(\int_{a}^{b}|h(s)|\big[\underline{f}(s), \overline{f}(s)\big]^{p}\Delta s\bigg)^{\frac{1}{p}}\\
&\ \ \ \ \ +\bigg(\int_{a}^{b}|h(s)|\big[\underline{g}(s), \overline{g}(s)\big]^{p}\Delta s\bigg)^{\frac{1}{p}}\\
&=\bigg(\int_{a}^{b}|h(s)|f^{p}(s)\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)|g^{p}(s)\Delta s\bigg)^{\frac{1}{p}}. \end{split} \end{equation*} The proof is complete. \end{proof}
\begin{example} \label{ex8} Suppose that $[a,b]_{\mathbb{T}}=[0,1]\cup \{2\}$. Let\\ $h(s)=s$, $f(s)=[s,2s]$, $g(s)=[s,e^{s}]$ and $p=2$. Then, \begin{equation*} \begin{split}
&\bigg(\int_{a}^{b}|h(s)|(f(s)+g(s))^{p}\Delta s\bigg)^{\frac{1}{p}}\\ &=\sqrt{\int_{0}^{2}\big[4s^{3},se^{2s}+4s^{2}e^{s}+4s^{3}\big]\Delta s}\\ &=\sqrt{\bigg[\int_{0}^{2}4s^{3}\Delta s,\int_{0}^{2}se^{2s}+4s^{2}e^{s}+4s^{3}\Delta s\bigg]}\\ &=\Bigg[\sqrt{5},\frac{\sqrt{5e^{2}+32e-11}}{2}\Bigg], \end{split} \end{equation*} and \begin{equation*} \begin{split}
&\bigg(\int_{a}^{b}|h(s)|f^{p}(s)\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)|g^{p}(s)\Delta s\bigg)^{\frac{1}{p}}\\ &=\sqrt{\int_{0}^{2}\big[s^{3},4s^{3}\big]\Delta s} +\sqrt{\int_{0}^{2}\big[s^{3},se^{2s}\big]\Delta s}\\ &=\bigg[\frac{\sqrt{5}}{2},\sqrt{5}\bigg]+\bigg[ \frac{\sqrt{5}}{2},\frac{\sqrt{5e^{2}+1}}{2}\bigg]\\ &=\bigg[\sqrt{5},\sqrt{5}+\frac{\sqrt{5e^{2}+1}}{2}\bigg]. \end{split} \end{equation*} Consequently, we obtain $$ \Bigg[\sqrt{5},\frac{\sqrt{5e^{2}+32e-11}}{2}\Bigg] \leq \bigg[\sqrt{5},\sqrt{5}+\frac{\sqrt{5e^{2}+1}}{2}\bigg]. $$ \end{example}
The next results follow directly from Theorems~\ref{thm4.10} and \ref{thm4.13}, respectively.
\begin{corollary} \label{cor4.1} Let $h\in C_{rd}([a,b]_{\mathbb{T}},(0,\infty))$, and\\ $f,g\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}^{-})$. If $\frac{1}{p}+\frac{1}{q}=1$, with $p>1$, then \begin{equation*} \begin{split} &\int_{a}^{b}h(s)f(s)g(s)\Delta s\\ &\leq \Bigg(\int_{a}^{b}h(s)(-f)^{p}(s)\Delta s\Bigg)^{\frac{1}{p}}\Bigg( \int_{a}^{b}h(s)(-g)^{q}(s)\Delta s\Bigg)^{\frac{1}{q}}. \end{split} \end{equation*} \end{corollary}
\begin{corollary} \label{cor4.2} Let $h\in C_{rd}([a,b]_{\mathbb{T}},\mathbb{R})$, $f,g\in C([a,b]_{\mathbb{T}},\mathbb{R}_{\mathcal{I}}^{-})$ and $p\in2^{\mathbb{N}}$. Then, \begin{multline*}
\bigg(\int_{a}^{b}|h(s)|(f(s)+g(s))^{p}\Delta s\bigg)^{\frac{1}{p}}\\
\leq \bigg(\int_{a}^{b}|h(s)|f^{p}(s)\Delta s\bigg)^{\frac{1}{p}}
+\bigg(\int_{a}^{b}|h(s)|g^{p}(s)\Delta s\bigg)^{\frac{1}{p}}. \end{multline*} \end{corollary}
\section{Conclusion} \label{sec:5}
We investigated Darboux and Riemann interval delta integrals for interval-valued functions on time scales. Inequalities for interval-valued functions were proved. Our results generalize previous inequalities presented by Costa \cite[Corollary 3.3, Theorem~3.4, Theorem~3.5]{C17} and Rom\'{a}n-Flores \cite[Theorem~4]{R16}.
\end{document} |
\begin{document}
\begin{frontmatter}{}
\title{A $k$-additive Choquet integral-based approach to approximate the SHAP values for local interpretability in machine learning}
\author[aff1,aff2]{Guilherme Dean Pelegrina\corref{fn1}} \ead{[email protected]} \author[aff1]{Leonardo Tomazeli Duarte} \ead{[email protected]} \author[aff2,aff3]{Michel Grabisch} \ead{[email protected]}
\cortext[fn1]{Corresponding author}
\address[aff1]{School of Applied Sciences - University of Campinas, Limeira, Brazil} \address[aff2]{Centre d’Économie de la Sorbonne - Université Paris I Panthéon-Sorbonne, Paris, France} \address[aff3]{Paris School of Economics - Université Paris I Panthéon-Sorbonne, Paris, France}
\begin{abstract}
Besides accuracy, recent studies on machine learning models have been addressing the question of how the obtained results can be interpreted. Indeed, while complex machine learning models are able to provide very good results in terms of accuracy even in challenging applications, it is difficult to interpret them. Aiming at providing some interpretability for such models, one of the most famous methods, called SHAP, borrows the Shapley value concept from game theory in order to locally explain the predicted outcome of an instance of interest. As the SHAP values calculation needs previous computations on all possible coalitions of attributes, its computational cost can be very high. Therefore, a SHAP-based method called Kernel SHAP adopts an efficient strategy that approximates such values with less computational effort. In this paper, we also address local interpretability in machine learning based on Shapley values. Firstly, we provide a straightforward formulation of a SHAP-based method for local interpretability by using the Choquet integral, which leads to both Shapley values and Shapley interaction indices. Moreover, we also adopt the concept of $k$-additive games from game theory, which contributes to reducing the computational effort when estimating the SHAP values. The obtained results attest that our proposal needs fewer computations on coalitions of attributes to approximate the SHAP values.
\end{abstract}
\begin{keyword} Local interpretability; Choquet integral; Machine learning; Shapley values \end{keyword}
\end{frontmatter}{}
\section{Introduction} \label{sec:intro}
In the last decade, Machine Learning (ML) models have been used to deal with problems that directly affect people's life, such as consumer credit scoring~\citep{Kruppa2013}, cybersecurity~\citep{Xin2018}, disease detection~\citep{Ahsan2022} and patient care evaluation~\citep{BenIsrael2020}. Aiming at dealing with such problems, complex ML models have been proposed to achieve good solutions in terms of accuracy. Examples include random forests~\citep{Fawagreh2014,Biau2016}, deep neural networks~\citep{LeCun2015,Goodfellow2016} and gradient boosting algorithms~\citep{Bentejac2021}. Despite the good performance in terms of accuracy, these models act as black box models, since the obtained results (predictions and/or classifications) are difficult to be interpreted. Therefore, there is an inherent trade-off between adopting an accurate model, whose structure is frequently complex, or an interpretable model, such as linear/logistic regression~\citep{Molnar2021}.
Interpretability plays an important role in machine learning-based automatic decisions and has been discussed in several recent works in the ML community~\citep{Lipton2018,Gilpin2018,Carvalho2019,Molnar2021,Setzu2021}. As stated by~\citet{Miller2019}, interpretability can be defined as ``\textit{the degree to which an observer can understand the cause of a decision}''. Therefore, we can argue that interpretability is as important as accuracy in automatic decisions since it can show if the model can or cannot be trusted. For example, suppose a situation in which a person asks his/her bank manager for credit. Moreover, suppose that, after an internal analysis based on a machine learning model, the bank system classifies that person as a possible default and, as a consequence, he/she would not receive the credit. He/she will naturally ask the bank manager why such a classification was reached. If the machine is a black box, the manager would not be able to explain such a classification and, therefore, the client may not trust the algorithm. Therefore, in this situation, a local interpretation would be suitable to understand how each characteristic (e.g., salary, presence or absence of previous default, etc.) contributed to the default credit classification.
There are practically two main types of interpretability in machine learning: global and local ones (see~\citep{Molnar2021} for a further discussion on them). The aim of global interpretability methods consists in explaining the trained model as a whole. In other words, one attempts to derive the average behavior of a trained machine by taking all samples. An example of such a method is the partial dependence plot~\citep{Molnar2021}, whose goal is to provide the marginal effects that each feature has on the predicted outcome. On the other hand, methods for local interpretability attempt to explain, for a specific instance of interest (e.g., a person asking for a credit), how each attribute's value contributes to achieving the associated prediction or classification. In this paper, we deal with local interpretability. Moreover, we consider a model-agnostic approach, i.e., a method that can be applied to interpret the prediction or classification of any machine learning model.
Among the model-agnostic methods proposed in the literature, two are of interest in this paper: LIME (Local Interpretable Model-agnostic Explanations)~\citep{Ribeiro2016} and SHAP (SHapley Additive exPlanations)~\citep{Lundberg2017}. In summary, the idea in LIME to understand the prediction of a specific instance consists in, locally, adjusting an interpretable function (e.g., a linear model) based on a set of perturbed samples in the neighborhood of the instance of interest. When adjusting this linear function, one considers an exponential kernel that ensures that closer are the perturbed samples from the instance of interest, greater are their importance in the learning procedure. Although this function may not be complex enough to explain the model as a whole, it can locally provide a good understanding of the contribution of each attribute in the model prediction. The other approach, called SHAP, brings concepts from game theory to provide local interpretability. The idea is to explain a prediction by means of the Shapley value~\citep{Shapley1953} associated with each attribute value. An interesting aspect in such an approach, which leads to the SHAP values\footnote{In this paper, since the SHAP values are referred to as the Shapley values obtained by means of the SHAP formulation, we will frequently adopt SHAP values or Shapley values interchangeably in the context of machine learning interpretability.}, is that it satisfies desired properties in interpretability, such as local accuracy, missingness and consistency~\citep{Lundberg2017}. For that reason, the classical SHAP and its extended versions have been largely used in the literature~\citep{Lundberg2020,Chen2021,Aas2021}.
Although the Shapley value (as well as the SHAP value in SHAP method) appears as an interesting solution for model-agnostic machine learning interpretability, there is a drawback in its calculation. Since it relies on the marginal contribution of each attribute by taking into account all possible coalitions of attributes, the number of evaluations increases exponentially with the number of attributes. Precisely, if we have $m$ attributes, we need $2^m$ evaluations to calculate the Shapley values. This makes the calculation impracticable in situations where $m$ is large. In order to soften this inconvenience, one may adopt some approaches that approximate the Shapley values, such as the Shapley sampling values strategy~\citep{Strumbelj2010,Strumbelj2014} or the Kernel SHAP~\citep{Lundberg2017}. We address the latter in this paper.
The Kernel SHAP was also proposed in the original SHAP paper~\citep{Lundberg2017}. It provides a link between LIME and the use of SHAP values for local machine learning interpretability. Although the authors provide this link by assuming an additive function as the interpretable model and a specific kernel, the formulation is not straightforward and there is a lack of details in the proof. With respect to the SHAP values calculation, in order to reduce the computational effort, the authors adopted a clever strategy that selects the evaluations that are most promising to approximate such values. However, this strategy does not reduce the number of evaluations needed for an exact SHAP values calculation. Moreover, the authors do not assume further game theory concepts that could speed up the convergence.
Aiming at providing a straightforward formulation of a Kernel SHAP-based method and speeding up the SHAP values approximation, in this paper, we propose to adopt game theory-based concepts frequently used in multicriteria decision making: the Choquet integral~\citep{Choquet1954,Grabisch1996,Grabisch2010} and $k$-additive games~\citep{Grabisch1997b}. Instead of assuming an additive function as the interpretable model, as a first contribution of this paper, we show that the use of the non-additive function called Choquet integral also leads to the same desired properties for local interpretability. Indeed, we can directly associate the Choquet integral parameters to the Shapley indices, which include both Shapley values and Shapley interaction indices. While the Shapley values indicate the marginal contribution of each attribute, individually, the Shapley interaction indices provide the understanding about how they interact between them (positively or negatively). This is of interest in ML interpretability since it indicates if the simultaneous presence of two characteristics has a higher (or lower) contribution than both of them isolated. It is worth mentioning that~\citet{Lundberg2020} also discuss how the SHAP method could be adapted to find the Shapley interaction indices. However, in our proposal, they are obtained automatically.
Besides the aforementioned formulation, we can also assume some degree of additivity about the Choquet integral which contributes to reduce its number of parameters. Therefore, as a second contribution, we propose to adopt a $k$-additive Choquet integral. For instance, $2$-additive models have proved flexible enough to achieve good results in terms of generalization~\citep{Grabisch2002,Grabisch2006,Pelegrina2020}. As attested by numerical experiments, by reducing the number of parameters, we also decrease the number of evaluations needed to approximate the SHAP values.
The rest of this paper is organized as follows. Section~\ref{sec:theor} contains the theoretical aspects of Shapley values and the adopted Choquet integral. We also provide a description of LIME and SHAP as model-agnostic methods for local interpretability. In Section~\ref{sec:prop}, we present our Choquet integral-based formulation that leads to the Shapley interaction indices and how the concept of $k$-additive games can be used to reduce the effort when estimating the SHAP values. Thereafter, in Section~\ref{sec:exp}, we conduct some numerical experiments in order to attest our proposal. Finally, in Section~\ref{concl}, we present our concluding remarks and discuss future perspectives.
\section{Background} \label{sec:theor}
In this section, we present some theoretical aspects that will be used along this work. We start by some concepts frequently used in game theory and multicriteria decision making. Thereafter, we discuss both LIME and SHAP as well as the SHAP values approximation strategy used in Kernel SHAP.
It is worth recalling that, in this paper, we deal with local interpretability. Therefore, we consider a classical machine learning scenario where a model $f(\cdot)$ (e.g., a black box model) has been trained based on a set of $n_{tr}$ training data $\left(\mathbf{X},\mathbf{y} \right)$, where $\mathbf{X} = \left[\mathbf{x}_1, \ldots, \mathbf{x}_{n_{tr}} \right]$ and $\mathbf{y} = \left[y_1, \ldots, y_{n_{tr}} \right]$ represent the inputs and the output (e.g., a predicted value or a class), respectively. Our aim consists in explaining the predicted outcome $f(\mathbf{x}^*)$ of the instance of interest $\mathbf{x}^* = \left[x_1^*, \ldots, x_m^* \right]$, where $m$ is the number of attributes. Therefore, how $f(\cdot)$ was trained is not important in this paper. We only consider that we are able to use the model $f(\cdot)$ in order to predict the outcome of any instance.
\subsection{Shapley values and $k$-additive games} \label{subsec:shapley}
In cooperative game theory, a coalitional game is defined by a set $M=\left\{1, 2, \ldots, m\right\}$ of $m$ players and a function $\upsilon: \mathcal{P}(M) \rightarrow \mathbb{R}$, where $\mathcal{P}(M)$ is the power set of $M$, that maps subsets of players to real numbers. For a coalition of players $A$, $\upsilon(A)$ represents the payoff that this coalition can obtain by cooperation. By definition, one assumes $\upsilon(\emptyset) = 0$, i.e., there is no payoff when there is no coalition.
The Shapley value (or Shapley power index) is a well-known solution concept in cooperative game theory~\citep{Shapley1953}. In summary, the Shapley value of a player $j$ indicates its (positive or negative) marginal contribution in the game payoff when taking into account all possible coalitions of players in $M$. It is defined as follows: \begin{equation} \label{eq:power_ind_s}
\phi_{j} = \sum_{A \subseteq M\backslash \left\{j\right\}} \frac{\left(m-\left|A\right|-1\right)!\left|A\right|!}{m!} \left[\upsilon(A \cup \left\{j\right\}) - \upsilon(A) \right], \end{equation}
where $\left| A \right|$ represents the cardinality of subset $A$. An interesting property of the Shapley value (called efficiency, which will be further discussed in this paper) is that $\sum_{j=1}^m \phi_j = \upsilon(M) - \upsilon(\emptyset)$. For this reason, the Shapley value is a convenient way of sharing the payoff of the grand coalition between the players.
Similarly as in Equation~\eqref{eq:power_ind_s}, one may also measure the marginal effect of a coalition $\left\{j,j'\right\}$ in the payoffs. In this case, one obtains the Shapley interaction index, which is defined by~\citep{Murofushi1993,Grabisch1997a} \begin{equation} \label{eq:int_pair_ind_s}
I_{j,j'} = \sum_{A \subseteq M\backslash \left\{j,j'\right\}} \frac{\left(m-\left|A\right|-2\right)!\left|A\right|!}{\left(m-1\right)!} \left[\upsilon(A \cup \left\{j,j'\right\}) - \upsilon(A \cup \left\{j\right\}) - \upsilon(A \cup \left\{j'\right\}) + \upsilon(A)\right] \end{equation} and can be interpreted as the interaction degree of coalition $\left\{j,j'\right\}$ by taking into account all possible coalitions of players in $M$. The sign of $I_{j,j'}$ indicates the type of interaction between players $j,j'$: \begin{itemize}
\item If $I_{j,j'} < 0$, there is a negative interaction (also called redundant effect) between players $j,j'$.
\item If $I_{j,j'} > 0$, there is a positive interaction (also called complementary effect) between players $j,j'$.
\item If $I_{j,j'} = 0$, there is no interaction between players $j,j'$.
\end{itemize}
Besides $\phi_{j}$ and $I_{j,j'}$, one may also define the interaction index for any $A \subseteq M$. In this case, the (generalized) interaction index is defined by~\citep{Grabisch1997a} \begin{equation} \label{eq:int_ind_s}
I(A) = \sum_{D \subseteq M\backslash A} \frac{\left(m-\left|D\right|-\left|A\right|\right)!\left|D\right|!}{\left(m-\left|A\right|+1\right)!} \left( \sum_{D' \subseteq A} \left(-1\right)^{\left| A \right| - \left| D' \right|}\upsilon(D \cup D') \right). \end{equation} However, it does not admit as clear an interpretation as $\phi_{j}$ and $I_{j,j'}$.
It is important to remark that, given the interaction indices $I(A)$, one may recover the payoffs $\upsilon(A)$ through the linear transformation \begin{equation} \label{eq:iitomu_s}
\upsilon(A) = \sum_{D \subseteq M} \gamma^{\left|D \right|}_{\left| A \cap D \right|}I(D), \end{equation}
where $\gamma^{\left|D \right|}_{\left| A \cap D \right|}$ is defined by \begin{equation} \label{eq:gamma} \gamma^{r'}_{r} = \sum_{l=0}^{r}\binom{r}{l}\eta_{r'-l}, \end{equation} with \begin{equation} \eta_{r} = -\sum_{r'=0}^{r-1}\frac{\eta_{r'}}{r-r'+1}\binom{r}{r'} \end{equation} being the Bernoulli numbers and $\eta_0=1$. Since the relation between the game and the interaction indices is linear, it is common to represent the aforementioned transformations using matrix notation. Assume, for instance, that the vectors $\upsilon = [\upsilon(\emptyset), \upsilon(\left\{ 1 \right\}), \ldots, \upsilon(\left\{ m \right\}), \upsilon(\left\{ 1,2 \right\}), \ldots, \upsilon(\left\{ m-1,m \right\}), \ldots,$ $\upsilon(\left\{ 1, \ldots, m \right\}) ]$ and $\mathbf{I} = \left[I(\emptyset), \phi_1, \ldots, \phi_m, I_{1,2}, \ldots, I_{m-1,m}, \ldots, I(\left\{ 1, \ldots, m \right\}) \right]$ are represented in a cardinal-lexicographic order (i.e., the elements are sorted according to their cardinality and, for each cardinality, based on the lexicographic order). The transformation from the interaction indices to $\upsilon$ can be represented by $\upsilon = \mathbf{T} \mathbf{I}$, where $\mathbf{T} \in \mathbb{R}^{2^{m} \times 2^{m}}$ is the transformation matrix. For example, in a game with 3 players, we have \begin{equation*} \mathbf{T} =\left[\begin{matrix} 1 & -1/2 & -1/2 & -1/2 & 1/6 & 1/6 & 1/6 & 0 \\ 1 & 1/2 & -1/2 & -1/2 & -1/3 & -1/3 & 1/6 & 1/6 \\ 1 & -1/2 & 1/2 & -1/2 & -1/3 & 1/6 & -1/3 & 1/6 \\ 1 & -1/2 & -1/2 & 1/2 & 1/6 & -1/3 & -1/3 & 1/6 \\ 1 & 1/2 & 1/2 & -1/2 & 1/6 & -1/3 & -1/3 & -1/6 \\ 1 & 1/2 & -1/2 & 1/2 & -1/3 & 1/6 & -1/3 & -1/6 \\ 1 & -1/2 & 1/2 & 1/2 & -1/3 & -1/3 & 1/6 & -1/6 \\ 1 & 1/2 & 1/2 & 1/2 & 1/6 & 1/6 & 1/6 & 0 \\ \end{matrix}\right]. \end{equation*}
Another concept in game theory directly associated with the interaction indices is the concept of $k$-additive games. We say that a game is $k$-additive if $I(A) = 0$ for all $A$ such that $\left| A \right| > k$. As it will be further detailed in the next section, an advantage of such games is that one reduces the number of parameters to be defined (e.g., from $2^m$ to $m(m+1)/2$ when $k=2$). In the example with 3 players, for instance, if one assumes a $2$-additive game, the last column of $\mathbf{T}$ can be removed since $I(\left\{1,2,3\right\}) = 0$.
\subsection{The Choquet integral} \label{subsec:choquet}
The (discrete) Choquet integral~\citep{Choquet1954} is a non-additive (more precisely, a piecewise linear) aggregation function that models interactions among attributes. It is defined on a set of parameters associated with all possible coalitions of attributes. It has been largely used in multicriteria decision making problems~\citep{Grabisch1996,Grabisch2010} and, in such situations, the parameters associated with the Choquet integral are called capacity coefficients. A capacity is a set function $\mu:2^{M} \rightarrow \mathbb{R}_{+}$ satisfying the axioms of normalization ($\mu(\emptyset) = 0$ and $\mu(M) = 1$) and monotonicity (if $A \subseteq D \subseteq M$, $\mu(A) \leq \mu(D) \leq \mu(M)$). However, the Choquet integral is not restricted to capacities~\citep{Grabisch2016}. Indeed, it can be defined by means of a game $\upsilon:2^{M} \rightarrow \mathbb{R}$ satisfying $\upsilon(\emptyset) = 0$. The Choquet integral definition based on a game $\upsilon$ is given as follows: \begin{equation} \label{eq:model_ci} f_{CI}(\mathbf{x}) = \sum_{j=1}^m (x_{(j)} - x_{(j-1)})\upsilon(\{(j), \ldots, (m)\}), \end{equation} where $\cdot_{(j)}$ indicates a permutation of the indices $j$ such that $0 \leq x_{(1)} \leq x_{(2)} \leq \ldots \leq x_{(m)} \leq 1$ (with $x_{(0)}=0$).
Since the Choquet integral is defined by means of a game, one may define it in terms of Shapley values and interaction indices. Therefore, one has a clear interpretation about the marginal contribution of each feature in the aggregation procedure as well as the interaction degree between them. For instance, if two attributes have a positive (resp. negative) interaction, the payoff of such a coalition is greater (resp. lower) than the sum of the individual payoffs. Moreover, one may also consider the case of a $k$-additive game and, therefore, a $k$-additive Choquet integral~\citep{Grabisch1997b}. For example, if one assumes a $2$-additive game,~\eqref{eq:model_ci} can be formulated as follows: \begin{equation} \label{eq:choquet_2add}
f_{CI}(\mathbf{x}) = \sum_j x_j \left( \phi_j - \frac{1}{2} \sum_{j'} \left| I_{j,j'} \right| \right) + \sum_{I_{j,j'} < 0} (x_j \vee x_{j'}) \left| I_{j,j'} \right| + \sum_{I_{j,j'} > 0} (x_j \wedge x_{j'}) I_{j,j'}, \end{equation} where $\vee$ and $\wedge$ represent the maximum and the minimum operators, respectively. Note that, when learning the Choquet integral parameters, if one assumes a $2$-additive model, one reduces the number of parameters from $2^m$ to $m(m+1)/2$. Therefore, $2$-additive and, more generally, $k$-additive models emerge as a strategy that reduces the computational complexity in optimization tasks and provides a more interpretable model (since one has less parameters to interpret). Moreover, it is also known from multicriteria decision making applications~\citep{Grabisch2002,Grabisch2006,Pelegrina2020} that, even if one adopts a $2$-additive model, the Choquet integral is still being flexible enough to model inter-attributes relations and can achieve a high level of generalization.
It is important to remark that, if one assumes that the game is $1$-additive, the Choquet integral becomes a weighted arithmetic mean.
\subsection{Model-agnostic methods for local interpretability} \label{subsec:lime_shap}
We describe in this section two famous model-agnostic methods for local interpretability: LIME and SHAP. At first, we briefly present the idea behind tabular LIME (i.e., LIME for tabular data). Then, we further discuss the SHAP method, specially the Kernel SHAP strategy. It is worth mentioning that, differently from~\citep{Ribeiro2016,Lundberg2017}, we here adopt a notation based on set theory in order to clearly define the elements used in the considered approaches.
\subsubsection{LIME} \label{subsubsec:lime}
The main idea of LIME~\citep{Ribeiro2016} for local explanations is to locally approximate a (generally) complex function $f(\cdot)$ (frequently obtained by a black box model) by an interpretable model $g(\cdot)$. For this purpose, in order to explain the outcome $f(\mathbf{x}^*)$ of an instance $\mathbf{x}^*$, one firstly generates a set of $q$ perturbed samples $\mathbf{z}_l$, $l=1, \ldots, q$, in the neighborhood of $\mathbf{x}^*$. For each sample $\mathbf{z}_l$, one also defines a binary vector $\mathbf{z}_l'$ such that $z_{l,j}' = 1$ if $z_{l,j}$ is close enough\footnote{In order to define how close $z_{l,j}$ is from $x_{j}^*$, for each attribute, LIME equally splits the training data (by taking the quantiles of the training data) into predefined bins. Therefore, if $z_{l,j}$ is on the same bin as $x_{j}^*$, $z_{l,j}' = 1$, or $z_{l,j}' = 0$ otherwise. For further details about this procedure, the interested reader may refer to~\citep{Garreau2020}} to $x_{j}^*$, or $z_{l,j}' = 0$ otherwise. Once all samples have been generated, LIME deals with the following optimization problem: \begin{equation}
\label{eq:lime_form}
\min_{g \in G} \mathcal{L}(f,g,\pi_{\mathbf{x}^*}) + \Omega(g), \end{equation} where $\mathcal{L}(f,g,\pi_{\mathbf{x}^*})$ is the loss function, $\pi_{\mathbf{x}^*}$ is a proximity measure between the instance to be explained and the perturbed samples and $\Omega(g)$ is a measure of complexity of the interpretable model $g(\cdot)$. In tabular LIME, the authors used the exponential kernel for the proximity measure, which leads to the expression \begin{equation}
\pi_{\mathbf{x}^*}(\mathbf{z}_l') = \exp \left(\frac{-\| \mathbbm{1} - \mathbf{z}_l' \|^2}{\alpha^2} \right), \end{equation}
where $\| \cdot \|$ is the Euclidean norm, $\mathbbm{1}$ is a vector of 1's and $\alpha$ is a positive bandwidth parameter (as default, the authors assumed $\alpha = \sqrt{0.75m}$). By assuming a weighted least squared function for $\mathcal{L}(f,g,\pi_{\mathbf{x}^*})$, a linear function $g(\mathbf{z}') = \beta_0 + \beta^T \mathbf{z}'$ (where $\beta =\left( \beta_1, \ldots, \beta_m \right)$) and letting $\Omega(\beta) = \lambda \| \beta \|^2$ represent a regularization term with $\lambda > 0$, LIME can be formulated as follows: \begin{equation}
\label{eq:lime_opt}
\min_{\beta_0, \beta_1, \ldots, \beta_m} \sum_{l=1}^q \pi_{\mathbf{x}^*}(\mathbf{z}_l') \left( f(\mathbf{z}_l) - \left(\beta_0 + \beta^T \mathbf{z}_l' \right) \right)^2 + \lambda \| \beta \|^2. \end{equation} After solving~\eqref{eq:lime_opt}, one can visualize the obtained parameters $\beta$ and, therefore, interpret the (positive or negative) contribution of each attribute in the predicted outcome in the vicinity of $\mathbf{x}^*$.
\subsubsection{SHAP} \label{subsubsec:shap}
Differently from LIME, the purpose of SHAP is to use the Shapley values in order to locally explain a prediction. The idea is to associate to each attribute its marginal contribution in the predicted outcome. In this section, we present a summary of the idea behind SHAP. Moreover, we discuss the Kernel SHAP, which is a kernel-based approach for approximating the SHAP values by using the LIME formulation. For further details, the interested reader may refer to~\citep{Lundberg2017,Lundberg2018,Lundberg2020,Aas2021}.
The idea that brings Shapley values into interpretability methods in machine learning associates players and payoffs in game theory to attributes and values of a subset of attributes in the model prediction, respectively. Before presenting the idea behind SHAP, let us define the characteristic vector of $A$. Recall that $M = \left\{1, \ldots, m \right\}$ represents the set of $m$ attributes. For any $A \subseteq M$, $\mathbf{1}_A \in \{0,1\}^m$ denotes the characteristic vector of $A$, i.e., a binary vector such that the $j$-th coordinate is 1, if $j \in A$, and 0, otherwise. For example, for $M = \left\{1, 2, 3 \right\}$, $\mathbf{1}_{\left\{2,3\right\}} = \left[0,1,1 \right]$ means a coalition of attributes $\left\{2, 3 \right\}$.
Based on the aforementioned definition, in order to explain the predicted outcome $f(\mathbf{x}^*)$ of an instance $\mathbf{x}^*$, the authors decompose $f(\mathbf{x}^*)$ by assuming the additive feature attribution function given by \begin{equation}
\label{eq:shap_g}
f(\mathbf{x}^*) = g(\mathbf{1}_M) = \phi_0 + \sum_{j \in M} \phi_j. \end{equation} Moreover, they argue that the only possible explanation model $g(\cdot)$ that follows Equation~\eqref{eq:shap_g} and satisfies the local accuracy, missingness and consistency properties (see Appendix A for the definitions) consists in defining $\phi_0 = \mathbb{E}\left[f(\mathbf{x})\right]$, i.e., the (overall) expected prediction when one does not know any attribute value from $\mathbf{x}^*$, and the (exact) SHAP values $\phi_{j}$, $j=1, \ldots, m$, given by \begin{equation} \label{eq:shap_values}
\phi_{j}(f,\mathbf{x}^*) = \sum_{A \subseteq M \backslash \left\{j\right\}} \frac{\left(m-\left| A \right|-1\right)! \left| A \right|!}{m!} \left[ \hat{f}_{\mathbf{x}^*}( A \cup \left\{j\right\}) - \hat{f}_{\mathbf{x}^*}( A) \right], \end{equation} where $\hat{f}_{\mathbf{x}^*}( A)$ is the expected model prediction given the knowledge on the attributes values of $\mathbf{x}^*$ that are present in coalition $A$, that is: \begin{equation} \label{eq:exp_pred}
\hat{f}_{\mathbf{x}^*}( A) = \mathbb{E}\left[f\left( \mathbf{x} \right) | x_j = x_j^* \, \, \forall \, \, j \in A \right]. \end{equation} Note in Equation~\eqref{eq:exp_pred} that one has missing values for all attributes $j' \in \overline{A}$, where $\overline{A}$ is the complement set of $A$ (if $A = M$, then $\hat{f}_{\mathbf{x}^*}( M) = \mathbb{E}\left[f\left( \mathbf{x}^* \right) \right] = f\left( \mathbf{x}^* \right)$ and there are no missing values). In this case, in order to calculate the expected prediction, one randomly samples these missing values from the training data. In this paper, as well as in the Kernel SHAP method, we assume independence among attributes. Therefore, the expected prediction can be calculated as follows: \begin{equation} \label{eq:exp_pred_indep} \hat{f}_{\mathbf{x}^*}( A) = \frac{1}{q} \sum_{l=1}^q f\left(\mathbf{x}_{A}^*,\mathbf{x}_{l,\overline{A}} \right), \end{equation} where $\mathbf{x}_{l,\overline{A}}$, $l=1, \ldots, q$, are samples from the training data. Note that, in comparison with the game theory formulation presented in Equation~\eqref{eq:power_ind_s}, $\hat{f}_{\mathbf{x}^*}( A)$ represents the payoff $\upsilon(A)$. Moreover, when all attributes are missing, i.e., $A = \emptyset$, one has $\hat{f}_{\mathbf{x}^*}( \emptyset ) = \mathbb{E}\left[f\left( \mathbf{x} \right)\right] = \phi_0$.
Among the properties satisfied by the SHAP values, the local accuracy plays an important role in local interpretability and differentiates SHAP from the original LIME formulation (as presented in Section~\ref{subsubsec:lime}). It states that one can decompose the predicted outcome $f(\mathbf{x}^*)$ by the sum of the SHAP values and the overall expected prediction $\phi_0$, i.e., $f(\mathbf{x}^*) = \phi_0 + \sum_{j=1}^m \phi_j$. Therefore, one may interpret the SHAP values as the contribution of each attribute when one moves from the overall expected prediction when all attributes are missing to the actual outcome $f(\mathbf{x}^*)$.
\subsubsection{Kernel SHAP} \label{subsubsec:kernel_shap}
An important remark in the exact SHAP values calculation is that one needs to sample all $2^m$ possible coalitions of attributes and calculate its expected model prediction. Therefore, this procedure may be computationally heavy for a large number of attributes. In order to overcome this inconvenience, the authors proposed a SHAP value-based formulation called Kernel SHAP~\citep{Lundberg2017}. Kernel SHAP emerges as the formulation of LIME method that leads to the SHAP values. For instance, the authors claimed that if one assumes \begin{itemize}
\item $\Omega(g) = 0$,
\item $\pi(A) = \frac{(m-1)}{\binom{m}{\left|A\right|} \left|A\right| (m - \left|A\right|)}$,
\item $\mathcal{L}(f,g,\pi) = \sum_{A \in \mathcal{M}} \pi(A) \left( \hat{f}_{\mathbf{x}^*}( A) - g(\mathbf{1}_A) \right)^2$,
where $g(\mathbf{1}_A) = \phi_0 + \sum_{j \in A} \phi_j$ and $\mathcal{M} \subseteq \mathcal{P}(M)$ (recall that $\mathcal{P}(M)$ is the power set of $M$), \end{itemize} the solution of the weighted least square problem \begin{equation}
\label{eq:shap_opt}
\min_{\phi_0, \phi_1, \ldots, \phi_m} \sum_{A \in \mathcal{M}} \frac{(m-1)}{\binom{m}{\left|A\right|} \left|A\right| (m - \left|A\right|)} \left( \hat{f}_{\mathbf{x}^*}( A) - \left( \phi_0 + \sum_{j \in A} \phi_j \right)\right)^2 \end{equation} leads to the SHAP values. Note that, differently from the LIME formulation, $\pi(A)$ in Kernel SHAP only depends on coalition $A$. Moreover, $\pi(A)$ tends to infinity when $A = M$. Therefore, in the optimal solution, $\hat{f}_{\mathbf{x}^*}( M) = f(\mathbf{x}^*) = g(\mathbf{1}_M) = \phi_0 + \sum_{j=1}^m \phi_j$. This ensures that $f(\mathbf{x}^*)$ is explained by the sum of the SHAP values and the overall expected prediction $\mathbb{E}\left[f(\mathbf{x})\right]$. Similarly, when $A = \emptyset$, the associated $\pi(\emptyset)$ also tends to infinity. This ensures that $\hat{f}_{\mathbf{x}^*}( \emptyset ) = \mathbb{E}\left[f(\mathbf{x})\right] = g(\mathbf{1}_{\emptyset}) = \phi_0$. In practice, we replace these infinite values by a big constant (e.g., $10^6$).
As~\eqref{eq:shap_opt} is a weighted least square problem, one may easily represent it (as well as its solution) by means of matrices and vectors (we borrow such a formulation from~\citep{Aas2021}). Suppose that $n_{\mathcal{M}}$ represents the number of elements in $\mathcal{M}$ (i.e., the number of coalitions considered in the optimization problem~\eqref{eq:shap_opt}). Let us also define $\phi = \left[ \phi_0, \phi_1, \ldots, \phi_m \right]$ and $\mathbf{Z} \in \left\{0,1\right\}^{n_{\mathcal{M}} \times (m+1)}$ as the matrix such that the first column is 1 for every row and the remaining $m$ columns are composed, in each row, by all $\mathbf{1}_A$, $A \in \mathcal{M}$. Moreover, assume that $\mathbf{f} \in \mathbb{R}^{n_{\mathcal{M}} \times 1}$ and $\mathbf{W} \in \mathbb{R}^{n_{\mathcal{M}} \times n_{\mathcal{M}}}$ are the vector of evaluations $\hat{f}_{\mathbf{x}^*}( A)$ and the diagonal matrix whose elements are given by $\pi(A)$, respectively, associated with all $A \in \mathcal{M}$. For example, in a problem with 3 attributes ($M=\left\{1,2,3 \right\}$) and using $\emptyset$, $\left\{1 \right\}$, $\left\{2 \right\}$, $\left\{1,3 \right\}$ and $M$ as the coalitions of attributes, we have the following: \begin{equation*} \phi = \left[\begin{matrix} \phi_0 \\ \phi_1 \\ \phi_2 \\ \phi_3 \end{matrix}\right], \, \, \mathbf{Z} = \left[\begin{matrix} 1 & 0 & 0 & 0 \\ 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 0 \\ 1 & 1 & 0 & 1 \\ 1 & 1 & 1 & 1 \\ \end{matrix}\right], \, \, \mathbf{f} = \left[\begin{matrix} \hat{f}_{\mathbf{x}^*}( \emptyset) \\ \hat{f}_{\mathbf{x}^*}( \left\{1 \right\} ) \\ \hat{f}_{\mathbf{x}^*}( \left\{2 \right\} ) \\ \hat{f}_{\mathbf{x}^*}( \left\{1,3 \right\} ) \\ \hat{f}_{\mathbf{x}^*}( M) \\ \end{matrix}\right]\text{ and }\mathbf{W} = \left[\begin{matrix} 10^6 & 0 & 0 & 0 & 0 \\ 0 & \pi(\left\{1 \right\}) & 0 & 0 & 0 \\ 0 & 0 & \pi(\left\{2 \right\}) & 0 & 0 \\ 0 & 0 & 0 & \pi(\left\{1,3 \right\}) & 0 \\ 0 & 0 & 0 & 0 & 10^6 \\ \end{matrix}\right]. 
\end{equation*} Based on the vector/matrix notation, one may represent the optimization problem~\eqref{eq:shap_opt} as \begin{equation}
\label{eq:shap_opt_matrix}
\min_{\phi} \left(\mathbf{f} - \mathbf{Z}\phi \right)^T \mathbf{W} \left(\mathbf{f} - \mathbf{Z}\phi \right), \end{equation} whose solution is given by \begin{equation}
\label{eq:shap_opt_matrix_sol}
\phi = \left(\mathbf{Z}^T\mathbf{W}\mathbf{Z} \right)^{-1} \mathbf{Z}^T\mathbf{W}\mathbf{f}. \end{equation}
Remark that $\mathbf{S} = \left(\mathbf{Z}^T\mathbf{W}\mathbf{Z} \right)^{-1} \mathbf{Z}^T\mathbf{W}$ can be calculated independently of the instance of interest $\mathbf{x}^*$. Therefore, an interesting aspect in Kernel SHAP is that, even if one would like to explain the outcome of several instances of interest, one only needs to calculate $\mathbf{S}$ once. The only element that varies in Equation~\eqref{eq:shap_opt_matrix_sol} is the vector of evaluations $\mathbf{f}$, which is dependent on the instance of interest under analysis.
Another remark in Kernel SHAP is that, if $\mathcal{M} = \mathcal{P}(M)$, Equation~\eqref{eq:shap_opt_matrix_sol} leads to the exact SHAP values (as in Equation~\eqref{eq:shap_values}). Therefore, in this exact calculation, one needs the expected predictions $\hat{f}_{\mathbf{x}^*}( A )$ for all possible coalitions $A$, which can be infeasible for a large number of attributes. However, the clever strategy used in Kernel SHAP aims at selecting the most promising expected predictions to approximate the SHAP values. For instance, if one considers the weighting kernel $\pi(A)$, one may note that the majority of $A$ has a low contribution in the SHAP value calculation. Therefore, the aim in Kernel SHAP consists in defining a subset $\mathcal{M}$ from $\mathcal{P}(M)$ such that the elements $A \in \mathcal{M}$ are sampled\footnote{In order to avoid double selecting the same $A$, in the experiments conducted in this paper, we adopted a sampling procedure without replacement. Therefore, after sampling a coalition, we update the probability distribution by removing the associated kernel weight and normalizing the probabilities.} from a probability distribution following the weighting kernel $\pi(A)$. The greater the weight associated with $A$, the greater the chance that $A$ is sampled from $\mathcal{P}(M)$.
\section{A more general model for local interpretability based on Shapley values} \label{sec:prop}
As highlighted in Section~\ref{sec:intro}, we have two main contributions in this paper: to provide a straightforward formulation of the Kernel SHAP method based on the Choquet integral, and to adopt the concept of $k$-additive games in order to reduce the number of evaluations needed to approximate the SHAP values. Both contributions are presented in the sequel.
\subsection{The Choquet integral as an interpretable model for Kernel SHAP formulation}
We here show that we need not consider an additive function as the interpretable model in order to explain a prediction based on the Shapley values. Indeed, if we adopt the non-additive function called Choquet integral, we also achieve such values. Recall the Choquet integral function defined in Equation~\eqref{eq:model_ci}. The idea is to define the local interpretable model $g(\cdot)$ as \begin{equation}
\label{eq:choquet_lime}
g(\mathbf{1}_A) = \phi_0 + f_{CI}(\mathbf{1}_A), \end{equation} where $\phi_0$ is the intercept parameter. In order to simplify the notation, let us also define $\bar{f}_{\mathbf{x}^*}( A) = \hat{f}_{\mathbf{x}^*}( A) - \phi_0$. In this case and based on the LIME formulation for local interpretability, one obtains the following loss function: \begin{equation}
\label{eq:choquet_lime_loss}
\mathcal{L}(f,g,\pi) = \sum_{A \in \mathcal{M}}\pi'(A) \left( \bar{f}_{\mathbf{x}^*}( A) - f_{CI}(\mathbf{1}_A) \right)^2, \end{equation} where the weights $\pi'(A)$ have the same values for all $A$ (e.g., 1) except for the empty set and the grand coalition $M$, whose associated weights are big numbers (e.g., $10^6$). We clarify these choices soon.
An interesting aspect on the Choquet integral and that can be easily checked from Equation~\eqref{eq:model_ci} is that, when we only have binary data (which is our case since $\mathbf{1}_A$ is a binary vector), $f_{CI}(\mathbf{1}_A) = \upsilon(A)$. Therefore, we may redefine the loss function presented in~\eqref{eq:choquet_lime_loss} as \begin{equation}
\label{eq:choquet_lime_loss_game}
\mathcal{L}(f,g,\pi) = \sum_{A \in \mathcal{M}} \pi'(A) \left( \bar{f}_{\mathbf{x}^*}( A) - \upsilon(A) \right)^2. \end{equation} Remark that, for $A = \emptyset$, we minimize $\bar{f}_{\mathbf{x}^*}( \emptyset) - \upsilon(\emptyset) = \hat{f}_{\mathbf{x}^*}( \emptyset) - \phi_0 - \upsilon(\emptyset) = \upsilon(\emptyset)$, since $\hat{f}_{\mathbf{x}^*}( \emptyset) = \phi_0$ by definition. In order to ensure that $\upsilon(\emptyset) = 0$ (according to the definition of a game), we assume a big number for $\pi'(\emptyset)$ when solving the optimization problem. Similarly, when $A=M$, we minimize the difference between $\hat{f}_{\mathbf{x}^*}( M)$ and $\phi_0 + \upsilon(M)$. In this case, since $\upsilon(M) = \upsilon(\emptyset) + \sum_{j=1}^m \phi_j$, the big weight $\pi'(M)$ ensures that $\phi_0 + \sum_{j=1}^m \phi_j = \hat{f}_{\mathbf{x}^*}( M) = f(\mathbf{x}^*)$ (the local accuracy property).
Furthermore, if one considers the linear transformation presented in Equation~\eqref{eq:iitomu_s}, the loss function can be directly defined in terms of the generalized Shapley interaction indices. In this case, we have the following optimization problem: \begin{equation}
\label{eq:shap_opt_ci}
\min_{\mathbf{I}} \sum_{A \in \mathcal{M}} \pi'(A) \left( \bar{f}_{\mathbf{x}^*}( A) - \sum_{D \subseteq M} \gamma^{\left|D \right|}_{\left| A \cap D \right|}I(D) \right)^2, \end{equation} where $\gamma$ is defined as in Equation~\eqref{eq:gamma}. As in the Kernel SHAP, our proposal also leads to the exact SHAP values if $\mathcal{M}=\mathcal{P}(M)$. We prove it in the sequel.
\begin{theor}{} \label{theo1} If $\mathcal{M} = \mathcal{P}(M)$, the solution of~\eqref{eq:shap_opt_ci} leads to the exact SHAP values as calculated in Equation~\eqref{eq:shap_values}. \end{theor}
\begin{proof}
Assume $\mathcal{M} = \mathcal{P}(M)$. In this scenario, the optimization problem~\eqref{eq:shap_opt_ci} has a unique solution such that $\sum_{D \subseteq M} \gamma^{\left|D \right|}_{\left| A \cap D \right|}I(D) = \upsilon(A) = \bar{f}_{\mathbf{x}^*}( A)$. From the obtained game and the linear transformation presented in Equation~\eqref{eq:power_ind_s}, we have that $\phi_{j} = \sum_{A \subseteq M\backslash \left\{j\right\}} \frac{\left(m-\left|A\right|-1\right)!\left|A\right|!}{m!} \left[\upsilon(A \cup \left\{j\right\}) - \upsilon(A) \right]$. It remains to show that $\phi_{j} \equiv \phi_{j}(f,\mathbf{x}^*)$.
Recall that we defined $\bar{f}_{\mathbf{x}^*}( A) = \hat{f}_{\mathbf{x}^*}( A) - \phi_0$ and, then, $\upsilon(A) = \hat{f}_{\mathbf{x}^*}( A) - \phi_0$ in the optimal solution. Therefore, we have the following: \begin{equation} \label{eq:proof_equiv}
\begin{split}
\phi_{j} & = \sum_{A \subseteq M\backslash \left\{j\right\}} \frac{\left(m-\left|A\right|-1\right)!\left|A\right|!}{m!} \left[\hat{f}_{\mathbf{x}^*}( A \cup \left\{j\right\}) - \phi_0 - \hat{f}_{\mathbf{x}^*}( A) + \phi_0 \right] \\
& = \sum_{A \subseteq M\backslash \left\{j\right\}} \frac{\left(m-\left|A\right|-1\right)!\left|A\right|!}{m!} \left[\hat{f}_{\mathbf{x}^*}( A \cup \left\{j\right\}) - \hat{f}_{\mathbf{x}^*}( A) \right] \\
& = \phi_{j}(f,\mathbf{x}^*),
\end{split} \end{equation} which proves that our proposal also converges to the exact SHAP values when $\mathcal{M} = \mathcal{P}(M)$. \end{proof}
Similarly as in the Kernel SHAP, we may here also rewrite the optimization problem in vector/matrix notation. For this purpose, let us represent $\hat{\mathbf{f}} \in \mathbb{R}^{n_{\mathcal{M}} \times 1}$ as the vector $\mathbf{f}$ (as defined in Section~\ref{subsubsec:kernel_shap}) discounted by $\phi_0$ and $\bar{\mathbf{W}} \in \mathbb{R}^{n_{\mathcal{M}} \times n_{\mathcal{M}}}$ as the diagonal matrix whose elements are 1's except for the elements associated with the empty set and the grand coalition $M$, whose weights are a big number (e.g., $10^6$). Moreover, we define $\upsilon_{\mathcal{M}}$ as the vector of payoffs for all coalitions $A$ such that $A \in \mathcal{M}$. In addition, we consider $\mathbf{T}_{\mathcal{M}}$ as the transformation matrix whose rows are composed by the rows of $\mathbf{T}$ (as defined in Section~\ref{subsec:shapley}) associated with all coalitions $A$ such that $A \in \mathcal{M}$. For example, in the same problem when $M=\left\{1,2,3 \right\}$ and using $\emptyset$, $\left\{1 \right\}$, $\left\{2 \right\}$, $\left\{1,3 \right\}$ and $M$ as the coalitions of attributes, we have the following: \begin{equation*} \upsilon_{\mathcal{M}} = \left[\begin{matrix} \upsilon(\emptyset) \\ \upsilon(\left\{1 \right\}) \\ \upsilon(\left\{2 \right\}) \\ \upsilon(\left\{1,3 \right\}) \\ \upsilon(M) \end{matrix}\right] = \left[\begin{matrix} 1 & -1/2 & -1/2 & -1/2 & 1/6 & 1/6 & 1/6 & 0 \\ 1 & 1/2 & -1/2 & -1/2 & -1/3 & -1/3 & 1/6 & 1/6 \\ 1 & -1/2 & 1/2 & -1/2 & -1/3 & 1/6 & -1/3 & 1/6 \\ 1 & 1/2 & -1/2 & 1/2 & -1/3 & 1/6 & -1/3 & -1/6 \\ 1 & 1/2 & 1/2 & 1/2 & 1/6 & 1/6 & 1/6 & 0 \end{matrix}\right] \left[\begin{matrix} I(\emptyset) \\ \phi_1 \\ \phi_2 \\ \phi_3 \\ I_{1,2} \\ I_{1,3} \\ I_{2,3} \\ I(\left\{1,2,3 \right\}) \end{matrix}\right] = \mathbf{T}_{\mathcal{M}} \mathbf{I}. 
\end{equation*} \begin{equation*} \hat{\mathbf{f}} = \left[\begin{matrix} \hat{f}_{\mathbf{x}^*}( \emptyset ) - \phi_0 \\ \hat{f}_{\mathbf{x}^*}( \left\{1 \right\} ) - \phi_0 \\ \hat{f}_{\mathbf{x}^*}( \left\{2 \right\} ) - \phi_0 \\ \hat{f}_{\mathbf{x}^*}( \left\{1,3 \right\} ) - \phi_0 \\ \hat{f}_{\mathbf{x}^*}( M ) - \phi_0 \\ \end{matrix}\right]\text{ and }\bar{\mathbf{W}} = \left[\begin{matrix} 10^6 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 & 10^6 \\ \end{matrix}\right]. \end{equation*} The vector/matrix notation leads to the following optimization problem: \begin{equation}
\label{eq:shap_opt_matrix_ci}
\min_{\mathbf{I}} \left(\hat{\mathbf{f}} - \mathbf{T}_{\mathcal{M}} \mathbf{I} \right)^T \bar{\mathbf{W}} \left(\hat{\mathbf{f}} - \mathbf{T}_{\mathcal{M}} \mathbf{I} \right), \end{equation} whose solution is given by \begin{equation}
\label{eq:shap_opt_matrix_sol_ci}
\mathbf{I} = \left(\mathbf{T}_{\mathcal{M}}^T\bar{\mathbf{W}}\mathbf{T}_{\mathcal{M}} \right)^{-1} \mathbf{T}_{\mathcal{M}}^T\bar{\mathbf{W}}\hat{\mathbf{f}}. \end{equation}
It is important to note that, differently from the Kernel SHAP formulation discussed in Section~\ref{subsubsec:kernel_shap}, in our proposal we obtain all Shapley interaction indices (which, obviously, include the SHAP values). Therefore, this can also be infeasible for a large number of attributes, since the number of parameters is given by $2^m$. However, one may exploit some degree of additivity of the Choquet integral, which helps to reduce its number of parameters. We discuss this aspect in the next section.
\subsection{$k$-additive games for local interpretability} \label{subsec:kadd}
As a second contribution, we propose to adopt the concept of $k$-additive games in the Choquet integral-based formulation for local interpretability. Called here $k_{ADD}$-SHAP, our proposal consists in dealing with the following weighted least square problem: \begin{equation}
\label{eq:shap_opt_ci_kadd}
\min_{\phi_0,\mathbf{I}_k} \sum_{A \in \mathcal{M}} \pi'(A) \left( \bar{f}_{\mathbf{x}^*}( A) - \sum_{\substack{D \subseteq M, \\ \left| D \right| \leq k}} \gamma^{\left|D \right|}_{\left| A \cap D \right|}I(D) \right)^2, \end{equation}
where $\mathbf{I}_k = \left[I(\emptyset), \phi_1, \ldots, \phi_m, I_{1,2}, \ldots, I(\left\{m-k+1, \ldots, m\right\}) \right]$ is the vector of Shapley interaction indices, in a cardinal-lexicographic order, for all $I(D)$ such that $\left| D \right| \leq k$. By using the vector/matrix notation, we may rewrite~\eqref{eq:shap_opt_ci_kadd} as follows: \begin{equation}
\label{eq:shap_opt_matrix_ci_kadd}
\min_{\mathbf{I}_k} \left(\hat{\mathbf{f}} - \mathbf{T}_{\mathcal{M},k} \mathbf{I}_k \right)^T \bar{\mathbf{W}} \left(\hat{\mathbf{f}} - \mathbf{T}_{\mathcal{M},k} \mathbf{I}_k \right), \end{equation} whose solution is given by \begin{equation}
\label{eq:shap_opt_matrix_sol_ci_kadd}
\mathbf{I}_k = \left(\mathbf{T}_{\mathcal{M},k}^T\bar{\mathbf{W}}\mathbf{T}_{\mathcal{M},k} \right)^{-1} \mathbf{T}_{\mathcal{M},k}^T\bar{\mathbf{W}}\hat{\mathbf{f}}, \end{equation}
where $\mathbf{T}_{\mathcal{M},k}$ is equal to $\mathbf{T}_{\mathcal{M}}$ up to the columns associated with all $I(D')$ such that $\left| D' \right| \leq k$ ($I(D') = 0$ for all coalitions $D'$ such that $\left| D' \right| > k$).
Note that, as in~\eqref{eq:shap_opt_ci_kadd} (or~\eqref{eq:shap_opt_matrix_ci_kadd}) we restrict the feasible domain to Shapley indices whose cardinalities are at most $k$, we cannot guarantee to achieve the exact SHAP values even if $\mathcal{M} = \mathcal{P}(M)$. In other words, Theorem~\ref{theo1} is not valid for the proposed $k_{ADD}$-SHAP. However, as already mentioned in Section~\ref{subsec:choquet}, an advantage of such a model is that one drastically reduces the number of parameters to be determined while still having a flexible model to generalize the relation between inputs and outputs. Therefore, in order to approximate the exact SHAP values, we avoid over-parametrization and we may need fewer evaluations when adopting~\eqref{eq:shap_opt_matrix_ci_kadd} compared to~\eqref{eq:shap_opt_matrix}. It is important to note that, even if in the Kernel SHAP formulation one only searches for the Shapley values, implicitly, one needs the expected predicted evaluations on all coalitions of attributes in order to calculate the exact SHAP values.
With respect to how to select the subset $\mathcal{M}$ of evaluations, we consider the same strategy as in Kernel SHAP. We sample the elements $A \in \mathcal{M}$ according to the probability distribution defined by $p_A = \frac{\pi(A)}{\sum_{A \subseteq M} \pi(A)}$. As we adopt in this paper a sampling procedure without replacement, after sampling a coalition, we update the probability distribution and normalize it. Moreover, as $p_\emptyset$ and $p_M$ are much greater than the other probabilities, it is very likely that both the empty set and the grand coalition $M$ are sampled to compose the subset $\mathcal{M}$.
Equivalently as in the Kernel SHAP formulation, $\mathbf{S}_{\mathcal{M},k} = \left(\mathbf{T}_{\mathcal{M},k}^T\bar{\mathbf{W}}\mathbf{T}_{\mathcal{M},k} \right)^{-1} \mathbf{T}_{\mathcal{M},k}^T\bar{\mathbf{W}}$ (or $\mathbf{S}_{\mathcal{M}} = \left(\mathbf{T}_{\mathcal{M}}^T\bar{\mathbf{W}}\mathbf{T}_{\mathcal{M}} \right)^{-1} \mathbf{T}_{\mathcal{M}}^T\bar{\mathbf{W}}$) can also be calculated independently of the instance of interest $\mathbf{x}^*$. Therefore, in order to explain the outcome of several instances of interest, one only needs to calculate $\mathbf{S}_{\mathcal{M},k}$ (or $\mathbf{S}_{\mathcal{M}}$) once.
\section{Numerical experiments} \label{sec:exp}
In this section, we present some numerical experiments in order to check the validity and interest of our model\footnote{All codes can be accessed in \url{https://github.com/GuilhermePelegrina/k_addSHAP}.}. The experiments are based on two datasets frequently used in the literature: Diabetes~\citep{Efron2004} and Red Wine Quality~\citep{Cortez2009}. In the sequel, we provide a brief description of both datasets: \begin{itemize} \item Diabetes dataset: This dataset contains $m=10$ attributes (age, sex, body mass index, average blood pressure and six blood serum measurements) that describe $n=442$ diabetes patients. All collected data are centralized (with zero mean) and with standard deviation equal to $0.0476$. For each patient, one also has as the predicted value a measure of the diabetes progression. The mean and the standard deviation for the diabetes progression measure are $152.13$ and $77.00$, respectively. In our experiments, we split the dataset into training (80\%, i.e., $n_{tr}=353$ samples) and test (20\%, i.e., $n_{te}=89$ samples).
\item Red Wine Quality dataset: In this dataset, one has $m=11$ attributes describing $n=1599$ red wines. Both mean and standard deviation (std) of attributes are described in Table~\ref{tab:wine}. For each wine, one also has a score (between 0 and 10) indicating its quality. In our experiments, we use this data for the purpose of classification and, therefore, we assume that a good (resp. a bad) wine has a score greater than 5 (resp. at most 5). In total, one has 855 good wine (class value 1) and 744 bad wine (class value 0). Moreover, we split the dataset into training (80\%, i.e., $n_{tr}=1279$ samples) and test (20\%, i.e., $n_{te}=320$ samples). \end{itemize}
\begin{table}[ht]
\begin{center}
\caption{Summary of the Wine dataset.}\label{tab:wine}
{
\renewcommand{\arraystretch}{1.0}
\small
\begin{tabular}{cccccccc}
\cline{1-2}\cline{4-5}\cline{7-8} \textbf{Attributes} & \makecell{ \textbf{Mean} \\ \textbf{($\pm$ std)} } & & \textbf{Attributes} & \makecell{ \textbf{Mean} \\ \textbf{($\pm$ std)} } & & \textbf{Attributes} & \makecell{ \textbf{Mean} \\ \textbf{($\pm$ std)} } \\
\cline{1-2}\cline{4-5}\cline{7-8}
fixed acidity & \makecell{ $8.320$ \\ ($\pm 1.740$) } & & chlorides & \makecell{ $0.087$ \\ ($\pm 0.047$) } & & pH & \makecell{ $3.311$ \\ ($\pm 0.154$) } \\
\cline{1-2}\cline{4-5}\cline{7-8}
volatile acidity & \makecell{ $0.528$ \\ ($\pm 0.179$) } & & free sulfur dioxide & \makecell{ $15.875$\\ ($\pm 10.457$) } & & sulphates & \makecell{ $0.658$ \\ ($\pm 0.169$) } \\
\cline{1-2}\cline{4-5}\cline{7-8}
citric acid & \makecell{ $0.271$ \\ ($\pm 0.195$) } & & total sulfur dioxide & \makecell{ $46.468$ \\ ($\pm 32.885$) } & & alcohol & \makecell{ $10.423$ \\ ($\pm 1.065$) } \\
\cline{1-2}\cline{4-5}\cline{7-8}
residual sugar & \makecell{ $2.539$ \\ ($\pm 1.409$) } & & density & \makecell{ $0.997$ \\ ($\pm 0.002$) } & & & \\
\cline{1-2}\cline{4-5}\cline{7-8}
\end{tabular}
}
\end{center} \end{table}
Besides different datasets, we also evaluate our proposal by assuming two training models: Neural Network and Random Forest\footnote{We borrowed these methods from the Scikit-learn library~\citep{Pedregosa2011} in Python and adopted the following parameters: \begin{itemize}
\item Neural Network: $max_{iter} = 10^6$ for both MLPRegressor and MLPClassifier.
\item Random Forest: $n\_estimators=1000$, $max\_depth=None$ and $min\_samples\_split=2$ for both RandomForestRegressor and RandomForestClassifier. \end{itemize}}. Recall that the purpose of this paper is to address interpretability in any trained machine learning model. We do not work on improving the model itself; rather, we attempt to explain the contributions of attributes regardless of how accurate the model is.
\subsection{Experiment varying the number of expected prediction evaluations until the exact SHAP values convergence} \label{subsec:exp1}
In the first experiment, we verify the convergence of the proposed $k_{ADD}$-SHAP and the Kernel SHAP to the exact SHAP values. For each dataset and test sample (recall that we use the training data to calculate the expected predictions given the coalitions in $\mathcal{M}$), we vary the number of expected prediction evaluations, apply both $k_{ADD}$-SHAP and Kernel SHAP and calculate the squared error when estimating the exact SHAP values. Let us represent, for a given test sample $i'$, the SHAP values obtained by the Equation~\eqref{eq:shap_values} (the exact SHAP values), the $k_{ADD}$-SHAP and the Kernel SHAP as $\phi^{exact,i'}$, $\phi^{k_{ADD},i'}$ and $\phi^{Kernel,i'}$, respectively. The squared error between $\phi^{exact,i'}$ and $\phi^{k_{ADD},i'}$ is given as follows: \begin{equation} \label{eq:error}
\varepsilon_{k_{ADD},i'} = \sum_{j=1}^m \left(\phi_j^{exact,i'} - \phi_j^{k_{ADD},i'} \right)^2. \end{equation} In order to calculate the squared error with respect to the Kernel SHAP, one only needs to replace $\phi_j^{k_{ADD},i'}$ by $\phi_j^{Kernel,i'}$. By increasing $n_{\mathcal{M}}$, i.e., the number of coalitions selected to calculate the expected prediction evaluations used in the estimation procedure, the aim is to verify the convergence to the exact SHAP values. We show the obtained results by taking the median (50th percentile or $q_{0.5}$ - a central tendency measure), the 90th percentile and the 10th percentile ($q_{0.9}$ and $q_{0.1}$, respectively, both used to indicate the dispersion around the median) over $s = 501$ simulations. For each simulation, we calculate the errors when estimating the exact SHAP values of all test samples. For the proposed $k_{ADD}$-SHAP, the percentile $q_a$, $a=0.1,0.5,0.9$, is calculated as follows: \begin{equation} \label{eq:average_error}
\bar{\varepsilon}_{a,k_{ADD}} = q_a \left( \frac{1}{n_{te}}\sum_{i'=1}^{n_{te}} \varepsilon_{k_{ADD},i'}^{1}, \ldots, \frac{1}{n_{te}}\sum_{i'=1}^{n_{te}} \varepsilon_{k_{ADD},i'}^{s} \right) \end{equation} where $\varepsilon_{k_{ADD},i'}^{r}$, $r=1, \ldots, s$ represents the squared error for test sample $i'$ in simulation $r$. Equation~\eqref{eq:average_error} can be easily adapted to calculate the metrics when adopting the Kernel SHAP.
The results are presented in Figures~\ref{fig:convergence_diabetes} and~\ref{fig:convergence_wine}. The central line represents the average median and the shaded area indicates the averaged dispersion between the 10th and 90th percentiles. For both datasets and trained models, the $3_{ADD}$-SHAP leads to a faster approximation to the exact SHAP values in comparison with the Kernel SHAP. Moreover, the dispersion was lower for the $3_{ADD}$-SHAP even with reduced numbers of expected prediction evaluations. For Kernel SHAP, one achieves a high dispersion for low number of expected prediction evaluations (see, especially, Figure~\ref{fig:convergence_diabetes}), which decreases as one includes more samples. With respect to the $2_{ADD}$-SHAP, it has a good performance (better than the Kernel SHAP) for few evaluations, however, it diverges as more samples are included in the SHAP values estimation. An explanation for these results is that the $2_{ADD}$-SHAP could rapidly approximate the exact SHAP values when fewer evaluations are used because it can avoid over-parametrization when only few data are considered. However, when increasing the number of expected prediction evaluations, the $2_{ADD}$-SHAP does not have enough flexibility to model the data and the adjusted parameters could not converge to the correct ones. As can be also seen in Figure~\ref{fig:convergence_wine}, the $3_{ADD}$-SHAP also diverges for a high number of evaluations (recall from Section~\ref{subsec:kadd} that we cannot guarantee to achieve the exact SHAP values even if $\mathcal{M} = \mathcal{P}(M)$), however, it still achieves a very low error. Clearly, as we increase $k$, the parameters become more flexible to model the data and estimate the exact SHAP values.
\begin{figure}\label{fig:convergence_diabetes_2add_mlp}
\label{fig:convergence_diabetes_2add_rf}
\label{fig:convergence_diabetes_3add_mlp}
\label{fig:convergence_diabetes_3add_rf}
\label{fig:convergence_diabetes}
\end{figure}
\begin{figure}\label{fig:convergence_wine_2add_mlp}
\label{fig:convergence_wine_2add_rf}
\label{fig:convergence_wine_3add_mlp}
\label{fig:convergence_wine_3add_rf}
\label{fig:convergence_wine}
\end{figure}
\subsection{Experiment comparing the obtained SHAP values} \label{subsec:exp2}
In this experiment, we compare the obtained SHAP values with the exact ones. For an instance of interest among the test data, we use the previous experiment and select the SHAP values that lead to the median error over all the simulations. For ease of visualization, we only plotted the five attributes that contribute the most (either positively or negatively) according to the exact SHAP values. As an illustrative example and without loss of generality, we selected a test sample $\mathbf{x}^*$ from the Diabetes dataset that has the attributes values described in Table~\ref{tab:diabetes_samp} (recall that this dataset is already centered with zero mean). The predicted measure of diabetes progression is equal to 84, which is less than the overall expected prediction provided by both Neural Networks and Random Forest (154.92 and 153.81, respectively). This means that the SHAP values help to explain, for the instance of interest $\mathbf{x}^*$, how each attribute value contributes to decrease the diabetes progression measure from the overall prediction until the actual 84.
\begin{table}[!h]
\begin{center}
\caption{Summary of the selected test sample - Diabetes dataset.}\label{tab:diabetes_samp}
{
\renewcommand{\arraystretch}{1.0}
\small
\begin{tabular}{cccccccc}
\cline{1-2}\cline{4-5}\cline{7-8} \textbf{Attributes} & \textbf{Values} & & \textbf{Attributes} & \textbf{Values} & & \textbf{Attributes} & \textbf{Values} \\
\cline{1-2}\cline{4-5}\cline{7-8}
age & $0.009$ & & blood serum 1 & $0.099$ & & blood serum 5 & $-0.021$ \\
\cline{1-2}\cline{4-5}\cline{7-8}
sex & $-0.045$ & & blood serum 2 & $0.094$ & & blood serum 6 & $0.007$ \\
\cline{1-2}\cline{4-5}\cline{7-8}
body mass index & $-0.024$ & & blood serum 3 & $0.071$ & & & \\
\cline{1-2}\cline{4-5}\cline{7-8}
average blood pressure & $-0.026$ & & blood serum 4 & $-0.002$ & & & \\
\cline{1-2}\cline{4-5}\cline{7-8}
\end{tabular}
}
\end{center} \end{table}
Figure~\ref{fig:shapley_diabetes} presents the estimated SHAP values when using $n_{\mathcal{M}}=290$, $n_{\mathcal{M}}=590$, $n_{\mathcal{M}}=890$ different coalitions of attributes to calculate the expected prediction evaluations. As a first remark, we note that the estimated SHAP values (specially the illustrated five ones) for the Neural Network (Figures~\ref{fig:shapley_diabetes_5_290_mlp},~\ref{fig:shapley_diabetes_5_590_mlp} and~\ref{fig:shapley_diabetes_5_890_mlp}) practically do not change regardless the number of predicted evaluations. All approaches led to very small errors, i.e., they could rapidly approximate the exact SHAP values associated with the Neural Networks model. For the Random Forest, we see that the contributions provided by the $3_{ADD}$-SHAP are close to the exact ones even with small number of predicted evaluations (see Figure~\ref{fig:shapley_diabetes_5_290_rf}). As one increases the number of evaluations, the Kernel SHAP converges to the exact SHAP values.
\begin{figure}
\caption{Comparison between the estimated SHAP values provided by the $2_{ADD}$-SHAP, $3_{ADD}$-SHAP and Kernel SHAP for different machine learning models and varying the number of coalitions used to calculate the expected prediction evaluations (Diabetes dataset).}
\label{fig:shapley_diabetes_5_290_mlp}
\label{fig:shapley_diabetes_5_290_rf}
\label{fig:shapley_diabetes_5_590_mlp}
\label{fig:shapley_diabetes_5_590_rf}
\label{fig:shapley_diabetes_5_890_mlp}
\label{fig:shapley_diabetes_5_890_rf}
\label{fig:shapley_diabetes}
\end{figure}
Regarding the Red Wine dataset, we selected as an illustrative example a test sample classified as a good wine. The attribute values are described in Table~\ref{tab:wine_samp}. The overall expected probability prediction for class 1 (good wine) for both Neural Networks and Random Forest is approximately 0.53. In this case, the SHAP values indicate the contributions of attributes that increase the probability of being classified as a good wine from the overall expected probability until the actual classification (class value equals to 1).
\begin{table}[!h]
\begin{center}
\caption{Summary of the selected test sample - Red Wine dataset.}\label{tab:wine_samp}
{
\renewcommand{\arraystretch}{1.0}
\small
\begin{tabular}{cccccccc}
\cline{1-2}\cline{4-5}\cline{7-8} \textbf{Attributes} & \textbf{Values} & & \textbf{Attributes} & \textbf{Values} & & \textbf{Attributes} & \textbf{Values} \\
\cline{1-2}\cline{4-5}\cline{7-8}
fixed acidity & $9.4$ & & chlorides & $0.08$ & & pH & $3.15$ \\
\cline{1-2}\cline{4-5}\cline{7-8}
volatile acidity & $0.3$ & & free sulfur dioxide & $6$ & & sulphates & $0.92$ \\
\cline{1-2}\cline{4-5}\cline{7-8}
citric acid & $0.56$ & & total sulfur dioxide & $17$ & & alcohol & $11.7$ \\
\cline{1-2}\cline{4-5}\cline{7-8}
residual sugar & $2.8$ & & density & $0.9964$ & & & \\
\cline{1-2}\cline{4-5}\cline{7-8}
\end{tabular}
}
\end{center} \end{table}
Figure~\ref{fig:shapley_wine} presents the estimated SHAP values when using $n_{\mathcal{M}}=420$, $n_{\mathcal{M}}=1020$ and $n_{\mathcal{M}}=1800$ coalitions of attributes. As in the previous dataset, we can see that, even for a reduced number of samples, the $3_{ADD}$-SHAP converges faster to the exact SHAP values. When the number of expected prediction evaluations increases, the Kernel SHAP converges to the exact SHAP values while the $2_{ADD}$-SHAP slightly diverges.
\begin{figure}
\caption{Comparison between the estimated SHAP values provided by the $2_{ADD}$-SHAP, $3_{ADD}$-SHAP and Kernel SHAP for different machine learning models and varying the number of coalitions used to calculate the expected prediction evaluations (Red Wine dataset).}
\label{fig:shapley_wine_18_420_mlp}
\label{fig:shapley_wine_18_420_rf}
\label{fig:shapley_wine_18_1020_mlp}
\label{fig:shapley_wine_18_1020_rf}
\label{fig:shapley_wine_18_1800_mlp}
\label{fig:shapley_wine_18_1800_rf}
\label{fig:shapley_wine}
\end{figure}
\subsection{Illustrative example and results visualization} \label{subsec:exp3}
The purpose of this last experiment is to apply our proposal to visualize the attributes contribution towards the actual predicted outcome. We use as an illustrative example the Red Wine dataset and applied the $3_{ADD}$-SHAP. We also consider the test sample used in the previous experiment, which is classified as a good wine. Based on 1500 predicted evaluations and using the Random Forest, the contributions of attributes are presented in Figure~\ref{fig:example_wine_shapley}. Note that there are three attributes that contribute the most into the predicted outcome: alcohol, sulphates and volatile acidity. They are all positively contributing to predict the sample as a good wine.
\begin{figure*}
\caption{Attributes contribution towards the predicted outcome - $3_{ADD}$-SHAP and Red Wine dataset. }
\label{fig:example_wine_shapley}
\end{figure*}
Recall that, more than the contribution of features, our proposal automatically provides the interaction degree between them. We highlight that these interaction effects do not come up with the original Kernel SHAP formulation. Indeed, further adaptations must be made in Kernel SHAP in order to retrieve the interaction effects~\citep{Lundberg2020}. Figure~\ref{fig:example_wine_interaction} shows the interaction degree between attributes for the considered test sample. It indicates that, although volatile acidity, sulphates and alcohol (attributes 1, 9 and 10, respectively) contribute the most to the predicted outcome, there are negative interactions between alcohol and both volatile acidity and sulphates. This suggests that there are some redundancies between alcohol and the other two attributes when predicting the sample as a good wine.
\begin{figure*}
\caption{Interaction degree between attributes - $3_{ADD}$-SHAP and Red Wine dataset. }
\label{fig:example_wine_interaction}
\end{figure*}
\section{Conclusions and future perspectives} \label{concl}
Interpretability in machine learning has become as important as accuracy in real problems. For instance, even if there is a correct classification (e.g., a denied credit), the explanation about how this result was achieved is required to ensure the model trustfulness. A very famous model-agnostic algorithm for machine learning interpretability is the SHAP method. Based on the Shapley values, the SHAP method indicates the contribution of each attribute in the predicted outcome. For this purpose, we look at the machine learning task as a cooperative game theory problem and calculate the marginal contribution of each attribute by taking the predicted outcomes of all possible coalitions of attributes. A point of attention in this calculation is that, as the number of predicted outcomes evaluations exponentially increases with the number of attributes, one may not be able to obtain the exact SHAP values.
In order to reduce the computational effort of the SHAP method, the Kernel SHAP emerges as a clever strategy to approximate the SHAP values. However, its formulation is not easy to follow and no further consideration about the modeled game is assumed when approximating the SHAP values. In this paper, we first proposed a straightforward Choquet integral-based formulation for local interpretability. As the parameters used in the Choquet integral are directly associated with the Shapley values, our formulation also leads to the SHAP values. Therefore, we can also exploit the benefits of the SHAP values when interpreting local predictions. Moreover, our formulation also provides the interaction effects between attributes without further adaptations in the algorithm. Therefore, we can interpret the marginal contribution of each attribute towards the predicted outcome and how they interact with each other.
As a second contribution, we exploit the concept of $k$-additive games. The use of $k$-additive models has revealed to be useful in multicriteria decision making problems in order to reduce the number of parameters in capacity-based aggregation functions (such as the Choquet integral) while keeping a good level of flexibility in data modeling. Therefore, as attested in the numerical experiments, when adopting $k$-additive games (specially the $3$-additive, which leads to the proposed $3_{ADD}$-SHAP), we could approximate the SHAP values using less predicted outcomes evaluations in comparison with the Kernel SHAP. As one reduced the number of parameters in the Choquet integral formulation, one avoided over-parametrization in scenarios with a low number of predicted outcomes evaluations. On the other hand, as we restricted the modeling data domain, in the scenario with all evaluations the proposed $k_{ADD}$-SHAP may slightly diverge from the exact SHAP values. However, as could be seen in the experiments, this difference is very low (mainly for the $3_{ADD}$-SHAP) and it does not affect the interpretability.
Future works include to extend the proposed approach when assuming that the attributes are dependent. In such a scenario, the formulation could be adjusted in order to better approximate the Shapley values~\citep{Aas2021}. Another perspective consists in evaluating the use of other game-based aggregation functions to deal with local interpretability. However, as some of them do not ensure the efficiency property, one must be careful in how one can apply them in the context of machine learning in a way that the feature attribution makes sense for local or global interpretability.
\section*{Acknowledgments} Work supported by S\~{a}o Paulo Research Foundation (FAPESP) under the grants \#2020/09838-0 (BI0S - Brazilian Institute of Data Science), \#2020/10572-5 and \#2021/11086-0.
\section*{Appendix A}
We here describe the desired properties satisfied by SHAP values, which are derived from the Shapley values properties~\citep{Shapley1953,Young1985}. Recall that $f(\mathbf{x})$ is the predicted outcome of a trained model $f(\cdot)$, $\mathbf{x}$ is the instance to be explained and $\mathbf{z}'$ is a binary vector. The proofs are provided in the original SHAP paper~\citep{Lundberg2017}.
\begin{Properties}
\item \textbf{Local accuracy (or efficiency)} \\
\begin{equation}
f(\mathbf{x}) = \phi_0 + \sum_{j=1}^m \phi_j(f,\mathbf{x})
\end{equation}
The local accuracy property states that the predicted outcome $f(\mathbf{x})$ can be decomposed by the sum of the SHAP values and the overall expected prediction $\phi_0$.
\item \textbf{Missingness} \\
If, for all subset of attributes represented by the coalition $\mathbf{z}'$,
\begin{equation}
f\left( h_{\mathbf{x}}(\mathbf{z}')\right) = f\left( h_{\mathbf{x}}(\mathbf{z}'\backslash j)\right),
\end{equation}
then $\phi_j(f,\mathbf{x}) = 0$. This property states that, if adding attribute $j$ into the coalition the expected prediction remains the same, the marginal contribution of such an attribute is null.
\item \textbf{Consistency (or monotonicity)} \\
For any two models $f(\cdot)$ and $f'(\cdot)$, if
\begin{equation}
f'\left( h_{\mathbf{x}}(\mathbf{z}')\right) - f'\left( h_{\mathbf{x}}(\mathbf{z}' \backslash j)\right) \geq f\left( h_{\mathbf{x}}(\mathbf{z}')\right) - f\left( h_{\mathbf{x}}(\mathbf{z}'\backslash j)\right)
\end{equation}
for any binary vector $\mathbf{z}' \in \left\{0,1\right\}^m$, then $\phi_j(f',\mathbf{x}) \geq \phi_j(f,\mathbf{x})$. The consistency property states that, if one changes the trained model and the contribution of an attribute $j$ increases or stays the same regardless of the other inputs, the marginal contribution of such an attribute should not decrease. \end{Properties}
\biboptions{authoryear}
\end{document} |
\begin{document}
\makeatletter
\def\ps@pprintTitle{
\let\@oddhead\@empty
\let\@evenhead\@empty
\let\@oddfoot\@empty
\let\@evenfoot\@oddfoot
}
\makeatother
\begin{frontmatter}
\title{System Identification and $H_\infty$-based Control of Quadrotor Attitude}
\author[mainaddress]{Ali Noormohammadi-Asl \corref{correspondingauthor}} \cortext[correspondingauthor]{Corresponding author} \ead{[email protected]}
\author[mainaddress]{Omid Esrafilian } \ead{[email protected]} \author[mainaddress]{Mojtaba Ahangar Arzati } \ead{[email protected]} \author[mainaddress]{Hamid D. Taghirad} \ead{[email protected]} \address[mainaddress]{\textbf{A}dvanced \textbf{R}obotics and
\textbf{A}utomated \textbf{S}ystems (ARAS)\fntext[arassite]{website: aras.kntu.ac.ir}\\Faculty of Electrical Engineering, K. N. Toosi University of Technology}
\begin{abstract} The attitude control of a quadrotor is a fundamental problem, which has a pivotal role in a quadrotor stabilization and control. What makes this problem more challenging is the presence of uncertainty such as unmodelled dynamics and unknown parameters. In this paper, to cope with uncertainty, an $H_\infty$ control approach is adopted for a real quadrotor. To achieve $H_\infty$ controller, first a continuous-time system identification is performed on the experimental data to encapsulate a nominal model of the system as well as a multiplicative uncertainty. By this means, $H_\infty$ controllers for both roll and pitch angles are synthesized. To verify the effectiveness of the proposed controllers, some real experiments and simulations are carried out. Results verify that the designed controller does retain robust stability, and provide a better tracking performance in comparison with a well-tuned PID and a $\mu$ synthesis controller. \end{abstract}
\begin{keyword} Quadrotor\sep Attitude control\sep Continuous-time system identification\sep Linear uncertain system\sep Robust $H_\infty$ control. \end{keyword}
\end{frontmatter}
\section{Introduction}
Quadrotors are widely considered due to their remarkable features and applications in a variety of areas such as monitoring \cite{wang2016detecting,berra2017commercial}, surveillance \cite{acevedo2014decentralized, kingston2008decentralized}, search and rescue \cite{silvagni2017multipurpose,khan2015information}, agriculture \cite {tokekar2016sensor,tetila2017identification}, and delivery of goods \cite{gawel2017aerial,arbanas2016aerial}.
Quadrotors are also of paramount interest owing to the advantages of low cost, maneuverability, small size, and simple design structure. The quadrotor is a nonlinear system with coupled states. In general, there are six states including the robot position $\left(x,y,z\right)$ and the angles (yaw, pitch, roll) with only four inputs, resulting in an underactuated system. Despite the mentioned advantages of quadrotors, the open-loop instability, coupled states, and underactuation make their control challenging. Quadrotor control is accompanied by several other serious challenges, particularly when it turns from a simple model in theory to a practical problem in which many other key factors such as parameter uncertainty, unmodelled dynamics, and input constraints need to be considered. The main focus of this paper is the stabilization and attitude control of a quadrotor considering such uncertainties and input constraints.
Modeling and control of a quadrotor have been extensively studied in the literature. PID control is the most common controller widely used in commercial quadrotors \cite{hoffmann2007quadrotor,szafranski2011different,garcia2012robust,zhou2017robust}. In \cite{szafranski2011different} three different PID structures are employed for the attitude control. In \cite{garcia2012robust} a robust PID controller is designed using affine parameterization. In \cite{zhou2017robust} a cascade control for the attitude control is employed and a robust compensator is added to reduce the wind disturbance effect. Linear Quadratic Regulator (LQR) is another linear-based controller used for quadrotors \cite{bouabdallah2004pid,liu2013robust}. In LQR controllers, setting proper weights is difficult and needs trial and error. Linear-based controllers have simple designs and have been tested successfully on experimental and simulated platforms. Linear controllers, however, are based on a linear approximation of the actual system model by neglecting the nonlinear behavior of the system which may increase the probability of the system failure. Moreover, they are limited to low velocities as well as small angles of deviation, and their stability is guaranteed only near the selected operating point.
To transcend the limitations of the linear controllers, many nonlinear control approaches have been proposed.
In \cite{flores2013lyapunov}, a Lyapunov-based controller is designed using singular perturbation theory for a quadrotor stabilization. Backstepping-based control has also been developed by many researchers \cite{huo2014attitude,das2009backstepping,djamel2016attitude,shao2018robust}. A backstepping controller using a Lagrangian form of the quadrotor dynamics is designed in \cite{das2009backstepping}. In \cite{djamel2016attitude} an optimal backstepping controller using $\text{H}_\infty$ is proposed. Sliding mode control is another nonlinear control method which is capable of dealing with some classes of uncertainties and external disturbances \cite{chen2016robust,jia2017integral,yang2016attitude,zheng2014second,wang2019disturbance}. In \cite{chen2016robust,jia2017integral}, a combination of the sliding mode control and backstepping control techniques is used for the control of quadrotors. Despite the mentioned capabilities, sliding mode control suffers from chattering, and it is necessary to know the upper bounds of uncertainties.
In order to reduce the chattering effect, different approaches like fuzzy gain scheduling \cite{yang2016attitude} and higher order sliding mode \cite{zheng2014second} have been proposed. A fuzzy state observer is proposed in \cite{mallavalli2018fault} to estimate the unknown nonlinear functions of the uncertain system model, then an integral terminal sliding mode controller is used. Many other robust nonlinear control techniques have also been proposed to cope with the quadrotor uncertainties. In \cite{liu2017robust}, a robust compensating technique is used to control quadrotors with time-varying uncertainties and delays. The design of a nonlinear $\text{H}_\infty$ controller is studied in \cite{raffo2010integral} to achieve robustness. In \cite{kerma2012nonlinear}, a nonlinear $\text{H}_\infty$ output feedback controller coupled with a high order sliding mode estimator is utilized to control the quadrotor in the presence of parameter uncertainty and external disturbances. Adaptive control is another approach for controlling systems with unknown model parameters and has been widely used for controlling various practical systems \cite{hu2018adaptive,zhang2016active,jiang2019hydrothermal}. An adaptive controller is employed in \cite{tran2018adaptive} for trajectory tracking of a quadrotor considering the input constraints and uncertain parameters in its nonlinear model.
In order to design a nonlinear-based controller, the quadrotor nonlinear model is required, which is hard to obtain. For instance, inertia matrix, center of gravity and some other parameters of a quadrotor are not easily accessible and it is difficult to obtain their accurate values. In most of the previous studies, a symmetric model for the quadrotor is assumed, however in real robots this assumption does not hold. Furthermore, some factors, like wind, cause environmental disturbances. Especially in an actual flight near obstacles or at low altitude, undesirable wind effects on the stability and performance of a quadrotor are more evident. Considering the wind model makes designing a nonlinear controller arduous and challenging. An approach to overcome the model uncertainty, such as unmodelled dynamics and parameter uncertainty, is using system identification techniques. In \cite{liu2018parameter}, a closed-loop multivariable extremum seeking algorithm (MESA) is suggested for parameter identification of a quadrotor. The black-box model identification using a continuous-time approach is adopted in \cite{bergamasco2014identification} to obtain a linear model for the dynamics of a quadrotor.
Linear robust control is a popular approach in the control theory that can help to mitigate the effect of unmodeled dynamics in linear-based controllers. This approach has been used in many practical problems such as surgery robots\cite{agand2017decentralized}, parallel robots \cite{bataleblu2016robust}, harmonic drive systems \cite{taghirad2001h}, and vibration control of elastic gantry crane and 3D bar structure \cite{golovin2019robust,mystkowski2016mu}; and the results confirm its remarkable ability to deal with uncertainty. In \cite{safaee2013system,wang2013robust}, the linear robust $\text{H}_\infty$ controller is employed for attitude and altitude control of a helicopter. In order to utilize the $\text{H}_\infty$ controller, a comprehensive identification phase is required, which provides information about the linear model of the system and the associated uncertainty.
In this paper, there is no information available about the model and parameters (e.g. CAD model) of the quadrotor. Thus, in order to overcome the aforementioned limitations of linear and nonlinear controllers, embracing lack of system model and different sources of uncertainty, a linear robust controller has been derived and implemented on a real quadrotor. In this case, a continuous time system identification from the sampled data of the real robot is required to gather information about both the system model and uncertainty. The main contributions and phases of this paper are as follows: \begin{itemize} \item The first phase is performing a system identification based on the experimental frequency response estimates to obtain a nominal linear model along with a multiplicative uncertainty block. This phase is the principal and most challenging part of control design because of system identification difficulties and its importance in designing an effective controller. In this phase, the first step is designing appropriate experiments which provide us with informative and usable data for system identification. Then, by applying existing tools and methods such as MATLAB identification and CONTSID toolbox, the nominal models and uncertainty weighting functions are acquired. \item Having obtained information about the system in the previous phase, a linear $H_\infty$ control is synthesized. In this phase, it is important to choose proper sensitivity and input weighting functions to achieve desired tracking and regulating performance. \item Finally, the controller is implemented on the robot and its high performance and robustness are shown by simulation and experimental results. In addition, a PID and a $\mu$ synthesis controller are designed to compare their performance with the $\text{H}_\infty$ controller. \end{itemize}
\section{System and uncertainty encapsulation}\label{sec2} In many systems, uncertainty emerges due to the dynamical perturbation, such as unmodelled and high frequency dynamics, in various parts of a system. To capture uncertainty in the system, we utilize the classical multiplicative perturbation model. This model helps to encapsulate the various sources of uncertainty, e.g. parametric and unmodelled dynamics, in a full block of multiplicative uncertainty. Considering the nominal system transfer function as $G_0$, the family of uncertain systems may be shown as follows: \begin{equation}\label{eq21} \mathcal{G}=\left\{G\left(s\right) \mid G=\left(1+\Delta W\right)G_0 \right\} , \end{equation} where $W$ is the uncertainty weighting function, and $\Delta$ is a stable perturbation satisfying $\lVert\Delta\rVert_\infty<1$.
Hence, for designing a robust control for a nonlinear system, an approximate linear model, along with a weighting function providing information about the uncertainty profile, is required. In order to obtain a controller having proper performance in practice, it is necessary to perform a system identification on the experimental data generated from the real system. There are some challenges, however, for a quadrotor system identification: \begin{itemize} \item Quadrotor robots are inherently unstable, thus a closed loop identification method is recommended. \item For the robust control design a continuous-time model is needed. \item The quadrotor system is a multi-input multi-output (MIMO) system, but due to its open-loop instability and piloting difficulty, it is better to obtain the experimental data for each channel separately and then perform the system identification. \end{itemize} In what follows, we dwell on the closed loop system identification. For this, first we derive a theoretical model for the system, then identify the system based on the gathered experimental data.
\subsection{Theoretical model} Modeling the dynamic of quadrotors has been studied extensively in the literature. Many models attempt to consider parameters and factors that are difficult to calculate or their values may vary in different situations such as the presence of wind. The goal of this paper is to design a controller that is largely independent of modeling parameters. To this end, first we model the dynamic of the robot. As shown in Fig. \ref{fig21} for a drone with four rotors, which rotate in opposite directions (two rotors rotate in a clockwise direction and two other rotate counterclockwise), the angular accelerations pertain to the pitch, roll, and yaw angles are modeled by the following nonlinear models: \begin{equation}\label{eq22} \begin{aligned} &\ddot{\phi}=\frac{J_r\dot{\theta}\left(\Omega_1+\Omega_3-\Omega_2-\Omega_4\right)}{I_{xx}} +\frac{I_{yy}-I_{zz}}{I_{xx}}\dot{\psi}\dot{\theta}+\frac{bl\left(\Omega_2^2-\Omega_4^2\right)}{I_{xx}}, \\ &\ddot{\theta}=\frac{J_r\dot{\phi}\left(-\Omega_1-\Omega_3+\Omega_2+\Omega_4\right)}{I_{yy}} +\frac{I_{zz}-I_{xx}}{I_{yy}}\dot{\psi}\dot{\phi}+\frac{bl\left(\Omega_3^2-\Omega_1^2\right)}{I_{yy}}, \\ &\ddot{\psi}=\frac{d\left(\Omega_1^2+\Omega_3^2-\Omega_2^2-\Omega_4^2\right)}{I_{zz}} +\frac{I_{xx}-I_{yy}}{I_{zz}}\dot{\theta}\dot{\phi}, \end{aligned} \end{equation} where $\phi$, $\theta$, and $\psi$ stand for the roll, pitch and yaw angles, respectively. The remaining parameters are listed in table \ref{table21}. $\left(I_{yy}-I_{zz}\right)\dot{\psi}\dot{\theta}$, $\left(I_{zz}-I_{xx}\right)\dot{\psi}\dot{\phi}$ and $\left(I_{xx}-I_{yy}\right)\dot{\theta}\dot{\phi}$ are body gyro effects. Considering $\Omega_r=\left(\Omega_1+\Omega_3-\Omega_2-\Omega_4\right)$, $J_r\dot{\theta}\Omega_r$ and $J_r\dot{\phi}\Omega_r$ represent propeller gyro effects.
\begin{figure}
\caption{Quadrotor body frame, rotor arrangement and corresponding forces}
\label{fig21}
\end{figure}
\begin{table}[t] \caption{List of symbols in Eq. \ref{eq22}} \label{table21} \begin{center}
\begin{tabular}{|c||c|}
\hline
Symbol & Description \\
\hline\hline $I_{xx,yy,zz}$ & Inertia moments\\ \hline $J_r$ & Rotor inertia\\ \hline $\Omega_i$ & Propeller angular rate\\ \hline $d$ & Drag factor\\ \hline $b$ & Thrust factor\\ \hline $l$ & Horizontal distance: propeller center to CoG{\textsuperscript{\tiny {*}}}\\
\hline \multicolumn{2}{l}{\small {\textsuperscript{\tiny {*}}Center of Gravity}} \end{tabular} \end{center} \end{table}
In order to analyze the quadrotor model in the presence of a PID controller, the gyroscopic effects can be ignored compared to the motors' action. Thus, Eq. (\ref{eq22}) is rewritten as: \begin{equation}\label{eq23} \begin{aligned} &\ddot{\phi}=\frac{bl\left(\Omega_2^2-\Omega_4^2\right)}{I_{xx}}, \\ &\ddot{\theta}=\frac{bl\left(\Omega_3^2-\Omega_1^2\right)}{I_{yy}}, \\ &\ddot{\psi}=\frac{d\left(\Omega_1^2+\Omega_3^2-\Omega_2^2-\Omega_4^2\right)}{I_{zz}}. \end{aligned} \end{equation} The dynamics of the rotor is considered as $\frac{T_1}{s+T_2}$, in which $T_1,T_2\in \mathbb{R}^+$ and $T_2$ is the pole of the system and $\sfrac{T_1}{T_2}$ is the DC gain. Thus, the model of the robot is obtained as follows: \begin{equation}\label{eq24} \begin{aligned} &\phi\left(s\right)=\frac{T_1^2bl}{s^2\left(s+T_2\right)^2I_{xx}}\left(u_4^2\left(s\right)-u_2^2\left(s\right) \right),\\ &\theta\left(s\right)=\frac{T_1^2bl}{s^2\left(s+T_2\right)^2I_{yy}}\left(u_3^2\left(s\right)-u_1^2\left(s\right) \right),\\ &\psi\left(s\right)=\frac{T_1^2d}{s^2\left(s+T_2\right)^2I_{zz}}\left(u_1^2\left(s\right)+u_3^2\left(s\right)-u_2^2\left(s\right) -u_4^2\left(s\right)\right), \end{aligned} \end{equation} where $u_1$, $u_2$, $u_3$, and $u_4$ are motors inputs. Assuming the motors have fast response, the Eq. (\ref{eq24}) can be rewritten as follows, which is a double-integrator unstable system: \begin{equation}\label{eq25} \begin{aligned} &\phi\left(s\right)=\frac{A_1}{s^2}U_1;\qquad U_1=u_4^2\left(s\right)-u_2^2\left(s\right),\\ &\theta\left(s\right)=\frac{A_2}{s^2}U_2; \qquad U_2=u_3^2\left(s\right)-u_1^2\left(s\right),\\ &\psi\left(s\right)=\frac{A_3}{s^2}U_3;\qquad U_3=u_1^2\left(s\right)+u_3^2\left(s\right)-u_2^2\left(s\right) -u_4^2\left(s\right), \end{aligned} \end{equation} where $A_1, A_2$, and $A_3 \in \mathbb{R}^+$. The system model in the presence of a PID controller, as shown in Fig. 
\ref{fig22}, is obtained as: \begin{equation}\label{eq26} \begin{aligned} &G_{roll}\left(s\right)=\frac{A_1\left(k_{d_r}s^2+k_{p_r}s+k_{i_r}\right)}{s^3+A_1\left(k_{d_r}s^2+k_{p_r}s+k_{i_r}\right)}U_1,\\ &G_{pitch}\left(s\right)=\frac{A_2\left(k_{d_p}s^2+k_{p_p}s+k_{i_p}\right)}{s^3+A_2\left(k_{d_p}s^2+k_{p_p}s+k_{i_p}\right)}U_2, \end{aligned} \end{equation} where $K_{p_{r,p}}$, $K_{d_{r,p}}$, and $K_{i_{r,p}}$ are the nonnegative proportional, derivative, and integral gains, respectively. These equations give a useful insight into the number of poles and zeros of the system in the identification phase. \begin{figure}
\caption{PID control structure for quadrotor}
\label{fig22}
\end{figure}
\subsection{Control problem} Attitude control is the integral part of the stabilization problem and helps to reach the desired 3d orientation. In this paper, we focus on the roll and pitch angles because they play the main role in the stability of the quadrotor. Due to the coupling of the system's states, the performance of roll and pitch has considerable effect on other states. For example, when the quadrotor moves forward, the pitch angle changes and then returns to zero. Thus, a fast and reliable control on roll and pitch angles is crucial. In addition, the disturbance exerts significant influence on the performance and stability of roll and pitch angles and consequently the overall performance of the robot.
By considering $X=\left(\phi,\dot{\phi},\theta,\dot{\theta}\right)$, the control problem is to reach $X_d=\left(\phi_d,0,\theta_d,0\right)$ in which $\phi_d$ and $\theta_d$ are the desired roll and pitch angles. In our case, $\phi_d$ and $\theta_d$ are set to zero to stabilize the quadrotor. However, as mentioned before, overcoming model uncertainty is the main goal of this paper, which should be achieved while satisfying the two following conditions. \begin{enumerate}
\item Having a high tracking performance and disturbance rejection
To achieve this goal, we need the controller to provide a short settling time (about $0.3\,s$) and a small overshoot (about $10^{-6}\,rad$).
\item Avoiding actuator saturation
Due to the limitations of the motor, the control effort should be in a feasible range. In addition, actuator saturation may cause instability of the system. \end{enumerate} Briefly, the problem is finding a controller providing the stability for the quadrotor by striking a balance between robustness and performance of the system, in the presence of uncertainty and the actuator saturation.
\subsection{Experimental setup} The quadrotor used in the experiments (Fig \ref{fig2a}) is based on a typical quadrotor design with some structural modifications. The power management circuit board and the carbon fiber tubes are used as the frame hub and motors arm, respectively. The main goal in the design of this quadrotor is achieving a compact integration of the mechanical structure and electronic devices in order to reduce the weight while maintaining a symmetric mass distribution with a center of gravity close to the center of the cross. The vehicle propeller-tip to propeller-tip distance is $38 \,cm$. This robot, embracing all the modules, sensors, battery, motors, and props has a mass of $570\,g$, and provides a total thrust of 850 g. The flight time is approximately 9 minutes with a 3-cell $1300\, mAh$ Li-Po battery.
Three main units which play significant roles in the control of the robot are explained below, and the block diagram of their structure and interconnections are illustrated in Fig. \ref{fig2b}.
\textbf{\em{Embedded electronics:}} The robot configuration consists of a flight control, Inertial Measurement Unit (IMU), radio control unit, telemetry module, and external IO. The flight control board, which is the main board of the robot, runs on an ARM STM32F407 micro-controller, which can operate up to $168\, MHz$. Other units interface with this controller via I2C and UART protocols. Each brushless motor has its own ESC (Electronic Speed Control) unit, which is connected to the main board. PWM (Pulse-Width Modulation) is used to communicate with ESC at the speed of $400\, Hz$, and the main board sends control signals to ESC to adjust the rotational speed of the motor.
\textbf{\em{Inertial Measurement Unit (IMU):}} The IMU utilized in this quadrotor is MPU-6050, which contains a 3-axis gyroscope, 3-axis accelerometer, 3-axis magnetometer. The main board communicates with the MPU-6050 via I2C bus and runs a sensor fusion algorithm with the frequency of $500 \,Hz$. The quadrotor attitude is computed by using Mahony filter algorithm \cite{mahony2008nonlinear}, which estimates the absolute roll and pitch angles via fusing the acceleration vector and angular speeds measured by the gyroscopes. The angular speed which is measured by the gyroscope associated with yaw axis is used for estimating the yaw angle.
\textbf{\em{Wireless communication systems:}} We established two types of system to support real time communications between the flying quadrotor and our personal computer as a ground control system (GCS): a digital radio telemetry unit ($915\,MHz$) and an analog radio link ($2.4\,GHz$). We use NRF modules as a digital radio telemetry for data acquisition. The telemetry wireless interfaces with the main board using a UART serial protocol. Necessary flight data can be either saved on the internal memory of the micro-controller or sent to the GCS at rates of up to $115200\, bps$ using telemetry communication. Futaba remote controller is used as the analog radio link. The radio control (RC) receiver is connected to the main board through the SBUS protocol that allows to receive flight commands at the speed of $100 \,K bps$.
\begin{figure}
\caption{Quadrotor used in this paper}
\label{fig2a}
\caption{Block diagram of the quadrotor's unit interconnections}
\label{fig2b}
\caption{Experimental setup }
\label{fig2}
\end{figure}
\subsection{System identification} The problem of system identification of an aircraft or rotorcraft is a topic of interest and has been widely studied. In most of the studies, the system identification methods are used to obtain parameter values in the model of the system. However, in this paper, a black-box system identification, merely based on input and output experimental data, is performed.
The first and basic step in the identification phase is selecting a proper input for applying to the system. In the frequency-domain identification, which is desired in this paper, a periodic excitation input is much preferred because it helps to cover and sweep the desired frequency spectrum effectively. The excitation inputs are better to be applied automatically (e.g programmed on the microcontroller) rather than manually (e.g radio controller). Thus, in order to obtain a continuous time model, a frequency-domain identification method is performed in which periodic chirp functions with different frequencies and amplitudes are implemented on the on-board micro-controller, and are used as exciting inputs. During the experiments, the identification for the pitch and roll angles are executed separately, and the quadrotor is supposed to track the reference chirp input. For example, if the chirp signal is selected as a reference input for the roll angle, the quadrotor uses the implemented simple PID controller for the roll angle to track the reference. Setting proper frequencies and amplitudes for chirp functions is important, because an improper frequency or amplitude, which are beyond the quadrotor ability to track, may cause inappropriate tracking and produces misleading information for identification. In this identification, the frequency and amplitude range vary in $\left[0.05 Hz,5 Hz\right]$ and $\left[5^\circ,11^\circ\right]$, respectively, which are feasible and reasonable pitch and roll angles.
Prior to the identification phase, the probable existence of delay is studied using correlation analysis method, and according to its results, the delay of the system is negligible. Next, the effect of exciting the roll angle on the pitch angle, and vice versa, is analyzed. In Fig. \ref{fig231}, the pitch angle, the desired reference pitch angle, and the roll angle are plotted, for one of the experiments. As shown in this figure, the coupling is negligible and the roll angle is almost independent of the pitch angle, thus their coupling can be considered as an unmodelled system uncertainty. The same condition holds for the pitch angle as well (Fig. \ref{fig232}). As a result, off-diagonal elements of the system model can be considered zero, which implies that the system can be represented by two SISO subsystems. Although this assumption may make the controller task more difficult at high frequencies, the controller design becomes much easier.
\begin{figure}
\caption{Coupling effect of the pitch on the roll}
\label{fig231}
\caption{Coupling effect of the roll on the pitch}
\label{fig232}
\caption{The coupling effects}
\label{fig23}
\end{figure}
After performing several experiments for both pitch and roll angles, a linear transfer function should be fitted for each of them to obtain the stable closed-loop system model. We use MATLAB identification toolbox for obtaining continuous-time transfer function using time-domain data. In this step, it is important to select a proper number of poles and zeros. For this purpose, in each identification, we use different combinations of zeros and poles. However, to reduce the number of these combinations, the number of poles and zeros can be assumed close to the approximate analytical transfer function of the system, obtained in Eq. (\ref{eq26}). Finally, for the model identification, the number of poles and zeros are considered in the ranges of $\left[2,5\right]$ and $\left[0,3\right]$, respectively. Consequently, for each experiment, fifteen proper transfer functions are obtained, and one of them is selected as the best-fitted model. In order to select a transfer function, fitness percentage of different fitted models has a decisive role. However, there are two other factors which must be considered. First, it is preferred that the selected transfer functions have similar frequency responses, meaning that the zero/pole numbers should be close to each other. Second, the transfer functions with right half-plane zeros are not selected because the quadrotor system does not show the characteristics of a system with right half-plane zeros.
Based on the obtained transfer functions, for the pitch angle, the real part of poles mainly lie in the range of $\left[-6,0\right]$, and for the roll angle, real part of poles are in the ranges of $\left[-7,0\right]$ and $\left[-10,-8\right]$. In both cases, the zeros lie in the range of $\left[-1,1\right]$. These zeros, which are located near the imaginary axis, mostly appear in pairs with poles very close to them, and therefore, they may be ignored due to the zero-pole cancellation.
By analyzing the obtained transfer functions according to the three aforementioned criteria, it is observed that in the most cases, transfer functions with three poles and no zero can suitably describe the quadrotor system behavior. Figure \ref{fig261} and \ref{fig262} show the histogram of the real part of poles in the transfer functions of the pitch and roll angles, with three poles and no zero, respectively. According to these figures, it is expected that poles lie in the range of $\left[-7, -3\right]$ and $\left[-10, -8\right]$. In Fig. \ref{fig27}, the Bode diagram of the transfer functions obtained for the pitch and roll angle are depicted. It is clear from Fig. \ref{fig27} that the behavior of the transfer functions is similar, especially in frequencies less than $15 Hz$ where the robot mostly operates in. Now, by having the system transfer functions pertain to both axes of pitch and roll, we select one of them as the nominal transfer function. Hence, the transfer function which lies near to the median of all responses, is considered as the nominal model. The selected nominal model transfer function for the pitch and roll angle are as follows: \begin{align} G_{pitch}\left(s\right)=\frac{1547.4}{\left(s+5.373\right)\left(s^2+10.12s+390.4\right)},\label{eq27}\\ G_{roll}\left(s\right)=\frac{2049.8}{\left(s+6.764\right)\left(s^2+19.03s+426.2\right)}. \end{align}
\begin{figure}
\caption{Pitch}
\label{fig261}
\caption{Roll}
\label{fig262}
\caption{Distribution of poles in transfer functions with 3 poles and no zero}
\label{fig26}
\end{figure}
\begin{figure}
\caption{Pitch}
\label{fig271}
\caption{Roll}
\label{fig272}
\caption{Bode diagram of selected transfer functions in the experiments}
\label{fig27}
\end{figure} In order to verify the nominal models, the real experimental data (dashed) and the time-domain response (solid) are depicted in Fig. \ref{fig281} and \ref{fig282}. For the sake of brevity, only two of the experiment results are shown. In these figures, the applied inputs to the nominal model are the same as the real experiments. \begin{figure}
\caption{Pitch}
\label{fig281}
\caption{Roll}
\label{fig282}
\caption{Comparison of transfer functions time-response and real data (for the same input)}
\label{fig28}
\end{figure}
\subsection{Uncertainty weighting function} As mentioned before, based on Eq. (\ref{eq21}), $W$ is a fixed transfer function for normalizing $\Delta$. In other words, $W\Delta$ expresses the normalized system variation away from unity at each frequency, and based on Eq. (\ref{eq21}), may be obtained by $G\left(jw\right)/G_0\left(jw\right)-1=\Delta\left(jw\right) W\left(jw\right)$. Since $\lVert\Delta\rVert_\infty<1$, the following can be concluded, which provides a practical approach for obtaining the uncertainty weighting function: \begin{equation} \lvert\frac{G\left(jw\right)}{G_0\left(jw\right)}-1\rvert \leq\lvert W\left(jw\right)\rvert \qquad \forall w. \end{equation}
After selecting a nominal model between all best-fitted transfer functions, other systems can be considered as a perturbed model of the nominal system, and $\lvert G\left(jw\right)/G_0\left(jw\right)-1 \rvert$ is plotted for each experiment (as illustrated in Fig. \ref{fig29}). As a result, by estimating a transfer function, which is an upper bound for those variations, the uncertainty weighting function (the dashed red line in Fig. \ref{fig291} and \ref{fig292}) is acquired. The uncertainty weighting functions for pitch and roll angles are obtained as follows: \begin{align} W_{pitch}=\frac{1659.6\left(s^2+2.868s+60.44\right)}{\left(s+2.477e04\right)\left(s+9.678\right)},\\ W_{roll}=\frac{1.9017\left(s^2+3.813s+91.61\right)}{s^2+43.53s+545.3}. \end{align}
The above-mentioned steps for obtaining the nominal models of the systems and the uncertainty weighting functions are summarized below: \begin{enumerate}[
leftmargin=*,
label={\textbf{\textit{Step \arabic*.}}}
]
\item Designing appropriate experiments and obtaining 8 and 9 data packages for pitch and roll angles, respectively.
\item Fitting 15 transfer functions for each data package.
\item Selecting the best-fitted transfer function for each data package.
\item Selecting the nominal models among 8 and 9 transfer functions obtained for pitch and roll angles.
\item Considering the other transfer functions as perturbed models, obtaining the $\lvert G\left(jw\right)/G_0\left(jw\right)-1 \rvert$s, and plotting them for roll and pitch angles.
\item Obtaining the uncertainty weighting functions, which are an upper bound for the variations obtained in {\textbf{\textit{Step 5}}}, for
each roll and pitch angle. \end{enumerate}
\begin{figure}
\caption{Pitch}
\label{fig291}
\caption{Roll}
\label{fig292}
\caption{Multiplicative uncertainty profiles}
\label{fig29}
\end{figure}
\section{Robust ${H}_\infty$ Control Synthesis}
The goal of the ${H}_\infty$ controller is to provide a robust, stable closed-loop system with high performance tracking and disturbance rejection, in the presence of uncertainty and the actuator saturation. Based on Fig. \ref{fig31}, in order to achieve this goal the following objectives shall be met simultaneously: \begin{enumerate} \item $\lVert T_{z_1y_d}\rVert_\infty=\lVert W_sS\rVert_\infty\le 1$, where $T_{z_1y_d}=\frac{W_s}{1+CP}=W_sS$ is the transfer function from $y_d$ to $z_1$, which is the nominal tracking performance in an asymptotic sense. $W_s$ is a frequency dependent sensitivity weighting function for normalizing and shaping the closed loop performance specification. \item $\lVert T_{z_2y_d}\rVert_\infty=\lVert W_uU\rVert_\infty\le 1$, in which $T_{z_2y_d}=\frac{W_uC}{1+CP}=W_uCS=W_uU$ is the transfer function from $y_d$ to $z_2$, and expresses the nominal performance of the control effort. $W_u$ is a frequency dependent weighting function for normalizing and shaping the control input. \item $\lVert T_{z_3y_d}\rVert_\infty=\lVert WT\rVert_\infty\le 1$, which is the result of the small gain theorem. $T_{z_3y_d}=\frac{WCP}{1+CP}=WT$ is the transfer function from $y_d$ to $z_3$, and $W$ represents the uncertainty weighting function. \end{enumerate} Considering this system with input $y_d$ and output vector $\mathbf{z}=[z_1, z_2, z_3]^T$, the aforementioned conditions can be merged into an induced norm of the transfer function from $y_d$ to $\mathbf{z}$, in which the goal is to find a controller to minimize this norm. This problem is called a mixed-sensitivity problem and is formulated as follows: \begin{equation} \gamma_{opt}= \min_{C}{\left\lVert T_{zy_d} \right\rVert_\infty}=\min_{C}{\left\lVert\begin{matrix}
W_sS \\
W_uU \\
WT
\end{matrix}\right\rVert_\infty} . \end{equation} \begin{figure}
\caption{Block diagram of closed-loop system as a generalized regulator problem in $H_\infty$ framework}
\label{fig31}
\end{figure} In order to define the sensitivity weighting function, first an ideal closed-loop transfer function is designed. In this case, based on the standard second order systems, $\omega_n$ and $\zeta$ are chosen such that the following overshoot and the settling time are satisfied \begin{align} &T_{cl}=\frac{\omega_n^2}{s^2+2\zeta \omega_ns+\omega_n^2},\\ &t_s\approx\frac{4}{\zeta\omega_n}=0.3,\\ &M_p=e^{\frac{-\pi\zeta} {\sqrt{1-\zeta^2}}}=10^{-6}, \qquad 0<\zeta<1 . \end{align} To achieve $W_s$, we use the inverse of the desired sensitivity function: \begin{align} &T_{id}=\frac{247.3}{s^2+30.67s+247.3}\label{tid},\\ &W_s=a\frac{1}{S_{id}}=a\frac{1}{1-T_{id}}=a\frac{s^2+30.67s+247.3}{s\left(s+30.67\right)} \label{eq317}, \end{align} where $a\le1$ is a tuning parameter. Eq. (\ref{eq317}) is modified by inserting a non-dominant pole and by slightly transferring its purely imaginary pole to the left half plane in order to convert $W_s$ to a strictly proper and stable weighting function, respectively. Hence, $W_s$ is corrected as: \begin{equation}\label{eq318} W_s=a\frac{\left(s^2+30.67s+247.3\right)}{\left(s+1000\right)\left(s+30.67\right)\left(s+0.001\right)}. \end{equation}
In the obtained nominal system, Eq. (\ref{eq27}), the input of the system is the desired angle, which should be less than $20^\circ$. Therefore, $W_u$ is set to $0.05$. Having set the weighting functions, the mixed sensitivity problem can be solved using the robust toolbox of MATLAB. After a few correction steps, the controller for the pitch angle by tuning $a=0.88$ with $\gamma_{opt}=0.9929$ is synthesized as follows: \begin{equation*}\resizebox{0.98\hsize}{!}{$ C_{pitch}\left(s\right)=\frac{3.3227e05\left(s+ 2.477e04\right)\left(s+ 461.2\right)\left(s +50.77\right)\left(s+ 9.678\right)\left(s +5.373\right)\left(s^2+10.12s+ 390.4\right)} {\left(s+2.477e04\right) \left(s+1.673e04\right) \left(s+1000 \right)\left(s+30.67\right) \left(s+8.082\right) \left(s+0.001\right) \left(s^2+80.13s + 3556\right)}. $}\end{equation*}
The singular values of the closed-loop system, and the frequency response of each I/O pair are depicted in Fig. \ref{fig32}. The solid blue line shows the maximum singular value of the closed loop system, which is flat within a large frequency range, and is less than one in all range of the frequency domain. The dotted orange line shows the Bode diagram of $WT$ in which the maximum value is 0.42 indicating the robustness of the closed loop system with a margin of greater than two. The performance transfer function and the control effort transfer function are shown by the red and green lines, respectively. Based on Fig. \ref{fig32}, the magnitude of $WT$ starts to reduce significantly in frequencies higher than about 30 Hz due to the limited bandwidth of the system. About this frequency, a slight decrease also appears in the magnitude of $W_sS$ until roughly 900 Hz where a dramatic drop occurs. Conversely, the magnitude of $ W_uU$ increases in these frequencies to compensate for the lack of the system stability and performance. However, since the robot working frequency is less than approximately 20 Hz, high frequencies performance of the system can be ignored. In low frequencies, as expected, the magnitude of $W_sS$ increases when that of $WT$ decreases. \begin{figure}
\caption{The closed loop system singular values and Bode plot of $W_sS$, $ W_uU$, $WT$ (Pitch)}
\label{fig32}
\end{figure}
As mentioned in section \ref{sec2}, a PD controller has been employed to identify the system. Therefore, to implement the final controller a cascade architecture must be used in which the PD controller is in the inner loop, and the $H_\infty$ controller is functioning in the outer loop as illustrated in Fig. \ref{fig36}. The closed-loop step response of the nominal (red) and some uncertain samples of the system are shown in Fig. \ref{fig331}. It can be seen that the controller can robustly stabilize the system and provide a fast response with a reasonable control effort, which is depicted in Fig. \ref{fig332}. \begin{figure}
\caption{Block diagram of the control structure}
\label{fig36}
\end{figure} \begin{figure}
\caption{Time response}
\label{fig331}
\caption{Control effort}
\label{fig332}
\caption{Closed-loop control of nominal and uncertain systems for the unit-step input (Pitch)}
\label{fig33}
\end{figure}
The same procedure is applied to obtain the $H_\infty$ controller for the roll angle. $W_u$ is set to 0.05, and $W_s$ is the same as that of the pitch angle, in Eq. (\ref{eq318}). After solving the mixed-sensitivity problem, the $\gamma_{opt}$ value is obtained as 0.9925, for $a = 0.92$ and the following controller: \begin{equation*}\resizebox{0.98\hsize}{!}{$ C_{roll}\left(s\right)=\frac {4.3173e06 \left(s+371.5\right) \left(s+59.89\right) \left(s+6.764\right) \left(s^2 + 43.53s + 545.3\right)\left(s^2 + 19.03s + 426.2\right)} {\left(s+2.175e05\right) \left(s+1000\right) \left(s+30.67\right) \left(s+27.95\right) \left(s+19.41\right) \left(s+0.001\right)\left(s^2 + 80.99s + 3749\right)} . $}\end{equation*} The singular values of the closed-loop system and the frequency response of each I/O pair are illustrated in Fig. \ref{fig34}. The analysis of this plot is the same as that of the pitch angle, which is omitted for the sake of brevity. The step responses of the nominal and perturbed systems in the presence of the $H_\infty$ controller are depicted in Fig. \ref{fig351}. The control efforts are also shown in Fig. \ref{fig352}. \begin{figure}
\caption{The closed loop system singular values and Bode plot of $W_sS$, $ W_uU$, $WT$ (Roll)}
\label{fig34}
\end{figure} \begin{figure}
\caption{Time response}
\label{fig351}
\caption{Control effort}
\label{fig352}
\caption{Closed-loop control of nominal and uncertain systems for the unit-step input (Roll)}
\label{fig35}
\end{figure}
\section{Experimental results} \subsection{Real Implementation} In this section, we verify the performance and the applicability of the controllers which were designed in the preceding section by implementing on the real robot. The control structure is the same as Fig. \ref{fig36} for both pitch and roll angles. By setting the time-step of the system to 0.001, the continuous controllers are discretized with the frequency of $1\, kHz$ for the practical implementation. To make this task easier, an order reduction is performed that decreases the order of the controllers from 8 to 6. Finally, the controllers for both axes after applying the order reduction are given by: \begin{align*} &C_{pitch}=\frac{3.3227e05 (s+461.5) (s+45.87) (s+5.903) (s^2 + 9.836s + 388.1)}{(s+1.673e04) (s+1000) (s+25.13) (s+0.001)(s^2 + 79.66s + 3577)},\\ &C_{roll}=\frac{4.3173e06 (s+371.8) (s+53.33) (s+7.035) (s^2 + 18.34s + 428.1)}{(s+2.175e05) (s+1000) (s+27.87) (s+0.001) (s^2 + 80.84s + 3820)}. \end{align*} Based on the frequency response of each I/O pair, this reduction has a negligible effect.
Having implemented the controllers, some tests are performed in the outdoor and indoor environments to evaluate the $H_{\infty}$ controller ability in coping with uncertainties. In the first experiment, the operator drives the quadrotor using a radio-controller to assess the qualitative performance of the robot in comparison with a well-tuned PID controller. The tuning parameters of the PID controller for both roll and pitch angles are selected as follows: \begin{equation*} K_p=2.6, K_i=0.2, K_d=0.65 \end{equation*}
According to this experience, the maneuverability has been enhanced, and the quadrotor can quickly regulate the roll and the pitch angle in the presence of the external disturbances such as the unexpected wind effects (i.e. caused by the drone itself while flying near the walls or the weather) or some manual disturbances applied by the operator. Since the states of the quadrotor are coupled, a refinement on the altitude control of the robot is also observed. The video of this experiment is also available online\footnote{\href{https://youtu.be/vAi4x2_XSQQ}{https://youtu.be/vAi4x2\_XSQQ}}.
To evaluate the robustness, stability and tracking performance of the proposed controllers, an experiment is done in which the quadrotor should track the reference input in the outdoor environment, in the presence of light wind. Fig. \ref{fig411} shows the tracking performance of the pitch controller and the ability of the roll controller in regulating the roll angle close to zero. In addition, as it is shown by data-tip in Fig. \ref{fig411}, the performance of the system (e.g. settling time and overshoot) is close to that of the desired transfer function obtained in Eq. (\ref{tid}). The performance of the well-tuned PID controller is also depicted in Fig \ref{fig412}. According to Fig. \ref{fig41}, the $H_{\infty}$ controller has significantly enhanced the performance of the system.
\begin{figure}
\caption{Robust $H_{\infty}$ controller}
\label{fig411}
\caption{PID controller}
\label{fig412}
\caption{Tracking and regulating performance of $H_{\infty}$ and PID controllers}
\label{fig41}
\end{figure}
\subsection{Simulation Results} After validating the practicability and efficiency of the proposed controllers through real experiments, in what follows, the performance of the controllers are evaluated in the simulation and is compared with a well-tuned PID and another robust controller obtained by $\mu$-synthesis method. The robust stability and robust performance of the controllers are also analyzed using the structural singular values. The gains of PID controller are as below: \begin{equation*} \begin{aligned} &Roll: \; &&K_p=1.18, K_i=10.6, K_d=0.0329\\ &Pitch: \; &&K_p=1.01, K_i=10.2, K_d=0.0132 \end{aligned} \end{equation*}
Fig. \ref{fig421} illustrates the step-response of an uncertain system in the presence of the disturbances and uniform random sensor noise. The amplitude of the disturbances is $0.1$, and the maximum value of the random sensor noise is $0.02$. Fig. \ref{fig422} depicts the roll angle performance in a similar experiment. Based on these figures, the $H_{\infty}$ controller's overshoots for the pitch and roll systems are about $1\%$ and $0.1\%$, respectively, which are less than those of the PID and $\mu$ synthesis controllers, which are approximately $10\%$. In addition, the $H_{\infty}$ controller regulates the system faster, and its settling time is roughly $0.35\, sec$. The settling times of both PID and $\mu$ synthesis controllers are about $1\,sec$. The $H_{\infty}$ controller also has better performance when disturbances occur.
\begin{figure}
\caption{Pitch}
\label{fig421}
\caption{Roll}
\label{fig422}
\caption{$H_{\infty}$, PID and $\mu$ synthesis controllers performance in the presence of disturbance and sensor noise}
\label{fig42}
\end{figure}
In Fig. \ref{fig43}, the robust stability of the $H_{\infty}$, $\mu$ synthesis, and PID controllers is evaluated by structural singular values. As expected, the $H_{\infty}$ and $\mu$ synthesis controllers fulfill the robust stability condition and their $\mu$ values are less than $1$. The robust stability condition is also met for the PID controller; however, at frequencies around $20 \, rad/sec$ its $\mu$ value gets close to $1$. Moreover, when the PID parameters are set in such a way that the system has a faster response, the robust stability will be lost. The robust performance analysis of the roll and pitch systems using structural singular values is depicted in Fig. \ref{fig44}. Predictably, the $\mu$ synthesis controller offers the robust performance in contrast to the $H_{\infty}$ and PID controllers. The $\mu$ synthesis controller is a conservative approach to achieving robust performance, which results in poor time-domain performance in comparison with the $H_{\infty}$ controller. Moreover, the $\mu$ synthesis method produces a high-order controller, which makes its implementation on a real robot challenging \cite{mystkowski2013robust}.
\begin{figure}
\caption{Pitch}
\label{fig431}
\caption{Roll}
\label{fig432}
\caption{Robust stability of $H_{\infty}$, PID and $\mu$ synthesis controllers }
\label{fig43}
\end{figure}
\begin{figure}
\caption{Pitch}
\label{fig441}
\caption{Roll}
\label{fig442}
\caption{Robust performance of $H_{\infty}$, PID and $\mu$ synthesis controllers }
\label{fig44}
\end{figure}
\section{Conclusion} This research has studied the robust attitude control of a quadrotor, taking into account the uncertainty and input constraints. Since an analytical model and the parameters of the system are not available, a continuous-time black-box model identification based on the real sampled data is adopted to acquire both the nominal linear model of the system and the uncertainty model encapsulating the deviation of the nonlinear system from the nominal model. Thus, the essential prerequisites for applying the robust $H_{\infty}$ controller are provided. By solving the mixed-sensitivity problem, a robust stabilizing $H_{\infty}$ controller is obtained which satisfies tracking, disturbance attenuation and input saturation objectives. Having accomplished that, the pitch and roll controllers are implemented on the robot, and qualitative and quantitative experiments are performed to verify the performance of the $H_{\infty}$ controllers. According to both experimental and simulation results, the controllers successfully fulfill the aforementioned objectives. Finally, by comparing the performance of the proposed controllers with PID and $\mu$ synthesis controllers, it is demonstrated that the $H_{\infty}$ controller has a better performance.
\end{document} |
\begin{document}
\title{Weak periodic solutions and numerical case studies of the Fornberg-Whitham equation } \author{ G\"unther H\"ormann\thanks{Faculty of Mathematics, University of Vienna, A-1090 Wien, Austria} ~~\&~~ Hisashi Okamoto\thanks{ Dept. of Math., Gakushuin University, Tokyo, 171-8588. Partially supported by JSPS Kakenhi 18H01137. The present work was initiated during HO's stay in the Erwin Schr\"odinger
Institute in Vienna, Austria. Its support is highly appreciated. He also acknowledges the support of JSPS A3 foresight program: Modeling and Simulation of Hierarchical and
Heterogeneous Flow Systems with Applications to Materials Science. } }
\maketitle
\begin{abstract} Spatially periodic solutions of the Fornberg-Whitham equation are studied to
illustrate the mechanism of wave breaking and the formation of shocks for a large class of initial data.
We show that these solutions can be considered to be weak solutions satisfying the entropy condition. By numerical experiments, we show that the breaking waves become shock-wave type in the time evolution. \end{abstract}
\section{Introduction}
We denote by $\mathbb{T} = \mathbb{R} / \mathbb{Z}$ the one-dimensional torus group and identify functions on $\mathbb{T}$ with $1$-periodic functions on $\mathbb{R}$. The Fornberg-Whitham
equation for the wave height $u \colon \mathbb{T} \times [0,\infty[ \to \mathbb{R}$ as a
function of a spatial variable $x \in \mathbb{T}$ and time $t \geq 0$ reads \begin{equation} u_t + uu_x + \left( 1 - \partial_x^2 \right)^{-1} u_x = 0 \qquad ( x \in \mathbb{T}, t > 0) \label{eq:fw01} \end{equation} and is supplied with the initial condition \begin{equation}\label{eq:fw02} u(x,0) = u_0(x) \qquad (x \in \mathbb{T}). \end{equation}
Well-posedness results, local in time, with strong solutions in Sobolev and Besov
spaces have been obtained for both cases, periodic and non-periodic, in \cite{Holmes16, HolTho17}. We cannot always expect globally defined strong solutions. This was predicted with a sketch of proof by \cite{seliger} and later proved rigorously in \cite{CE, Haziot17, hoermann}. These results proved the existence of wave-breaking, i.e., that the solution $u$ remains bounded but $u_x$ becomes singular,
if the initial data displays sufficient asymmetry in its slope. However, two questions seem to have remained unanswered: (i) What can we say about the solution after a singularity has emerged? (ii) What is the nature of the singularity?
Our first goal is to prove existence of globally defined weak solutions which extend beyond the time of wave-breaking and singularity formation. The second goal of the present paper is to show numerically that the singularity
developing in such a solution looks very similar to a shock-wave solution of
the inviscid Burgers equation. We emphasize that for the case of the real line in place of the torus, similar investigations have been carried out in \cite{FS},
where the Fornberg-Whitham equation was formulated as and named Burgers-Poisson system (hence we have been unaware of that paper until almost completion of our current paper).
\section{Weak entropy solutions}
A well-known machinery exists for weak solution concepts for nonlinear partial differential equations in the form of hyperbolic conservation laws, but our Equation \eqref{eq:fw01} involves a non-local term and the question is
whether or not this can be harmful to global existence. As we will prove, this is not a big hurdle.
In the literature on nonlinear conservation laws, one of the fundamental references is Kru\v{z}kov's classic paper \cite{K}, where he considered equations of the form $$
u_t + \left( \varphi(u) \right)_x + \psi(u) = 0 \qquad (x \in \mathbb{R}, t > 0). $$ He proved, under mild assumptions on the functions $\varphi$, $\psi$ and on the initial data $u_0$, that a weak solution exists globally in time and that it is unique in an appropriate class of functions. His setting is different from ours in two respects: First, we have $x$ belonging to the one-dimensional torus, while Kru\v{z}kov described the case with $x$ on the real line. Second, in Kru\v{z}kov's paper
$\phi$ and $\psi$ are ordinary functions, while our equation involves a non-local dependence on $u$ in $\psi$. The first difference causes no problem.
The second issue indeed forces us to alter some of the technicalities along the way, but a close examination of the very lucid presentation of the proofs in \cite{K} reveals that the essence of most arguments can be applied to
our equation with only slight modifications, which we will indicate in the sequel.
Note first that we may write $(1 - \partial_x^2)^{-1} u_x = K \ast u_x = K' \ast u$, where the convolution is in the $x$-variable only and the kernel function $K \colon \mathbb{T} \to \mathbb{R}$ is given by $K(x) = (e^x + e^{1-x})/(2 (e-1)) = \frac{\sqrt{e}}{e-1}
\cosh(x - \frac{1}{2}) $ for
$0 \leq x \leq 1$ (see, e.g., \cite[Section 3]{hoermann}). Note that $K$ is continuous but is not $C^1$ on $\mathbb{T}$. Note also that the derivative $K'$ is not continuous but is bounded.
We will relate to Kru\v{z}kov's notation in \cite{K} as closely as possible, but will switch the function arguments from $(t,x)$ to $(x,t)$ and occasionally write $u(t)$ to denote the function $x \mapsto u(x,t)$ with a frozen $t$. Compared with the main terms of the equation as labeled by Kru\v{z}kov we set $$
\varphi(u(x,t)) = u(x,t)^2/2 $$ and $$
(\psi u)(x,t) = \big((1 - \partial_x^2)^{-1} u_x(.,t)\big)(x) = (K' \ast u(.,t))(x). $$ The term involving $\varphi$ conforms perfectly with the specifications from \cite{K} and requires no extra consideration at all. For the linear, but non-local, term given by the operator $\psi$ we will describe below suitable adaptations in the proof of uniqueness and make note of an alternative a priori estimate in the proof of existence. Overall in our
situation, spatial periodicity, i.e., the compactness of $\mathbb{T}$, simplifies several
estimates along the way in following the various proofs of key results in \cite{K}. Moreover, we have $L^\infty(\mathbb{T}) \hookrightarrow L^1(\mathbb{T})$.
We use the convolution representation $K' \ast u$ in place of Kru\v{z}kov's term $\psi(u)$ in the following definition of the weak solution concept (where we also incorporate the initial condition into the basic inequality). \begin{definition} Let $u_0 \in L^\infty(\mathbb{T})$ and $T > 0$.
A function $u \in L^\infty(\mathbb{T} \times [0,T])$ is called a weak entropy solution of \eqref{eq:fw01}--\eqref{eq:fw02} if the following \eqref{eqn:entropy-sol} holds true:
\begin{align}
0 &\leq \int_{0}^{T} \int_{\mathbb{T}} \bigg(
|u(x,t) - \lambda| \partial_t \phi(x,t) + \sgn( u(x,t)-\lambda) \frac{u^2(x,t) - \lambda^2}{2}\partial_x \phi(x,t)\nonumber\\
&-\sgn(u(x,t)-\lambda)K'*(u(\cdot,t)-\lambda) (x) \phi(x,t)
\bigg) \,dx \,dt
+ \int_{\mathbb{T}} |u_0(x) - \lambda|\phi(x,0)\,dx
\label{eqn:entropy-sol}
\end{align}
for any $\lambda \in \mathbb{R}$ and for any nonnegative $C^1$-function $\phi$ of compact support in $\mathbb{T} \times \mathbb{R}$. \end{definition}
As is well-known, upon putting $\lambda = \pm \sup |u(x,t)|$ we may deduce that a weak
entropy solution is also a weak (distributional) solution in the sense that in
the integro-differential equation \eqref{eq:fw01} the term $u u_x$ may be interpreted as $\partial_x (u^2)/2$, since $u \in L^\infty$. Thus, we have $$
\text{\ div}_{(x,t)} \begin{pmatrix} u \\ u^2/2 \end{pmatrix} =\partial_t u + \partial_x \big(\frac{u^2}{2}\big) = - K' \ast u + u_0 \otimes \delta
\qquad \text{in } \mathcal{D}'(\mathbb{T} \times ]-T,T[), $$ if we extend $u$ by setting it to $0$ in $\mathbb{T} \times ]-T,0[$. We may therefore
call on Lemma 1.3.3 and the discussion of the weak solution concept in Section 4.3 in \cite{Dafermos}: Upon possibly changing $u$ on a null-set
we may assume that $t \mapsto u(t)$ is continuous $[0,T] \to L^\infty(\mathbb{T})$ with respect to the weak$^*$ topology on $L^\infty(\mathbb{T})$ and we have, in particular, $$
\lim_{t \downarrow 0} \| u(t) - u_0 \|_{L^1} = 0, $$ as required originally by Kru\v{z}kov in \cite[Definition 1]{K}.
\subsection{Uniqueness} We first show that the solution of \eqref{eqn:entropy-sol} is unique. Existence will be proved later.
To show uniqueness of weak entropy solution we need an adaptation of \cite[Theorem 1]{K}---noting that for large $R$ we simply have $K = [0,T_0]
\times \mathbb{T}$ and $S_\tau = \mathbb{T}$ in that statement---and of its proof to our
situation with the non-local linear term $\psi u = K' \ast u$. This requires an appropriate replacement of the constant $\gamma$ in \cite[Equation (3.1)]{K} and an alternative argument in the course of the
proof of \cite[Theorem 1]{K}, namely on the lines following \cite[Equation (3.12)]{K} concerning the term $I_4$ in Kru\v{z}kov's notation (defined there in \cite[Equation (3.4)]{K}), since in our case we cannot directly have a pointwise estimate calling on
the mean value theorem for a classical differentiable function $\psi$. Instead, with two weak solutions $u$ and $v$ with initial values $u_0$ and $v_0$, respectively, we obviously have $$
\sup_{x \in \mathbb{T}} |K'\ast( u(\cdot, t) - v(\cdot, t))(x)| \leq \|K'\|_{L^\infty(\mathbb{T})}
\int_{\mathbb{T}} |u(y,t) - v(y,t)| dy
= \|K'\|_{L^\infty(\mathbb{T})} \| u(t) - v(t)\|_{L^1(\mathbb{T})}, $$ which implies the following replacement of the next to last inequality on page 228 in \cite{K} (with mollifier $\delta_h$ and cut-off $\chi_\varepsilon$
as chosen by Kru\v{z}kov) \begin{multline*}
\int_0^{T_0} \int_{\mathbb{T}} \Big[ \big( \delta_h(t - \rho) - \delta_h(t - \tau) \big) \chi_\varepsilon(x,t)
|u(x,t) - v(x,t)|
\\ + \|K'\|_{L^\infty(\mathbb{T})} \chi_\varepsilon(x,t) \| u(t) - v(t)\|_{L^1(\mathbb{T})} \Big] dx dt \geq 0. \end{multline*} Therefore, we replace the inequality stretching in \cite{K} from the bottom of page 228 to the top of page 229 by \begin{multline*}
\mu(\tau) := \int_{\mathbb{T}} |u(x,\tau) - v(x,\tau)| dx \leq\\ \int_{\mathbb{T}} |u(x,\rho) - v(x,\rho)| dx +
\|K'\|_{L^\infty(\mathbb{T})} \int_\rho^\tau \int_\mathbb{T} \| u(t) - v(t)\|_{L^1(\mathbb{T})} dx dt \\
= \mu(\rho) + \|K'\|_{L^\infty(\mathbb{T})} \int_\rho^\tau \| u(t) - v(t)\|_{L^1(\mathbb{T})} dt. \end{multline*} Then, sending $\rho \to 0$, we arrive at $$
\| u(\tau) - v(\tau)\|_{L^1(\mathbb{T})} \leq \| u_0 - v_0\|_{L^1(\mathbb{T})} +
\|K'\|_{L^\infty(\mathbb{T})} \int_0^\tau \| u(t) - v(t)\|_{L^1(\mathbb{T})}. $$ (Here, the $L^1$-continuity of the weak solution is used.) Now Gronwall's inequality implies the following uniqueness result. \begin{theorem} For any $T > 0$, the weak solution
to \eqref{eq:fw01}--\eqref{eq:fw02} is unique in $\mathbb{T} \times [0,T]$.
More precisely, if $u$ and $v$ are weak solutions with initial values $u_0$ and $v_0$, respectively, then for every $t \in [0,T]$, $$
\int_{\mathbb{T}} |u(x,t) - v(x,t)| dx \leq e^{t \| K' \|_{\infty}} \int_{\mathbb{T}} |u_0(x) - v_0(x)| dx. $$ \end{theorem}
\subsection{Existence}
We will establish the global existence here under the condition that
$ u_0 \in L^{\infty}(\mathbb{T})$.
The key idea in \cite{K} is to consider a parabolic regularization of \eqref{eq:fw01} in the form \begin{equation} u_t + u u_x + \left( 1 - \partial_x^2 \right)^{-1} u_x = \varepsilon u_{xx} \label{eq:vis01} \end{equation} with a small parameter $ \varepsilon > 0$. Our aim is to show that, at least for a
subsequence of $\varepsilon \to 0$, the corresponding solutions (all with the same initial value $u_0$) converge strongly in $L^1$ and are uniformly bounded in $L^{\infty}$.
For any $\varepsilon > 0$, we will show that a strong solution $u$ to \eqref{eq:vis01} with initial value \begin{equation}\label{eq:vis01a}
u(0) = u_0 \in L^2(\mathbb{T}) \end{equation} exists uniquely and globally in time. Due to the nonlocal term, we cannot directly call on the same references that Kru\v{z}kov uses in \cite{K} on
page 231, but the desired result can be shown by a careful iteration of a standard contraction argument, which we describe in the Appendix. The solution $u$ of
\eqref{eq:vis01} and \eqref{eq:vis01a}
belongs to $ C([0,T]; L^2(\mathbb{T})) \cap C(]0,T] ; H^1(\mathbb{T}))$ and, in fact, is smooth if $t > 0$. Moreover, if $u_0 \in L^{\infty}(\mathbb{T}) \subset L^{2}(\mathbb{T})$, then
$ t \mapsto \| u(t) \|_{L^\infty}$ is continuous on $[0,T]$.
Let $T > 0$ be arbitrary and consider the unique solution of \eqref{eq:vis01}
and \eqref{eq:vis01a}. For every $\varepsilon > 0$ we denote now the solution of
\eqref{eq:vis01}--\eqref{eq:vis01a} by $u^{\varepsilon}$.
We now have to find an alternative way to obtain
Kru\v{z}kov's basic estimate (4.6) in \cite{K}, which he got from a direct application of the maximum principle. To arrive at our analogue of the basic estimate in \eqref{eq:maxprinc} below, we proceed as follows: Multiplying \eqref{eq:vis01} by $u^{\varepsilon}$ and integrating over the spatial variable $x$ yields $$
\int_\mathbb{T} u^{\varepsilon} u_t^{\varepsilon} dx + \int_\mathbb{T} (u^{\varepsilon})^2 u_x^{\varepsilon} dx +
\int_\mathbb{T} (\psi u^{\varepsilon}) u^{\varepsilon} dx = \varepsilon \int_\mathbb{T} u^{\varepsilon} u_{xx}^{\varepsilon} dx. $$ Noting that $u^{\varepsilon} \mapsto \psi u^{\varepsilon} = (1- \partial_x^2)^{-1} \partial_x u^{\varepsilon} = K' \ast u^{\varepsilon}$
is a skew-symmetric linear operator with respect to the standard inner product on $L^2(\mathbb{T})$, writing $u^{\varepsilon} u_t^{\varepsilon} = \partial_t (u^{\varepsilon})^2/2$
and $(u^{\varepsilon})^2 u_x^{\varepsilon} = \partial_x((u^{\varepsilon})^3)/3$, and integrating by parts on the right-hand side, yields (thanks to periodicity) \begin{equation} \frac{1}{2} \frac{d}{dt} \int_{\mathbb{T}} (u^{\varepsilon})^2 dx = - \epsilon \int_{\mathbb{T}} (u_x^{\varepsilon})^2 dx. \label{eq:vis02} \end{equation} In particular, we have for every $t \in [0,T]$ the a priori estimate \begin{equation}
\| u^{\varepsilon}(t) \|_{L^2(\mathbb{T})} \le \| u_0 \|_{L^2(\mathbb{T})}, \label{eq:vis03} \end{equation} which is independent of $\varepsilon$.
From \eqref{eq:vis03} and since $( 1 - \partial_x^2)^{-1}$ is a pseudodifferential operator of order $-2$, the $H^1$-norm of $v^\varepsilon(t) := ( 1 - \partial_x^2)^{-1} u^\varepsilon_x(t)$
is bounded by $c \| u_0 \|_{L^2(\mathbb{T})}$ for some constant $c$ independent of $\varepsilon$ and of $t$. Via the continuous imbedding $H^1(\mathbb{T})
\hookrightarrow L^\infty(\mathbb{T})$ we thus have $\| v^\varepsilon (t) \|_{L^{\infty}(\mathbb{T})}
\leq c' \| u_0 \|_{L^2(\mathbb{T})}$ for some constant $c'$ independent of
$\varepsilon$ and of $t$. We now interpret the original equation in the form $$
\partial_t u^\varepsilon (x,t) - \varepsilon \partial_x^2 u^\varepsilon(x,t) + u^\varepsilon(x,t)
\partial_x u^\varepsilon(x,t) = - v^\varepsilon(x,t) $$ and apply the theorem on page 230 in \cite{J} to obtain $$
|u^\varepsilon(x,t) | \le \| u^\varepsilon(\cdot,s) \|_{L^{\infty}} + (T-s) \sup_{s\le \tau \le T, y \in
\mathbb{T}} | v^\varepsilon(y,\tau)| $$ for all $ 0 < s \le t \le T$. (Here we regard the factor $u^\varepsilon(x,t)$ in the term $ u^\varepsilon(x,t) \partial_x u^\varepsilon(x,t)$ as a coefficient to the first order derivative and also note that in our case
the zero order coefficient vanishes. Although the text in \cite{J}
assumes more regularity of the coefficients and data, continuity is sufficient.)
We then let $ s \rightarrow 0$ to obtain \begin{equation}\label{eq:maxprinc}
\forall x \in \mathbb{T}, \forall t \in [0,T] : \quad
|u^\varepsilon(x,t)| \leq \| u_0 \|_{L^{\infty}(\mathbb{T})} + c'\, T \| u_0 \|_{L^2(\mathbb{T})}. \end{equation} This inequality is our analogue of Kru\v{z}kov's basic estimate (4.6) in \cite{K}
and we may now return to follow along his lines again more closely (note that we are closest to what he classifies as `Case A' in his paper on page 230 in next
to the last paragraph) to establish a uniform modulus of $L^1$-continuity. In fact, \cite[Equation (4.7)]{K} for $w(x,t) := u^\varepsilon(x+z,t) - u^\varepsilon(x,t)$ holds in our case with $e_i =0$ and replacing the term $c w$ by $K' \ast w$, preserving the Lipschitz continuity properties noted on top of page 232
in \cite{K} (in our case even globally on the compact torus). Lemma 5 in \cite{K} is applicable in essentially the same way in establishing the key
modulus of continuity estimate \cite[Equation (4.15)]{K} (with an appropriate
proof variant taking into account the convolution term and showing
(4.13) directly from (1.3) there). Thus, we have at least sketched how
everything is in place and sufficient to apply Kru\v{z}kov's method in
proving existence from compactness in $L^1$ (via boundedness and equicontinuity of the $L^1$-norm, \cite[Theorem 4.26]{Brezis2011}) and
uniform $L^\infty$-bounds. In combination with the above uniqueness result, we have the following statement. \begin{theorem} Let $T > 0$ be arbitrary. For any $u_0 \in L^\infty(\mathbb{T})$, there exists a unique weak entropy solution of \eqref{eq:fw01} and \eqref{eq:fw02} on the time interval $[0,T]$. \end{theorem}
\section{Numerical experiments by finite differences}
We now study numerical solutions and will observe that many of these exhibit singularities of shock-wave type. The solutions have been computed numerically under periodic boundary conditions and employing Godunov's finite difference method.
We employ the following finite difference method with the uniform grid sizes $ h = \Delta x, \tau = \Delta t$. The nonlinear term is discretized by $$ u^{n+1}_k = u_k^n - \frac{\tau}{h} \big( g(u_k^n, u_{k+1}^n) - g(u_{k-1}^n, u_k^n) \big). $$ where the numerical flux is defined by $$ g(u_{k-1}^n, u_k^n) = \left\{ \begin{array}{ll} f\left( u_{k-1}^n \right) \qquad & \hbox{if} \quad u_{k-1}^n \ge u_k^n ~~ \hbox{and} ~~ f\left( u_{k-1}^n\right) \ge
f\left( u_k^n\right), \\ f\left( u_{k}^n \right) \qquad & \hbox{if} \quad u_{k-1}^n \ge u_k^n ~~ \hbox{and} ~~ f\left( u_{k-1}^n\right) \le
f\left( u_k^n\right), \\ f\left( u_{k-1}^n \right) \qquad & \hbox{if} \quad u_{k-1}^n \le u_k^n ~~ \hbox{and} ~~ f'\left( u_{k-1}^n\right) \ge 0, \\ f\left( u_{k}^n \right) \qquad & \hbox{if} \quad u_{k-1}^n \le u_k^n ~~ \hbox{and} ~~ f'\left( u_{k-1}^n\right) \le 0, \\ 0 \qquad & \hbox{otherwise}. \end{array} \right. $$ Here $u_k^n$ is the approximation for $ u(kh,n\tau)$, and $f(u) = u^2/2$. This is the Godunov method as explicated in \cite{CM}.
The nonlocal term is discretized by the following central difference scheme: We put $ v = (1 - \partial_x^2)^{-1} u_x$, whence we have $ v - v_{xx} = u_x$. The function $v$ is determined by solving the linear equation $$ v_k - \frac{ v_{k+1} -2 v_k + v_{k-1}}{h^2} = \frac{u_{k+1} - u_{k-1}}{2h}, $$ or, $$ ( 1 + 2h^{-2}) v_k - h^{-2} ( v_{k+1} + v_{k-1} )= \frac{u_{k+1} - u_{k-1}}{2h}. $$
We set $ N = 1000$, $ h = 1/N$, and $\tau = 0.4h/q$, where $q$ is the typical size of the initial data. We first test the following two initial data: \begin{align*} & \text{\emph{data}1:} \quad u_0(x) = \cos (2\pi x + 0.5) + 1, \\ & \text{\emph{data}2:} \quad u_0(x) = 0.2 \cos (2\pi x) + 0.1\cos (4\pi x) - 0.3 \sin (6\pi x) + 0.5. \end{align*} The corresponding profiles of $u$ from \emph{data}1 are shown in Figure~\ref{zu:1}. As the profile moves to the right, the formation of a shock is clearly visible. After the emergence of the shock, the jump height at the discontinuity decreases as $t$ increases, namely, if $ \xi(t)$ denotes the position of the discontinuity at time $t$, the difference of the one-sided limits $u(\xi(t)-0) - u(\xi(t)+0)$ is a decreasing function of $t$.
The solution with \emph{data}2 is shown in Figure \ref{zu:2}. In this case, even multiple shocks are formed and merging of some of the shocks over time can be observed.
\begin{figure}
\caption{ The solution from \emph{ data}1. $ 0 \le t \le 0.65$. }
\label{zu:1}
\end{figure}
\begin{figure}
\caption{ data2 }
\label{zu:2}
\end{figure}
These and other experiments suggest the development of wave-breaking singularities into shock discontinuities. In fact, as far as we have computed, only shock-type singularities in wave solutions have been found. Moreover, the computations also suggest that global solutions exist, if the initial data are small---the recent result in \cite[Theorem 1.5]{Itasaka} on Fornberg-Whitham-type equations with nonlinear term $\partial_x (u^p/p)$, $p \geq 5$, seems to support our observation.
\subsection{Remarks on shock conditions and asymptotic properties}
In our experiments all initial functions of the following form $u_0 = \sum_{1 \le k \le 3} [ a_k \cos(2k\pi x) + b_k \sin(2k\pi x) ] $
which are periodic in $x$, but not necessarily symmetric, produce a shock spontaneously, if their $L^{\infty}$-norms are large. If the initial data is small, the numerical solution exists for quite a long time. For instance, if $ u(x,0) = q\cos(2\pi x)$, a shock wave was observed for $ q > 0.015$. For $ q < 0.01$ the solution seems to exist forever. For $ 0.01 < q < 0.015$, our experiments are indecisive. It may exist forever, or it may have a shock after quite a long, numerically undetectable time has passed. Note that the guaranteed time of existence according
to \cite[Theorem 1]{Holmes16} is inversely proportional to $\| u_0 \|_{H^2}$. The paper mentioned above, \cite{Itasaka}, discussing a whole class of Fornberg-Whitham-type equations, also contains detailed information
on the existence time.
The propagation speed of the shock is the same as in the case of the inviscid Burgers equation \begin{equation} u_t + uu_x = 0 \qquad (x \in \mathbb{T}, t > 0). \label{eq:bur} \end{equation} Indeed,
for any $w$ in $L^2$, the function $ \left( 1 - \partial_x^2 \right)^{-1} w_x$ belongs to $H^2({\mathbb T})$, hence is continuous, and therefore does not contribute to the Rankine-Hugoniot jump relation $$ c = \frac{u^+ + u^-}{2}. $$ Here, $c$ is the propagation speed of the shock and $u^+$ and $ u^-$ denote the limit values of $u$ from the right and the left at the discontinuity,
respectively.
Although the formation of a shock is quite similar in both equations, the asymptotic behavior may be different. In the case of the Burgers equation, the jump discontinuity vanishes in the limit and the solution converges to a constant function as $ t \rightarrow \infty$ (for a proof see \cite{lax} or \cite[Theorems 6.4.9 and 11.12.5]{Dafermos}). On the other hand, the Fornberg-Whitham equation possesses many traveling wave solutions that obviously do not decay and it is not yet conclusively shown whether
or not for the discontinuous solutions displayed above the jump heights definitely
decay to zero. We are not aware of a decisive result whether discontinuous
\emph{periodic} traveling waves exist, although we discuss below a negative result in the case of a single shock. (Existence of non-periodic discontinuous wave solutions has been shown in \cite{FS,hoermann2}.)
Another feature of smooth solutions $u$ to the inviscid Burgers equation is that
they preserve the values of maxima and minima of $ u(t,\cdot)$ as $t$ progresses. The Fornberg-Whitham equation does not keep those values constant due to the presence of the nonlocal term. However, our experiments suggest that, although $\max_x u(x,t)$ and $ \min_x u(x,t)$ are not constant functions of $t$, they are nearly constant, or, at least, they seem to stay bounded as $t \to \infty$.
The number of peaks of any solution to the inviscid Burgers equation is non-increasing, while we observe that in case of our equation they may increase (and decrease) as $t$ varies.
Although several minor discrepancies exist, as indicated above, the solutions of \eqref{eq:fw01} have some
similarity with solutions of the inviscid Burgers equation in any time interval until and little after the formation of a shock discontinuity.
\subsection{Traveling waves} In this subsection we first investigate the stability of traveling wave
solutions and then the non-existence of such with a single shock.
Consider a continuously differentiable traveling wave $u(x,t) = U(x-ct)$ with speed $c > 0$. Inserting this into \eqref{eq:fw01} and integrating once we
deduce that the profile function $U$ satisfies the equation \begin{equation} -cU + \frac{U^2}{2} + \left( 1 - \partial_x^2 \right)^{-1} U = 0. \label{eq:U} \end{equation} (The constant of integration may be set to zero without losing generality, if $U$ and $c$ are suitably normalized.)
If we define $V$ by $$ V = -cU + \frac{U^2}{2}, $$ then $2 V + c^2 = (U - c)^2 \geq 0$ and $U(x) = c \pm \sqrt{ c^2 + 2V(x)}$, where we choose the negative sign for the root, since only this connects
$U(x) = 0$ with $V(x) = 0$. We obtain the differential equation \begin{equation} V - V_{xx} + c - \sqrt{ c^2 + 2V} = 0, \label{eq:V} \end{equation} which possesses $V \equiv 0$ as trivial solution for all $c$. Linearization of \eqref{eq:V} at $V \equiv 0$ yields $$ V - V_{xx} - \frac{V}{c} = 0. $$ In terms of the Fourier coefficients $(\hat{V}(m))_{m \in \mathbb{Z}}$ for the
$1$-periodic function $V$, this means $\left(1 + (2 \pi m)^2 - \frac{1}{c}\right) \cdot \hat{V}(m) = 0$ for every
$m \in \mathbb{Z}$. We obtain for every $n \in \mathbb{N}$ a nontrivial solution to the linearized equation in the form $V_n(x) := \cos (2\pi n x)$, if the speed attains the bifurcation value $$
c = c_n := \frac{1}{ 1 + (2\pi n)^2}, $$ otherwise we are left with $V \equiv 0$ as the only solution.
In the case of $n=1$, nontrivial bifurcating solutions to the nonlinear
differential equation exist in the interval $ c_1 \approx 0.0247 < c < 0.02695$. As $c$ tends to the upper limit, $\min ( c^2 + 2V)$ tends to zero, and the profile $U = c - \sqrt{c^2 + 2V}$
tends to a function with a corner singularity. This Lipschitz continuous traveling wave, called a peakon, has already been known to exist for a long time (cf.\ \cite{FW, W}) and occurs also as part of the analysis in \cite{ZT}. Here we have computed these traveling waves in the context above,
i.e., as the solutions of the boundary value problem \eqref{eq:V} and illustrate some of them in Figure \ref{zu:travel}.
\begin{figure}
\caption{ traveling wave $U$; $c = 0.025, 0.0255, 0.026, 0.0269$.
}
\label{zu:travel}
\end{figure}
In order to investigate stability, we picked the solution corresponding to
$ c = 0.0255$, input it as initial data, and computed the time dependent
solution shown in Figure \ref{zu:trav}, which illustrates that the fixed wave profile travels at constant speed. In the time interval used by us, $ 0 < t < 30$, the effect of numerical viscosity is invisible, but for long time intervals, it is expected to influence the numerical solution.
\begin{figure}
\caption{
The time dependent solution with the traveling wave as the initial data. }
\label{zu:trav}
\end{figure}
We consider a perturbation of the initial data in the form $ U(x) + \delta d(x)$, where $\delta$ is $5\%$ of the amplitude of $U$ and $d(x)$ is given as follows: \begin{equation} \label{eq:dis2} d(x) = \cos(2k\pi x) \quad ( k = 2,3,4) \quad \text{ or } \quad d(x) = \left\{ \begin{array}{ll} 1 - \cos(4\pi x) \qquad & ( 0 \le x < 1/2), \\ 0 & (\hbox{otherwise}). \end{array} \right. \end{equation} Note that the latter disturbance is asymmetric. It turns out that in the time interval
$ 0 \le t \le 300$ the solution stays in a small neighborhood of the original
solution $U$ and only a small oscillation could be observed as is shown in Figure \ref{zu:trav4}, where the points $( u_{300}^n,u_{600}^n)$ with $n$ corresponding to $ 0 \le t \le 300$ were plotted. If the initial perturbation is null, these describe a closed curve; once the initial disturbance is added, the corresponding curve departs from the closed orbit, but remains in a certain neighborhood. These and similar experiments may be interpreted as support for the conjecture of stability of the traveling wave with speed $ c = 0.0255$. More experiments carried out for the case $ c = 0.0265$ gave similar results.
\begin{figure}
\caption{
The time dependent solution with $d$ as in the second case of \eqref{eq:dis2}. The points $( u_{300}^n,u_{600}^n)$ with $n$ corresponding to $ 0 \le t \le 300$ are plotted. }
\label{zu:trav4}
\end{figure}
\subsubsection{Nonexistence of traveling waves with a single shock}
In view of the solutions shown earlier that created shocks after wave breaking, with jump height decreasing as time progresses, it is natural to ask whether
periodic traveling wave solutions with jump discontinuities exist. For the non-periodic
case it has been shown that discontinuous traveling waves with single jumps exist (see \cite{FS,hoermann2}). Contrary to this, there is no periodic traveling wave with a single jump, as we will argue in the following.
Suppose $U$ is the profile function for a \emph{discontinuous} traveling wave solution that is piecewise $C^1$ on the torus $\mathbb{T}$, i.e., $C^1$ except for a
single point $x_0 \in \mathbb{T}$ where $U$ as well as $U'$ possess one-sided limits. Since the Fornberg-Whitham equation is invariant under translations in the $x$-variable, we may restrict to the case $x_0 = 0$, thus we may think of the profile function as a $C^1$-function $U \colon [0,1] \to \mathbb{R}$ with $U(0) \neq U(1)$.
The Rankine-Hugoniot shock condition requires $$
U(0) + U(1) = 2c. $$
We come back to Equation \eqref{eq:U} for the profile function $U$, but this time keep track of the constant of integration, for later convenience in the form $$
-cU + \frac{U^2}{2} + \left( 1 - \partial_x^2 \right)^{-1} U = \beta - \frac{c^2}{2} + c, $$ with $\beta \in \mathbb{R}$. We now put $Y := U - c$ and $ Z := Y Y' = ( Y^2 /2 )'$, note that $\left( 1 - \partial_x^2 \right)^{-1} 1
= K \ast 1 = \int_{\mathbb{T}} K(x) \, dx = 1$, and insert into the equation for $U$ to obtain $$
\frac{Y^2}{2} + \left( 1 - \partial_x^2 \right)^{-1} Y = \beta, $$ which also shows that $Z = (Y^2/2)' = - K' \ast Y$ is continuous as a function on the torus and piecewise $C^1$, i.e.\ can be thought of as $C^1$- function $Z \colon [0,1] \to \mathbb{R}$ with \begin{equation}
Z(0) = Z(1). \label{eq:Zcont} \end{equation}
We may therefore apply $(1 - \partial_x^2)$ and arrive at $$
\beta = \frac{Y^2}{2} - \left( \frac{Y^2}{2} \right)'' + Y = \frac{Y^2}{2} - Z' + Y. $$ We collect the equations for $Y$ and $Z$ in the following first-order system \begin{align}
Y Y' &= Z, \label{eq:Y}\\
Z' &= \frac{Y^2}{2} + Y - \beta = \frac{1}{2} \left( (Y + 1)^2 - (2 \beta +1) \right), \label{eq:Z} \end{align} for the $C^1$-functions $Y$ and $Z$ on $[0,1]$. The requirements on $U$,
including the Rankine-Hugoniot condition, now read \begin{equation}
Y(0) \neq Y(1) \quad\text{and}\quad Y(0) + Y(1) = 0, \label{eq:Ycond} \end{equation} while for $Z$ we have the periodic continuity condition \eqref{eq:Zcont}.
We split the further analysis into two cases depending on the value of $\beta$:
\emph{Case $2 \beta + 1 \leq 0$}: Equation \eqref{eq:Z} implies that $Z' \geq 0$. By \eqref{eq:Zcont}, this leaves only the option that $Z' = 0$, hence
$0 \leq (Y+1)^2 = 2 \beta + 1 \leq 0$, so that $Y$ would have to be constant
(equal to $-1$), which is a contradiction to \eqref{eq:Ycond}.
\emph{Case $2 \beta + 1 > 0$}: As a preliminary observation, we note the following: The conditions \eqref{eq:Ycond} imply that either $Y(0) < 0 < Y(1)$ or $Y(0) > 0 > Y(1)$, hence there exists $s \in \,]0,1[$ such that $Y(s) = 0$; by \eqref{eq:Y}, we have also $Z(s) = 0$, thus the trajectory in the $(Y,Z)$-phase diagram passes
through the origin $(0,0)$ and, due to \eqref{eq:Ycond} and \eqref{eq:Zcont},
has as end point $(Y(1),Z(1))$ precisely the reflection across the $Z$-axis of
its starting point $(Y(0),Z(0))$.
Since $2 \beta + 1 > 0$, the vector field defining the right-hand sides of
\eqref{eq:Y} and \eqref{eq:Z} has equilibrium points at $(-1 \pm \sqrt{2 \beta + 1 },0)$
and the qualitative analysis in \cite[Section 3]{FS} may be applied to show that no
trajectory satisfying all the above specifications can exist (the quantities $u$, $E$, $d$ used there correspond here to $Y$, $Z$, $\beta$, respectively).
In summary, we have shown that there is no periodic traveling wave with a single shock.
The question remains whether there exist traveling wave solutions with two or more shock discontinuities (and being piecewise $C^1$). Reviewing the above line of arguments, the reasoning in the first case, $2 \beta + 1 \leq 0$, seems to essentially stay valid (with monotonicity arguments on subintervals instead), whereas in the second case, $2 \beta + 1 > 0$, the crucial consequence that $Y$ (and hence $Z$) has to vanish somewhere is lost, if $Y$ is allowed to have an additional jump discontinuity. We have to leave this issue open for potential
future analysis and note that, even in that case, $Z$ still has to be a continuous function on all of $\mathbb{T}$ thanks to the relation $Z = (Y^2/2)' = - K' \ast Y$.
\section{Concluding remarks}
The Godunov method is of first order and in further numerical studies one might
want to employ a method of higher order such as the ENO (Essentially Non-Oscillatory)
scheme for better accuracy. Furthermore, the computation of $(1-\partial_x^2)^{-1}\partial_x u$ from $u$ in our approach may contain a significant truncation error. Therefore, there admittedly is a lot of room for improvement in the numerical experiments. Nevertheless, we do expect our numerical solutions to correctly show several
main qualitative features. For instance, the almost generic emergence of
shocks in the wave solution $u$ appears to be undoubted and our experiments
strongly indicate that many of the traveling wave solutions are stable. On the other hand, the large time behavior of solutions in general cannot be assessed quantitatively by our method and we clearly lack theoretical insight.
In summary, we are left with (at least) the following questions which we were unable to answer in terms of a rigorous analysis so far: \begin{enumerate} \item Existence of periodic traveling waves with jump discontinuities
(i.e., non-decreasing shocks), although the case of a single jump could be ruled out. \item Boundedness of the spatial minimum and maximum of $u(t)$ as a function of $t$ and independent of the existence time $T$. \item Wave breaking in more generic cases than those covered by the asymmetry condition in the wave breaking theorems. \item Global (in time) existence of strong solutions for (generic) initial data with small Sobolev norm. \end{enumerate}
\appendix
\section{Appendix: Proof of the existence of a global solution to the regularized equation}
We point out that Fujita and Kato's theorem on the local existence of a strong solution of the Navier-Stokes equations (\cite{FK}) can be used to prove the existence of a strong solution of \eqref{eq:vis01} and \eqref{eq:vis01a}. Their method is explained and put into a more general context in \cite{H} and \cite{pazy}. The same techniques have also been applied in \cite{CO}, which indeed involves
a nonlinearity similar to that in the Fornberg-Whitham equation. Given some familiarity with the theory of analytic semigroups, one would quickly see that the proof described
below is only a slight variation of the classical methods. However, we think that
there will be some benefit or at least convenience for the reader in outlining the proof here.
We first note that $ A := - \epsilon \partial_x^2$ generates an analytic semigroup of operators in $L^2(\mathbb{T})$ (see, e.g., \cite{EN, kato, pazy}). In terms of the semigroup, the Cauchy problem \eqref{eq:vis01} and \eqref{eq:vis01a}
is equivalent to the following fixed point problem \begin{equation} u(t) = e^{-tA} u_0 + \int_0^t e^{-(t-s)A} \left[ - u(s) u_x(s) - (1-\partial_x^2)^{-1} u_x(s) \right] ds =: F(u)(t) \label{eq:fu} \end{equation} We will show that this equation has a unique solution for every $ u_0 \in L^2(\mathbb{T})$. The proof is carried out by a successive approximation as follows: $u^{(0)}(t) = e^{-tA} u_0$,
$u^{(n+1)}(t ) = F( u^{(n)})(t)$ for $ n = 0,1,\ldots$ and the solution is then obtained by showing that $F$ is a contraction mapping with respect to a suitable metric.
In the sequel we will denote by $c$ or $c'$ various constants that may depend
on $\epsilon$ but not on $t$. Furthermore, let $\gamma_0$ be a constant such that $$
\left\| \partial_x e^{-tA} v \right\| \le \gamma_0 \| v\| t^{-1/2} \qquad (v \in L^2), $$
where here and hereafter $\| ~~\|$ denotes the $L^2$-norm.
Let $R > 0$ and suppose that we are given a continuous map $t \mapsto w(t)$,
$[0,T] \to L^2({\mathbb T})$, which takes its values for $t > 0$ in $H^1(\mathbb{T})$ and satisfies \begin{equation}
\| w(t) \| \le R \qquad ( 0 \le t \le T),
\qquad \| w_x(t) \| \le Rt^{-1/2} \qquad (0 < t \le T). \label{eq:uuu} \end{equation}
We then have $ \| w(t) \|_{L^{\infty}} \le c \| w(t) \|^{1/2} \| \partial_x w(t) \|^{1/2} \le c R t^{-1/4}$ and it is not difficult to deduce \begin{align*}
\| F(w)(t)\| & \le \| u_0 \| + \int_0^t \| e^{-(t-s)A} \|
\left[ \| w(s) \|_{L^{\infty}} \| w_x(s) \| + \| (1-\partial_x^2)^{-1} w_x(s) \| \right] ds \\
& \le \| u_0 \| + \int_0^t \left[ cR^2s^{-3/4} + cR \right] ds
= \| u_0 \| + 4 cR^2 t^{1/4} + cR t. \end{align*} Similarly, \begin{align*}
\| \partial_x F(w)(t)\| & \le \gamma_0\| u_0 \| t^{-1/2} + \int_0^t \| \partial_x e^{-(t-s)A} \|
\left[ \| w(s) \|_{L^{\infty}} \| w_x(s) \| + \| (1-\partial_x^2)^{-1} w_x(s) \| \right] ds \\
& \le \gamma_0 \| u_0 \|t^{-1/2} + \int_0^t \gamma_0 (t-s)^{-1/2} \left[ c R^2 s^{-3/4} + cR \right] ds \\ &
= \gamma_0 \| u_0 \| t^{-1/2} + \gamma_0 c R^2 t^{-1/4}B(1/2,1/4) +
2 \gamma_0 c R t^{1/2}, \end{align*} where $\gamma_0 $ is as above and $B(\cdot,\cdot)$ denotes Euler's beta function.
Now we may take any $R > \max\{ \| u_0 \|, \gamma_0 \| u_0 \| \}$. Then we may choose $T$ small enough such that
for any $w$ satisfying \eqref{eq:uuu}, the estimates \eqref{eq:uuu} hold also with $F(w)$ in place of $w$.
If we equip $Y_T := C([0,T] ; L^2) \cap C(]0,T] ; H^1)$ with the (complete) norm $$
\| u \|_* := \max \left\{
\max_{0 \le t \le T} \| u(t) \|, \sup_{0 < t \le T} t^{1/2} \| u_x(t) \| \right\}, $$ then the corresponding closed ball $B_R$ of radius $R$ around $0$ in $Y_T$ is mapped into itself (upon noting that $t \mapsto \partial_x F(u)(t)$ is continuous $]0,T] \to L^2$ for every $u \in Y_T$).
Our next task is to show that the map $F$ is a contraction with respect to
the norm $\| ~~ \|_*$ (for some $T > 0$ and on bounded subsets). The proof is similar to the above estimates. Indeed, \begin{align*}
\| F(w)(t) - F(z)(t) \| \le & \int_0^t
\| e^{-(t-s)A} \|
\bigg[ \| w(s) \|_{L^{\infty}} \| w_x(s) - z_x(s) \| \\ & +
\| w(s) - z(s) \|_{L^{\infty}} \| z_x(s) \| +
\left\| (1-\partial_x^2)^{-1} \big( w_x(s) - z_x(s) \big) \right\| \bigg] ds \\
& \le \int_0^t \Big( c\| w\|_* \| w-z \|_* s^{-3/4} +
c\| z\|_* \| w-z \|_* s^{-3/4} + c \| w-z \|_* \Big) ds \\
& \le c' t^{1/4} \big( \| w \|_* + \| z\|_* \big) \| w-z \|_* + ct \| w- z\|_*, \end{align*}
and similarly for $ \| \partial_x F(u) - \partial_x F(z)\| $. Thus, for $T$ sufficiently small, $F$ becomes a contractive mapping on $B_R$ and we
obtain a unique solution $u \in Y_T$ of \eqref{eq:fu}.
\begin{theorem} Let $R>0$ and $\varepsilon > 0$. Then there exists $T > 0$ such that
\eqref{eq:vis01} and \eqref{eq:vis01a} possesses a unique strong solution in $[0,T]$ for every $u_0 \in L^2({\mathbb T})$ such
that $\max\{ \| u_0 \|, \gamma_0 \| u_0 \| \} < R$. \end{theorem}
We emphasize that here $T$ depends on $R$ and $\epsilon$, but not on any individual $u_0$ as far as $u_0$ satisfies $\max\{ \| u_0 \|, \gamma_0 \| u_0 \| \} < R$. Since the spatial $L^2$-norm in the solution is conserved over time,
the solution may be continued any number of times, thus we obtain a solution globally in time. Therefore, we conclude the global unique existence of a solution to the parabolic equation \eqref{eq:vis01}. The following additional features of this solution are used
in the proof of the weak solution: Due to the properties of the heat kernel, $$ u \in C^\infty(\mathbb{T} \times\, ]0,T]) $$ and moreover, if $u_0 \in L^\infty$, although $t \mapsto u(t)$ may be discontinuous at $ t = 0$ as a map into $L^{\infty}$, the real-valued map
$t \mapsto \| u(t) \|_{L^{\infty}}$ is continuous on the closed interval $[0,T]$.
\end{document} |
\begin{document}
\title{Honeycomb arrays}
\author{Simon R.\ Blackburn\thanks{Department of Mathematics, Royal Holloway, University of London, Egham, Surrey TW20 0EX, United Kingdom. \texttt{\{s.blackburn, a.panoui\}@rhul.ac.uk}}\\ Anastasia Panoui$^*$\\ Maura B. Paterson\thanks{Department of Economics, Mathematics and Statistics, Birkbeck, University of London, Malet Street, London WC1E 7HX, United Kingdom. \texttt{[email protected]}}\\ and\\ Douglas R. Stinson\thanks{David R.\ Cheriton School of Computer Science, University of Waterloo, Waterloo Ontario, N2L 3G1, Canada. \texttt{[email protected]}}}
\maketitle
\begin{abstract} A honeycomb array is an analogue of a Costas array in the hexagonal grid; they were first studied by Golomb and Taylor in 1984. A recent result of Blackburn, Etzion, Martin and Paterson has shown that (in contrast to the situation for Costas arrays) there are only finitely many examples of honeycomb arrays, though their bound on the maximal size of a honeycomb array is too large to permit an exhaustive search over all possibilities.
The present paper contains a theorem that significantly limits the number of possibilities for a honeycomb array (in particular, the theorem implies that the number of dots in a honeycomb array must be odd). Computer searches for honeycomb arrays are summarised, and two new examples of honeycomb arrays with 15 dots are given. \end{abstract}
\section{Introduction}
Honeycomb arrays were introduced by Golomb and Taylor~\cite{GolombTaylor} in 1984, as a hexagonal analogue of Costas arrays. Examples of honeycomb arrays are given in Figures~\ref{honey137_figure} to~\ref{honey45_figure} below. \begin{figure}
\caption{A Lee sphere, and three natural directions}
\label{fig:Lee_example}
\end{figure}A honeycomb array is a collection of $n$ dots in a hexagonal array with two properties: \begin{itemize} \item \textbf{(The hexagonal permutation property)} There are three natural directions in a hexagonal grid (see Figure~\ref{fig:Lee_example}). Considering `rows' in each of these three directions, the dots occupy $n$ consecutive rows, with exactly one dot in each row. \item \textbf{(The distinct differences property)} The $n(n-1)$ vector differences between pairs of distinct dots are all different. \end{itemize}
Golomb and Taylor found $10$ examples of honeycomb arrays (up to symmetry), and conjectured that infinite families of honeycomb arrays exist. Blackburn, Etzion, Martin and Paterson~\cite{BlackburnEtzion} recently disproved this conjecture: there are only a finite number of honeycomb arrays. Unfortunately, the bound on the maximal size of a honeycomb array that Blackburn et al.\ provide is far too large to enable an exhaustive computer search over all open cases. In this paper, we prove a theorem that significantly limits the possibilities for a honeycomb array with $n$ dots. (In particular, we show that $n$ must be odd.) We report on our computer searches for honeycomb arrays, and give two previously unknown examples with $15$ dots.
We now introduce a little more notation, so that we can state the main result of our paper more precisely.
We say that a collection of dots in the hexagonal grid is a \emph{hexagonal permutation} if it satisfies the hexagonal permutation property. A collection of dots is a \emph{distinct difference configuration} if it satisfies the distinct difference property. So a honeycomb array is a hexagonal permutation that is a distinct difference configuration.
We say that hexagons are \emph{adjacent} if they share an edge, and we say that two hexagons $A$ and $B$ are \emph{at distance $d$} if the shortest path from $A$ to $B$ (travelling along adjacent hexagons) has length $d$. A \emph{Lee sphere of radius $r$} is a region of the hexagonal grid consisting of all hexagons at distance $r$ or less from a fixed hexagon (the \emph{centre} of the sphere). The region in Figure~\ref{fig:Lee_example} is a Lee sphere of radius $2$. Note that a Lee sphere of radius $r$ intersects exactly $2r+1$ rows in each of the three natural directions in the grid. A \emph{honeycomb array of radius $r$} is a honeycomb array with $2r+1$ dots contained in a Lee sphere of radius $r$.
There are many other natural regions of the hexagonal grid that have the property that they intersect $n$ rows in each direction. One example, the tricentred Lee sphere of radius $r$, is shown in Figure~\ref{fig:tricentred_example}: it is the union of three Lee spheres of radius $r$ with pairwise adjacent centres, and intersects exactly $2r+2$ rows in any direction.\begin{figure}
\caption{A tricentred Lee sphere}
\label{fig:tricentred_example}
\end{figure}
Does there exist a honeycomb array with $2r+2$ dots contained in a tricentred Lee sphere of radius~$r$? Golomb and Taylor did not find any such examples: they commented~\cite[Page~1156]{GolombTaylor} that all known examples of honeycomb arrays with $n$ dots were in fact honeycomb arrays of radius $r$, but stated ``we have not proved that this must always be the case''. We prove the following:
\begin{theorem} \label{thm:main} Let $n$ be an integer, and suppose there exists a hexagonal permutation $\pi$ with $n$ dots. Then $n$ is odd, and the dots of $\pi$ are contained in a Lee sphere of radius $(n-1)/2$. \end{theorem}
Since any honeycomb array is a hexagonal permutation, the following result follows immediately from Theorem~\ref{thm:main}:
\begin{corollary} \label{cor:honeycomb} Any honeycomb array is a honeycomb array of radius $r$ for some integer $r$. In particular, a honeycomb array must consist of an odd number of dots. \end{corollary}
So if we are looking for honeycomb arrays, we may restrict ourselves to searching for honeycomb arrays of radius $r$.
The structure of the remainder of the paper is as follows. In Section~\ref{sec:anticodes}, we state the results on the hexagonal grid that we need. In Section~\ref{sec:brooks}, we remind the reader of the notion of a brook, or bee-rook, and state a theorem on the maximum number of non-attacking brooks on a triangular board. In Section~\ref{sec:honeycomb} we prove Theorem~\ref{thm:main}, we summarise our computer searches for honeycomb arrays, and we provide a list of all known arrays. This last section also contains a conjecture, and some suggestions for further work.
\section{The hexagonal grid} \label{sec:anticodes}
Because the hexagonal grid might be difficult to visualise, we use an equivalent representation in the square grid (see Figure~\ref{fig:hex_to_square}). In this representation, we define each square to be adjacent to the four squares it shares an edge with, and the squares sharing its `North-East' and `South-West' corner vertices. The map $\xi$ in Figure~\ref{fig:hex_to_square} distorts the centres of the hexagons in the hexagonal grid to the centres of the squares in the square grid. The three types of rows in the hexagonal grid become the rows, the columns and the diagonals that run from North-East to South-West. For brevity, we define a \emph{standard diagonal} to mean a diagonal that runs North-East to South-West. \begin{figure}
\caption{From the hexagonal to the square grid}
\label{fig:hex_to_square}
\end{figure}
\begin{figure}
\caption{The region $S_{i}(n)$}
\label{fig:anticode}
\end{figure} For non-negative integers $n$ and $i$ such that $0\leq i\leq n-1$, define the region $S_i(n)$ of the square grid as in Figure~\ref{fig:anticode}. Note that $S_{i}(n)$ and $S_{n-1-i}(n)$ are essentially the same region: one is obtained from the other by a reflection in a standard diagonal. The regions $\xi^{-1}(S_i(n))$ are important in the hexagonal grid, as they are the maximal anticodes of diameter $n-1$; see Blackburn et al.~\cite[Theorem~5]{BlackburnEtzion}. Note that the region $\xi^{-1}(S_{r}(2r+1))$ is a Lee sphere of radius $r$. Regions of the form $\xi^{-1}(S_{r}(2r+2))$ or $\xi^{-1}(S_{r+1}(2r+2))$ are tricentred Lee spheres of radius $r$. Also note that the regions $S_i(n)$ as $i$ varies are precisely the possible intersections of an $n\times n$ square region with $n$ adjacent standard diagonals, where each diagonal intersects the $n\times n$ square non-trivially.
In the lemma below, by a `region of the form $X$', we mean a region that is a translation of $X$ in the square grid.
\begin{lemma} \label{lem:anticode} Let $\pi$ be a hexagonal permutation with $n$ dots, and let $\xi(\pi)$ be the image of $\pi$ in the square grid. Then the dots in $\xi(\pi)$ are all contained in a region of the form $S_{i}(n)$ for some $i$ in the range $0\leq i\leq n-1$. \end{lemma} \textbf{Proof:} Let $R$ be the set of squares that share a row with a dot of $\xi(\pi)$. Similarly, let $C$ and $D$ be the sets of squares sharing respectively a column or a standard diagonal with a dot of $\xi(\pi)$. The dots in $\xi(\pi)$ are contained in $R\cap C\cap D$.
Since $\pi$ is a hexagonal permutation, $R$ consists of $n$ adjacent rows and $C$ consists of $n$ adjacent columns. Hence $R\cap C$ is an $n\times n$ square region. (Since there is exactly one dot in each row and column of the square $R\cap C$, the dots in $\xi(\pi)$ correspond to a permutation; this justifies the terminology `hexagonal permutation'.)
Now, $D$ consists of $n$ adjacent standard diagonals; each diagonal contains a dot in $\xi(\pi)$, and so each diagonal intersects $R\cap C$ non-trivially. Hence $R\cap C\cap D$ is a region of the form $S_i(n)$, as required.
$\Box$
\section{Brooks on a triangular board} \label{sec:brooks}
A \emph{brook} is a chess piece in the square grid that moves like a rook plus half a bishop: it can move any distance along a row, a column or a standard (North-East to South-West) diagonal. Brooks were first studied by Bennett and Potts~\cite{BennettPotts}, who pointed out connections to constant-sum arrays and hexagonal lattices. A set of brooks in a square grid is \emph{non-attacking} if no two brooks lie in a row, a column or a standard diagonal.
Under the correspondence $\xi$ between the square and hexagonal grids mentioned in the previous section, brooks in the square grid correspond to \emph{bee-rooks} in the hexagonal grid: pieces that can move any distance along any row, where a row can go in each of the three natural directions. A set of bee-rooks is therefore \emph{non-attacking} if no two bee-rooks lie in the same row of the hexagonal grid. In particular, bee-rooks placed on the dots in a hexagonal permutation $\pi$ are non-attacking, and so the corresponding set $\xi(\pi)$ of brooks in the square grid is non-attacking.
A \emph{triangular board of width $w$} is the region $S_{0}(w)$ in the square grid depicted in Figure~\ref{fig:triangular_board}. Let $b(w)$ be the maximum number of non-attacking brooks that can be placed in the triangular board of width $w$. The following theorem is proved by Nivasch and Lev~\cite{NivaschLev} and in Vaderlind, Guy and Larson~\cite[P252 and R252]{VaderlindGuy}: \begin{figure}
\caption{A triangular board of width $w$}
\label{fig:triangular_board}
\end{figure}
\begin{theorem} \label{thm:brooks} For any positive integer $w$, $b(w)=\lfloor (2w+1)/3\rfloor$. \end{theorem}
Three of the present authors have found an alternative proof for this theorem, using linear programming techniques~\cite{BlackburnPaterson}. See Bell and Stevens~\cite{BellStevens} for a survey of similar combinatorial problems.
\section{Honeycomb arrays} \label{sec:honeycomb}
We begin this section with a proof of Theorem~\ref{thm:main}. We then describe our searches for honeycomb arrays. We end the section by describing some avenues for further work.
\noindent \textbf{Proof of Theorem~\ref{thm:main}:} Let $\pi$ be a hexagonal permutation with $n$ dots. By Lemma~\ref{lem:anticode}, the dots of $\xi(\pi)$ are contained in a region of the form $S_i(n)$ where $0\leq i\leq n-1$. When $i=(n-1)/2$ (so $n$ is odd and $\xi^{-1}(S_i(n))$ is a Lee sphere) the theorem follows. Suppose, for a contradiction, that $i\not=(n-1)/2$.
By reflecting $\pi$ in a horizontal row in the hexagonal grid, we produce a hexagonal permutation $\pi'$ such that $\xi(\pi')$ is contained in a region of the form $S_{(n-1)-i}(n)$. By replacing $\pi$ by $\pi'$ if necessary, we may assume that $i<(n-1)/2$.
\begin{figure}
\caption{A triangular board covering $\pi$}
\label{fig:covering_triangle}
\end{figure} Consider the triangular board of width $n+i$ in Figure~\ref{fig:covering_triangle} containing $S_i(n)$. Since no two dots in $\xi(\pi)$ lie in the same row, column or standard diagonal, the dots in $\xi(\pi)$ correspond to $n$ non-attacking brooks in this triangular board. But this contradicts Theorem~\ref{thm:brooks}, since \[ \frac{2(n+i)+1}{3}<\frac{2n+(n-1)+1}{3}= n. \] This contradiction completes the proof of the theorem.
$\Box$
Theorem~\ref{thm:main} tells us that the only honeycomb arrays are those of radius $r$ for some non-negative integer $r$. A result of Blackburn et al~\cite[Corollary~12]{BlackburnEtzion} shows that $r\leq 643$. We now report on our computer searches for examples of honeycomb arrays. The known honeycomb arrays are drawn in Figures~\ref{honey137_figure}, \ref{honey9_figure}, \ref{honey15_figure}, \ref{honey2127_figure} and~\ref{honey45_figure}. This list includes two new examples not known to Golomb and Taylor~\cite{GolombTaylor}, namely the second and third examples of radius $7$; we found these examples as follows.
A \emph{Costas array} is a set of $n$ dots in an $n\times n$ region of the square grid, with the distinct difference property and such that every row and column of the array contains exactly one dot. Golomb and Taylor observed that some Costas arrays produce honeycomb arrays, by mapping the dots in the Costas array into the hexagonal grid using the map $\xi^{-1}$ given by Figure~\ref{fig:hex_to_square}. Indeed, it is not difficult to see that all honeycomb arrays must arise in this way. We searched for honeycomb arrays by taking each known Costas array with $200$ or fewer dots, and checking whether the array gives rise to a honeycomb array. For our search, we made use of a database of all known Costas arrays with 200 or fewer dots that has been made available by James K. Beard~\cite{Beard}. This list is known to be complete for Costas arrays with $27$ or fewer dots; see Drakakis et al.~\cite{DrakakisRickard} for details. So our list of honeycomb arrays of radius $13$ or less is complete.
\begin{figure}
\caption{Honeycomb arrays of radius $0$, $1$ and $3$}
\label{honey137_figure}
\end{figure}
\begin{figure}
\caption{Honeycomb arrays of radius 4}
\label{honey9_figure}
\end{figure}
\begin{figure}
\caption{Honeycomb arrays of radius $7$}
\label{honey15_figure}
\end{figure}
\begin{figure}
\caption{Honeycomb arrays of radius $10$ and $13$}
\label{honey2127_figure}
\end{figure}
\begin{figure}
\caption{A honeycomb array of radius $22$}
\label{honey45_figure}
\end{figure}
It is a remarkable fact that all known honeycomb arrays possess a non-trivial symmetry (a horizontal reflection as we have drawn them). Indeed, apart from a single example of radius $3$ (the first radius $3$ example in Figure~\ref{honey137_figure}) all known honeycomb arrays possess a symmetry group of order $6$: the group generated by the reflections along the three lines through opposite `corners' of the hexagonal sphere. We implemented an exhaustive search for honeycomb arrays with $r\leq 31$ having this $6$-fold symmetry: we found no new examples. We also checked all constructions of honeycomb arrays from Costas arrays in Golomb and Taylor~\cite{GolombTaylor} (whether symmetrical or not) for $r\leq 325$, and again we found no new examples.
After these searches, we feel that we can make the following conjecture:
\begin{conjecture} The list of known honeycomb arrays is complete. So there are exactly $12$ honeycomb arrays, up to symmetry. \end{conjecture}
Theorem~\ref{thm:main} shows that hexagonal permutations are always contained in some Lee sphere. But such permutations have been previously studied in several contexts: Bennett and Potts~\cite{BennettPotts} study them as non-attacking configurations of bee-rooks and as the number of zero-sum arrays; Kotzig and Laufer~\cite{Kotzig} study them as the number of $\sigma$-permutations; Bebeacua, Mansour, Postnikov and Severini~\cite{BebeacuaMansour} study them as X-rays of permutations with maximum degeneracy. Let $h_n$ be the number of hexagonal permutations with $2n-1$ dots. The On-Line Encyclopedia of Integer Sequences~\cite[Sequence A002047]{OEIS} quotes a computation due to Alex Fink that computes the first few terms of the sequence $h_n$: \[
\begin{array}{c|cccccccccc} n&1&2&3&4&5&6&7&8&9&10\\\hline h_n&1&2&6&28&244&2544&35600&659632&15106128&425802176 \end{array} \] Kotzig and Laufer ask: How big can $h_n$ be? It seems that the sequence grows faster than exponentially with $n$. We ask a more precise question: Is it true that $(\log h_n)/n\log n$ tends to a constant as $n\rightarrow\infty$?
\paragraph{Acknowledgements} Part of this work was completed under EPSRC Grant EP/D053285/1. The authors would like to thank Tuvi Etzion for discussions, funded by a Royal Society International Travel Grant, which inspired this line of research.
\end{document} |
\begin{document}
\maketitle \begin{abstract}
We present quasi-Banach spaces which are surprisingly similar to certain dual Banach spaces. In particular we show a quasi-Banach space which shares many properties of the dual of the Schreier space: it is $\ell_1$-saturated and does not have the
Schur property, its dual is isometrically isomorphic to the double dual of the Schreier space and it has `the same' extreme
points of the unit ball as the dual of the Schreier space. \end{abstract} \section{Introduction}
A combinatorial Banach space is the completion of $c_{00}$, the space of all finitely supported sequences of real numbers, with respect to the following norm
\begin{equation}\label{dd1}
\lVert x \rVert_\mathcal{F} = \sup_{F\in \mathcal{F}} \sum_{k\in F} |x(k)|, \end{equation}
where $\mathcal{F}$ is a family of finite subsets of $\omega$ (which is hereditary and covers $\omega$, see Section \ref{schreier-like} for the details). We denote such a completion by $X_\mathcal{F}$. One of the most `famous' examples of a Banach space of this form is the \emph{Schreier space} induced by the so-called Schreier family $\mathcal{S}$ (see Section \ref{schreier-like}) and so combinatorial Banach spaces are sometimes called Schreier-like spaces. They were studied mainly for $\mathcal{F}$ being compact families, e.g. by Castillo and Gonzales (\cite{Castillo-Gonzales}, \cite{Castillo93}), Lopez Abad and Todorcevic (\cite{Jordi-Stevo}). Recall that a family $\mathcal{F} \subseteq \mathcal{P}(\omega)$ is compact if it is compact as a subset of $2^\omega$, via the natural identification of $\mathcal{P}(\omega)$ and $2^\omega$. In \cite{NaFa} Borodulin-Nadzieja and Farkas considered Schreier-like spaces for families which are not necessarily compact and which were motivated by the theory of analytic P-ideals.
The main motivation of our article is an attempt to study Banach spaces dual to combinatorial Banach spaces. Even in the case of Schreier space, not much seems to be known about its dual (see \cite{Lipieta}). Perhaps the reason lies in the lack of a nice description of the dual norm. Seeking such a description we came up with the following function $\lVert \cdot\rVert^{\mathcal{F}} \colon c_{00} \to \mathbb{R}$:
\begin{equation} \label{dd2}
\lVert x \rVert^\mathcal{F} = \inf\{ \sum_{F \in \mathcal{P}} \sup_{i\in F} |x(i)|\colon \mathcal{P}\subseteq \mathcal{F} \mbox{ is a partition of }\omega\}. \end{equation} Perhaps this formula does not look tempting at first glance, but in a `combinatorial' sense it is dual to $\lVert \cdot \rVert_\mathcal{F}$. Indeed, we can think about evaluating $\lVert x \rVert_{\mathcal{F}}$ as partitioning $\omega$ into pieces from
$\mathcal{F}$, summing up $|x(i)|$ for $i$ from one piece of the partition and then maximizing the result, for all partitions and all pieces. On the other hand, evaluating $\lVert x \rVert^\mathcal{F}$ comes down to partitioning $\omega$ into pieces from $\mathcal{F}$, taking maximum of $|x(i)|$ for $i$ from one piece of the partition, summing up those maxima and then minimizing the result for all possible partitions.
For certain families $\mathcal{F}$ the completion of $c_{00}$ in $\lVert \cdot \rVert^\mathcal{F}$ (which we will denote by $X^\mathcal{F}$) is a Banach space which is isometrically isomorphic to $X^*_\mathcal{F}$ (see Proposition \ref{for-partitions}). E.g. notice that if $\mathcal{F}$ consists of singletons, then $X_\mathcal{F}$ is isometrically isomorphic to $c_0$ and $X^\mathcal{F}$ is isometrically isomorphic to $\ell_1$.
In general, if a family $\mathcal{F} \subseteq [\omega]^{<\omega}$ is compact, hereditary and covering $\omega$, then $X^\mathcal{F}$ shares many properties with $X^*_\mathcal{F}$. In fact it is difficult to find a property distinguishing those spaces apart of the fact that for many families $\mathcal{F}$ the space $X^\mathcal{F}$ is not a Banach space! More precisely, $\lVert \cdot \rVert^\mathcal{F}$ does not need to satisfy the triangle inequality (and it does not e.g. for $\mathcal{F}$ being the Schreier family).
The lack of triangle inequality is not something welcomed in the theory of Banach spaces. However, we will argue that $X^\mathcal{F}$ is an interesting object anyway. First of all, if $\mathcal{F}\subseteq [\omega]^{<\omega}$ is compact, hereditary and covering $\omega$, then $\lVert \cdot \rVert^\mathcal{F}$ enjoys some perverted form of triangle inequality. Namely, for every $x,y \in c_{00}$ \[ \lVert x+y \rVert^\mathcal{F} \leq 2\lVert x \rVert^\mathcal{F} + \lVert y \rVert^\mathcal{F}. \] Consequently, $\lVert x+y \rVert^\mathcal{F} \leq \dfrac{3}{2} (\lVert x\rVert^\mathcal{F}+ \lVert y \rVert^\mathcal{F})$ for each $x,y\in c_{00}$ and so $\lVert \cdot \rVert^\mathcal{F}$ is a quasi-norm (see Section \ref{quasi-banach} for the definition and basic facts concerning quasi-norms). Thus, by the Aoki-Rolewicz theorem (Theorem \ref{aoki_rolewicz}), it is metrizable and it makes sense to define the completion of $c_{00}$ with respect to $\lVert \cdot \rVert^\mathcal{F}$, which we denote by $X^\mathcal{F}$. The theory of quasi-Banach spaces is quite well developed and it seems that most of the important results and techniques known in Banach space theory work well enough in the realm of quasi-Banach spaces (see \cite{Kalton_quasi}).
As we have mentioned, despite the fact that $X^\mathcal{F}$ is typically not a Banach space, it very much resembles $X^*_\mathcal{F}$, at least if $\mathcal{F}$ is compact. In particular we prove that \begin{itemize}
\item The space $X^\mathcal{F}$ and $X^*_\mathcal{F}$ have isometrically isomorphic duals (Theorem \ref{isomorphism}).
\item The unit balls of $X^\mathcal{F}$ and $X^*_\mathcal{F}$ have 'the same' extreme points, although if $\lVert \cdot \rVert^\mathcal{F}$ is not a norm, then the unit ball in $X^\mathcal{F}$ is not convex (Proposition \ref{extreme_to_extreme}).
\item Both $X^\mathcal{F}$ and $X^*_\mathcal{F}$ have CSRP, i.e. their unit balls can be nicely approximated by their extreme points (Theorem \ref{CSRPthm}). \end{itemize}
Then we concentrate on the particular case of the Schreier family. It seems that the space $X^*_\mathcal{S}$, the dual to the Schreier space, is a rather mysterious object and not much of it is known. Recently, Galego, Gonz\'{a}lez and Pello (\cite{Galego}) proved that it is $\ell_1$-saturated (i.e. every closed infinite-dimensional subspace contains an isomorphic copy of $\ell_1$) and it is known that it does not enjoy the Schur property. In fact, for years it was an open problem whether such spaces, $\ell_1$-saturated but without the Schur property, exist at all. The first example was constructed by Bourgain (\cite{Bourgain}) and then several other involved constructions of such spaces have been presented (see e.g. \cite{Hagler}, \cite{Popov}). The result of Galego, Gonz\'{a}lez and Pello says that you do not have to \emph{construct} an $\ell_1$-saturated space without a Schur property: the dual to the Schreier space is already a good example (although the proof contained in \cite{Galego} is rather difficult).\footnote{The authors of \cite{Galego} prove this result en passant. They mention it neither in the abstract nor in the introduction, nor do they relate it to the Bourgain construction. We are grateful to Jes\'us Castillo for informing us about this result.}
We show that the space $X^\mathcal{S}$ is also $\ell_1$-saturated (Theorem \ref{ell1}) and it does not have the Schur property (Proposition \ref{Schur}). The proof is considerably simpler than those from \cite{Galego} and the previous constructions.
All the above results indicate that the spaces $X^\mathcal{F}$ are kind of quasi-Banach alter-egos of $X^*_\mathcal{F}$. Since the quasi-norm $\lVert \cdot \rVert^\mathcal{F}$ is much easier to deal with than the dual norm of $\lVert \cdot \rVert_\mathcal{F}$, the spaces $X^\mathcal{F}$ can be used as a kind of `test spaces' where some concrete properties can be checked to state appropriate conjectures concerning $X^*_\mathcal{F}$. Perhaps there is some general reason why $X^\mathcal{F}$ and $X^*_\mathcal{F}$ are so similar. Finding a concrete theorem explaining the connection between $X^\mathcal{F}$ and $X^*_\mathcal{F}$ might allow one to prove results about $X^*_\mathcal{F}$ dealing with the quasi-norm $\lVert \cdot \rVert^\mathcal{F}$ instead of bothering oneself with the dual norm to $\lVert \cdot \rVert_\mathcal{F}$.
The article is organized as follows. In Section 3 we introduce the basic notions and recall some facts concerning quasi-Banach spaces. In Section 4 we present basic definitions and facts about combinatorial Banach spaces. In Section 5 we deal with the function defined by (\ref{dd2}). We prove that $\lVert \cdot \rVert^{\mathcal{F}}$ is a nice quasi-norm and that $X^{\mathcal{F}}$ is a quasi-Banach space for every compact family $\mathcal{F}$ (which is hereditary and which covers $\omega$). In Section 6 we present similarities between $X^{\mathcal{F}}$ and the dual to $X_{\mathcal{F}}$. Finally, in Section 7 we prove that $X^{\mathcal{S}}$ is $\ell_{1}$-saturated and that it does not have the Schur property, for the Schreier family $\mathcal{S}$.
\section{Preliminaries} \label{preliminaries}
In this section we present the standard facts and notations, used in the further sections of this article.
By $\omega$ we denote the set of natural numbers. For various technical reasons for the sake of this article we will not recognize 0 as a natural number. If $k \in \omega$, then $[\omega]^{\leq k} = \lbrace A \subseteq \omega: \lvert A \rvert \leq k \rbrace$ and $[\omega]^{< \omega}$ is the family of all finite subsets of $\omega$.
Recall that the Cantor set $2^{\omega}$ is a compact and complete metric space equipped with the metric
$$
d(x,y) = \dfrac{1}{2^{k}},
$$
where $k$ is the smallest natural number for which $x(k) \neq y(k)$ (and $d(x,y)=0$ in case $x=y$).
By $\mathcal{P}(\omega)$ we denote the power set of $\omega$ which we identify with a Cantor set $2^{\omega}$ via the map
$$\mathcal{P}(\omega) \ni A \mapsto \chi_{A} \in 2^{\omega},$$
where $\chi_{A}$ denotes the characteristic function of a set $A$.
Henceforth we do not distinguish a subset of $\omega$ from its characteristic function, so if we write that a family is open, closed, compact etc. we mean openness, closedness, and compactness of a subset of $2^{\omega}$. In addition, we use interchangeably the following notations
$$
j \in A \hspace{0.1cm} (\notin A)
$$
and
$$
A(j) = 1 \hspace{0.1cm} (=0).
$$
If $A,B \subseteq \omega$ are two finite sets, then by $A < B$ we mean that $\max(A) < \min(B)$. In what follows $\mathcal{F}$ will always denote a family of finite subsets of $\omega$ which is hereditary (i.e. it is closed under taking subsets)
and which covers
$\omega$. If $\mathcal{A}$ is a family of subsets of $\omega$ then by its \emph{hereditary closure} we mean the smallest hereditary family containing $\mathcal{A}$.
\begin{df} A family $\mathcal{P}\subseteq \mathcal{P}(\omega)$ is a \emph{partition} if
\begin{itemize}
\item $\emptyset\in \mathcal{P}$,
\item $\bigcup \mathcal{P} = \omega$,
\item $\mathcal{P}$ consists of pairwise disjoint elements.
\end{itemize}
\end{df}
Notice that our definition of partition is non-standard. We want $\emptyset$ to be an element of a partition, since then it forms a compact subset of $2^\omega$ and we are going to use this fact later on.
We denote by $\mathbb{P}_{\mathcal{F}}$ the family of all partitions $\mathcal{P}\subseteq \mathcal{F}$.
In this paper we consider spaces of sequences of real numbers. \\ For $x \in \mathbb{R}^{\omega}$ by its \textit{support} we mean the set $supp(x) = \lbrace n \in \omega: x(n) \neq 0 \rbrace$ and $c_{00}$ is the set of all sequences $x$ for which $supp(x)$ is finite.
For $A \subseteq \omega$ the function $P_{A}: \mathbb{R}^{\omega} \rightarrow \mathbb{R}^{\omega}$ given by
$$
P_{A}(x)(k) =
\begin{cases}
x(k), & \text{if } k \in A \\
0, & \text{otherwise,}
\end{cases}
$$
is called the \textit{projection} of $x$ onto coordinates from $A$. In particular if $n \in \omega$, then by $P_{n}$ we denote the projection onto the initial segment $\lbrace 1,2,...,n \rbrace$ and by $P_{\omega \setminus n}$ - the projection onto
the complement of this segment.
\subsection{Quasi-Banach spaces} \label{quasi-banach} \begin{df} \label{quasi_norm_def} A \emph{quasi-norm} on a vector space $X$ is a function $\lVert \cdot \rVert: X \rightarrow \mathbb{R}$ satisfying \begin{itemize} \item $\lVert x \rVert = 0 \Leftrightarrow x = 0$
\item For every $\lambda \in \mathbb{R}$ $\lVert \lambda x \rVert = | \lambda | \lVert x \rVert$ \item There is $c \geq 1$ such that $\lVert x + y \rVert \leq c ( \lVert x \rVert + \lVert y \rVert )$. \end{itemize} \end{df}
The minimal constant $c$ working above is sometimes called the \emph{modulus of concavity} of the quasi-norm. In particular, for $c=1$ we get the definition of a norm. In what follows sometimes we will allow quasi-norms to take possibly infinite values. If $\lVert \cdot \rVert$ is a quasi-norm (taking only finite values) on a vector space $X$, then the pair $(X, \lVert \cdot \rVert)$ is called \emph{quasi-normed space}.
An important class of quasi-norms are so-called $p$-norms. A quasi-norm is a $p$-norm if it satisfies the following condition: \begin{equation} \label{p_norm} \lVert x + y \rVert^{p} \leq \lVert x \rVert^{p} + \lVert y \rVert^{p}. \end{equation}
The classical Aoki-Rolewicz theorem (\cite{Aoki}, \cite{Rolewicz}, see also \cite[Theorem 1.3]{Kalton-F}), says that every quasi-norm can be renormed to a $p$-norm:
\begin{thm}[Aoki-Rolewicz] \label{aoki_rolewicz}
Let $(X, \lVert \cdot \rVert)$ be a quasi-normed space. Then there exist $p \in (0, 1]$ and a $p$-norm $\left \vvvert \cdot \right \vvvert$ on $X$ which is equivalent to $\lVert \cdot \rVert$. If $c$ is the modulus of concavity of $\lVert \cdot
\rVert$, then $p$ satisfies $c = 2^{\frac{1}{p}-1}$. \end{thm}
The appropriate $p$-norm $\left \vvvert \cdot \right \vvvert$ is given by the formula \begin{equation} \label{equi_q_norm} \left \vvvert x \right \vvvert = \inf \bigg \{ \Big ( \sum \limits_{i=1}^{n} \lVert x_i \rVert^{p} \Big )^{\frac{1}{p}}: n \in \omega, x_1,...,x_n \in X, x = \sum \limits_{i=1}^{n} x_i \bigg \}. \end{equation}
The important consequence of Aoki-Rolewicz theorem is that every quasi-normed space is metrizable. The compatible metric $d$ can be defined by $d(x,y) = \left \vvvert x - y \right \vvvert^{p}$ (see \cite{Kalton_quasi}). A quasi-normed space is called a \emph{quasi-Banach} space, if the quasi-norm induces complete metrizable topology on $X$.
Note that a quasi-Banach space $X$ which is not a Banach space cannot be locally convex, i.e. the unit ball, $B_X$, is not a convex set. By this, it turns out that results which hold in Banach spaces and are based somehow on the local convexity (e.g. Hahn-Banach extension property or Krein-Milman theorem) are no longer true, in general, in quasi-Banach spaces (see \cite{Kalton_quasi}). However, the standard results of Banach space theory such as Open Mapping Theorem, Uniform Boundedness Principle, and Closed Graph Theorem can be applied in quasi-Banach spaces since they depend only on the completeness of the space. Later in the article we use the Open Mapping Theorem in the context of quasi-Banach spaces, so we recall it below.
\begin{thm}[Open Mapping Theorem] \label{open_mapping} Let $X$ and $Y$ be quasi-Banach spaces and $T: X \rightarrow Y$ be a continuous linear operator onto $Y$. Then $T$ is open. \end{thm}
\section{Combinatorial spaces} \label{schreier-like}
In \cite{NaFa} the authors presented an approach towards Banach spaces with unconditional bases motivated by the theory of analytic P-ideals on $\omega$. We will briefly overview the basic definitions and results which we are going to use in a more general case, namely for quasi-Banach spaces.
\begin{df}\label{nicenorm} \cite{NaFa}
We say that a function $\varphi\colon \mathbb{R}^{\omega} \rightarrow [0,\infty]$ is \textit{nice}, if
\begin{enumerate}[(i)]
\item (\textit{Non-degeneration}) $\varphi(x) < \infty$ for every $x \in c_{00}$.
\item (\textit{Monotonicity}) For every $x,y \in \mathbb{R}^\omega$, if $\lvert x(n) \rvert \leq \lvert y(n) \rvert$ for each $n \in \omega$, then $\varphi(x) \leq \varphi(y)$.
\item (\textit{Lower semicontinuity}) $\lim \limits_{n \rightarrow \infty} \varphi(P_{n}(x)) = \varphi(x)$ for every $x\in \mathbb{R}^\omega$.
\end{enumerate} \end{df} For an extended quasi-norm $\varphi: \mathbb{R}^{\omega} \rightarrow [0,\infty]$ define
\begin{center}
$FIN(\varphi) = \lbrace x \in \mathbb{R}^{\omega}: \varphi(x) < \infty \rbrace,$\\
$EXH(\varphi) = \lbrace x \in \mathbb{R}^{\omega}: \lim \limits_{n \rightarrow \infty} \varphi(P_{\omega \setminus n}(x)) = 0 \rbrace. $
\end{center}
\begin{thm}\label{5.1}(\cite[Proposition 5.1]{NaFa}) If $\varphi: \mathbb{R}^{\omega} \rightarrow [0,\infty]$ is a nice extended \emph{norm}, then $FIN(\varphi)$ and $EXH(\varphi)$ (normed with $\varphi$) are Banach spaces with unconditional bases.
\end{thm}
Note that the above theorem can be reversed: every Banach space with an unconditional basis is isometrically isomorphic to $EXH(\varphi)$ for some nice extended norm $\varphi$ (\cite[Proposition 5.1]{NaFa}). Now we will present two structure
theorems about the spaces $FIN(\varphi)$ and $EXH(\varphi)$.
\begin{thm}\label{5.4}(\cite[Theorem 5.4]{NaFa})\label{Banach} Suppose that $\varphi: \mathbb{R}^{\omega} \rightarrow [0,\infty]$ is an extended norm. Then the following conditions are equivalent:
\begin{itemize}
\item $EXH(\varphi) = FIN(\varphi)$,
\item $EXH(\varphi)$ does not contain an isomorphic copy of $c_0$.
\end{itemize}
\end{thm}
\begin{thm}\label{5.5}(\cite[Proposition 5.5]{NaFa}) Suppose that $\varphi: \mathbb{R}^{\omega} \rightarrow [0,\infty]$ is an extended norm. Then the following conditions are equivalent:
\begin{itemize}
\item $FIN(\varphi)$ is isometrically isomorphic to $EXH(\varphi)^{**}$,
\item $EXH(\varphi)$ does not contain an isomorphic copy of $\ell_1$.
\end{itemize}
\end{thm}
In this article we consider particular norms induced by families of subsets of $\omega$. More precisely, if $\mathcal{F} \subseteq \mathcal{P}(\omega)$ is hereditary and covering $\omega$, then for a sequence $x$ consider the following expression
\begin{equation} \label{Lower_norm}
\lVert x \rVert_{\mathcal{F}} = \sup_{F \in \mathcal{F}} \sum_{k \in F} \lvert x(k) \rvert.
\end{equation}
It is not difficult to check that this is a nice extended norm and so $EXH(\lVert \cdot \rVert_{\mathcal{F}})$ is a Banach space (which we will denote, for simplicity, by $X_\mathcal{F}$). See \cite{NaFa} for examples of spaces of this form.
The spaces of the form $X_\mathcal{F}$ are quite well studied for compact families $\mathcal{F}$ (see e.g. \cite{Jordi-Stevo}, \cite{Castillo-Gonzales}). The most notable representative here is the Schreier space,
$X_\mathcal{S}$, where $\mathcal{S}$ is so-called \textit{Schreier family}: $$ \mathcal{S} = \lbrace A \subseteq \omega: \lvert A \rvert \leq \min(A) \rbrace. $$
\section{Dual quasi-norms} \label{dual-norms}
Now we are going to introduce the main notion of this article. For $x \in \mathbb{R}^{\omega}$ define
\begin{equation} \label{Upper_norm}
\lVert x \rVert^{\mathcal{F}} = \inf_{\mathcal{P} \in \mathbb{P}_{\mathcal{F}}} \sum_{F \in \mathcal{P}} \sup_{k \in F} \lvert x(k) \rvert.
\end{equation}
If $\mathcal{P}$ is a partition of $\omega$, then by $||x||^\mathcal{P}$ we will denote $\| x\|^\mathcal{F}$ where $\mathcal{F}$ is the hereditary closure of $\mathcal{P}$. Note that in this case
$$
\lVert x \rVert^{\mathcal{P}} = \sum_{P \in \mathcal{P}} \sup_{k \in P} \lvert x(k) \rvert.
$$
So, for a family $\mathcal{F} \subseteq \mathcal{P}(\omega)$ we have
\[ \lVert x \rVert^\mathcal{F} = \inf_{\mathcal{P}\in \mathbb{P}_\mathcal{F}} \lVert x \rVert^\mathcal{P}. \]
Note that in general the expression (\ref{Upper_norm}) does not define a norm.
\begin{eg}\label{notnorm} Let $\mathcal{S}$ be the Schreier family. Consider the finitely supported sequences $x = (0,1,1,0,0,0,...)$, $y = (0,0,1,1,1,0,0,0,...)$. We can easily check that $\lVert x \rVert^{\mathcal{S}} = \lVert y \rVert^{\mathcal{S}} = 1$, but $\lVert x+y \rVert^{\mathcal{S}} = 3$, so the triangle inequality is not satisfied. \end{eg}
However, it turns out that $\lVert \cdot \rVert^{\mathcal{F}}$ is a quasi-norm, at least if $\mathcal{F}$ is a compact family. Moreover, it is a nice quasi-norm (in the sense of Definition \ref{nicenorm}).
Perhaps quite surprisingly, before showing that $\lVert \cdot \rVert^\mathcal{F}$ is a quasi-norm and then showing that it is nice, we will do the opposite: first we will check that $\lVert \cdot \rVert^\mathcal{F}$ satisfies all the conditions of Definition \ref{nicenorm}. The reason is that lower semicontinuity will make our life easier allowing to focus on finitely supported sequences.
It is easy to check that if $\mathcal{F}$ is a family covering $\omega$, then $\lVert \cdot \rVert^\mathcal{F}$ is monotone and non-degenerated (in the sense of Definition \ref{nicenorm}). However, it is not necessarily lower semicontinuous. Consider e.g. the
family $\mathcal{F}$ of all finite subsets of $\omega$ and $x$ defined by $x(k)=1$ for each $k$. Then $\lVert P_n(x) \rVert^\mathcal{F} = 1$ for each $n$ but $\lVert x \rVert^\mathcal{F} = \infty$. We will show that if we additionally assume that
$\mathcal{F}$ is compact, then $\lVert \cdot \rVert^\mathcal{F}$ is lower semicontinuous and so it is nice.
\begin{thm} \label{nice_condition} If $\mathcal{F} \subseteq \mathcal{P}(\omega)$ is compact, hereditary and covering $\omega$, then
$\lVert \cdot \rVert^{\mathcal{F}}$ is a nice quasi-norm.
\end{thm}
Before we start the proof we recall some definitions and facts about the Vietoris topology.
Fix a compact $\mathcal{F}\subseteq \mathcal{P}(\omega)$. Every partition can be considered as a subset of $2^{\omega}$ and thus we can treat a set $\mathbb{P}_{\mathcal{F}}$ as a subset of the power set of
$2^{\omega}$. We can endow this set with a Vietoris topology.
\begin{df}
Let $X$ be a compact topological space. Denote by $\mathcal{K}(X)$ the family of all closed subsets of $X$. The \emph{Vietoris topology} is the one generated by sets of the form
\begin{equation} \label{Vietoris}
\big <U_{1},U_{2},...,U_{n} \big > = \lbrace K \in \mathcal{K}(X): K \subseteq \bigcup_{i \leq n} U_{i} \wedge \forall i \leq n \ K \cap U_{i} \neq \emptyset \rbrace,
\end{equation}
where $U_{i}$ are open subsets of $X$.
\end{df}
Note that if $X$ is a compact space, then $\mathcal{K}(X)$ endowed with the Vietoris topology is compact as well. Also, if $X$ is metrizable, then $\mathcal{K}(X)$ is metrizable (by the Hausdorff metric).
In our case, the role of $X$ is played by $\mathcal{F}$. According to the above, we would like to consider $\mathbb{P}_{\mathcal{F}}$ as a subspace of $\mathcal{K}(\mathcal{F})$. Notice that, according to our definition of a partition, every partition contains
$\emptyset$ and so it is a closed subset of $\mathcal{F}$ (in fact, its elements form a sequence converging to $\emptyset$).
\begin{lem}
$\mathbb{P}_{\mathcal{F}}$ is closed in $\mathcal{K}(\mathcal{F})$. Consequently, $\mathbb{P}_{\mathcal{F}}$ is a compact subspace of $\mathcal{K}(\mathcal{F})$.
\end{lem}
\begin{proof}
Let $\mathcal{G} \in \overline{\mathbb{P}_{\mathcal{F}}}$. We need to prove that $\mathcal{G}$ is a partition, i.e.
\begin{enumerate}[(i)]
\item $\emptyset \in \mathcal{G}$,
\item $\bigcup \mathcal{G} = \omega$,
\item All elements of $\mathcal{G}$ are pairwise disjoint.
\end{enumerate}
Of those (i) is straightforward.
Suppose now that there exists $n \in \omega$ such that $n \notin \bigcup \mathcal{G}$. Put $U = \lbrace x \in 2^{\omega}: x(n) = 0 \rbrace$. Then $U$ is an open subset of $2^{\omega}$. Take the basic (in the Vietoris topology) set $\mathcal{K}_{U} = \lbrace K \in
\mathcal{K}(\mathcal{F}): K \subseteq U \rbrace$, which is an open neighborhood of $\mathcal{G}$. Then $\mathcal{K}_{U} \cap \mathbb{P}_{\mathcal{F}} = \emptyset$. Indeed, otherwise there would be a partition $\mathcal{P}$ such that $\mathcal{P}
\subseteq U$, which is impossible, because there is $A \in \mathcal{P}$ such that $n \in A$. The set $\mathcal{K}_{U}$, therefore, witnesses that $\mathcal{G} \notin \overline{\mathbb{P}_{\mathcal{F}}}$, which is a contradiction. This proves (ii).
To prove (iii) suppose that there are $A,B \in \mathcal{G}$ such that $A \cap B \neq \emptyset$ and $A \setminus B \ne \emptyset$. Let $n \in A \cap B$ and $m \in A \setminus B$. Consider the following open subsets in $2^{\omega}$
\begin{center}
$U_{1} = \lbrace x \in 2^{\omega}: x(n) = 1 \wedge x(m) = 1 \rbrace$,\\
$U_{2} = \lbrace x \in 2^{\omega}: x(n) = 1 \wedge x(m) = 0 \rbrace$,\\
$U_{3} = 2^{\omega}$,
\end{center}
and the basic set $\big < U_{1}, U_{2}, U_{3} \big>$. Then $\mathcal{G} \cap U_{1} \neq \emptyset$, because $A \in U_{1}$ and $\mathcal{G} \cap U_{2} \neq \emptyset$, since $B \in U_{2}$. So the set $\big <U_{1}, U_{2}, U_{3} \big >$ is an open
neighborhood of $\mathcal{G}$. If $\mathbb{P}_{\mathcal{F}} \cap \big < U_{1}, U_{2}, U_{3} \big> \neq \emptyset$, then there is a partition $\mathcal{P}$ and sets $K,L \in \mathcal{P}$ such that $n,m \in K$, $n \in L$, and $m \notin L$. But it
is impossible, since elements of $\mathcal{P}$ are pairwise disjoint. It implies that $\mathbb{P}_{\mathcal{F}} \cap \big <U_{1}, U_{2}, U_{3} \big > = \emptyset$, which is a contradiction.
\end{proof}
\begin{proof}[Proof of Theorem \ref{nice_condition}] As we have mentioned it is enough to show lower semicontinuity. Fix $x \in \mathbb{R}^{\omega}$.
Assume that $\lVert x \rVert^{\mathcal{F}} = D$ (possibly $D=\infty$). Then for every partition $\mathcal{P}$ we have $\lVert x \rVert^{\mathcal{P}} \geq D$. For each $n \in \omega$ put $x_{n} = P_{n}(x)$. Suppose that there exists $M < D$ such that
$\lVert x_{n} \rVert^{\mathcal{F}} < M$ for each $n$. Then for every $n$ there is a partition $\mathcal{P}_{n}$ such that $\lVert x_{n} \rVert^{\mathcal{P}_{n}} < M$. By compactness, we may assume (passing to a subsequence if needed) that
$(\mathcal{P}_{n})_{n \in \omega}$ converges (in the Vietoris topology) to a partition $\mathcal{P}$. Since $\lVert x \rVert^\mathcal{P} \geq D > M$, there is $N \in \omega$ such that $\lVert x_{N} \rVert^{\mathcal{P}} > M$. There are only finitely many elements $R_{1},R_{2},...,R_{j}$ of $\mathcal{P}$ having non-empty intersection with $\lbrace 1,2,...,N \rbrace$. For $k \leq j$ put
\[ U_{k} = \lbrace x \in 2^{\omega}: \forall i \in R_{k} \ x(i) = 1 \rbrace \]
and consider the basic open set $\big < U_{1},U_{2},...,U_{j}, U_{j+1} \big>$, where $U_{j+1} = 2^{\omega}.$ Then \[ \mathcal{P} \in \big < U_{1},U_{2},...,U_{j},U_{j+1} \big>.\] Indeed, trivially $\mathcal{P} \cap U_{j+1} = \mathcal{P}$
and for $k \leq j$ we have $ R_k \in \mathcal{P} \cap U_{k} \neq \emptyset$. Since $\mathcal{P}_{n}$ converges to $\mathcal{P}$, there is $k>N$ such that $\mathcal{P}_{k}
\in \big < U_{1},U_{2},...,U_{j}, U_{j+1} \big>$. It means that \[ \{P\cap \{1,\dots,N\}\colon P \in \mathcal{P}_k\} = \{P\cap \{1,\dots,N\} \colon P \in \mathcal{P}\}.\]
So, \[ \lVert x_k \rVert^{\mathcal{P}_k} \geq \lVert x_N \rVert^{\mathcal{P}_k} = \lVert x_N \rVert^{\mathcal{P}} > M, \]
a contradiction. \end{proof}
Now we can prove that $\lVert \cdot \rVert^\mathcal{F}$ is indeed a quasi-norm.
\begin{thm}\label{quasi-quasi} Let $\mathcal{F}$ be a compact hereditary family. Then for every $x,y\in \mathbb{R}^\omega$
\begin{itemize}
\item[(a)] if $x,y$ have disjoint supports, then $\lVert x+y \rVert^\mathcal{F} \leq \lVert x \rVert^\mathcal{F} + \lVert y \rVert^\mathcal{F}$,
\item[(b)] $\lVert x+y \rVert^{\mathcal{F}} \leq 2\lVert x \rVert^{\mathcal{F}} + \lVert y \rVert^{\mathcal{F}}$,
\item[(c)] $\lVert x+y \rVert^{\mathcal{F}} \leq \dfrac{3}{2} (\lVert x \rVert^{\mathcal{F}} + \lVert y \rVert^{\mathcal{F}})$ and so $\lVert \cdot \rVert^\mathcal{F}$ is a quasi-norm.
\end{itemize} \end{thm}
\begin{proof}
Of the above (a) is clear and (c) is a direct consequence of (b).
So, we will check (b).
Let $x, y \in \mathbb{R}^\omega$. By lower semicontinuity of $\lVert \cdot \rVert^\mathcal{F}$ it is enough to consider the case when $x$ and $y$ are finitely supported. Moreover, we will assume that $x(k), y(k)\geq 0$ for every $k$ (since $\lVert
x+y\rVert \leq \lVert |x| + |y| \rVert$ and $\lVert x \rVert = \lVert |x| \rVert$ for each $x,y\in X^\mathcal{F}$) (by $|x|$ we mean a sequence defined by $|x|(k) = |x(k)|$ for each $k \in \omega$).
Assume that $x$ is of the form $x = a\chi_F$, where $F\in \mathcal{F}$ and let $A = \mathrm{supp}(y)$. Then, using (a), we have
\[ \lVert x + y \rVert^\mathcal{F} \leq \lVert P_A(x+y) \rVert^\mathcal{F} + \lVert P_{\omega\setminus A}(x)\rVert^\mathcal{F} \leq (\lVert y \rVert^\mathcal{F} + a) + a = 2\lVert x\rVert^\mathcal{F} + \lVert y \rVert^\mathcal{F}. \]
Next, suppose that $x$ is of the form $x = a_0\chi_{F_0} + \dots + a_n \chi_{F_n}$, where $F_i\in \mathcal{F}$ and $F_i \cap F_j = \emptyset$ for all distinct $i,j\leq n$. Then, by the above inequality
\begin{multline}\label{ppp}
\lVert x+y \rVert^\mathcal{F} \leq 2 \lVert a_0\chi_{F_0}\rVert^\mathcal{F} + \lVert a_1\chi_{F_1} + \dots + a_n\chi_{F_n} + y \rVert^\mathcal{F} \leq \dots \\ \leq 2(\lVert a_0\chi_{F_0}\rVert^\mathcal{F} + \dots + \lVert a_n\chi_{F_n}\rVert^\mathcal{F}) +
\lVert y \rVert^\mathcal{F}
= 2(a_0 + \dots + a_n) + \lVert y \rVert^\mathcal{F} \leq 2\lVert x \rVert^\mathcal{F}+\lVert y \rVert^\mathcal{F}.
\end{multline}
Finally, assume $x$ is finitely supported and $\mathcal{P}$ is a partition of $\omega$ such that $\lVert x \rVert^\mathcal{F} = \lVert x \rVert^\mathcal{P}$. Of course only finitely many elements of $\mathcal{P}$, say $F_0, \dots, F_n$, have nonempty intersection with
the support of $x$. Let $a_i = \max \limits_{k\in F_i} x(k)$. Then $\lVert x \rVert^\mathcal{F} = a_0 + \dots + a_n$. Let $x' = a_0 \chi_{F_0} + \dots + a_n \chi_{F_n}$ and notice that $\lVert x' \rVert^\mathcal{F} = \lVert x \rVert^\mathcal{F}$ (as we
may assume that all $F_i$'s are maximal). Then, by monotonicity of $\lVert \cdot \rVert^\mathcal{F}$ and by (\ref{ppp})
\[ \lVert x+y \rVert^\mathcal{F} \leq \lVert x' + y \rVert^\mathcal{F} \leq 2\lVert x' \rVert^\mathcal{F} + \lVert y\rVert^\mathcal{F} = 2\lVert x \rVert^\mathcal{F} + \lVert y\rVert^\mathcal{F}. \] \end{proof}
Below we prove a natural counterpart of Theorem \ref{Banach} (mimicking the proof from \cite{NaFa}).
\begin{thm}\label{quasi-Banach} If $\mathcal{F}$ is a compact hereditary family and $\mathcal{F}$ covers $\omega$, then $FIN(\lVert \cdot \rVert^\mathcal{F})$ and $EXH(\lVert \cdot \rVert^\mathcal{F})$ are quasi-Banach spaces. \end{thm}
\begin{proof} That $\lVert \cdot \rVert^\mathcal{F}$ is a nice quasi-norm follows directly from Theorem \ref{quasi-quasi} and Theorem \ref{nice_condition}.
We are going to show that $FIN(\lVert \cdot \rVert^\mathcal{F})$ is complete and then that $EXH(\lVert \cdot \rVert^\mathcal{F})$ is a closed subspace of $FIN(\lVert \cdot \rVert^\mathcal{F})$.
For simplicity denote $\varphi = \lVert \cdot \rVert^\mathcal{F}$.
First we will prove that $FIN(\varphi)$ is complete. Let $(x_n)$ be a Cauchy sequence in $FIN(\varphi)$. Applying monotonicity, $\varphi(P_{\{k\}}(x_n-x_m))\leq \varphi(x_n-x_m)$ for every $k,n,m$, and hence for each fixed $k$ the sequence $(P_{\{k\}}(x_n))_{n\in\omega}$ is a Cauchy sequence in the $k$th $1$-dimensional coordinate space of $\mathbb{R}^\omega$ (which is a quasi-Banach space, as $\varphi$ is finite on $c_{00}$), and so $P_{\{k\}}(x_n)\xrightarrow{n\to\infty}y_k$ for some $y_k$. Put $y=(y_k)$. We will first show that $y\in FIN(\varphi)$. The sequence
$\{x_n\colon n\in\omega\}$ is bounded, say $\varphi(x_n)\leq B$ for every $n$. We show that $\varphi(y)\leq 3B$, i.e. (by the lower semicontinuity of $\varphi$) $\varphi(P_M(y))\leq 3B$ for every $M\in\omega$. Fix an $M>0$. If $n$ is large enough, say $n\geq n_0$, then
$\varphi(P_{\{k\}}(y-x_n))\leq \dfrac{B}{M}$ for every $k<M$ and hence \[\varphi(P_M(y))\leq \dfrac{3}{2}(\varphi(P_M(y-x_n))+\varphi(P_M(x_n))) \leq \dfrac{3}{2}(\sum_{k<M}\varphi(P_{\{k\}}(y-x_n))+\varphi(x_n))\leq 3B.\] The first inequality follows from Theorem \ref{quasi-quasi}(c) and the second from Theorem \ref{quasi-quasi}(a).
Now we will prove that $x_n\to y$. If not, then there are $\varepsilon>0$ and $n_0<n_1<\dots<n_j<\dots$ such that $\varphi(x_{n_j}-y)>\varepsilon$, that is, $\varphi(P_{M_j}(x_{n_j}-y))>\varepsilon$ for some $M_j\in \omega\setminus\{0\}$ for every $j$. Pick
$j_0$ such that $\varphi(x_{n_{j_0}}-x_n)< \dfrac{\varepsilon}{2}$ for every $n\geq n_{j_0}$ and then pick $j_1>j_0$ such that $\varphi(P_{\{k\}}(x_{n_{j_1}}-y))\leq \dfrac{\varepsilon}{2 M_{j_0}}$ for every $k<M_{j_0}$. Then, using Theorem \ref{quasi-quasi}(a) \[
\varepsilon<\varphi(P_{M_{j_0}}(x_{n_{j_0}}-y))\leq \varphi(P_{M_{j_0}}(x_{n_{j_0}}-x_{n_{j_1}}))+\sum_{k<M_{j_0}}\varphi(P_{\{k\}}(x_{n_{j_1}}-y)) < \varepsilon,\] a contradiction.
Now we will show that $EXH(\varphi)=\overline{c_{00}}$. The space $c_{00}$ is dense in $EXH(\varphi)$ because $\varphi(x-P_n(x))=\varphi(P_{\omega\setminus n}(x))\xrightarrow{n\to\infty}0$ for every $x\in EXH(\varphi)$. We have to show that $EXH(\varphi)$ is closed. Let
$x\in FIN(\varphi)$ be an accumulation point of $EXH(\varphi)$. For any $\varepsilon>0$ we can find $y\in EXH(\varphi)$ such that $\varphi(x-y)<\varepsilon$, and then $n_0$ such that $\varphi(P_{\omega\setminus n}(y))<\varepsilon$ for every
$n\geq n_0$. If $n\geq n_0$ then $\varphi(P_{\omega\setminus n}(x))\leq \dfrac{3}{2}(\varphi(P_{\omega\setminus n}(x-y))+\varphi(P_{\omega\setminus n}(y)))< 3\varepsilon$.
\end{proof}
We will be mainly interested in $EXH(\lVert \cdot \rVert^\mathcal{F})$ and so it will be convenient to denote $X^\mathcal{F} = EXH(\lVert \cdot \rVert^\mathcal{F})$. The main corollary of this section is the following reformulation of (the part of) Theorem \ref{quasi-Banach}:
\begin{thm} If $\mathcal{F}$ is compact, hereditary and covering $\omega$, then $X^\mathcal{F}$ is a quasi-Banach space. \end{thm}
The following is a simple consequence of (a) of Theorem \ref{quasi-quasi}.
\begin{cor} If a family $\mathcal{F}$ is a hereditary closure of a partition $\mathcal{P}$, then the formula (\ref{Upper_norm}) defines a norm. \end{cor}
In what follows we will be mainly interested in the families of finite subsets of $\omega$. In this case $FIN(\lVert \cdot \rVert^\mathcal{F}) = EXH(\lVert \cdot \rVert^\mathcal{F})$:
\begin{prop}\label{exh=fin} If $\mathcal{F} \subseteq [\omega]^{<\omega}$ is a compact hereditary family covering $\omega$, then \[ X^\mathcal{F} = FIN(\lVert \cdot \rVert^\mathcal{F}) \] \end{prop} \begin{proof} For each $x \in \mathbb{R}^{\omega}$ and for fixed $n$ we can write $x = x_{n} + x'_{n}$, where $x_{n} = P_{n}(x)$ and $x'_{n} = P_{\omega \setminus n}(x)$. Thus if $x \in X^{\mathcal{F}}$ then $\lVert x'_{n} \rVert^{\mathcal{F}} \rightarrow 0$ and \[ \lVert x \rVert^{\mathcal{F}} \leq \dfrac{3}{2} (\lVert x_{n} \rVert^{\mathcal{F}} + \lVert x'_{n} \rVert^{\mathcal{F}}) < \infty, \] because $x_{n}$ is finitely supported. It shows that $X^{\mathcal{F}} \subseteq FIN(\lVert \cdot \rVert^{\mathcal{F}})$. On the other hand, if $x \in FIN(\lVert \cdot \rVert^{\mathcal{F}})$, then there is a partition $\mathcal{G} = \lbrace G_{k}: k \in \omega \rbrace \subseteq \mathcal{F}$ such that $\mathlarger{\sum}_{k \in \omega} \sup \limits_{j \in G_{k}} \lvert x(j) \rvert < \infty$. It implies that \[\mathlarger{\sum}_{k \geq n} \sup \limits_{j \in G_{k}} \lvert x(j) \rvert \xrightarrow{n \rightarrow \infty} 0. \]
Let $\varepsilon>0$ and fix $m$ such that $\mathlarger{\sum}_{k\geq m} \sup \limits_{j\in G_k} |x(j)| < \varepsilon$. Let $n > \max (\bigcup \limits_{i<m} G_i)$. Then
\[ \lVert x'_n \rVert^\mathcal{F} \leq \lVert x'_n \rVert^\mathcal{G} \leq \sum_{k\geq m} \sup_{j\in G_k} |x(j)| < \varepsilon. \] It finishes the proof. \end{proof}
\section{$X^\mathcal{F}$ and the dual of $X_\mathcal{F}$}\label{how-close}
The most interesting aspect of the spaces of the form $X^\mathcal{F}$ is the fact that they very much resemble the dual spaces to $X_\mathcal{F}$. In fact they are so similar that the only distinguishing property we have managed to find is the fact that $X^*_\mathcal{F}$ is always a Banach space whereas $X^\mathcal{F}$ typically is not.
We will first prove two general results. First, we show that if $\mathcal{F}$ is sufficiently simple, then $X^\mathcal{F}$ is a Banach space and it is isometrically isomorphic to $X^*_\mathcal{F}$ (see Proposition \ref{for-partitions}). Moreover, for every (reasonable) family $\mathcal{F}$ the dual space to $X^\mathcal{F}$ is a Banach space which is isometrically isomorphic to $X^{**}_\mathcal{F}$ (see Theorem \ref{isomorphism}). So, $X^*_\mathcal{F}$ and $X^\mathcal{F}$ share the same duals.
First of all, we show that for some particular families $\mathcal{F}$ the spaces $X^{\mathcal{F}}$ and $X^*_{\mathcal{F}}$ are indeed the same.
\begin{prop} \label{for-partitions}
Suppose $\mathcal{P}$ is a partition of $\omega$ (into finite sets) and $\mathcal{F}$ is its hereditary closure. Then $X_\mathcal{F}^*$ is isometrically isomorphic to $X^\mathcal{F}$.
\end{prop}
\begin{proof}
Enumerate $\mathcal{P} = \{P_1, P_2, \dots\}$. Let $f$ be a functional on $X_{\mathcal{F}}$. It is given by
\[ f(x) = \sum\limits_{n \in \omega} x(n)y(n) \] for $y(n) = f(e_{n})$. For each $n \in \omega$ let $k_{n}$ be an index such that $\lvert y(k_{n}) \rvert = \sup \lbrace \lvert y(k) \rvert: k \in P_{n} \rbrace$. Then for every $x$ from the unit ball of $X_{\mathcal{F}}$ we have \[ \big \lvert \sum\limits_{n \in \omega} x(n)y(n) \big \rvert \leq \sum \limits_{n \in \omega} \sum_{k \in P_{n}} \lvert x(k)y(k) \rvert \leq \sum \limits_{n \in \omega} \lvert y(k_{n}) \rvert \sum_{k \in P_{n}} \lvert x(k) \rvert \leq \sum \limits_{n \in \omega} \lvert y(k_{n}) \rvert = \lVert y \rVert^{\mathcal{F}}. \] It implies that the norm of the functional $f$ (denoted further by $\lVert y \rVert^{*}$) is bounded by $\lVert y \rVert^{\mathcal{F}}$.\\ Now, for $l \in \omega$ define the sequence $x_{l}$ given by \[ x_{l}(m) = \begin{cases}
sgn(y(m)), & \text{if } m \in \lbrace k_{1}, ..., k_{l} \rbrace,\\
0, & \text{otherwise.} \end{cases} \] It is easy to see that $\lVert x_{l} \rVert_{\mathcal{F}} \leq 1$ and then we have \[ \lVert y \rVert^{*} = \sup \limits_{\lVert x \rVert_{\mathcal{F}} \leq 1} \big \lvert \sum\limits_{n \in \omega} x(n)y(n) \big \rvert \geq \big \lvert \sum\limits_{n \in \omega} x_{l}(n)y(n) \big \rvert = \sum\limits_{m \leq l} \lvert y(k_{m}) \rvert. \] Since the above inequality holds for every $l$, we have \[ \sup \limits_{\lVert x \rVert_{\mathcal{F}} \leq 1} \big \lvert \sum\limits_{n \in \omega} x(n)y(n) \big \rvert \geq \sum\limits_{m \in \omega} \lvert y(k_{m}) \rvert = \lVert y \rVert^{\mathcal{F}}. \] This inequality finishes the proof.
\end{proof}
\begin{rem} The above proposition can be reversed. Namely, if there are two maximal (in the sense of inclusion) elements $F_0, F_1$ of $\mathcal{F}$ such that $F_0 \cap F_1 \ne \emptyset$, then we can easily modify Example \ref{notnorm}:
\[ \lVert \chi_{F_0} + \chi_{F_1}\rVert^\mathcal{F} \geq 3 > 2 = \lVert \chi_{F_0} \rVert^\mathcal{F} + \lVert \chi_{F_1} \rVert^\mathcal{F}. \]
By the above and by Theorem \ref{quasi-quasi} for every compact, hereditary family $\mathcal{F}$ which covers $\omega$ and which is not a hereditary closure of a partition, the modulus of concavity of $\lVert \cdot \rVert^\mathcal{F}$ equals $\dfrac{3}{2}$. Thus, by Aoki-Rolewicz
Theorem
(Theorem \ref{aoki_rolewicz}), for $\mathcal{F}\subseteq \mathcal{P}(\omega)$ being compact, hereditary and covering $\omega$, we have the following dichotomy:
\begin{itemize}
\item either $\mathcal{F}$ is a hereditary closure of a partition and then $\lVert \cdot \rVert^\mathcal{F}$ is a norm,
\item or $\lVert \cdot \rVert^\mathcal{F}$ is equivalent to a $p$-norm, where $p=\log_3 2$.
\end{itemize}
\end{rem}
For the general case of a compact, hereditary family $\mathcal{F}$ covering $\omega$, we are able to prove a slightly weaker result. Let $T\colon X^{\mathcal{F}} \rightarrow X^{*}_{\mathcal{F}}$ be a linear operator given by \begin{equation} \label{identity_operator} T(y)(x) = \sum \limits_{k \in \omega} x(k)y(k) \end{equation} for $x \in X_{\mathcal{F}}$.
\begin{prop}\label{identityoperator} The mapping $T$ defined above is injective and continuous. Consequently, $X^\mathcal{F}$ can be embedded to $X^*_{\mathcal{F}}$. \end{prop}
\begin{proof} It is plain to check that it is injective. In addition we have the following sequence of (in)equalities
\begin{multline*}
\sup_{x \in X_{\mathcal{F}}} \dfrac{\big \lvert \sum \limits_{k \in \omega} x(k)y(k) \big \rvert }{ \lVert x \rVert_{\mathcal{F}}} = \sup_{x \in X_{\mathcal{F}}} \dfrac{ \big \lvert \sum \limits_{k \in \omega} x(k)y(k) \big \rvert }{ \sup \limits_{\mathcal{P} \in \mathbb{P}_{\mathcal{F}}} \lVert x \rVert_{\mathcal{P}}} = \sup_{x \in X_{\mathcal{F}}} \inf_{\mathcal{P} \in \mathbb{P}_{\mathcal{F}}} \dfrac{\big \lvert \sum \limits_{k \in \omega} x(k)y(k) \big \rvert }{\lVert x \rVert_{\mathcal{P}}} \\
\stackrel{\text{(*)}}{\leq} \inf_{\mathcal{P} \in \mathbb{P}_{\mathcal{F}}} \sup_{x \in X_{\mathcal{F}}} \dfrac{\big \lvert \sum \limits_{k \in \omega} x(k)y(k) \big \rvert }{\lVert x \rVert_{\mathcal{P}}} \stackrel{\text{(**)}}{=} \inf_{\mathcal{P} \in \mathbb{P}_{\mathcal{F}}} \lVert y \rVert^{\mathcal{P}} = \lVert y \rVert^{\mathcal{F}},
\end{multline*}
where the inequality (*) follows from the general fact saying that \[ \sup_{a\in A} \inf_{b\in B} f(a,b) \leq \inf_{b\in B} \sup_{a\in A} f(a,b)\] whatever $A,B$ and $f\colon A\times B \to \mathbb{R}$ are, and equality (**) is a consequence of
Proposition \ref{for-partitions}. It implies that $\lVert T(y) \rVert \leq \lVert y \rVert^{\mathcal{F}}$ for every $y \in X^{\mathcal{F}}$, so $T$ is continuous. In particular $T[B_{X^{\mathcal{F}}}]$ is a subset of the dual unit ball
$B_{X^{*}_{\mathcal{F}}}$ and by Theorem \ref{open_mapping} the space $X^{\mathcal{F}}$ is isomorphic to a subspace of $X^*_{\mathcal{F}}$, which finishes the proof. \end{proof}
For each compact family $\mathcal{F}$ the space $X^\mathcal{F}$ is a (quasi-Banach) pre-dual of
$(X_{\mathcal{F}})^{**}$. In other words $X^\mathcal{F}$ and $X^*_\mathcal{F}$ have the same dual spaces.
\begin{thm} \label{isomorphism}
If $\mathcal{F}\subseteq [\omega]^{<\omega}$ is a compact hereditary family covering $\omega$, then $(X^{\mathcal{F}})^{*}$ is isometrically isomorphic to $(X_{\mathcal{F}})^{**}$.
\end{thm}
\begin{proof}
By Theorem \ref{5.5} (and the fact that $X_\mathcal{F}$ does not contain an isomorphic copy of $\ell_1$ if $\mathcal{F}$ is as above) the space $(X_{\mathcal{F}})^{**}$ is isometrically isomorphic to $FIN(\lVert \cdot \rVert_{\mathcal{F}})$. We need to prove that $\lVert y \rVert_{*}^{\mathcal{F}} = \lVert y \rVert_{\mathcal{F}}$, where $\lVert \cdot \rVert_{*}^{\mathcal{F}}$ denotes the functional norm on $X^{\mathcal{F}}$.
Take any $y \in c_{00}$. Then there is a set $F_{0} \in \mathcal{F}$ such that $\lVert y \rVert_{\mathcal{F}} = \mathlarger{\sum}_{n \in F_{0}} \lvert y(n) \rvert$. Consider $x_{0} \in X^\mathcal{F}$ given by
$$
x_{0}(n) =
\begin{cases}
sgn(y(n)), & \text{if } n \in F_{0},\\
0, & \text{otherwise.}
\end{cases}
$$
This is a unit vector in $X^{\mathcal{F}}$ and thus
$$
\lVert y \rVert_{*}^{\mathcal{F}} \geq \lvert \sum_{n \in F_{0}} x_{0}(n)y(n) \rvert = \sum_{n \in F_{0}} \lvert y(n) \rvert = \lVert y \rVert_{\mathcal{F}}.
$$
To prove the second inequality, fix any $x \in c_{00}$ such that $\lVert x \rVert^\mathcal{F} = 1$. There exists a partition $\mathcal{P} = \lbrace F_{1}, F_{2}, ..., F_{j} \rbrace$ of the support of $x$ for which the infimum in the definition of the quasi-norm is attained, namely
$$
\lVert x \rVert^{\mathcal{F}} = \sum_{i=1}^{j} \sup \limits_{k \in F_{i}} \lvert x(k) \rvert.
$$
Let $x'$ be defined by $x'(j)=a_i \cdot sgn(y(j))$ if $j\in F_i$,
where $a_{i} = \sup \limits_{k \in F_{i}} \lvert x(k) \rvert$ (if $j\notin \bigcup_i F_i$, then let $x'(j)=0$). Then $\lVert x' \rVert^\mathcal{F} = \lVert x \rVert^\mathcal{F} = 1$ and
$$
\big \lvert \sum_{n \in \omega}x(n)y(n) \big \rvert \leq \sum_{n \in \omega} \lvert x(n)y(n) \rvert \leq \sum_{n \in \omega} \lvert x'(n)y(n) \rvert.
$$ Moreover $$
\sum_{n \in \omega} \lvert x'(n)y(n) \rvert = \sum_{i=1}^{j} \sum_{n \in F_{i}} \lvert x'(n)y(n) \rvert = \sum_{i=1}^{j} a_{i} \sum_{n \in F_{i}} \lvert y(n) \rvert \leq \sum_{i=1}^{j} a_{i} \lVert y \rVert_{\mathcal{F}} = \lVert y \rVert_{\mathcal{F}}
$$
which implies that $\lVert y \rVert_{*}^{\mathcal{F}} \leq \lVert y \rVert_{\mathcal{F}}$ and finishes the proof.
\end{proof}
\begin{rem} In this article we are interested in the spaces induced by families of finite sets, but perhaps one can prove a more general version of the above theorem, working for families containing also infinite sets, but with $FIN(\lVert \cdot \rVert^\mathcal{F})$ instead of
$X^\mathcal{F}$ (for $\mathcal{F}$ as in Theorem \ref{isomorphism}, those spaces coincide, see Proposition \ref{exh=fin}). E.g. notice that if $\mathcal{F}$ is the family of all finite subsets of $\omega$,
then $X_\mathcal{F}$ is isometrically isomorphic to $\ell_1$ and $FIN(\lVert \cdot \rVert^\mathcal{F})$ is isometrically isomorphic to $\ell_\infty$. \end{rem}
\subsection{Geometric properties of $X^{\mathcal{F}}$}
\begin{df} Let $K$ be a subset of a vector space $X$. We say that $e \in K$ is an \emph{extreme point} of $K$ if there do not exist $x,y \in K$ and $t \in (0,1)$ such that $e = (1-t)x + ty$. Equivalently, $e$ is an extreme point of $K$ if and only if the following condition is satisfied: \begin{equation} \label{extreme} e+x, e-x \in K \Rightarrow x = 0. \end{equation} The set of all extreme points of $K$ is denoted by $E(K)$. \end{df}
Although the notion of extreme point is usually considered in the context of convex sets, the definition itself does not require a priori the convexity. Thus we can consider extreme points in the case of non-convex sets as well.
In the case of (quasi)-Banach spaces $X$ it is common to denote by $E(X)$ the set of all extreme points of the unit ball of $X$ and we will follow this notation.
In this section we work with a general compact, hereditary family $\mathcal{F}$. The combinatorial spaces and their duals were studied from a geometric point of view in the context of extreme points: see e.g. \cite{Antunes}. The authors show that the set of extreme points of the unit ball in $X^{*}_{\mathcal{F}}$ is of the form
\begin{equation} \label{dual_extreme} \Big \lbrace \sum \limits_{i \in F} \varepsilon_i e^{*}_{i}: F \in \mathcal{F}^{MAX}, \varepsilon_{i} \in \lbrace -1,1 \rbrace \Big \rbrace \end{equation}
where \begin{itemize} \item $e^{*}_{i}$ are functionals given by $e^{*}_{i}(e_{j}) = 1$ if $i=j$ and $e^{*}_{i}(e_{j}) = 0$ otherwise for a Schauder basis $(e_{i})$, $i,j \in \omega$. \item $\mathcal{F}^{MAX}$ is a family of \emph{maximal} sets from $\mathcal{F}$, i.e. these sets $F$ for which $F \cup \lbrace k \rbrace \notin \mathcal{F}$ for every $k \in \omega$. \end{itemize}
Actually, the fact that $E(X^{*}_{\mathcal{F}})$ is given by (\ref{dual_extreme}) was proven only for the Schreier space and for \emph{higher order Schreier spaces}. However, that result holds also for a general compact, hereditary family $\mathcal{F}\subseteq [\omega]^{<\omega}$ (see Remark 4.4 from \cite{Antunes}). We will show that $X^{\mathcal{F}}$ has basically \emph{the same} extreme points (interpreting elements of $X^\mathcal{F}$ as elements of $X^*_\mathcal{F}$ in the sense of Proposition \ref{identityoperator}):
\begin{prop} \label{extreme_to_extreme}
Assume that $\mathcal{F}\subseteq [\omega]^{<\omega}$ is a compact, hereditary family covering $\omega$.
A vector $y\in X^\mathcal{F}$ is an extreme point of the unit ball of $X^\mathcal{F}$ if and only if it is of the form \begin{equation} \label{seq_1} y(i) = \begin{cases} \varepsilon_{i}, & \text{if } i \in F\\ 0 & \text{otherwise,} \end{cases} \end{equation}
for some $F\in \mathcal{F}^{MAX}$ and $\varepsilon_i\in \{-1,1\}$. \end{prop}
\begin{proof}
First, assume that $y$ equals $1$ in absolute value on some maximal set $F \in \mathcal{F}$ and vanishes outside of $F$. Now suppose that for $u \in X^{\mathcal{F}}$ we have $y+u, y-u \in B_{X^{\mathcal{F}}}$. Then for every $k \in supp(y) \cap supp(u)$ we have $|1 \pm
u(k)| \leq 1$ or $|-1 \pm u(k)| \leq 1$. In both cases it implies that $u(k) = 0$, hence $supp(y) \cap supp(u) = \emptyset$. Thus $y(m) + u(m) = \pm 1$ for some $m \in \omega$. In particular, by maximality of $F$, $\lVert y \pm u \rVert^{\mathcal{F}} \geq 1$, and thus $u = 0$.
Now suppose that $y \in E(X^{\mathcal{F}})$. Then $\lVert y \rVert^{\mathcal{F}} = 1$.
Let $\mathcal{P} \in \mathbb{P}_{\mathcal{F}}$ for which $\lVert y \rVert^{\mathcal{F}} = \lVert y \rVert^{\mathcal{P}}$. Notice that for every $P\in \mathcal{P}$ we have $|y(i)| = |y(j)|$ for every $i,j\in P$. Suppose otherwise. Then there is
$P\in \mathcal{P}$ and $j,i\in P$ such that $|y(j)|<|y(i)|$ and so for $\eta< |y(i)|-|y(j)|$ we would have $\lVert y \pm \eta e_j \rVert^\mathcal{F} \leq \lVert y \pm \eta e_j \rVert^\mathcal{P} \leq 1$, hence $y$ would not be an extreme point.
It follows that if $supp(y)\in \mathcal{F}$, then $y$ is of the promised form. If $supp(y)\notin \mathcal{F}$, then we may find distinct $P_0, P_1\in \mathcal{P}$ and $a_0, a_1 \ne 0$ such that $y(i)=a_j$ for $i\in P_j$, $j\in \{0,1\}$. Since
$\lVert y \rVert^\mathcal{F} = 1$, $|a_0|, |a_1|<1$. But then for sufficiently small $\eta>0$ and for $u$ defined by \begin{equation} u(i) = \begin{cases} a_0 + \eta, & \text{if } i \in P_0\\
a_1 - \eta, & \text{if } i \in P_1\\ 0 & \text{otherwise,} \end{cases} \end{equation} we would have \[ \lVert y \pm u \rVert^\mathcal{F} \leq \lVert y \pm u \rVert^\mathcal{P} = \lVert y\rVert^\mathcal{P} = 1. \]
So, $y$ has to be of the form as in the proposition. \end{proof}
One of the few properties of the spaces dual to combinatorial spaces which are exposed in the literature is that their balls can be well approximated by extreme points.
\begin{df} \label{CSRP} We say that a quasi-Banach space $X$ has \emph{convex series representation property (CSRP)} if for every $x \in B_{X}$ there exists a sequence $(\lambda_{n})$ of positive real numbers with $\sum \limits_{n \in \omega} \lambda_{n} = 1$ and a sequence $(u_{n})$ of extreme points of $B_{X}$ such that \begin{equation} \label{CSRP_eq} x = \sum \limits_{n \in \omega} \lambda_{n} u_{n} \end{equation} \end{df}
It is known (see \cite{Antunes}) that $X^{*}_{\mathcal{F}}$ has CSRP, for $\mathcal{F}$ as above. Using the shape of $E(X^{\mathcal{F}})$, we will construct sequences as in Definition \ref{CSRP} and show that $X^{\mathcal{F}}$ has CSRP as well.
\begin{thm}\label{CSRPthm}
For any compact, hereditary family $\mathcal{F}\subseteq [\omega]^{<\omega}$ covering $\omega$, the space $X^{\mathcal{F}}$ has CSRP. \end{thm}
\begin{proof} We have to show that for every $x\in B_{X^{\mathcal{F}}}$ there exists an appropriate sequence of extreme points and coefficients. First, we will prove it assuming that $supp(x) \in \mathcal{F}$. Then we will generalize it to the
case $x\in c_{00}$ and at the end we will show the final result.
\textbf{1) $supp(x)\in \mathcal{F}$}
Assume $supp(x) \subseteq F_0$ for some $F_0 \in \mathcal{F}^{MAX}$. Put $\alpha = \min \{ |x(k)|\colon k \in supp(x) \}$. Define $\lambda_0 = \min \{ \alpha, 1 - \alpha \}$ and $\lambda_n = \dfrac{1-\lambda_0}{2^n}$ for $n \geq 1$. Let $u_0$ be an extreme point defined by $$ u_0(k) = \begin{cases} sgn(x(k)), &\text{if } k \in supp(x)\\ 1, &\text{if } k \in F_0 \setminus supp(x) \\ 0 &\text{otherwise.} \end{cases} $$ Put $v_0 = \lambda_0 u_0$ and define $S_0 = \{ k \in \omega: x(k) = v_0(k) \}$. Note that, a priori, it is possible for $S_0$ to be empty. If not, let $G_0\subseteq \omega$ be such that $F_0 < G_0$ and $F_1:= (F_0 \setminus S_0) \cup G_0 \in \mathcal{F}^{MAX}$ (for $S_0 = \emptyset$ we take $G_0 = \emptyset$ as well).
We iterate this construction for $n \geq 1$, i.e. we put \begin{equation} \label{inductive_construction} u_n(k) = \begin{cases} sgn(x(k) - \sum \limits_{j < n} v_j(k)), &\text{if } k \in F_{n-1}\\ 1, &\text{if } k \in F_n \setminus F_{n-1}\\ 0, &\text{otherwise,} \end{cases} \end{equation}
we let $v_n = \lambda_n u_n$, $S_n = \{ k \in \omega\colon x(k) - \sum \limits_{j < n} v_j(k) = v_n(k) \}$ and let $G_n$ be such a (possibly empty) set that $F_n < G_n$ and $F_{n+1}:= (F_n \setminus S_n) \cup G_n$ is maximal. Note that on each step of the construction the sequence $x - \sum \limits_{j \leq n} v_{j}$ is supported on a subset of the maximal set $F_n$.
Now we show that the sequences $(\lambda_n)$ and $(u_n)$ are as desired by Definition \ref{CSRP}. It is rather clear that $\sum \limits_{n \in \omega} \lambda_n = 1$ and for every $n$ the vector $u_n$ is an extreme point in $X^{\mathcal{F}}$ (by Lemma \ref{extreme_to_extreme}). It remains to check that the series $\sum \limits_{n \in \omega} \lambda_n u_n$ is convergent to $x$ in a quasi-norm $\lVert \cdot \rVert^{\mathcal{F}}$.
\textbf{Claim.} For every $k \in \omega$ $r_n(k) := \Big \lvert x(k) - \sum \limits_{j \leq n} v_j(k) \Big \rvert \leq \dfrac{1-\lambda_0}{2^n}$.
\begin{proof}[Proof of claim] We prove it by induction with respect to $n$. If $n = 0$ and $k \in supp(x)$ then we have $ - \lambda_0 < x(k) - \lambda_0 \leq 1 - \lambda_0$ for $x(k) > 0$. Since $\lambda_0 \leq 1 - \lambda_0$ by the definition, the inequality holds for $x(k) > 0$. The definition of $\lambda_0$ implies also immediately that $r_0(k) \leq 1-\lambda_0$ for $k \notin supp(x)$. Finally, if $x(k) < 0$, then $v_0(k) = - \lambda_0$ and then $ -1 + \lambda_0 \leq x(k) - v_0(k) < \lambda_0 \leq 1 - \lambda_0$. \\ Now suppose that $r_n(k) \leq \dfrac{1-\lambda_0}{2^n}$ for some $n$. If $x(k) - \sum \limits_{j \leq n} v_j(k) > 0$, then $v_{n+1}(k) = \lambda_{n+1}$, if $k \in F_{n}$ and thus $$ -\dfrac{1-\lambda_0}{2^{n+1}} = -\lambda_{n+1} \leq x(k) - \sum \limits_{j \leq n+1} v_j(k) \leq \dfrac{1-\lambda_0}{2^n} - \dfrac{1-\lambda_0}{2^{n+1}} = \dfrac{1-\lambda_0}{2^{n+1}} $$ If $x(k) - \sum \limits_{j \leq n} v_j(k) < 0$ then $v_{n+1}(k) = - \lambda_{n+1}$ and the case is symmetric. Thus for $k \in F_{n}$ $r_{n+1}(k) \leq \dfrac{1-\lambda_0}{2^{n+1}}$. If $k \in F_{n+1} \setminus F_n$ then $ x(k) - \sum \limits_{j \leq n+1} v_j(k) = -v_{n+1}(k) = - \lambda_{n+1}$ which finishes the proof of the claim. \end{proof} Note that the above claim implies that $\sum \limits_{n \in \omega} \lambda_n u_n$ is convergent to $x$ since $$ \lVert x - \sum \limits_{j \leq n} \lambda_n u_n \rVert^{\mathcal{F}} = \max_{k \in F_n} \Big \lvert x(k) - \sum \limits_{j \leq n}v_n(k) \Big \rvert \leq \dfrac{1 - \lambda_0}{2^n} \xrightarrow{n \rightarrow \infty} 0 $$ It finishes the proof for $x$ having a support in $\mathcal{F}$.
\textbf{2) $x\in c_{00}$.}
If $x$ is a finitely supported sequence, then $x = \sum \limits_{i=1}^{m} x_i $ for some $m \in \omega$, where $x_i$ are sequences with supports contained in some $F_i \in \mathcal{F}^{MAX}$. Then, we perform a construction similar to that of the previous
case for each $x_i$ separately. Put $M = \lVert x \rVert^{\mathcal{F}}$, and for each $1 \leq i \leq m$ let $\alpha_i = \min \limits_{k \in F_i} |x(k)|$, $\beta_i = \max \limits_{k \in F_i} |x(k)|$. Next, define a sequence $(\lambda^{i}_{n})_n$ by taking $\lambda^{i}_{0} = \min \{\alpha_i, \dfrac{\beta_i}{M}-\alpha_i \}$ and $\lambda^{i}_{n} = \dfrac{\frac{\beta_{i}}{M} - \lambda^{i}_{0}}{2^n}$ for $n \geq 1$. For each $i$ $\sum \limits_{n \in \omega} \lambda^{i}_{n} = \dfrac{\beta_i}{M}$ and thus $\sum \limits_{i=1}^{m} \sum \limits_{n \in \omega} \lambda^{i}_{n} = 1$.
The sequence of extreme points $(u^{i}_{n})_n$ is defined as in the first case. Then, repeating arguments from the previous case for every $i$, we obtain $$ \big \lVert x_i - \sum \limits_{j \leq n} \lambda^{i}_{j} u^{i}_{j} \big \rVert^{\mathcal{F}} \leq \dfrac{\frac{\beta_{i}}{M} - \lambda^{i}_{0}}{2^n}. $$
Thus for each $n$ we get
\begin{equation} \label{total convergence} \big \lVert x - \sum \limits_{i=1}^{m} \sum \limits_{j \leq n} \lambda^{i}_{j}u^{i}_{j} \big \rVert^{\mathcal{F}} \leq \big(\dfrac{3}{2} \big)^{m} \sum \limits_{i=1}^{m} \big \lVert x_i - \sum \limits_{j \leq n} \lambda^{i}_{j} u^{i}_{j} \big \rVert^{\mathcal{F}} \leq \big(\dfrac{3}{2} \big)^{m} \sum \limits_{i=1}^{m} \dfrac{\frac{\beta_i}{M} - \lambda^{i}_{0}}{2^n} \end{equation} and the last expression tends to $0$ when $n \rightarrow \infty$. Hence every finitely supported sequence can be expressed as a convex series of extreme points.
\textbf{3) The general case.}
For $x \in X^{\mathcal{F}}$ the result follows from lower semicontinuity of $\lVert \cdot \rVert^{\mathcal{F}}$ and the previous cases. Indeed, for any $\varepsilon > 0$ find $N \in \omega$ such that $\lVert x - P_{N}(x) \rVert^{\mathcal{F}} < \dfrac{\varepsilon}{3}$. For that $N$ we can find a convex combination as in the second case, converging to $P_{N}(x)$. Namely, for sufficiently big $n \in \omega$ $$ \big \lVert P_{N}(x) - \sum \limits_{i=1}^{m} \sum \limits_{j \leq n} \lambda^{i}_{j}u^{i}_{j} \big \rVert^{\mathcal{F}} < \dfrac{\varepsilon}{3} $$ Thus, using the \emph{quasi-triangle inequality}, we have $$ \big \lVert x - \sum \limits_{i=1}^{m} \sum \limits_{j \leq n} \lambda^{i}_{j}u^{i}_{j} \big \rVert^{\mathcal{F}} \leq \dfrac{3}{2} \Big ( \big \lVert x - P_{N}(x) \big \rVert^{\mathcal{F}} + \big \lVert P_{N}(x) - \sum \limits_{i=1}^{m} \sum \limits_{j \leq n} \lambda^{i}_{j}u^{i}_{j} \big \rVert^{\mathcal{F}} \Big ) < \varepsilon $$ This finishes the proof. \end{proof}
\section{$X^\mathcal{S}$ is $\ell_1$-saturated and does not have the Schur property} \label{l1-schur}
In this section we will focus on the relations between $X^\mathcal{F}$ and $X^*_\mathcal{F}$ for the particular case of the Schreier family $\mathcal{S}$. Among combinatorial spaces, $X^{\mathcal{S}}$ is the most studied (apart from $c_0$ and $\ell_1$, of course). However, the literature concerning the dual to the Schreier space is not rich. One of its main properties is that it is an example of a Banach space which is $\ell_1$-saturated but which does not enjoy the Schur property.
\begin{df} \label{schur_def}
We say that a space $X$ has the \textit{Schur property} if every weakly null sequence is convergent to zero in the norm.
\end{df}
\begin{df} \label{l1-def}
A Banach space $X$ is \textit{$\ell_{1}$-saturated} if every closed, infinite-dimensional subspace $E$ of $X$ contains an isomorphic copy of $\ell_{1}$. Equivalently, if $E = \overline{span(x_{n})}$ for some sequence $(x_{n})$ in $X$, then there is a sequence $(y_{n})$ in $E$ which is equivalent to the standard $\ell_{1}$-basis.
\end{df}
In the theory of Banach spaces, the classical theorem of Rosenthal \cite{Rosenthal-l1} implies that every Banach space with the Schur property is $\ell_{1}$-saturated. The first example of an $\ell_{1}$-saturated Banach space without the Schur property was constructed by Bourgain (\cite{Bourgain}). Then Azimi and Hagler (\cite{Hagler}) and Popov (\cite{Popov}) presented other examples. Yet another example of such a space is the dual to the Schreier space. The lack of the Schur property is the result of \cite{Lipieta}. On the other hand, the $\ell_{1}$-saturation property follows from \cite{Galego}.
We will show that Definition \ref{schur_def} and Definition \ref{l1-def} are also meaningful for quasi-Banach spaces and that $X^{\mathcal{S}}$ is an $\ell_{1}$-saturated quasi-Banach space which does not have the Schur property.
The part concerning the Schur property is proved using Theorem \ref{isomorphism}.
\begin{prop} \label{Schur}
Suppose that $\mathcal{F}$ is a family of finite subsets of $\omega$ with the following property: for each infinite $M\subseteq \omega$ and each $k\in \omega$ there is $F\in \mathcal{F}$ such that $F\subseteq M$ and $|F|>k$. Then $X^\mathcal{F}$
does not have the Schur property. In particular, $X^\mathcal{S}$ does not have the Schur property.
\end{prop}
\begin{proof}
Consider the sequence $(e_{n})$, the standard Schauder basis. We claim that this sequence is weakly null, but it is not convergent to zero in the quasi-norm.
Indeed, fix $\varphi \in (X^{\mathcal{F}})^{*}$ and denote $y(n) = \varphi(e_{n})$. By Theorem \ref{isomorphism} we have $y = (y(n)) \in FIN(\lVert \cdot \rVert_{\mathcal{F}})$, so $\lVert y \rVert_{\mathcal{F}} < \infty$. If $y(n)$ does not converge to $0$, then there is an infinite $M\subseteq \omega$ and $c>0$ such that $\lvert y(m) \rvert \geq c$ for each $m\in M$. By the assumption, for each $k\in \omega$ there is $F\in \mathcal{F}$ such that $F\subseteq M$,
$|F|>k$ and so
$\mathlarger{\sum}_{i \in F} \lvert y(i) \rvert \geq c \cdot k$. Hence, $\lVert y \rVert_\mathcal{F} = \sup \limits_{F \in \mathcal{F}} \mathlarger{\sum}_{i \in F} \lvert y(i) \rvert = \infty$, a contradiction.
On the other hand, for every $n \in \omega$ $\lVert e_{n} \rVert^{\mathcal{F}} = 1$, so $(e_{n})$ is not convergent to zero in norm.
The Schreier family $\mathcal{S}$ satisfies the above condition.
\end{proof}
As we can see, not only does the Schreier space fail to enjoy the Schur property, but this is a more general phenomenon for spaces of this type.
The proof that $X^{\mathcal{S}}$ is $\ell_{1}$-saturated is, however, slightly more technical and uses more of the particular shape of the Schreier family $\mathcal{S}$. First, we introduce the definition which is necessary to prove that fact.
\begin{df} We say that the sequence $x$ is \textit{$k$-stable} if for every set $A \in [\omega]^{\leq k}$ $$\lVert P_{\omega \setminus A}(x) \rVert^{\mathcal{S}} \geq \frac{1}{2} \lVert x \rVert^{\mathcal{S}}.$$
\end{df}
\begin{lem} \label{simple_lemma}
Let $x_{1},x_{2} \in c_{00}$ be such that $supp(x_1) < supp(x_2)$. If $\mathcal{P}$ is a partition such that for every $P \in \mathcal{P}$ $P \cap supp(x_1) = \emptyset$ or $P \cap supp(x_2) = \emptyset$, then $$ \lVert x_{1} + x_{2}
\rVert^{\mathcal{P}} = \lVert x_{1} \rVert^{\mathcal{P}} + \lVert x_{2} \rVert^{\mathcal{P}}.$$
\end{lem}
\begin{proof}
Let $P \in \mathcal{P}$. Then, either $x_{1}$ or $x_{2}$ vanishes on $P$, so for each $k \in P$ $\lvert x_{1}(k) + x_{2}(k) \rvert = \lvert x_{1}(k) \rvert + \lvert x_{2}(k) \rvert$. It implies immediately that $\lVert x_{1} + x_{2} \rVert^{\mathcal{P}} = \lVert x_{1} \rVert^{\mathcal{P}} + \lVert x_{2} \rVert^{\mathcal{P}}$.
\end{proof}
\begin{prop} \label{stable-proposition}
Let $x,y \in c_{00}$ be such that
\begin{enumerate}[(i)]
\item $supp(x) < supp(y)$
\item $y$ is $k_{0}$-stable, where $k_{0} = (\max supp(x))^{2}$.
\end{enumerate}
Then for each $\lambda$ $$\lVert x+ \lambda y \rVert^{\mathcal{S}} \geq \lVert x \rVert^{\mathcal{S}} + \frac{\lvert \lambda \rvert}{2}\lVert y \rVert^{\mathcal{S}}.$$
\end{prop}
\begin{proof} First, notice that if $y$ is $k$-stable, then $\lambda y$ is $k$-stable and so we may assume that $\lambda=1$.
Since $x, y, x+y$ are finitely supported, there exist partitions $\mathcal{P}_{0}, \mathcal{P}_{1}$ and $\mathcal{P}_{2}$ such that
\begin{center}
$\lVert x \rVert^{\mathcal{S}} = \lVert x \rVert^{\mathcal{P}_{0}}$,\\
$\lVert y \rVert^{\mathcal{S}} = \lVert y \rVert^{\mathcal{P}_{1}}$,\\
$\lVert x+y \rVert^{\mathcal{S}} = \lVert x+y \rVert^{\mathcal{P}_{2}}.$
\end{center}
Let $A = \lbrace n \in supp(y): \forall P \in \mathcal{P}_{2} \ (n \in P \Rightarrow P \cap supp(x) \neq \emptyset) \rbrace$. Note that the sequences $x$, $P_{\omega \setminus A}(y)$ and the partition $\mathcal{P}_{2}$ satisfy the assumption of Lemma \ref{simple_lemma}. In addition,
\[ |A| \leq \sum_{k \in supp(x)}k \leq (\max supp(x))^2 = k_0 \]
So, using the assumption of $k_{0}$-stability we obtain
$$
\lVert x+y \rVert^{\mathcal{S}} = \lVert x+y \rVert^{\mathcal{P}_{2}} \geq \lVert x + P_{\omega \setminus A}(y) \rVert^{\mathcal{P}_{2}} = \lVert x \rVert^{\mathcal{P}_{2}} + \lVert P_{\omega \setminus A}(y) \rVert^{\mathcal{P}_{2}} \geq \lVert x
\rVert^{\mathcal{P}_{0}} + \lVert y \rVert^{\mathcal{P}_{2}} \geq \lVert x \rVert^{\mathcal{S}} + \dfrac{\lVert y \rVert^{\mathcal{S}}}{2}. $$ \end{proof}
\begin{thm}\label{ell1} $X^{\mathcal{S}}$ is $\ell_{1}$-saturated. \end{thm}
\begin{proof}
Let $E$ be a closed, infinite-dimensional subspace of $X^{\mathcal{S}}$. We are going to show that $E$ contains an isomorphic copy of $\ell_1$. By the standard arguments (see e.g. \cite{BePe}) we may assume that $E =
\overline{span(x_{n})}$ for some sequence $(x_{n})$, where for each $n\in \omega$ we have $x_{n} \in c_{00}$, $\lVert x_{n} \rVert^{\mathcal{S}} = 1$ and there exists a finite subset $F_{n}$ such that $supp(x_{n}) \subseteq F_{n}$. Additionally, we assume that $F_{n} < F_{n+1}$.
It is enough to construct a sequence $(y_n)$ of unit vectors in $E$ which will be equivalent to the standard $\ell_1$-basis, i.e. such that for each sequence $(\lambda_i)_{i\leq n}$ of scalars \[ \lVert \sum_{i=1}^{n}\lambda_{i}y_{i} \rVert^{\mathcal{S}} \geq \dfrac{1}{2} \mathlarger{\sum}_{i=1}^{n} \lvert \lambda_{i} \rvert. \]
The sequence $(y_n)$ will be of a form of \emph{block sequence} of $(x_n)$, i.e. $y_{n} = \sum_{i=1}^{k} \alpha_{i}x_{n_{i}}$ for some $k \in \omega$ and an increasing sequence of natural numbers $(n_{i})$.
We define by induction sequences of natural numbers $(k_{n})$, $(l_{n})$ and the sequence of vectors $(y_{n})$. Let $l_{1} = 1$, $y_{1} = x_{1}$ and $k_{1} = (\max supp(x_{1}))^{2}$. Next, let \begin{itemize}
\item[(1)] $l_{n+1}\in \omega$ be such that $\mathlarger{\sum}_{i=l_{n}+1}^{l_{n+1}}x_{i}$ is $k_{n}$-stable, \item[(2)] $y_{n+1} = \dfrac{\mathlarger{\sum}_{i=l_{n}+1}^{l_{n+1}}x_{i}}{\lVert \mathlarger{\sum}_{i=l_{n}+1}^{l_{n+1}}x_{i}\rVert^{\mathcal{S}}}$ and \item[(3)] $k_{n+1} = \big(\max supp(y_{n+1})\big)^{2}$. \end{itemize}
We will show that we are able to perform such construction. Only condition (1) needs some explanations.
\textbf{Claim.} For each $l\in \omega$ and each $k\in \omega$ there is $L>l$ such that $\mathlarger{\sum}_{i=l}^{L}x_{i}$ is $k$-stable.
Indeed, denote $x = \sum \limits_{i=l}^\infty x_i$ and notice that $\lVert x \rVert^\mathcal{S} = \infty$. Otherwise, $x \in FIN(\lVert \cdot \rVert^\mathcal{S})$ and so $x\in X^\mathcal{S}$, by Proposition \ref{exh=fin}. But this is impossible since $\lVert x_i \rVert^\mathcal{S} = 1$ for each $i$. So, we may find $L$ big enough so that \[ \lVert \sum_{i=l}^L x_i \rVert^\mathcal{S} > 2k. \]
But if $|A|\leq k$, then (since $x_i$'s have disjoint supports)
\[ \lVert P_A(\sum_{i=l}^L x_i) \rVert^\mathcal{S} \leq |A| \cdot \max_{l \leq i \leq L}\lVert x_i\rVert^\mathcal{S} = |A| \leq k \] and so $L$ is as desired.
Having this construction, fix $n \in \omega$ and a sequence $(\lambda_{i})_{i=1}^{n}$. Of course $\lVert y_{i} \rVert^{\mathcal{S}} = 1$ for each $i$ and subsequently using Proposition \ref{stable-proposition} we have $$ \lVert \sum_{i=1}^{n}\lambda_{i}y_{i} \rVert^{\mathcal{S}} = \lVert \sum_{i=1}^{n-1}\lambda_{i}y_{i} + \lambda_{n}y_{n} \rVert^{\mathcal{S}} \geq \lVert \sum_{i=1}^{n-1}\lambda_{i}y_{i} \rVert^{\mathcal{S}} + \frac{\lvert \lambda_{n}\rvert}{2} \geq ... \geq \lvert \lambda_{1} \rvert + \frac{1}{2}\mathlarger{\sum}_{i=2}^{n} \lvert \lambda_{i} \rvert \geq \frac{1}{2} \mathlarger{\sum}_{i=1}^{n} \lvert \lambda_{i} \rvert $$ and so $(y_n)$ is equivalent to $\ell_1$-basis. \end{proof}
\end{document} |
\begin{document}
\begin{abstract}
We study the problem of finding the Artin $L$-functions with the
smallest conductor for a given Galois type. We adapt standard
analytic techniques to our novel situation of fixed Galois type and
get much improved lower bounds on the smallest conductor. For small
Galois types we use complete tables of number fields to determine
the actual smallest conductor. \end{abstract}
\title{Artin $L$-functions of small conductor}
\setcounter{tocdepth}{1} \tableofcontents
\section{Overview} \label{overview}
Artin $L$-functions $L(\Chi,s)$ are remarkable analytic objects built from number fields. Let $\overline{\Q}$ be the algebraic closure of the rational number
field $\Q$ inside the field of complex numbers $\C$. Then Artin $L$-functions are indexed by continuous complex characters $\Chi$ of the absolute Galois group $\G = \Gal(\overline{\Q}/\Q)$, with the unital character $1$ giving the Riemann zeta function $L(1,s) = \zeta(s)$. An important problem in modern number theory is to obtain a fuller understanding of these higher analogs of the Riemann zeta function. The analogy is expected to be very tight: all Artin $L$-functions are expected by the Artin conjecture to be entire except perhaps for a pole at $s=1$; they are all expected to satisfy the Riemann hypothesis that all zeros with $\mbox{Re}(s) \in (0,1)$ satisfy $\mbox{Re}(s)=1/2$.
The two most basic invariants of an Artin $L$-function $L(\Chi,s)$
are defined via the two explicit elements of $\G$, the
identity $e$
and the complex conjugation element $\sigma$.
These invariants are the degree $n = \Chi(e)$ and the
signature $r = \Chi(\sigma)$ respectively.
A measure of the complexity
of $L(\Chi,s)$ is its conductor $D \in \Z_{\geq 1}$,
which can be computed from the discriminants of related number fields.
It is best for purposes such as ours to focus instead
on the root conductor $\delta = D^{1/n}$.
In this paper, we aim to find the simplest Artin $L$-functions exhibiting a given Galois-theoretic behavior. To be more precise,
consider triples $(G,c,\chi)$ consisting of a finite group $G$,
an involution $c \in G$, and a faithful character $\chi$. We say that $\Chi$
has {\em Galois type} $(G,c,\chi)$ if there is a surjection $h : \G \rightarrow
G$ with $h(\sigma) = c$, and
$\Chi = \chi \circ h$. Let $\cL(G,c,\chi)$ be the
set of $L$-functions of type $(G,c,\chi)$, and
let $\cL(G,c,\chi;B)$ be the subset
consisting of $L$-functions with root conductor
at most $B$. Two natural problems for any given Galois type $(G,c,\chi)$ are
\begin{itemize}
\item[\textbf{1}:] Use known and the above
conjectured properties of $L$-functions to get a lower bound $\frak{d}(G,c,\chi)$ on the root conductors
of $L$-functions in $\cL(G,c,\chi)$.
\item[\textbf{2}:] Explicitly
identify the sets $\cL(G,c,\chi;B)$ with $B$ as large as possible.
\end{itemize}
This paper gives answers to both problems, although for
brevity we often fix only $(G,\chi)$ and work instead
with the sets $\cL(G,\chi;B) := \cup_c \cL(G,c,\chi;B)$.
There is a large literature on a special case of the situation
we study. Namely let $(G,c,\phi)$ be a Galois type
where $\phi$ is the character of a transitive permutation
representation of $G$. Then the set
$\cL(G,c,\phi;B)$ is exactly
the set of Dedekind zeta functions $\zeta(K,s)$
arising from a corresponding
set $\cK(G,c,\phi;B)$ of arithmetic equivalence classes of
number fields. In this context, root conductors
are just root discriminants, and lower bounds date back to
Minkowski's work on the geometry of numbers.
Use of Dedekind zeta functions as in {\bf 1} above began with
work of Odlyzko \cite{od-disc1,od-disc1a,od-disc2}, Serre
\cite{serre-minorations}, Poitou
\cite{poitou-minorations,poitou-petits}, and Martinet \cite{martinet}. Extensive responses to {\bf 2} came shortly
thereafter, with papers often focusing on a single degree
$n=\phi(e)$. Early
work for quartics, quintics, sextics,
and septics includes respectively
\cite{bf-quartic2,f-quartic1,bfp-quartic3},
\cite{spd}, \cite{pohst,BMO,olivier1,olivier2,olivier3}, and
\cite{letard}.
Further results towards {\bf 2} in higher degrees are
extractable from the websites
associated to \cite{jr-global-database},
\cite{kluners-malle}, and \cite{LMFDB}.
The full situation that we are studying here was identified
clearly by Odlyzko in \cite{odlyzko-durham}, who responded to {\bf 1} with
a general lower bound. However this more general
case of Artin $L$-functions has almost no subsequent presence
in the literature. A noticeable exception is a
recent paper of Pizarro-Madariaga \cite{PM}, who improved
on Odlyzko's results on {\bf 1}. A novelty of our
paper is the separation into Galois types. For many
Galois types this separation allows us to go
considerably further on {\bf 1}. This paper is also the first
systematic study of {\bf 2} beyond the case
of number fields.
Sections~\ref{Artin} and \ref{Signature} review background on Artin $L$-functions and tools used to bound their conductors. Sections~\ref{Type}--\ref{otherchoices} form the new material on the lower bound problem {\bf 1}, while Sections~\ref{S5}--\ref{discussion} focus on the tabulation problem {\bf 2}. Finally, Section~\ref{asymp} returns to {\bf 1} and considers asymptotic lower bounds for root conductors of Artin $L$-functions in certain families. In regard to {\bf 1}, Figure~\ref{amalia-plot} and Corollary~\ref{limitcor} give a quick indication of how our type-based lower bounds compare with the earlier degree-based lower bounds. In regard to both {\bf 1} and {\bf 2}, Tables~\ref{tablelabel1}--\ref{tablelabel8} show how the new lower bounds compare with actual first conductors for many types.
\cmmt{
$**********$
Section~\ref{Artin} reviews some of the basic
formalism behind Artin $L$-functions,
with emphasis on the
direct connection with
number fields. In particular,
suppose $\phi$ is the
character of a transitive
permutation representation of $\G$;
then the set
$\cL(G,c,\phi;B)$
is naturally identified
with the set of Dedekind zeta functions $\zeta(K,s)$
arising from a corresponding
set $\cK(G,c,\phi;B)$ of arithmetic equivalence classes of
number fields.
Section~\ref{Signature} summarizes the
literature on lower bounds
for root conductors.
We restrict attention to
bounds which are
conditional on standard analytic hypotheses,
namely the Artin conjecture and
the Riemann hypothesis for the relevant
auxiliary $L$-functions.
If $\chi$ takes only
nonnegative values,
like permutation characters do,
there
is a direct method for
obtaining a conditional lower bound
for the smallest root conductor.
For general $\chi$, there is
an indirect method
that uses the very general
conductor relation \eqref{gencondrel}
based on the auxiliary character $\phi_S = \chi \bar{\chi}$.
Sections~\ref{Type}--\ref{otherchoices} form
the new material on analytic lower
bounds. Instead
of the general
conductor relation involving $\phi_S$,
we use type-based conductor
relations that depend on
the choice of an auxiliary nonnegative character $\phi$.
The form
of these relations is given in \eqref{maincondrel} and
the relation is made more explicit in Section~\ref{choices}
for four simple and useful types of $\phi$.
Section~\ref{otherchoices} describes the polytope of possible choices for $\phi$.
Our formalism of triples $(G,c,\chi)$ captures the strong tradition in
the literature of paying close attention to the placement of complex conjugation.
However in the remaining sections we keep things relatively brief by
working simply with $(G,\chi)$, which we
also call a type. We pursue the problem of identifying
the sets $\cL(G,\chi;B) := \cup_c \cL(G,c,\chi;B)$ with
a focus on finding at least the minimal root conductor
$\delta_1(G,\chi)$.
Sections~\ref{S5}--\ref{discussion} focus on this tabulation problem.
Conductor relations similar
to \eqref{maincondrel} let one
use known typically large complete
lists coming from number field tables
to determine new typically smaller complete lists
of $L$-functions.
Section~\ref{S5} explains
the process and illustrates it
by working out the case $G=S_5$
in detail. Section~\ref{Tables}
considers $G$ arising as
transitive subgroups of
$S_n$,
restricting to $n \leq 9$ for
solvable groups and $n \leq 7$ for
nonsolvable groups.
It presents conditional
lower bounds
$\mathfrak{d}(G,\chi)$
and then initial segments
$\cL(G,\chi;B)$,
almost always non-empty.
As illustrated by Figure~\ref{amalia-plot}, our
type-based lower bounds $\mathfrak{d}(G,\chi)$ are
usually substantially larger than
the best degree-based lower bounds coming
from \cite{PM}. We find always
\begin{equation}
\mathfrak{d}(G,\chi)< \delta_1(G,\chi),
\end{equation}
thus no contradiction to the Artin
conjecture or Riemann hypothesis.
In fact, typically $\delta_1(G,\chi)$
is considerably larger than
$\mathfrak{d}(G,\chi)$.
Section~\ref{discussion}
discusses several aspects of
the information presented in the
tables.
Section~\ref{asymp} shows in Corollary~\ref{limitcor}
that restricting the type in simple ways gives
much increased
asymptotic lower bounds. It speculates that these larger
lower bounds may hold even with no restriction
on the type.
}
Artin $L$-functions
have recently become much more
computationally accessible through
a package implemented in {\em Magma}
by Tim Dokchitser. Thousands
are now collected
in a section on the LMFDB \cite{LMFDB}.
The present work
increases our understanding of all this
information in several ways, including by providing completeness
certificates for certain ranges.
\section{Artin $L$-functions} \label{Artin}
In this section we provide some background. An important
point is that our problems allow us to restrict consideration
to Artin characters which take rational values only. In this setting,
Artin $L$-functions can be expressed as products and quotients
of roots of Dedekind zeta functions, minimizing the background needed. General references on Artin $L$-functions include \cite{Mar77,Mur01}.
\subsection{Number fields} A number field $K$ has many invariants relevant for our study. First of all, there is the degree $n = [K:\Q]$. The other invariants we need are local in that they are associated with a place $v$ of $\Q$ and can be read off from the corresponding completed algebra $K_v = K \otimes \Q_v$, but not from other completions. For $v=\infty$, the complete invariant is the signature $r$, defined by $K_\infty \cong \R^{r} \times \C^{(n-r)/2}$. It is more convenient sometimes to work with the eigenspace dimensions for complex conjugation, $a = (n+r)/2$ and $b = (n-r)/2$. For an ultrametric place $v=p$, the full list of invariants is complicated. The most basic one is the positive integer $D_p = p^{c_p}$ generating the discriminant ideal of $K_p/\Q_p$. We package the $D_p$ into the single invariant $D = \prod_p D_p \in \Z_{\geq 1}$, the absolute discriminant of $K$.
\subsection{Dedekind zeta functions} Associated with a number field is its Dedekind zeta function \begin{equation} \label{prodser} \zeta(K,s) = \prod_{p} \frac{1}{P_p(p^{-s})} = \sum_{m=1}^\infty \frac{a_m}{m^s}. \end{equation} Here the polynomial $P_p(x) \in \Z[x]$ is a $p$-adic invariant. It has degree $\leq n$ with equality if and only if $D_p=1$. The integer
$a_m$ is the number of ideals of index $m$ in the ring of integers $\OK$.
\subsection{Analytic properties of Dedekind zeta functions}
Let $\Gamma_\R(s) = \pi^{-s/2} \Gamma(s/2)$, where
$\Gamma(s) = \int_0^\infty x^{s-1} e^{-x} dx$ is the standard
gamma function. Let
\begin{equation}
\label{completed}
\what{\zeta}(K,s) = D^{s/2} \Gamma_\R\left(s\right)^a \Gamma_\R\left(s+1\right)^b \zeta(K,s).
\end{equation} Then this completed Dedekind zeta function
$\what{\zeta}(K,s)$ meromorphically continues to the whole complex plane,
with simple poles at $s=0$ and $s=1$ being its only singularities. It satisfies the functional equation $\what{\zeta}(K,s) = \what{\zeta}(K,1-s)$.
\subsection{Permutation characters} We recall from the introduction that throughout this paper we are taking
$\overline{\Q}$ to be the algebraic closure of $\Q$ in $\C$ and
$\G = \Gal(\overline{\Q}/\Q)$ its absolute Galois group. A degree $n$ number field
$K$ then corresponds to the transitive $n$-element
$\G$-set $X = \mbox{Hom}(K,\overline{\Q})$.
A number field thus has a permutation character $\Phi = \Phi_K = \Phi_X$
with $\Phi(e)=n$. Also signature has the character-theoretic
interpretation $\Phi(\sigma) = r$, where $\sigma$
as before is the complex conjugation element.
\subsection{General characters and Artin $L$-functions} Let $\Chi$ be a character of $\G$.
Then one has an associated Artin $L$-function $L(\Chi,s)$ and conductor
$D_\Chi$, agreeing
with the Dedekind zeta function $\zeta(K,s)$ and the discriminant $D_K$
if $\Chi$ is the permutation
character of $K$. The function $L(\Chi,s)$ has both an Euler product and
Dirichlet series representation as in \eqref{prodser}.
In general, if
$\Phi = \sum_\Chi m_{\lChi} \Chi$ then \begin{align} \label{decomp} L(\Phi,s) & = \prod_{\Chi} L(\Chi,s)^{m_{\lChi}} & D_\Phi & = \prod_{\Chi} D_\Chi^{m_{\lChi}}. \end{align} One is often interested in \eqref{decomp} where the $\Chi$ are irreducible characters.
For a finite set of primes $S$, let $\overline{\Q}_S$
be the compositum of all number fields in $\overline{\Q}$
with discriminant divisible only by primes in $S$.
Let $\G_S = \Gal(\overline{\Q}_S/\Q)$ be the corresponding
quotient of $\G$. Then for primes $p \not \in S$ one has
a well-defined Frobenius conjugacy class $\Fr_p$ in $\G_S$.
The local factor $P_p(x)$ in \eqref{prodser} is the characteristic polynomial
$\det(1 - \rho(\Fr_p) x)$, where $\rho$ is a representation
with character $\Chi$.
\subsection{Relations with other objects}
Artin $L$-functions of degree $1$ are exactly Dirichlet
$L$-functions, so that $\Chi$ can be identified with a
faithful character of the quotient group $(\Z/D\Z)^\times$
of $\G$, with $D$ the conductor of $\Chi$.
Artin $L$-functions coming from irreducible degree $2$ characters
and conductor $D$ are expected to come from
cuspidal modular forms on $\Gamma_1(D)$, holomorphic if $r=0$ and
nonholomorphic otherwise. This expectation
is proved in all cases, except for those with $r = \pm 2$
and projective image the nonsolvable group $A_5$.
In general, to understand how an Artin $L$-function $L(\Chi,s)$
qualitatively relates to other objects, one needs to
understand its Galois theory, including
the placement of complex conjugation; in other words,
one needs to identify its Galois type.
To be more quantitative, one brings in the conductor.
\subsection{Analytic Properties of Artin $L$-functions}
An Artin $L$-function
has a meromorphic continuation and functional
equation, although each with an extra complication
in comparison with the special case of Dedekind zeta functions.
For the
meromorphic continuation, the behavior at $s=1$ is known:
the pole order is the multiplicity $(1,\Chi)$ of $1$
in $\Chi$. The complication is that one has
poor
control over other possible poles. The Artin conjecture for $\Chi$
says however that there are no poles other than $s=1$.
The completed $L$-function \[ \what{L}(\Chi,s) = D_\Chi^{s/2} \Gamma_\R(s)^a \Gamma_\R(s+1)^b L(\Chi,s)\] satisfies the functional equation
\[
\what{L}(\Chi,1-s) = w \what{L}(\overline{\Chi},s)
\] with root number $w$.
Irreducible characters of any compact group come
in three types, orthogonal, non-real, and symplectic.
The type is identified by the Frobenius-Schur
indicator, calculated with respect to the Haar probability
measure $dg$:
\[
FS(\chi) =
\int_{G} \chi(g^2) \, dg \in \{-1,0,1\}.
\]
For orthogonal characters $\Chi$ of $\G$, one has $\Chi = \overline{\Chi}$
and moreover $w = 1$. The complication in
comparison with permutation characters
is that for the other two types,
the root number $w$ is
not necessarily $1$. For symplectic characters,
$\Chi = \overline{\Chi}$ and $w$
can be either of the two possibilities $1$ or
$-1$. For non-real characters, $\Chi \neq \overline{\Chi}$
and $w$ is some algebraic number of norm $1$.
Recall from the introduction that an Artin $L$-function is said to satisfy the Riemann
hypothesis if all its zeros in the critical strip $0<\mbox{Re}(s)<1$ are actually on the critical line $\mbox{Re}(s)= 1/2$.
We will be using the Riemann hypothesis through
Lemma~\ref{lowboundlem}. If we replaced the function \eqref{Odlyzko} with the appropriately
scaled version of (5.17) from \cite{PM},
then our lower bounds would be only conditional on the Artin
conjecture, which is
completely known for some Galois types $(G,c,\chi)$. However
the bounds obtained would be much smaller, and the comparison
with first conductors as presented in Tables~\ref{tablelabel1}--\ref{tablelabel8} below would be
less interesting.
\subsection{Rational characters and rational Artin $L$-functions} \label{rat-chars} The abelianized Galois group $\G^{\rm ab}$ acts on continuous complex characters of profinite groups through its action on their values. If $\Chi'$ and $\Chi''$ are conjugate via this action then their conductors agree: \begin{equation} \label{discequal} D_{\Chi'} = D_{\Chi''}. \end{equation} Our study is simplified by this equality because it allows us to study a given irreducible character $\Chi'$ by studying instead the rational character $\Chi$ obtained by summing its conjugates.
By the Artin induction theorem \cite[Prop.~13.2]{feit}, a rational character $\Chi$ can be expressed as a rational linear combination of permutation characters: \begin{equation} \label{chiexpress} \Chi = \sum k_\Phi \Phi. \end{equation} For general characters $\Chi'$, computing the Frobenius traces $a_p = \Chi'(\Fr_p)$ requires the results of \cite{dok-dok}. Similarly the computation of bad Euler factors and the root number $w$ present difficulties. For Frobenius traces and bad Euler factors, these complications are not present for rational characters $\Chi$ because of \eqref{chiexpress}.
\section{Signature-based analytic lower bounds} \label{Signature} Here and in the
next section we aim to be brief, with the main point being to explain how type-based
lower bounds are usually much larger than signature-based lower bounds. We employ the standard framework for establishing lower bounds for conductors and discriminants, namely Weil's explicit formula. General references for the material
in this section are \cite{odlyzko-durham,PM}.
\subsection{Basic quantities} \label{basic-quantities}
The theory allows general test functions that satisfy certain axioms. We work only with a function introduced by Odlyzko (see \cite[(9)]{poitou-petits}), \begin{equation} \label{Odlyzko} f(x) = \left\{ \begin{array}{ll} {\displaystyle (1-x) \cos(\pi x) + \frac{\sin(\pi x)}{\pi}}, & \mbox{ if $0 \leq x \leq 1$,} \\ 0, & \mbox{ if $x>1$}. \end{array} \right. \end{equation} For $z \in [0,\infty)$ let \begin{align*} N(z) & = \log(\pi) + \int_0^{\infty} \frac{e^{-x/4}+e^{-3x/4}}{2(1-e^{-x})} f(x/(2z))
- \frac{e^{-x}}{x} \,dx, \\ &= \gamma+ \log(8\pi) + \int_0^\infty \frac{f(x/z)-1}{2\sinh(x/2)} \, dx \\ &= \gamma+\log(8\pi) + \int_0^z \frac{f(x/z)-1}{2\sinh(x/2)} \, dx -\log\left(\frac{e^{z/2}+1}{e^{z/2}-1} \right), \\ R(z) & = \int_0^{\infty}\frac{e^{-x/4}-e^{-3x/4}}{2(1-e^{-x})} f(x/(2z))\, dx, \\ &= \int_0^z \frac{f(x/z)}{2\cosh(x/2)}\, dx, \\ P(z) & = 4 \int_0^{\infty} f(x/z) \cosh(x/2)\, dx \\ &= \frac{256 \pi^2 z \cosh^2(z/4)}{(z^2+4\pi^2)^2}. \end{align*} The simplifications in the integrals for $N(z)$ and $R(z)$ are fairly standard and apply to any test function, with the exception of the final steps which make use of the support for $f(x)$. Evaluation of $P(z)$ depends on the choice of $f(x)$. The integrals for $N(z)$ and $R(z)$ cannot be evaluated in closed form like the third, but, as indicated in \cite[\S2]{poitou-petits}, they do have simple limits $N(\infty) = \log(8 \pi) + \gamma$ and $R(\infty) = \pi/2$ as $z \rightarrow \infty$. Here $\gamma \approx 0.5772$ is the Euler $\gamma$-constant. The constants $\Omega = e^{N(\infty)} \approx 44.7632$ and $e^{R(\infty)} \approx 4.8105$, as well as their product $\Theta = e^{N(\infty)+R(\infty)} \approx 215.3325$, will play important roles in the sequel.
\subsection{The quantity $M(n,r,u)$} Consider triples $(n,r,u)$ of real numbers with $n$ and $u$ positive and $r \in [-n,n]$. For such a triple, define \[ M(n,r,u) = \mbox{Max}_z \left( \exp \left( N(z) + \frac{r}{n} R(z) - \frac{u}{n} P(z) \right) \right). \] It is clear that $M(n,r,u) = M(n/u,r/u,1)$. Accordingly we regard $u=1$ as the essential case and abbreviate $M(n,r)=M(n,r,1)$. For fixed $\epsilon \in [0,1]$ and $u>0$, one has the asymptotic behavior \begin{equation} \label{asymptotic1} \lim_{n \rightarrow \infty} M(n,\epsilon n) = \Omega^{1-\epsilon} \Theta^{\epsilon} \approx 44.7632^{1-\epsilon} 215.3325^{\epsilon}. \end{equation} Figure~\ref{contourM} gives one a feel for the fundamental function $M(n,r)$. Particularly important are the univariate functions $ M(n,0)$ and $
M(n,n)$ corresponding to the left and right edges of this figure. \begin{figure}
\caption{ A contour plot of $M(n,\epsilon n)$ in the
window $[0,1] \times [1,1 \, 000 \, 000]$ of the $\epsilon$-$n$ plane, with a vertical logarithmic scale and contours at $2$, $4$, $6$, $8$, ${\bf 10}$, \dots, {\bf 170}, 172, 174, 176.
Some limits for $n \rightarrow \infty$ are shown on the upper boundary.}
\label{contourM}
\end{figure}
\subsection{Lower bounds for root discriminants} Suppose that
$\Phi$ is a nonzero Artin character which takes real values only. We say
that $\Phi$ is nonnegative if \begin{equation} \label{nonnegativity} \Phi(g) \geq 0 \mbox{ for all $g \in \G$.} \end{equation} This nonnegativity ensures that the inner product $(\Phi,1)$ of $\Phi$ with the unital character $1$ is positive. A central result of the theory, a special case of the statement in \cite[(7)]{poitou-petits}, then serves us as a lemma. \begin{lemma} \label{lowboundlem} The lower bound \[ \delta_{\Phi} \geq M(n,r,u) \] is valid for all nonnegative characters $\Phi$ with $(\Phi(e),\Phi(\sigma),(\Phi,1)) = (n,r,u)$ and $L(\Phi,s)$ satisfying the Artin conjecture and the Riemann hypothesis. \end{lemma} If $\Phi$ is a permutation character, then the nonnegativity condition \eqref{nonnegativity} is automatically satisfied. This makes the application of the analytic theory to lower bounds of root discriminants of fields relatively straightforward.
\subsection{Lower bounds for general Artin conductors} To pass from nonnegative characters to general characters, the classical method uses the following lemma. \begin{lemma}[Odlyzko \cite{odlyzko-durham}] \label{lemma1} The conductor relation \begin{equation} \label{gencondrel} \delta_\Chi \geq \delta_{\Phi}^{n/(2n-2)} \end{equation} holds for any degree $n$ character
$\Chi$ and its absolute square $\Phi = |\Chi|^2$. \end{lemma} \noindent A proof of this lemma from first principles is given in \cite{odlyzko-durham}.
Combining Lemma~\ref{lowboundlem} with Lemma~\ref{lemma1}, one gets the following immediate consequence.
\begin{thm} \label{thm1} The lower bound \[ \delta_\Chi \geq M(n^2,r^2,w)^{n/(2n-2)} \] is valid for all characters $\Chi$ with
$(\Chi(e),\Chi(\sigma),(\Chi,\overline{\Chi})) = (n,r,w)$ such that $L(|\Chi|^2,s)$ satisfies the Artin conjecture and the Riemann hypothesis. \end{thm} This theorem is essentially the main result in the literature on lower bounds for Artin conductors. It appears in \cite{odlyzko-durham, PM} with the right side replaced by explicit bounds. For fixed $\epsilon \in [-1,1]$ and $w>0$, one has the asymptotic behavior \begin{equation} \label{asymptotic2} \lim_{n \rightarrow \infty} M(n^2,\epsilon^2 n^2,w) = \Omega^{(1-\epsilon^2)/2} \Theta^{\epsilon^2/2} \approx 6.6905^{1-\epsilon^2} 14.6742^{\epsilon^2}. \end{equation} The bases $\Omega \approx 44.7632$ and $\Theta \approx 215.3325$ of \eqref{asymptotic1} serve as limiting lower bounds for root discriminants via Lemma~\ref{lowboundlem}. However it is only their square roots $\sqrt{\Omega} \approx 6.6905 $ and $\sqrt{\Theta} \approx 14.6742$ which Theorem~\ref{thm1} gives as limiting lower bounds for root conductors. This
discrepancy will be addressed in Section~\ref{asymp}.
\section{Type-based analytic lower bounds} \label{Type}
In this section we establish Theorem~\ref{thm2}, which is
a family of lower bounds on the root conductor
$\delta_\Chi$ of a given Artin character, dependent
on the choice of an auxiliary character $\phi$.
\subsection{Conductor relations} Let $G$ be a finite group, $c$ an involution in $G$, $\chi$ a faithful character of $G$, and $\phi$ a non-zero real-valued character of $G$. Say that a pair of Artin characters
($\Chi$,$\Phi$) has joint type $(G,c,\chi,\phi)$ if there is a surjection $h : \G \rightarrow G$ with $h(\sigma)=c$, $\Chi = \chi \circ h$, and $\Phi = \phi \circ h$.
Write the conductors respectively as \begin{align*} D_\Chi & = \prod_p p^{c_p(\Chi)}, & D_\Phi & = \prod_p p^{c_p(\Phi)}. \end{align*} Just as in the last section, we need lower bounds on $D_\Chi$ in terms of $D_{\Phi}$. Our paper \cite{jr-tame-wild} produces bounds of this sort in the context of many characters. Here we present some of these results restricted to the setting of two characters, but otherwise following the notation of \cite{jr-tame-wild}.
For $\tau \in G$, let $\bar{\tau}$ be its order. Let $\psi$ be a rational character of $G$. Define two similar numbers, \begin{align} \label{twosimilar} \what{c}_\tau(\psi)&= \psi(e)-\psi(\tau), & c_\tau(\psi)& =
\psi(e) - \frac{1}{\bar{\tau}}
\sum_{k|\bar{\tau}} \varphi(\bar\tau/k) \psi(\tau^k). \end{align}
Here $\varphi$ is the Euler totient function given by $\varphi(k) = |(\Z/k)^\times|$. For the identity element $e$, one clearly has $\what{c}_e(\psi) = c_e(\psi)=0$. When $\bar{\tau}$ is prime, the functions on rational characters defined in \eqref{twosimilar} are proportional: $(\bar{\tau}-1) \what{c}_\tau(\psi) =\bar{\tau}{c}_\tau(\psi)$.
The functions $\what{c}_\tau$ and ${c}_\tau$ are related to ramification as follows. Let $\Psi$ be an Artin character corresponding to $\psi$ under $h$. If $\Psi$ is tame at $p$ then \begin{equation} \label{tameidentity} c_p(\Psi) = c_\tau(\psi), \end{equation} for $\tau$ corresponding to a generator of tame inertia. The identity \eqref{tameidentity} holds because $c_\tau(\psi)$ is the number of non-unital eigenvalues of $\rho(\tau)$ for a representation $\rho$ with character $\psi$. For general $\Psi$, there is a canonical expansion \begin{equation} \label{wildbound} c_p(\Psi) = \sum_{\tau} k_\tau \what{c}_\tau(\psi), \end{equation} with always $k_\tau \geq 0$, coming from the filtration by higher ramification groups on the $p$-adic inertial subgroup of $G$.
Because \eqref{twosimilar}--\eqref{wildbound} are only correct for $\psi$ rational, when we apply them to characters $\chi$ and $\phi$ of interest, we are always assuming that $\chi$ and $\phi$ are rational. As explained in \S\ref{rat-chars}, the restriction to rational characters still allows obtaining general lower bounds. Also, as will be illustrated by an example in \S\ref{spectralwidth}, focusing on rational characters does not reduce the quality of these lower bounds.
For the lower bounds we need, we define the parallel quantities \begin{align} \label{alphaprod} \what{\alpha}(G,\chi,\phi) & = \min_{\tau \in G-\{e\}} \frac{\what{c}_\tau(\chi)}{\what{c}_\tau(\phi)}, & \alpha(G,\chi,\phi) & = \min_{\tau \in G-\{e\}} \frac{c_\tau(\chi)}{c_\tau(\phi)}. \end{align} Let $B(G,\chi,\phi)$ be the best lower bound, valid for all primes $p$, that one can make on $c_p(\Chi)/c_p(\Phi)$ by purely local arguments. As emphasized in \cite[\S2]{jr-tame-wild}, $B(G,\chi,\phi)$ can in principle be calculated by individually inspecting all possible $p$-adic ramification behaviors. The above discussion says \begin{equation} \label{aba} \what{\alpha}(G,\chi,\phi) \leq B(G,\chi,\phi) \leq \alpha(G,\chi,\phi). \end{equation} The left inequality holds because of the nonnegativity of the $k_\tau$ in
\eqref{wildbound}. The right inequality holds because of \eqref{tameidentity}.
A central theme of \cite{jr-tame-wild}
is that one is often but not always
in the extreme situation
\begin{equation}
\label{ba}
B(G,\chi,\phi) = \alpha(G,\chi,\phi).
\end{equation}
For example, it often occurs in practice that
the minimum in the expression \eqref{alphaprod} for $\what{\alpha}(G,\chi,\phi)$
occurs at a $\tau$ of prime order. Then the proportionality
remark above shows that in fact
all three quantities in \eqref{aba} are
the same, and so in particular \eqref{ba} holds.
As a quite different example, Theorem~7.3 of \cite{jr-tame-wild}
says that if $\phi$ is the regular character of $G$ and $\chi$ is a
permutation character, then
\eqref{ba} holds. Some other examples of \eqref{ba} are worked out in
\cite{jr-tame-wild} by explicit analysis of wild ramification;
a few examples show that $B(G,\chi,\phi) < \alpha(G,\chi,\phi)$ is possible too.
\subsection{Root conductor relations} To switch the focus from conductors to root conductors, we multiply all three quantities in \eqref{aba} by $\phi(e)/\chi(e)$ to obtain \begin{equation} \label{aba2} \walp(G,\chi,\phi) \leq b(G,\chi,\phi) \leq \alp(G,\chi,\phi). \end{equation} Here the elementary purely group-theoretic quantity $\walp(G,\chi,\phi)$ is improved to the best bound $b(G,\chi,\phi)$ which in turn often agrees with a second more complicated but still purely group-theoretic quantity $\alp(G,\chi,\phi)$. The notations $\what{\alpha}$, $\alpha$, $\what{\underline{\alpha}}$, $\underline{\alpha}$ are all taken from Section~7 of \cite{jr-tame-wild} while the notations $B$ and $b$ correspond to quantities not named in \cite{jr-tame-wild}.
Our discussion establishes the following lemma. \begin{lemma} \label{lemma2} The conductor relation \begin{equation} \label{maincondrel} \delta_\Chi \geq \delta_\Phi^{b(G,\chi,\phi)} \end{equation} holds for all pairs of Artin characters $(\Chi,\Phi)$ with joint type of the form $(G,c,\chi,\phi)$. \end{lemma}
\subsection{Bounds via an auxiliary Artin character $\Phi$.} \label{bounds} For $u \in \{\walp,b,\alp\}$, define \begin{equation} \label{mdef} m(G,c,\chi,\phi,u) = M(\phi(e),\phi(c),(\phi,1))^{u(G,\chi,\phi)}. \end{equation} Just like Lemma~\ref{lowboundlem} combined with Lemma~\ref{lemma1} to give Theorem~\ref{thm1}, so too Lemma~\ref{lowboundlem} combines with Lemma~\ref{lemma2} to give the following theorem. \begin{thm} \label{thm2} The lower bound \begin{equation} \label{thm2bound} \delta_\Chi \geq m(G,c,\chi,\phi,b) \end{equation} is valid for all character pairs $(\Chi,\Phi)$ of joint type $(G,c,\chi,\phi)$ such that $\Phi$ is non-negative and $L(\Phi,s)$ satisfies the Artin conjecture and the Riemann hypothesis. \end{thm} Computing the right side of \eqref{thm2bound} is difficult because the base in \eqref{mdef} requires evaluating the maximum of a complicated function, while the exponent $b(G,\chi,\phi)$ involves an exhaustive study of wild ramification. Almost always in the sequel, $\chi$ and $\phi$ are rational-valued and we replace $b(G,\chi,\phi)$ by $\walp(G,\chi,\phi)$; in the common case that all three quantities of \eqref{aba2} are equal, this is no loss.
\section{Four choices for $\phi$} \label{choices} This section fixes a type $(G,c,\chi)$ where the faithful character $\chi$ is rational-valued and uses the notation $(n,r) = (\chi(e),\chi(c))$. The section introduces four nonnegative characters $\phi_i$ built from $(G,\chi)$. For the first character $\phi_L$, it makes $m(G,c,\chi,\phi_L,b)$, the lower bound appearing in Theorem~\ref{thm2}, more explicit. For the remaining three characters $\phi_i$, it makes the perhaps smaller quantity $m(G,c,\chi,\phi_i,\walp)$ more explicit.
Two simple quantities enter into the constructions as follows. Let $X$ be the set of values of $\chi$, so that $X \subset \Z$ by our rationality assumption. Let $-\widecheck{\chi}$ be the least element of $X$. The greatest element of $X$ is of course $\chi(e)=n$, and we let $\widehat{\chi}$ be the second greatest element. Thus, $-\widecheck{\chi} <0 \leq \widehat{\chi} \leq n-1$.
\subsection{Linear auxiliary character} A simple nonnegative character associated to $\chi$ is $\phi_L = \chi+\widecheck{\chi}$. Both $m(G,c,\chi,\phi_L,\walp)$ and $m(G,c,\chi,\phi_L,\alp)$ easily evaluate to \begin{equation} \label{formlinear} m(G,c,\chi,\phi_L,b) = M(n+\widecheck{\chi},r+\widecheck{\chi},\widecheck{\chi})^{(n + \widecheck{\chi})/n}. \end{equation} The character $\phi_L$ seems most promising as an auxiliary character when $\widecheck{\chi}$ is very small.
In \cite[\S3]{PM} the auxiliary character $\chi+n$ is used, which has the advantage of being nonnegative for any rational character $\chi$. Odlyzko also uses $\chi+n$ in \cite{odlyzko-durham}, and suggests using the auxiliary character $\phi_L = \chi+\widecheck{\chi}$ since it gives a better bound whenever $\widecheck{\chi}<n$. This strict inequality holds exactly when the center of $G$ has odd order.
\subsection{Square auxiliary character} Another very simple nonnegative character associated to $\chi$ is $\phi_S = \chi^2$. This character gives \begin{equation} \label{formsquare} m(G,c,\chi,\phi_S,\walp) = M(n^2,r^2,(\chi,\chi))^{n/(n+\widehat{\chi})}. \end{equation} The derivation of \eqref{formsquare} uses the simple formula in \eqref{twosimilar} for $\what{c}_\tau$. The formula for $c_\tau$ in \eqref{twosimilar} is more complicated, and we do not expect a simple general formula for $m(G,c,\chi,\phi_S,{\alp})$, nor for the best bound $m(G,c,\chi,\phi_S,b)$ in Theorem~\ref{thm2}.
The character $\phi_S$ is used prominently in \cite{odlyzko-durham, PM}. When $\widehat{\chi}=n-2$, the lower bound $m(G,c,\chi,\phi_S,\walp)$ coincides with that of Lemma~\ref{lemma1}. Thus for $\widehat{\chi}=n-2$, Theorem~\ref{thm2} with $\phi = \phi_S$ gives the same bound as Theorem~\ref{thm1}. On the other hand, as soon as $\widehat{\chi}<n-2$, Theorem~\ref{thm2} with $\phi = \phi_S$ is stronger. The remaining case $\widehat{\chi}=n-1$ occurs only three times among the $195$ characters we consider in Section~\ref{Tables}. In these three cases, the bound in Theorem~\ref{thm1} is stronger because the exponent is larger. However, in each of these cases, the tame-wild principle applies \cite{jr-tame-wild} and we can use exponent $m(G,c,\chi,\phi_S,{\alp})$, which gives the same bound as Theorem~\ref{thm1} in two cases, and a better bound in the third.
\subsection{Quadratic auxiliary character} Let $-\widetilde{\chi}$ be the greatest negative element of the set $X$ of values of $\chi$. A modification of the given character $\chi$ is $\chi^* = \chi+\widetilde{\chi}$, with degree $n^* = n+\widetilde{\chi}$ and signature $r^* = r + \widetilde{\chi}$. A modification of $\phi_S$ is $\phi_Q = \chi \chi^*$. The function $\phi_Q$ takes only nonnegative values because the interval $(-\tilde{\chi},0)$ in the $x$-line where $x(x+\tilde{\chi})$ is negative is disjoint from the set $X$ of values of $\chi$. The lower bound associated to $\phi_Q$ is \begin{equation} \label{formquad} m(G,c,\chi,\phi_Q,\walp) = M(nn^*,rr^*,(\chi,\chi))^{(n^*)/(n^*+\widehat{\chi})}. \end{equation} Comparing formulas \eqref{formsquare} and \eqref{formquad}, $n^2$ strictly increases to $nn^*$ and $n/(n+\widehat{\chi})$ increases to $n^*/(n^* + \widehat{\chi})$. In the totally real case $n=r$, the monotonicity of the function $M(n,n)$ as exhibited in the right edge of Figure~\ref{contourM} then implies that $m(G,c,\chi,\phi_S,\walp)$ strictly increases to $m(G,c,\chi,\phi_Q,\walp)$. Even outside the totally real setting, one can expect that $\phi_Q$ almost always yields a better lower bound than $\phi_S$. The character $\phi_Q$ seems promising as an auxiliary character when $\widehat{\chi}$ is very small so that the exponent is near $1$ rather than its lower limit of $1/2$. As for the square case, we do not expect a simple formula for the best bound $m(G,c,\chi,\phi_Q,b)$ in Theorem~\ref{thm2}.
\subsection{Galois auxiliary character} Finally there is a strong candidate for a good auxiliary character that does not depend on $\chi$, namely the regular character $\phi_G$. By definition,
$\phi_G(e) = |G|$, and $\phi_G(g)=0$ for all other $g$. In this case one has \begin{equation} \label{formula3a}
m(G,c,\chi,\phi_G,\walp) = M(|G|,\delta_{ce}|G|,1)^{(n-\widehat{\chi})/n}. \end{equation} Here $\delta_{ce}$ is defined to be $1$ in the totally real case and $0$ otherwise. This auxiliary character again seems most promising when $\widehat{\chi}$ is small. As in the square and quadratic cases, we do not expect a simple formula for $m(G,c,\chi,\phi_G,b)$.
\subsection{Spectral bounds and rationality} \label{spectralwidth} To get large lower bounds on root conductors, one wants $\widecheck{\chi}/n$ to be small for \eqref{formlinear} or $\widehat{\chi}/n$ to be small for \eqref{formsquare}--\eqref{formula3a}. The analogous quantities $\widecheck{\chi}_1/n_1$
and $\widehat{\chi}_1/n_1$ are well-defined for a general real character $\chi_1$, and replacing $\chi_1$ by the sum $\chi$ of its conjugates can substantially reduce them.
For example, let $p$ be a prime congruent to 1 modulo 4, and let $G$ be the simple group $\PSL_2(p)$. Then $G$ has two irrational irreducible characters, say $\chi_1$ and $\chi_2$, both of degree $(p+1)/2$. For each, its set of values is $$\left\{\frac{-\sqrt{p}-1}{2},-1,0,1,\frac{\sqrt{p}-1}{2}, \frac{p+1}{2}\right\}$$ (except that $1$ is missing if $p=5$). However for $\chi = \chi_1+\chi_2$, the set of values is just $\{-2,0,2,p+1\}$. Thus in passing from $\widecheck{\chi}_1/n_1$ to $\widecheck{\chi}/n$, one saves a factor of $\sqrt{p}+1$. Similarly in passing from $\widehat{\chi}_1/n_1$ to $\widehat{\chi}/n$, one saves a factor of $\sqrt{p}-1$.
\section{Other choices for $\phi$} \label{otherchoices} To apply Theorem~\ref{thm2} for a given Galois type $(G,c,\chi)$, one needs to choose an auxiliary character $\phi$. We presented four choices in Section~\ref{choices}. We discuss all possible choices here, using $G=A_4$ and $G=A_5$ as illustrative examples.
\subsection{Rational character tables} As a preliminary, we review the notion of rational character table. Let $G^\sharp = \{C_j\}_{j \in J}$ be the set of power-conjugacy classes in $G$. Let $G^{\rm rat} = \{\chi_i\}_{i \in I}$ be the set of rationally irreducible characters. These sets have the same size $k$ and one has a $k\times k$ matrix $\chi_i(C_j)$, called the rational character table.
\begin{table}[htb] \[
\begin{array}{c|rrr c c|rrrr}
A_4 &1A & 2A & 3AB & \;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\; & A_5 & 1A & 2A & 3A & 5AB \\ \cline{1-4} \cline{6-10} \chi_1 & 1 & 1 & 1 & & \chi_1 & 1 & 1 & 1 & 1 \\ \chi_2 & 2 & 2 & -1 & &\chi_4 & 4 & 0 & 1 & - 1 \\ \chi_3 & 3 & - 1& 0 && \chi_5 & 5 & 1 & -1 & 0 \\
\multicolumn{2}{c}{\;} & &&&\chi_6 & 6 & -2 & 0 & 1 \\ \end{array} \] \caption{\label{ratchartables} Rational character tables for $A_4$ and $A_5$}
\end{table}
Two examples are given in Table~\ref{ratchartables}.
We index characters by their degree, with $I = \{1,2,3\}$ for $A_4$ and $I = \{1,4,5,6\}$ for $A_5$. All characters are absolutely irreducible except for $\chi_2$ and $\chi_{6}$, which each break as a sum of two conjugate irreducible complex characters. We likewise index power-conjugacy classes by the order of a representing element, always adding letters as is traditional. Thus $J = \{1A,2A,3AB\}$ for $A_4$ and $J = \{1A,2A,3A,5AB\}$ for $A_5$, with $3AB$ and $5AB$ each consisting of two conjugacy classes.
\subsection{The polytope $P_G$ of normalized nonnegative characters} \label{polytope-pg} A general real-valued function $\phi \in \R(G^\sharp)$ has an expansion $\sum x_i \chi_i$ with $x_i \in \R$. The coefficients are recovered via inner products, $x_i = (\phi,\chi_i)/(\chi_i,\chi_i)$. Alternative coordinates are given by $y_j = \phi(C_j)$. The $\phi$ allowed for Theorem~\ref{thm2} are the nonzero $\phi$ with the $x_i$ and $y_j$ non-negative integers.
An allowed $\phi$ gives the same lower bound in Theorem~\ref{thm2} as any of its positive multiples $m \phi$. Without getting any new bounds, we can therefore give ourselves the convenience of allowing the $x_i$ and $y_j$ to be nonnegative rational numbers. Similarly, we can extend by continuity to allow the $x_i$ and $y_j$ to be nonnegative real numbers. The allowed $\phi$ then become the cone in $k$-dimensional Euclidean space given by $x_i \geq 0$ and $y_j \geq 0$, excluding the tip of the cone at the origin.
Writing the identity character as $\chi_1$, we can normalize via scaling to $x_1=1$. Writing the identity class as $C_{1A}$, the inequality $y_{1A} \geq 0$ is implied by the other $y_j \geq 0$ and so the variable $y_{1A}$ can be ignored. The polytope $P_G$ of normalized nonnegative characters is then defined by $x_1=1$, the inequalities $x_i \geq 0$ for $i \neq 1$, and inequalities $y_j \geq 0$ for $j \neq 1A$. The point where all the $x_i$ are zero is the unital character $\phi_1$. The point where all the $y_j$ are zero is the regular character $\phi_G$. Thus the $(k-1)$-dimensional polytope $P_G$ is determined by $2k-2$ linear inequalities, with $k-1$ corresponding to non-unital characters and intersecting at $\phi_1$, and $k-1$ corresponding to non-identity classes and intersecting at $\phi_G$.
\begin{figure}[htb] \centering \includegraphics{twopolytopes} \caption{\label{twopolytopes} The polytopes $P_{A_4}$ and $P_{A_5}$} \end{figure}
Figure~\ref{twopolytopes} continues our two examples. On the left, $P_{A_4}$ is drawn in the $x_2$-$x_3$ plane. The character faces give the coordinate axes and are dashed. The class faces are calculated from columns in the rational character table and are solid.
On the right, a view of $P_{A_5}$ is given in $x_{4}$-$x_{5}$-$x_{6}$ space. The three pairwise intersections of character faces give coordinate axes and are dashed, while all other edges are solid. In this view, the point $\phi_G = \phi_{60} = (4,5,3)$ should be considered as closest to the reader, with the solid lines visible and the dashed lines hidden by the polytope. Note that $P_{A_4}$ has the combinatorics of a square and $P_{A_5}$ has the combinatorics of a cube. While the general $P_G$ is the intersection of an orthant with tip $\phi_1$ and an orthant with tip $\phi_G$, its combinatorics are typically more complicated than $[0,1]^{(k-1)}$. For example, the groups $G=A_6$, $S_5$, $A_7$, and $S_6$, have $k=6$, $7$, $8$, and $11$ respectively; but instead of having $32$, $64$, $128$ and $1024$ vertices, their polytopes $P_G$ have $28$, $40$, $115$, and $596$ vertices respectively.
\subsection{Points in $P_G$} In the previous subsection, we have mentioned already the distinguished vertices $\phi_1$ and $\phi_G$. For every rationally irreducible character, we also have $\phi_{\chi,L} = \chi + \widecheck{\chi}$, $\phi_{\chi,S} = \chi^2$, and $\phi_{\chi,Q} = \chi \chi^*$, as in Section~\ref{choices}.
For every subgroup $H$ of $G$, another element of $P_G$ is the permutation character $\phi_{G/H}$. For $H = G$, this character is just the $\phi_1$ considered before, which is a vertex. Otherwise, a theorem of Jordan, discussed at length in \cite{Ser03}, says that $\phi_{G/H}(C_j)=0$ for at least one $j$; in other words, $\phi_{G/H}$ is on at least one character face. For $A_4$ and $A_5$, there are respectively five and nine conjugacy classes of subgroups, distinguished by their orders. Figure~\ref{twopolytopes} draws the corresponding points, labeled by
$\phi_{|G/H|}$. All four vertices of $P_{A_4}$ and six of the eight vertices of $P_{A_5}$ are of the form $\phi_{N}$. The remaining one $\phi_N$ in $P_{A_4}$ is on an edge, while the remaining three $\phi_N$ in $P_{A_5}$
are on edges as well.
\subsection{The best choice for $\phi$} Given $(G,c,\chi)$ and $u \in \{\walp,b,\alp\}$, let $m(G,c,\chi,u) = \max_{\phi \in P_G} m(G,c,\chi,\phi,u)$. Computing these maxima seems difficult. Instead we vary $\phi$ over a modestly large finite set, denoting the largest bound appearing as $\mathfrak{d}(G,c,\chi,u)$. For most $G$, the set of $\phi$ we inspect consists of all $\phi_{\chi,L}$, $\phi_{\chi,S}$, and $\phi_{\chi,Q}$, all $\phi_{G/H}$ including the regular character $\phi_G$, and all vertices. For some $G$, like $S_7$, there are too many vertices and we exclude them from the list of $\phi$ we try.
For each $(G,\chi)$, we work either with $u=\walp$ or with $u=\alp$, as explained in the ``middle four columns" part of \S\ref{remainingrows}. We then report $\mathfrak{d}(G,\chi) = \min_c \mathfrak{d}(G,c,\chi,u)$ in Section~\ref{Tables}.
\section{The case $G=S_5$} \label{S5}
Our focus in the next two sections is on finding initial segments
$\cL(G,\chi; B)$ of complete lists of Artin $L$-functions, and
in particular on finding the first root conductor $\delta_1(G,\chi)$. It is a question of transferring completeness statements for number fields to completeness statements for Artin $L$-functions via conductor relations. In this section, we explain the process by presenting the case $G=S_5$ in some detail.
\subsection{Different orders on the same set of fields}
Consider the set $\cK$ of isomorphism classes of
quintic fields $K$ over $\Q$ with splitting field
$L/\Q$ having Galois group $\gal(L/\Q) \cong S_5$.
The group
$S_5$ has seven irreducible characters which we index by
degree and an auxiliary label: $\chi_{1a} = 1$, $\chi_{1b}$,
$\chi_{4a}$, $\chi_{4b}$, $\chi_{5a}$, $\chi_{5b}$, and $\chi_{6a}$.
For $\phi$ a permutation character, let $D_\phi(K) = D(K_\phi)$ be the absolute
discriminant of the associated resolvent algebra $K_\phi$ of $K$.
Extending by multiplicativity, functions $D_\chi : \cK \rightarrow \R_{>0}$
are defined for general $\chi = \sum m_n \chi_n$. They do not depend on the
coefficient $m_{1a}$. We follow our practice of often shifting attention to
the corresponding root conductors $\delta_\chi(K) = D_\chi(K)^{1/\chi(e)}$.
\begin{table}[htb] {\setlength{\arraycolsep}{3pt} \[
\begin{array}{r|rrrrrrr|rrrrrrr|rr} \lambda_5 & 1^5 & 2^2 1 & 31^2 & 5 & 21^3 & 41 & 32 & 1^5 & 2^2 1 & 31^2 & 5 & 21^3 & 41 & 32 & \\ \lambda_6 & 1^6 & 2^2 1^2 & 33 & 51 & 2^3 & 41^2 & 6 & 1^6 & \!\! 2^2 1^2 & 33 & 51 & 2^3 & 41^2 & 6 & \walp(n) & \alp(n) \\ \hline \chi_{1a} & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & \\ \chi_{1b} & 1 & 1 & 1 & 1 & -1 & -1 & -1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & \\ \chi_{4a} & 4 & 0 & 1 & -1 & {\bf 2} & 0 & -1 & 0 & 2 & 2 & 4 & {\bf 1} & 3 & 3 & 0.50 & 0.50 \\ \chi_{4b} & 4 & 0 & {\bf 1} & -1 & -2 & 0 & {\bf 1} & 0 & 2 & {\bf 2} & 4 & 3 & 3 & 3 & 0.75 & 0.75 \\ \chi_{5a} & 5 & {\bf 1} & -1 & 0 & {\bf 1} & -1 & {\bf 1} & 0 & {\bf 2} & 4 & 4 & {\bf 2} & 4 & 4 & 0.80 & 0.80 \\ \chi_{5b} & 5 & {\bf 1} & -1 & 0 & -1 & {\bf 1} & -1 & 0 & {\bf 2} & 4 & 4 & 3 & 3 & 5 & 0.80 & 0.80 \\ \chi_{6a} & 6 & -2 & 0 & {\bf 1} & 0 & 0 & 0 & 0 & 4 & 4 & {\bf 4} & 3 & 5 & 5 & 0.8\overline{3} & 0.8\overline{3} \\ \hline \phi_{120} & 120 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 60 & 80 & 96 & 60 & 90 & 100 & & \\
\end{array} \] } \caption{\label{chartabs5} Standard character table of $S_5$ on the left, with entries $\chi_n(\tau)$; tame table \cite[\S4.3]{jr-tame-wild}, on the right, with entries $c_\tau(\chi_n)$ as defined in \eqref{twosimilar}. } \end{table}
Let $\cK(\chi; B) = \{K \in \cK : \delta_\chi(K) \leq B\}$. Suppose now all
the $m_{n}$ are nonnegative with at least one coefficient
besides $m_{1a}$ and $m_{1b}$ positive. Then $\delta_\chi$ is a
height function in the sense that all the $\cK(\chi; B)$
are finite. Suppressing the secondary phenomenon
that ties among a finite number of fields can occur,
we think of each $\delta_\chi$ as giving an ordering on
the set $\cK$.
The orderings coming from different $\delta_\chi$ can be very different. For example, consider
the field $K \in \cK$ defined by the polynomial $x^5 - 2x^4 + 4x^3 - 4x^2 + 2x - 4$.
This field is the first field in $\cK$ when ordered by the regular character
$\phi_{120} = \sum_n \chi_n(e) \chi_n$. However it is
the $22^{\rm nd}$ field when
ordered by $\phi_6 = 1 + \chi_{5b}$
and only the $2298^{\rm th}$ field when ordered by
$\phi_5 = 1+ \chi_{4a}$.
This phenomenon of different orderings on the same set of number fields plays
a prominent role in asymptotic studies \cite{wood}. Here we are interested instead
in initial segments and how they depend on $\chi$. Our formalism lets
us treat any $\chi$. Following the conventions for general $G$ of the next section,
we focus on the five irreducible $\chi$ with
$\chi(e)>1$, thus $\chi_n$ for $n \in \{4a,4b,5a,5b,6a\}$.
\subsection{Computing Artin conductors} To compute general $D_\chi(K)$, one needs to work with enough resolvents of
$K=K_5 = \Q[x]/f_5(x)$. For starters, we have the quadratic resolvent $K_2 = \Q[x]/(x^2-D(K_5))$ and the
Cayley-Weber resolvent $K_6 = \Q[x]/f_6(x)$ \cite{generic,jr-tame-wild}. The other resolvents
we will need are $K_{10} = K_5 \otimes K_2$,
$K_{12} = K_2 \otimes K_6$, and $K_{30} = K_5 \otimes K_6$. Defining
polynomials are obtained for $K_a \otimes K_b$ by the general formula \[ f_{ab}(x) = \prod_{i=1}^a \prod_{j=1}^b (x-\alpha_i-\beta_j), \] where $f_a(x)$ has roots $\alpha_i$ and $f_b(x)$ has roots $\beta_j$. So discriminants $D_2$, $D_5$, $D_6$, $D_{10}$, $D_{12}$, $D_{30}$ are easily computed.
From the character table, the permutation characters $\phi_N$ in question are expressed in the basis $\chi_n$ as on the left in the following display. Inverting, one gets the $\chi_n$ in terms of the $\phi_N$ as on the right. \[ \begin{array}{r@{\:}l@{\:}l@{\qquad}r@{\:}l}
\phi_2 && = 1 + \chi_{1b}, &\chi_{1b} & = -1 + \phi_2,\\
\phi_5 && = 1 + \chi_{4a}, & \chi_{4a} & = -1 + \phi_{5}, \\
\phi_6 && = 1 + \chi_{5b}, & \chi_{4b} & = 1 - \phi_2 - \phi_{5} + \phi_{10}, \\
\phi_{10} & = \phi_{5} \phi_2 & = 1 + \chi_{1b} + \chi_{4a} + \chi_{4b}, &
\chi_{5a} & = 1 - \phi_2 - \phi_6 + \phi_{12}, \\
\phi_{12} & = \phi_6 \phi_2 & = 1 + \chi_{1b} + \chi_{5a} + \chi_{5b}, & \chi_{5b} & = -1 + \phi_6, \\
\phi_{30} & = \phi_5 \phi_6 & = 1 + 2 \chi_{4a} + 2 \chi_{5a} + \chi_{5b} + \chi_{6a}, \!\!\!\! & \chi_{6a} & = 2 \phi_2 - 2 \phi_5 + \phi_6 - 2 \phi_{12} + \phi_{30}. \end{array} \] Conductors $D_n$ belonging to the $\chi_n$ are calculable through these formulas, as e.g.\ $D_{6a} = D_2^2 D_5^{-2} D_6 D_{12}^{-2} D_{30}$.
For all the groups $G$ considered in the next section, we proceeded similarly. Thus we started with rational character tables from {\em
Magma}. We used linear algebra to express rationally irreducible characters in terms of permutation characters. We used {\em Magma} again to compute resolvents and then {\em Pari} to evaluate their discriminants. In this last step, we often confronted large degree polynomials with large coefficients. The discriminant computation was only feasible because we knew {\em a priori} the set of primes dividing the discriminant, and could then easily compute the $p$-parts of the discriminants of these resolvent fields for relevant primes $p$ using {\em Pari/gp} without fully factoring the discriminants of the resolvent polynomials.
{\em Magma}'s Artin representation package computes conductors of Artin representations in a different and more local manner. Presently, it does not compute all conductors in our range because some decomposition groups are too large.
\subsection{Transferring completeness results.} As an initial complete list of fields, we take $\cK(\phi ; 85)$ with $\phi=\phi_G=\phi_{120}$. We know from \cite{jr-global-database} that this set consists of $2080$ fields. We list these fields by increasing discriminant, $K^1$, \dots, $K^{2080}$, with the resolution of ties conveniently not affecting the explicit results appearing in Table~\ref{tablelabel1}.
The quantities of Section~\ref{Type} reappear here, and we will use the abbreviations $\walp(n) = \walp(S_5,\chi_n,\phi)$ and $\alp(n) = \alp(S_5,\chi_n,\phi)$. Since $\phi$ is zero outside of the identity class, the formulas simplify substantially: \begin{align*} \walp(n) & =\frac{\phi(e)}{\chi_n(e)} \min_\tau \frac{\chi_n(e)-\chi_n(\tau)}{\phi(e)-\phi(\tau)} = 1-\max_{\tau} \frac{\chi_n(\tau)}{n}, \\ \alp(n)& = \frac{\phi(e)}{\chi_n(e)} \min_{\tau} \frac{c_\tau(\chi_{n})}{c_\tau(\phi)} = \frac{1}{n} \min_{\tau}
\frac{c_\tau(\chi_{n}) \overline{\tau}}{\overline{\tau} - 1}. \end{align*} For each of the five $n$, the classes contributing to the minima are in bold on Table~\ref{chartabs5}. So, extremely simply, for computing $\walp(n)$ on the left, the largest $\chi_n(\tau)$ besides $\chi_n(e)$ are in bold. For computing $\alp(n)$ on the right, the $c_\tau(\chi_n)$ with $c_\tau(\chi_n)/c_\tau(\phi)$ minimized are put in bold. For the group $S_5$, one has agreement $\walp(n) = \alp(n)$ in all five cases. This equality occurs for 170 of the lines in Tables~\ref{tablelabel1}--\ref{tablelabel8}, with the other possibility $\walp(n) < \alp(n)$ occurring for the remaining 25 lines.
For any cutoff $B$, conductor relations give \[ \cK(\chi_n;B^{\walp(n)}) \subseteq \cK(\phi; B). \] One has an analogous inclusion for general $(G,\chi)$, with $\phi$ again the regular character for $G$. When $G$ satisfies the tame-wild principle of \cite{jr-tame-wild}, the $\walp$ in exponents can be replaced by $\alp$. The group $S_5$ does satisfy the tame-wild principle, but in this case
the replacement has no effect.
The final results are in Table~\ref{tablelabel1}. In particular for $n = 4a$, $4b$, $5a$, $5b$, $6a$ the unique minimizing fields are $K^{103}$, $K^{21}$, $K^{14}$, $K^{6}$, and $K^{12}$, with root conductors approximately $6.33$, $18.72$, $17.78$, $16.27$, and $18.18$. The lengths of the initial segments identified are $45$, $15$, $186$, $592$, and $110$. Note that because of the relations $\phi_5 = 1+ \chi_{4a}$ and $\phi_6 = 1 + \chi_{5a}$, the results for $4a$ and $5a$ are just translations of known minima of discriminants of number fields with Galois groups $5T5$ and $6T14$ respectively. For $4b$, $5b$, $6a$, and the majority of the characters on the tables of the next section, the first root conductor and the entire initial segment are new.
\section{Tables for $84$ groups $G$} \label{Tables} In this section, we present our computational results for small Galois types. For simplicity, we focus on results coming from complete lists of Galois number fields. Summarizing statements are given in \S\ref{twotheorems} and then many more details in \S\ref{table-org}.
\subsection{Lower bounds and initial segments} \label{twotheorems} We consider all groups with a faithful transitive permutation representation in some degree from two to nine, except we exclude the nonsolvable groups in degrees eight and nine. There are $84$ such groups, and we consider all associated Galois types $(G,\chi)$ with $\chi$ a rationally irreducible faithful character. Our first result gives conditional lower bounds:
\begin{thm} \label{bounds-are-right}
For each of the $195$ Galois types $(G,\chi)$ listed in
Tables~\ref{tablelabel1}--\ref{tablelabel8}, the listed value
$\mathfrak{d}$ gives a lower bound for the root conductor of all
Artin representations of type $(G,\chi)$,
assuming the Artin conjecture and Riemann hypothesis for relevant
$L$-functions. \end{thm}
The bounds in Tables~\ref{tablelabel1}--\ref{tablelabel8} are graphed with the best previously known \begin{figure}
\caption{Points $(\chi_1(e),\mathfrak{d}(G,\chi))$ present lower bounds from Tables~\ref{tablelabel1}--\ref{tablelabel8}. The piecewise-linear curve plots lower bounds from \cite{PM}. Both the points and the curve assume the Artin conjecture and Riemann hypothesis for the relevant $L$-functions. }
\label{amalia-plot}
\end{figure} bounds from \cite{PM} in Figure~\ref{amalia-plot}. The horizontal axis represents the dimension $n_1=\chi_1(e)$ of any irreducible constituent $\chi_1$ of $\chi$. The vertical axis corresponds to lower bounds on root conductors. The piecewise-linear curve connects bounds from \cite{PM}, and there is one dot at height $\mathfrak{d}(G,\chi)$ for each $(G,\chi)$ from Tables~\ref{tablelabel1}--\ref{tablelabel8} with $\chi_1(e)\leq 20$. Here we are freely passing back and forth between a rational character $\chi$ and an irreducible constituent $\chi_1$ via $\delta_1(G,\chi) = \delta_1(G,\chi_1)$, which is a direct consequence of \eqref{discequal}.
Not surprisingly, the type-based bounds are larger. In low dimensions $n_1$, some type-based bounds are close to the general bounds, but by dimension $5$ there is a clear separation which widens as the dimension grows. This may in part be explained by the fact that we are only seeing a small number of representations for each of these dimensions. However, as we explain in \S\ref{speculation}, we also expect that the asymptotic lower bound
of $\sqrt{\Omega} \approx 6.7$ \cite{PM} is not optimal, and that this bound is more likely to be at least $\Omega\approx 44.8$.
Our second result unconditionally identifies initial segments: \begin{thm} \label{minima-and-segments-are-right} For $144$ Galois
types $(G,\chi)$, Tables~\ref{tablelabel1}--\ref{tablelabel8} identify
a non-empty initial segment $\cL(G,\chi;B^\beta)$, and in particular
identify the minimal root conductor $\delta_1(G,\chi)$. \end{thm}
\subsection{Tables detailing results on lower bounds and initial segments} \label{table-org} Our tables are organized by the standard doubly-indexed lists of transitive permutation groups $mTj$, with degrees $m$ running from $2$ through $9$. Within a degree, the blocks of rows are indexed by increasing $j$. There is no block to print if $mTj$ has no faithful irreducible characters. For example, there is no block to print for groups having noncyclic center, such as $4T2 = V = C_2 \times C_2$ or $8T9 = D_4 \times C_2$. Also the block belonging to $mTj$ is omitted if the abstract group $G$ underlying $mTj$ has appeared earlier. For example $G=S_4$ has four transitive realizations in degrees $m \leq 8$, namely $4T5$, $6T7$, $6T8$, and $8T14$; there is correspondingly a $4T5$ line on our tables, but no $6T7$, $6T8$, or $8T14$ lines.
\begin{table}[htbp] \centering \caption{\label{tablelabel1}Artin $L$-functions with small conductor from groups in degrees $2$, $3$, $4$, and $5$}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline $C_2$ & 2 & TW & 2, 2 & & 1.73 & $3^*$& & & 100 & 6086\\ 2T1 & $1$ & & $[-1, -1]$ & $2.97_{\ell}$ & 3.00 & $3$ & 1 & $2.00_{\phantom{\text{$\bullet$}}}$ & 10000.00 & 6086\\ \hline $C_3$ & 3 & TW & 2, 2 & & 3.66 & $7^*$& & & 500 & 1772\\ 3T1 & $1$ & $\sqrt{-3}$ & $[-1, -1]$ & $6.93_{\ell}$ & 7.00 & $7$ & 1 & $1.50_{\phantom{\text{$\bullet$}}}$ & 11180.34 & 1772\\ \hline $S_3$ & 6 & TW & 3, 4 & & 4.80 & $23^*$& & & 250 & 24484\\ 3T2 & $2$ & & $[-1, 0]$ & $4.74_{\ell}$ & 4.80 & $23$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 250.00 & 13329\\ \hline $C_4$ & 4 & TW & 3, 4 & & 3.34 & $5^*$& & & 150 & 2668\\ 4T1 & $1$ & $i$ & $[-2, 0]$ & $4.96_{S}$ & 5.00 & $5$ & 1 & $1.33_\bullet$ & 796.99 & 489\\ \hline $D_4$ & 8 & TW & 5, 10 & & 6.03 & $3^*7^*$& & & 150 & 31742\\ 4T3 & $2$ & & $[-2, 0]$ & $5.74_{q}$ & 6.24 & $3\!\cdot\! 
13$ & 2 & $1.00_{\phantom{\text{$\bullet$}}}$ & 150.00 & 9868\\ \hline $A_4$ & 12 & TW & 3, 4 & & 10.35 & $2^*7^*$& & & 150 & 846\\ 4T4 & $3$ & & $[-1, 0]$ & $7.60_{q}$ & 14.64 & $2^{6} 7^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 150.00 & 270\\ \hline $S_4$ & 24 & TW & 5, 12 & & 13.56 & $2^*11^*$& & & 150 & 14587\\ 6T8 & $3$ & & $[-1, 1]$ & $8.62_{G}$ & 11.30 & $2^{2} 19^{2}$ & 4 & $0.89_\bullet$ & 85.96 & 779\\ 4T5 & $3$ & & $[-1, 1]$ & $5.49_{p}$ & 6.12 & $229$ & 9 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 1603\\ \hline $C_5$ & 5 & TW & 2, 2 & & 6.81 & $11^*$& & & 200 & 49\\ 5T1 & $1$ & $\zeta_{5}$ & $[-1, -1]$ & $10.67_{\ell}$ & 11.00 & $11$ & 1 & $1.25_{\phantom{\text{$\bullet$}}}$ & 752.12 & 49\\ \hline $D_5$ & 10 & TW & 3, 4 & & 6.86 & $47^*$& & & 200 & 3622\\ 5T2 & $2$ & $\sqrt{5}$ & $[-1, 0]$ & $6.73_{q}$ & 6.86 & $47$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 3219\\ \hline $F_5$ & 20 & TW & 4, 8 & & 11.08 & $2^*5^*$& & & 200 & 3010\\ 5T3 & $4$ & & $[-1, 0]$ & $10.28_{q}$ & 13.69 & $2^{4} 13^{3}$ & 2 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 2066\\ \hline $A_5$ & 60 & TW & 4, 8 & & 18.70 & $2^*17^*$& & & 85 & 473\\ 5T4 & $4$ & & $[-1, 1]$ & $8.18_{g}$ & 11.66 & $2^{6} 17^{2}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 27.99 & 46\\ 6T12 & $5$ & & $[-1, 1]$ & $10.18_{p}$ & 12.35 & $2^{6} 67^{2}$ & 3 & $0.80_{\phantom{\text{$\bullet$}}}$ & 34.96 & 216\\ 12T33 & $3$ & $\sqrt{5}$ & $[-2, 1]$ & $10.34_{g}$ & 26.45 & $2^{6} 17^{2}$ & 1 & $0.83_{\phantom{\text{$\bullet$}}}$ & 40.54 & 18\\ \hline $S_5$ & 120 & TW & 7, 40 & & 24.18 & $2^*3^*5^*$& & & 85 & 2080\\ 5T5 & $4$ & & $[-1, 2]$ & $6.28_{\ell}$ & 6.33 & $1609$ & 103 & $0.50_{\phantom{\text{$\bullet$}}}$ & 9.22 & 45\\ 10T12 & $4$ & & $[-2, 1]$ & $10.28_{V}$ & 18.72 & $5^{2} 17^{3}$ & 21 & $0.75_{\phantom{\text{$\bullet$}}}$ & 27.99 & 15\\ 10T13 & $5$ & & $[-1, 1]$ & $12.13_{V}$ & 16.27 & $2^{4} 3^{2} 89^{2}$ & 6 & $0.80_{\phantom{\text{$\bullet$}}}$ & 34.96 & 592\\ 6T14 & $5$ & & 
$[-1, 1]$ & $11.09_{g}$ & 17.78 & $2^{6} 3^{4} 7^{3}$ & 14 & $0.80_{\phantom{\text{$\bullet$}}}$ & 34.96 & 186\\ 20T35 & $6$ & & $[-2, 1]$ & $12.26_{g}$ & 18.18 & $2^{4} 3^{3} 17^{4}$ & 12 & $0.83_{\phantom{\text{$\bullet$}}}$ & 40.54 & 110\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel2}Artin $L$-functions of small conductor from sextic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline $C_6$ & 6 & TW & 4, 6 & & 5.06 & $7^*$& & & 200 & 9609\\ 6T1 & $1$ & $\sqrt{-3}$ & $[-2, 1]$ & $6.93_{P}$ & 7.00 & $7$ & 1 & $1.20_\bullet$ & 577.08 & 617\\ \hline $D_6$ & 12 & TW & 6, 14 & & 8.06 & $3^*5^*$& & & 150 & 46197\\ 6T3 & $2$ & & $[-2, 1]$ & $7.60_{G}$ & 9.33 & $3\!\cdot\! 29$ & 6 & $1.00_\bullet$ & 150.00 & 10242\\ \hline $S_3C_3$ & 18 & & 6, 17 & & 10.06 & $2^*3^*7^*$& & & 200 & 9420\\ 6T5 & $2$ & $\sqrt{-3}$ & $[-2, 1]$ & $5.69_{q*}$ & 7.21 & $2^{2} 13$ & 4 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 503\\ \hline $A_4C_2$ & 24 & & 6, 16 & & 12.31 & $2^*7^*$& & & 150 & 6676\\ 6T6 & $3$ & & $[-3, 1]$ & $7.60_{p}$ & 8.60 & $7^{2} 13$ & 3 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 98\\ \hline $S_3^2$ & 36 & & 9, 69 & & 15.53 & $2^*19^*$& & & 200 & 45117\\ 6T9 & $4$ & & $[-2, 1]$ & $7.98_{q*}$ & 14.83 & $2^{4} 5^{2} 11^{2}$ & 27 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 824\\ \hline $C_3^2{\rtimes}C_4$ & 36 & TW & 5, 16 & & 23.57 & $3^*5^*$& & & 150 & 331\\ 6T10 & $4^{\rlap{\scriptsize{2}}}$ & & $[-2, 1]$ & $7.98_{q*}$ & 17.80 & $2^{11} 7^{2}$ & 2 & $0.75_{\phantom{\text{$\bullet$}}}$ & 42.86 & 33\\ \hline $S_4C_2$ & 48 & & 10, 96 & & 16.13 & $2^*23^*$& & & 150 & 70926\\ 6T11 & $3^{\rlap{\scriptsize{2}}}$ & & $[-3, 1]$ & $6.14_{g}$ & 6.92 & $2^{2} 83$ & 7 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 3694\\ \hline $C_3^2{\rtimes}D_4$ & 72 & TW & 9, 105 & & 21.76 & $3^*11^*$& & & 150 & 8536\\ 6T13 & $4^{\rlap{\scriptsize{2}}}$ & & $[-2, 2]$ & $7.60_{p}$ & 7.90 & $3^{2} 433$ & 52 & $0.50_{\phantom{\text{$\bullet$}}}$ & 12.25 & 41\\ 12T36 & $4^{\rlap{\scriptsize{2}}}$ & & $[-2, 1]$ & $11.29_{P}$ & 23.36 & $3^{5} 5^{2} 7^{2}$ & 18 & $0.75_{\phantom{\text{$\bullet$}}}$ & 42.86 & 106\\ \hline $A_6$ & 
360 & TW & 6, 28 & & 31.66 & $2^*3^*$& & & 60 & 26\\ 6T15 & $5^{\rlap{\scriptsize{2}}}$ & & $[-1, 2]$ & $7.71_{\ell}$ & 12.35 & $2^{6} 67^{2}$ & 8 & $0.60_{\phantom{\text{$\bullet$}}}$ & 11.67 & 0\\ 10T26 & $9$ & & $[-1, 1]$ & $17.69_{g}$ & 28.20 & $2^{18} 3^{16}$ & 1 & $0.89_{\phantom{\text{$\bullet$}}}$ & 38.07 & 7\\ 30T88 & $10$ & & $[-2, 1]$ & $18.34_{g}$ & 30.61 & $2^{24} 3^{16}$ & 1 & $0.90_{\phantom{\text{$\bullet$}}}$ & 39.84 & 4\\ 36T555 & $8$ & $\sqrt{5}$ & $[-2, 1]$ & $20.70_{g}$ & 42.81 & $2^{18} 3^{16}$ & 1 & $0.94_{\phantom{\text{$\bullet$}}}$ & 46.45 & 3\\ \hline $S_6$ & 720 & & 11, 596 & & 33.50 & $2^*3^*5^*$& & & 60 & 99\\ 12T183 & $5^{\rlap{\scriptsize{2}}}$ & & $[-3, 2]$ & $8.21_{v}$ & 11.53 & $11^{2} 41^{2}$ & 6 & $0.60_{\phantom{\text{$\bullet$}}}$ & 11.67 & 1\\ 6T16 & $5^{\rlap{\scriptsize{2}}}$ & & $[-1, 3]$ & $6.23_{\ell}$ & 6.82 & $14731$ & 53 & $0.40_{\phantom{\text{$\bullet$}}}$ & 5.14 & 0\\ 10T32 & $9$ & & $[-1, 3]$ & $10.77_{v}$ & \textit{16.60} & $2^{15} 11^{3} 13^{3}$ & 74 & $0.67_{\phantom{\text{$\bullet$}}}$ & 15.33 & 0\\ 20T145 & $9$ & & $[-3, 1]$ & $19.33_{g}$ & 31.25 & $2^{6} 5^{6} 73^{4}$ & 16 & $0.89_{\phantom{\text{$\bullet$}}}$ & 38.07 & 4\\ 30T176 & $10^{\rlap{\scriptsize{2}}}$ & & $[-2, 2]$ & $16.88_{v}$ & 24.22 & $11^{4} 41^{6}$ & 6 & $0.80_{\phantom{\text{$\bullet$}}}$ & 26.46 & 1\\ 36T1252 & $16$ & & $[-2, 1]$ & $22.73_{g}$ & 35.46 & $2^{36} 3^{8} 7^{12}$ & 5 & $0.94_{\phantom{\text{$\bullet$}}}$ & 46.45 & 11\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel3}Artin $L$-functions of small conductor from septic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline $C_7$ & 7 & TW & 2, 2 & & 17.93 & $29^*$& & & 200 & 15\\ 7T1 & $1$ & $\zeta_{7}$ & $[-1, -1]$ & $14.03_{\ell}$ & 29.00 & $29$ & 1 & $1.17_{\phantom{\text{$\bullet$}}}$ & 483.65 & 15\\ \hline $D_7$ & 14 & TW & 3, 4 & & 8.43 & $71^*$& & & 200 & 2078\\ 7T2 & $2$ & $\zeta_7^+$ & $[-1, 0]$ & $8.38_{q}$ & 8.43 & $71$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 1948\\ \hline $C_7{\rtimes}C_3$ & 21 & TW & 3, 4 & & 31.64 & $2^*73^*$& & & 100 & 11\\ 7T3 & $3$ & $\sqrt{-7}$ & $[-1, 0]$ & $25.50_{q}$ & 34.93 & $2^{3} 73^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 100.00 & 8\\ \hline $F_7$ & 42 & TW & 5, 12 & & 15.99 & $2^*7^*$& & & 75 & 342\\ 7T4 & $6$ & & $[-1, 0]$ & $14.47_{q}$ & 18.34 & $11^{3} 13^{4}$ & 2 & $1.00_{\phantom{\text{$\bullet$}}}$ & 75.00 & 287\\ \hline $\textrm{GL}_3(2)$ & 168 & TW & 5, 14 & & 32.25 & $2^*3^*11^*$& & & 45 & 19\\ 42T37 & $3$ & $\sqrt{-7}$ & $[-2, 2]$ & $15.55_{G}$ & 26.06 & $7^{2} 19^{2}$ & 7 & $0.89_\bullet$ & 29.48 & 1\\ 7T5 & $6$ & & $[-1, 2]$ & $9.36_{p}$ & 11.23 & $13^{2} 109^{2}$ & 4 & $0.67_{\phantom{\text{$\bullet$}}}$ & 12.65 & 1\\ 8T37 & $7$ & & $[-1, 1]$ & $14.10_{g}$ & 32.44 & $3^{8} 7^{8}$ & 11 & $0.86_{\phantom{\text{$\bullet$}}}$ & 26.12 & 0\\ 21T14 & $8$ & & $[-1, 1]$ & $14.90_{g}$ & 23.16 & $2^{6} 3^{6} 11^{6}$ & 1 & $0.88_{\phantom{\text{$\bullet$}}}$ & 27.96 & 1\\ \hline $A_7$ & 2520 & & 8, 115 & & 39.52 & $2^*3^*7^*$& & & 45 & 1\\ 7T6 & $6$ & & $[-1, 3]$ & $9.13_{\ell}$ & 12.54 & $3^{6} 73^{2}$ & 26 & $0.50_{\phantom{\text{$\bullet$}}}$ & 6.71 & 0\\ 15T47 & $14$ & & $[-1, 2]$ & $19.39_{g}$ & \textit{36.05} & $3^{24} 53^{6}$ & 4 & $0.86_{\phantom{\text{$\bullet$}}}$ & 26.12 & 0\\ 21T33 & $14$ & & $[-1, 2]$ & $19.39_{g}$ & \textit{31.07} & $3^{18} 17^{10}$ & 2 & 
$0.86_{\phantom{\text{$\bullet$}}}$ & 26.12 & 0\\ 42T294 & $15$ & & $[-1, 3]$ & $18.18_{v}$ & \textit{35.73} & $2^{12} 3^{20} 7^{12}$ & 1 & $0.80_{\phantom{\text{$\bullet$}}}$ & 21.02 & 0\\ 70 & $10$ & $\sqrt{-7}$ & $[-4, 2]$ & $22.49_{g}$ & \textit{41.21} & $2^{9} 3^{14} 7^{8}$ & 1 & $0.90_{\phantom{\text{$\bullet$}}}$ & 30.75 & 0\\ 42T299 & $21$ & & $[-3, 1]$ & $26.95_{g}$ & \textit{38.33} & $2^{18} 3^{30} 7^{16}$ & 1 & $0.95_{\phantom{\text{$\bullet$}}}$ & 37.54 & 0\\ 70 & $35$ & & $[-1, 1]$ & $\mathit{28.79_{g}}$ & \textit{41.28} & $2^{30} 3^{50} 7^{28}$ & 1 & $0.97_\circ$ & 40.36 & 0\\ \hline $S_7$ & 5040 & & 15, -- & & 40.49 & $2^*3^*5^*$& & & 35 & 0\\ 7T7 & $6$ & & $[-1, 4]$ & $7.50_{\ell}$ & 7.55 & $184607$ & &
$0.33_{\phantom{\text{$\bullet$}}}$ & 3.27 & 0\\ 14T46 & $6$ & & $[-4, 3]$ & $\mathit{7.66_{p}}$ & \textit{17.02} & $2^{2} 7^{5} 19^{2}$ & 194 & $0.50_{\phantom{\text{$\bullet$}}}$ & 5.92 & 0\\ 30T565 & $14$ & & $[-2, 4]$ & $16.32_{p}$ & \textit{26.02} & $2^{20} 53^{8}$ & 2 & $0.71_{\phantom{\text{$\bullet$}}}$ & 12.67 & 0\\ 30T565 & $14$ & & $[-4, 2]$ & $20.24_{g}$ & \textit{30.98} & $2^{14} 71^{9}$ & 46 & $0.86_{\phantom{\text{$\bullet$}}}$ & 21.06 & 0\\ 42T413 & $14$ & & $[-6, 2]$ & $20.24_{g}$ & \textit{38.27} & $2^{20} 3^{12} 11^{10}$ & 6 & $0.86_{\phantom{\text{$\bullet$}}}$ & 21.06 & 0\\ 21T38 & $14$ & & $[-1, 6]$ & $13.12_{p}$ & \textit{22.02} & $2^{24} 3^{12} 29^{4}$ & 170 & $0.57_{\phantom{\text{$\bullet$}}}$ & 7.63 & 0\\ 42T412 & $15$ & & $[-3, 5]$ & $16.96_{p}$ & \textit{32.90} & $3^{12} 5^{5} 11^{13}$ & 24 & $0.67_{\phantom{\text{$\bullet$}}}$ & 10.70 & 0\\ 42T411 & $15$ & & $[-5, 3]$ & $16.56_{g}$ & \textit{29.92} & $2^{30} 3^{12} 17^{6}$ & 3 & $0.80_{\phantom{\text{$\bullet$}}}$ & 17.19 & 0\\ 70 & $20$ & & $[-4, 2]$ & $23.53_{g}$ & \textit{35.18} & $2^{34} 53^{12}$ & 2 & $0.90_{\phantom{\text{$\bullet$}}}$ & 24.53 & 0\\ 42T418 & $21$ & & $[-3, 3]$ & $20.24_{g}$ & \textit{33.42} & $2^{41} 3^{18} 17^{9}$ & 3 & $0.86_{\phantom{\text{$\bullet$}}}$ & 21.06 & 0\\ 84 & $21$ & & $[-3, 1]$ & $28.27_{g}$ & \textit{39.59} & $2^{38} 3^{18} 7^{16}$ & 4 & $0.95_{\phantom{\text{$\bullet$}}}$ & 29.55 & 0\\ 70 & $35$ & & $[-1, 5]$ & $\mathit{25.92_{p}}$ & \textit{40.71} & $2^{61} 3^{30} 7^{28}$ & 4 & $0.86_{\phantom{\text{$\bullet$}}}$ & 21.06 & 0\\ 126 & $35$ & & $[-5, 1]$ & $\mathit{30.23_{g}}$ & \textit{43.26} & $2^{54} 3^{42} 5^{30}$ & &
$0.97_\circ$ & 31.62 & 0\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel4}Artin $L$-functions of small conductor from octic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline $C_8$ & 8 & TW & 4, 8 & & 11.93 & $17^*$& & & 125 & 198\\ 8T1 & $1$ & $\zeta_{8}$ & $[-4, 0]$ & $8.84_{S}$ & 17.00 & $17$ & 1 & $1.14_\bullet$ & 249.15 & 41\\ \hline $Q_8$ & 8 & TW & 5, 10 & & 18.24 & $2^*3^*$& & & 100 & 72\\ 8T5 & $2$ & & $[-2, 0]$ & $26.29_{S}$ & 48.00 & $2^{8} 3^{2}$ & 2 & $1.33_\bullet$ & 464.16 & 41\\ \hline $D_8$ & 16 & TW & 6, 20 & & 9.75 & $5^*19^*$& & & 125 & 6049\\ 8T6 & $2$ & $\sqrt{2}$ & $[-4, 0]$ & $9.07_{q}$ & 9.75 & $5\!\cdot\! 19$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 125.00 & 2296\\ \hline $C_8{\rtimes}C_2$ & 16 & & 7, 24 & & 9.32 & $3^*5^*$& & & 125 & 672\\ 8T7 & $2$ & $i$ & $[-4, 0]$ & $9.07_{q}$ & 15.00 & $3^{2} 5^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 125.00 & 75\\ \hline $QD_{16}$ & 16 & & 6, 20 & & 10.46 & $2^*3^*$& & & 125 & 1664\\ 8T8 & $2$ & $\sqrt{-2}$ & $[-4, 0]$ & $9.07_{q}$ & 16.97 & $2^{5} 3^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 125.00 & 155\\ \hline $Q_8{\rtimes}C_2$ & 16 & & 9, 32 & & 9.80 & $2^*3^*$& & & 100 & 3366\\ 8T11 & $2$ & $i$ & $[-4, 0]$ & $9.07_{q}$ & 10.95 & $2^{3} 3\!\cdot\! 5$ & 3 & $1.00_{\phantom{\text{$\bullet$}}}$ & 100.00 & 825\\ \hline $\textrm{SL}_2(3)$ & 24 & TW & 5, 14 & & 29.84 & $163^*$& & & 250 & 681\\ 24T7 & $2$ & & $[-2, 1]$ & $65.51_{P}$ & 163.00 & $163^{2}$ & 1 & $1.20_\bullet$ & 754.27 & 94\\ 8T12 & $2$ & $\sqrt{-3}$ & $[-4, 1]$ & $8.09_{p}$ & 12.77 & $163$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 62.87 & 78\\ \hline
& 32 & & 11, 74 & & 13.79 & $2^*5^*$& & & 125 & 11886\\ 8T15 & $4$ & & $[-4, 0]$ & $12.92_{q}$ & 16.12 & $2^{4} 5^{2} 13^{2}$ & 4 & $1.00_{\phantom{\text{$\bullet$}}}$ & 125.00 & 3464\\ \hline
& 32 & & 9, 58 & & 13.56 & $5^*11^*$& & & 125 & 766\\ 8T16 & $4$ & & $[-4, 0]$ & $12.92_{q}$ & 16.58 & $5^{4} 11^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 125.00 & 129\\ \hline $C_4\wr C_2$ & 32 & & 10, 90 & & 13.37 & $2^*5^*$& & & 125 & 2748\\ 8T17 & $2^{\rlap{\scriptsize{2}}}$ & $i$ & $[-4, 2]$ & $\mathit{5.74_{p}}$ & 8.25 & $2^{2} 17$ & 6 & $0.50_\circ$ & 11.18 & 3\\ \hline
& 32 & & 9, 58 & & 14.05 & $2^*$& & & 125 & 2720\\ 8T19 & $4$ & & $[-4, 0]$ & $12.92_{q}$ & 19.03 & $2^{17}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 125.00 & 1282\\ \hline
& 32 & & 17, 806 & & 18.42 & $2^*3^*5^*$& & & 100 & 3284\\ 8T22 & $4$ & & $[-4, 0]$ & $12.92_{q}$ & 20.49 & $2^{4} 3^{2} 5^{2} 7^{2}$ & 3 & $1.00_{\phantom{\text{$\bullet$}}}$ & 100.00 & 1162\\ \hline $\textrm{GL}_2(3)$ & 48 & & 7, 41 & & 16.52 & $2^*43^*$& & & 100 & 2437\\ 24T22 & $2$ & $\sqrt{-2}$ & $[-4, 2]$ & $\mathit{5.74_{v}}$ & \textit{16.82} & $283$ & 2 & $0.50_\circ$ & 10.00 & 0\\ 8T23 & $4$ & & $[-4, 1]$ & $9.07_{p}$ & 9.95 & $3^{4} 11^{2}$ & 4 & $0.75_{\phantom{\text{$\bullet$}}}$ & 31.62 & 99\\ \hline $C_2^3{\rtimes}C_7$ & 56 & TW & 3, 4 & & 17.93 & $29^*$& & & 200 & 28\\ 8T25 & $7$ & & $[-1, 0]$ & $16.10_{q}$ & 17.93 & $29^{6}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 27\\ \hline
& 64 & & 16, -- & & 20.37 & $2^*5^*$& & & 125 & 10317\\ 8T26 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $9.07_{p}$ & \textit{12.85} & $3^{2} 5^{2} 11^{2}$ & 7 & $0.50_\circ$ & 11.18 & 0\\ \hline $C_2\wr C_4$ & 64 & & 11, 206 & & 19.44 & $2^*$& & & 125 & 2482\\ 8T27 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $9.07_{p}$ & 10.60 & $5^{3} 101$ & 19 & $0.50_{\phantom{\text{$\bullet$}}}$ & 11.18 & 1\\ \hline $C_2 \wr C_2^2$ & 64 & & 16, -- & & 19.41 & $2^*7^*$& & & 125 & 11685\\ 8T29 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $9.07_{p}$ & 10.13 & $2^{4} 3^{2} 73$ & 28 & $0.50_{\phantom{\text{$\bullet$}}}$ & 11.18 & 1\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel5}Artin $L$-functions of small conductor from octic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline
& 64 & & 11, 206 & & 19.44 & $2^*$& & & 125 & 1217\\ 8T30 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $9.07_{p}$ & \textit{14.57} & $5^{3} 19^{2}$ & 3 & $0.50_\circ$ & 11.18 & 0\\ \hline
& 96 & & 9, 49 & & 34.97 & $2^*5^*13^*$& & & 250 & 5520\\ 8T32 & $4$ & & $[-4, 1]$ & $9.12_{g}$ & 22.80 & $2^{6} 5^{2} 13^{2}$ & 2 & $0.75_{\phantom{\text{$\bullet$}}}$ & 62.87 & 180\\ 24T97 & $4$ & $\sqrt{-3}$ & $[-8, 1]$ & $\mathit{13.19_{g}}$ & 43.30 & $2^{6} 5^{2} 13^{3}$ & 2 & $0.88_\circ$ & 125.37 & 112\\ \hline
& 96 & & 8, 44 & & 30.01 & $2^*5^*7^*$& & & 150 & 791\\ 8T33 & $6^{\rlap{\scriptsize{2}}}$ & & $[-2, 2]$ & $11.29_{p}$ & 25.14 & $5^{3} 7^{4} 29^{2}$ & 12 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 3\\ \hline
& 96 & & 10, 92 & & 27.28 & $2^*3^*31^*$& & & 110 & 1915\\ 8T34 & $6$ & & $[-2, 2]$ & $11.29_{p}$ & 22.61 & $31^{3} 67^{2}$ & 64 & $0.67_{\phantom{\text{$\bullet$}}}$ & 22.96 & 1\\ \hline $C_2\wr D_4$ & 128 & & 20, -- & & 22.91 & $2^*3^*13^*$& & & 125 & 14369\\ 8T35 & $4^{\rlap{\scriptsize{4}}}$ & & $[-4, 2]$ & $9.07_{p}$ & 9.45 & $5^{2} 11\!\cdot\! 29$ & 110 & $0.50_{\phantom{\text{$\bullet$}}}$ & 11.18 & 9\\ \hline $C_2^3{\rtimes}F_{21}$ & 168 & & 5, 14 & & 31.64 & $2^*73^*$& & & 200 & 342\\ 8T36 & $7$ & & $[-1, 1]$ & $16.06_{p}$ & 21.03 & $2^{6} 73^{4}$ & 1 & $0.86_{\phantom{\text{$\bullet$}}}$ & 93.82 & 120\\ 24T283 & $7$ & $\sqrt{-3}$ & $[-2, 1]$ & $\mathit{20.23_{p}}$ & 38.55 & $2^{6} 7^{11}$ & 2 & $0.93_\circ$ & 136.98 & 81\\ \hline $C_2\wr A_4$ & 192 & & 12, 700 & & 37.27 & $2^*5^*7^*$& & & 250 & 13649\\ 8T38 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $8.56_{v}$ & 15.20 & $2^{6} 7^{2} 17$ & 11 & $0.50_{\phantom{\text{$\bullet$}}}$ & 15.81 & 1\\ 24T288 & $4^{\rlap{\scriptsize{2}}}$ & $\sqrt{-3}$ & $[-8, 4]$ & $\mathit{10.84_{p}}$ & \textit{23.95} & $2^{6} 3\!\cdot\! 5\!\cdot\! 7^{3}$ & 66 & $0.50_{\phantom{\text{$\bullet$}}}$ & 15.81 & 0\\ \hline
& 192 & & 13, 559 & & 32.35 & $2^*23^*$& & & 100 & 1193\\ 8T39 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $5.74_{p}$ & 8.71 & $2^{4} 359$ & 49 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 8\\ 24T333 & $8$ & & $[-8, 1]$ & $\mathit{15.28_{g}}$ & 39.94 & $2^{10} 43^{6}$ & 2 & $0.88_\circ$ & 56.23 & 16\\ \hline
& 192 & & 13, 559 & & 29.71 & $2^*23^*$& & & 100 & 2001\\ 8T40 & $4^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $\mathit{5.74_{p}}$ & \textit{13.04} & $2^{2} 5^{2} 17^{2}$ & 9 & $0.50_\circ$ & 10.00 & 0\\ 24T332 & $8$ & & $[-8, 1]$ & $\mathit{15.28_{g}}$ & 29.71 & $2^{12} 23^{6}$ & 1 & $0.88_\circ$ & 56.23 & 47\\ \hline
& 192 & & 14, 1210 & & 28.11 & $2^*11^*$& & & 100 & 4723\\ 12T108 & $6^{\rlap{\scriptsize{2}}}$ & & $[-2, 2]$ & $\mathit{11.29_{p}}$ & 20.78 & $2^{12} 3^{9}$ & 5 & $0.67_{\phantom{\text{$\bullet$}}}$ & 21.54 & 2\\ 8T41 & $6^{\rlap{\scriptsize{2}}}$ & & $[-2, 2]$ & $11.29_{p}$ & 13.01 & $5^{3} 197^{2}$ & 13 & $0.67_{\phantom{\text{$\bullet$}}}$ & 21.54 & 40\\ \hline $A_4\wr C_2$ & 288 & & 10, 178 & & 32.18 & $2^*37^*$& & & 135 & 1362\\ 8T42 & $6$ & & $[-2, 3]$ & $11.29_{p}$ & 11.58 & $5^{3} 139^{2}$ & 76 & $0.50_{\phantom{\text{$\bullet$}}}$ & 11.62 & 1\\ 18T112 & $9$ & & $[-3, 1]$ & $\mathit{17.10_{g}}$ & 35.11 & $2^{24} 13^{6}$ & 2 & $0.89_{\phantom{\text{$\bullet$}}}$ & 78.28 & 66\\ 12T128 & $9$ & & $[-3, 3]$ & $13.59_{p}$ & 22.52 & $7^{6} 233^{3}$ & 56 & $0.67_{\phantom{\text{$\bullet$}}}$ & 26.32 & 6\\ 24T703 & $6$ & $\sqrt{-3}$ & $[-4, 4]$ & $\mathit{13.79_{p}}$ & \textit{32.18} & $2^{4} 37^{5}$ & 1 & $0.67_{\phantom{\text{$\bullet$}}}$ & 26.32 & 0\\ \hline $C_2 \wr S_4$ & 384 & & 20, -- & & 31.38 & $5^*197^*$& & & 100 & 6400\\ 8T44 & $4^{\rlap{\scriptsize{4}}}$ & & $[-4, 2]$ & $5.74_{p}$ & 7.53 & $5\!\cdot\! 643$ & 391 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 26\\ 24T708 & $8^{\rlap{\scriptsize{2}}}$ & & $[-8, 4]$ & $\mathit{13.19_{p}}$ & \textit{25.55} & $2^{12} 5^{2} 11^{6}$ & 4 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel6}Artin $L$-functions of small conductor from octic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline
& 576 & & 16, -- & & 29.35 & $2^*3^*$& & & 100 & 2664\\ 12T161 & $6$ & & $[-2, 3]$ & $\mathit{7.60_{p}}$ & \textit{19.04} & $2^{8} 3^{3} 83^{2}$ & 1179 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ 8T45 & $6$ & & $[-2, 3]$ & $7.60_{p}$ & 15.48 & $2^{14} 29^{2}$ & 110 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ 18T179 & $9$ & & $[-3, 1]$ & $18.84_{g}$ & 29.69 & $2^{12} 5^{6} 23^{4}$ & 16 & $0.89_{\phantom{\text{$\bullet$}}}$ & 59.95 & 121\\ 12T165 & $9$ & & $[-3, 3]$ & $10.60_{p}$ & 17.20 & $2^{6} 19^{3} 67^{3}$ & 161 & $0.67_{\phantom{\text{$\bullet$}}}$ & 21.54 & 12\\ 18T185 & $9^{\rlap{\scriptsize{2}}}$ & & $[-3, 3]$ & $\mathit{13.74_{p}}$ & \textit{22.69} & $2^{12} 3^{11} 13^{3}$ & 6 & $0.67_{\phantom{\text{$\bullet$}}}$ & 21.54 & 0\\ 24T1504 & $12$ & & $[-4, 4]$ & $\mathit{16.40_{p}}$ & \textit{35.03} & $2^{8} 5^{6} 31^{8}$ & 51 & $0.67_{\phantom{\text{$\bullet$}}}$ & 21.54 & 0\\ \hline
& 576 & & 11, 522 & & 49.75 & $3^*5^*7^*$& & & 100 & 153\\ 8T46 & $6$ & & $[-2, 3]$ & $9.66_{p}$ & 19.51 & $3^{6} 5^{4} 11^{2}$ & 42 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ 12T160 & $6$ & & $[-2, 3]$ & $\mathit{7.60_{p}}$ & \textit{27.27} & $2^{23} 7^{2}$ & 11 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ 16T1030 & $9$ & & $[-3, 1]$ & $18.84_{g}$ & 35.40 & $2^{22} 3^{6} 13^{4}$ & 6 & $0.89_{\phantom{\text{$\bullet$}}}$ & 59.95 & 50\\ 18T184 & $9$ & & $[-3, 1]$ & $18.84_{g}$ & 35.50 & $2^{12} 5^{7} 23^{4}$ & 7 & $0.89_{\phantom{\text{$\bullet$}}}$ & 59.95 & 32\\ 24T1506 & $12$ & & $[-4, 4]$ & $\mathit{16.40_{p}}$ & \textit{55.16} & $2^{20} 3^{18} 5^{9}$ & 4 & $0.67_{\phantom{\text{$\bullet$}}}$ & 21.54 & 0\\ 36T766 & $9$ & $i$ & $[-6, 2]$ & $\mathit{18.84_{g}}$ & 58.55 & $2^{36} 7^{6}$ & 3 & $0.89_{\phantom{\text{$\bullet$}}}$ & 59.95 & 2\\ \hline $S_4\wr C_2$ & 1152 & & 20, -- & & 35.05 & $2^*5^*41^*$& & & 150 & 23694\\ 12T200 & $6$ & & $[-2, 4]$ & $\mathit{7.60_{p}}$ & \textit{15.62} & $5^{3} 11^{2} 31^{2}$ & 20668 & $0.33_\circ$ & 5.31 & 0\\ 8T47 & $6$ & & $[-2, 4]$ & $7.60_{p}$ & 10.51 & $2^{9} 2633$ & 20566 & $0.33_{\phantom{\text{$\bullet$}}}$ & 5.31 & 0\\ 12T201 & $6$ & & $[-4, 3]$ & $\mathit{7.60_{p}}$ & \textit{19.19} & $3^{7} 151^{2}$ & 21 & $0.50_{\phantom{\text{$\bullet$}}}$ & 12.25 & 0\\ 12T202 & $6$ & & $[-4, 3]$ & $\mathit{7.60_{p}}$ & \textit{16.51} & $3^{10} 7^{3}$ & 12 & $0.50_{\phantom{\text{$\bullet$}}}$ & 12.25 & 0\\ 18T272 & $9$ & & $[-3, 3]$ & $\mathit{9.70_{p}}$ & 19.73 & $3^{6} 853^{3}$ & 450 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 44\\ 18T274 & $9$ & & $[-3, 3]$ & $15.95_{p}$ & 27.07 & $2^{12} 5^{7} 29^{3}$ & 105 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 3\\ 18T273 & $9$ & & $[-3, 3]$ & $\mathit{12.88_{p}}$ & \textit{30.86} & $2^{16} 3^{18}$ & 5 & $0.67_\circ$ & 28.23 & 0\\ 16T1294 & $9$ & & $[-3, 3]$ & $10.60_{p}$ & 13.16 & $43^{3} 53^{3}$ & 39 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 295\\ 36T1946 & 
$12$ & & $[-4, 4]$ & $\mathit{14.77_{p}}$ & \textit{32.80} & $2^{10} 13^{7} 17^{6}$ & 16 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 0\\ 24T2821 & $12$ & & $[-4, 4]$ & $\mathit{16.03_{p}}$ & 26.48 & $2^{16} 5^{6} 41^{5}$ & 1 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 1\\ 36T1758 & $18$ & & $[-6, 2]$ & $\mathit{20.29_{g}}$ & 36.08 & $2^{24} 5^{9} 41^{9}$ & 1 & $0.89_{\phantom{\text{$\bullet$}}}$ & 85.96 & 1222\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel7}Artin $L$-functions of small conductor from nonic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline $C_9$ & 9 & TW & 3, 4 & & 13.70 & $19^*$& & & 200 & 48\\ 9T1 & $1$ & $\zeta_{9}$ & $[-3, 0]$ & $17.02_{Q}$ & 19.00 & $19$ & 1 & $1.13_\bullet$ & 387.85 & 26\\ \hline $D_9$ & 18 & TW & 4, 8 & & 12.19 & $2^*59^*$& & & 200 & 699\\ 9T3 & $2$ & $\zeta_9^+$ & $[-3, 0]$ & $9.70_{q}$ & 14.11 & $199$ & 3 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 638\\ \hline $3^{1+2}_{-}$ & 27 & & 6, 12 & & 31.18 & $7^*13^*$& & & 200 & 32\\ 9T6 & $3$ & $\sqrt{-3}$ & $[-3, 0]$ & $30.32_{q}$ & 38.70 & $7^{3} 13^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 14\\ \hline $3^{1+2}_{+}$ & 27 & TW & 6, 12 & & 50.20 & $3^*19^*$& & & 200 & 16\\ 9T7 & $3$ & $\sqrt{-3}$ & $[-3, 0]$ & $30.32_{q}$ & 64.08 & $3^{6} 19^{2}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 12\\ \hline $3^{1+2}.2$ & 54 & & 7, 34 & & 17.01 & $2^*3^*$& & & 200 & 981\\ 9T10 & $6$ & & $[-3, 0]$ & $15.90_{q}$ & 17.49 & $31^{5}$ & 2 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 741\\ \hline
& 54 & & 7, 34 & & 16.83 & $3^*7^*$& & & 200 & 880\\ 9T11 & $6$ & & $[-3, 0]$ & $15.90_{q}$ & 19.01 & $3^{9} 7^{4}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 200.00 & 805\\ \hline
& 54 & & 8, 42 & & 16.72 & $2^*3^*5^*$& & & 200 & 2637\\ 9T12 & $3$ & $\sqrt{-3}$ & $[-3, 2]$ & $7.75_{p}$ & 10.71 & $2^{2} 307$ & 7 & $0.67_{\phantom{\text{$\bullet$}}}$ & 34.20 & 256\\ 18T24 & $3$ & $\sqrt{-3}$ & $[-3, 1]$ & $\mathit{10.03_{g}}$ & 20.08 & $2^{2} 3^{4} 5^{2}$ & 1 & $0.83_\circ$ & 82.70 & 77\\ \hline $M_9$ & 72 & & 6, 20 & & 29.72 & $2^*3^*$& & & 100 & 27\\ 9T14 & $8$ & & $[-1, 0]$ & $17.50_{q}$ & 31.59 & $2^{24} 3^{10}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 100.00 & 26\\ \hline $C_3^2{\rtimes}C_8$ & 72 & TW & 5, 16 & & 25.41 & $2^*3^*$& & & 100 & 19\\ 9T15 & $8$ & & $[-1, 0]$ & $17.50_{q}$ & 25.41 & $2^{31} 3^{4}$ & 1 & $1.00_{\phantom{\text{$\bullet$}}}$ & 100.00 & 16\\ \hline $C_3\wr C_3$ & 81 & & 9, 59 & & 75.41 & $3^*19^*$& & & 500 & 131\\ 9T17 & $3^{\rlap{\scriptsize{3}}}$ & $\sqrt{-3}$ & $[-3, 3]$ & $12.92_{q}$ & 30.14 & $7^{2} 13\!\cdot\! 43$ & 22 & $0.50_{\phantom{\text{$\bullet$}}}$ & 22.36 & 0\\ \hline $C_3^2{\rtimes}D_6$ & 108 & & 11, 262 & & 22.06 & $3^*23^*$& & & 150 & 12002\\ 18T55 & $6$ & & $[-3, 1]$ & $\mathit{11.98_{g}}$ & 24.38 & $2^{8} 3^{8} 5^{3}$ & 4 & $0.83_\circ$ & 65.07 & 229\\ 9T18 & $6$ & & $[-3, 2]$ & $9.70_{p}$ & 13.46 & $3^{3} 7^{2} 67^{2}$ & 53 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 216\\ \hline $C_3^2{\rtimes}QD_{16}$ & 144 & & 8, 62 & & 23.41 & $3^*7^*$& & & 100 & 488\\ 9T19 & $8$ & & $[-1, 2]$ & $\mathit{12.13_{v}}$ & 25.65 & $3^{13} 7^{6}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 31.62 & 14\\ 18T68 & $8$ & & $[-2, 1]$ & $\mathit{14.44_{g}}$ & 25.65 & $3^{13} 7^{6}$ & 1 & $0.88_\circ$ & 56.23 & 48\\ \hline $C_3\wr S_3$ & 162 & & 13, 2004 & & 29.89 & $3^*$& & & 200 & 1617\\ 9T20 & $3^{\rlap{\scriptsize{3}}}$ & $\sqrt{-3}$ & $[-3, 3]$ & $7.75_{p}$ & 11.17 & $7\!\cdot\! 
199$ & 23 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 20\\ 18T86 & $3^{\rlap{\scriptsize{3}}}$ & $\sqrt{-3}$ & $[-3, 3]$ & $\mathit{8.23_{v}}$ & \textit{19.48} & $2^{2} 43^{2}$ & 5 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ \hline
& 162 & & 11, 223 & & 24.90 & $2^*3^*5^*$& & & 100 & 597\\ 9T21 & $6^{\rlap{\scriptsize{3}}}$ & & $[-3, 3]$ & $9.70_{p}$ & 15.58 & $5^{2} 83^{3}$ & 2 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ \hline
& 162 & & 10, 205 & & 26.46 & $3^*$& & & 100 & 180\\ 9T22 & $6^{\rlap{\scriptsize{3}}}$ & & $[-3, 3]$ & $9.70_{p}$ & 17.21 & $2^{6} 7^{4} 13^{2}$ & 6 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ \hline $C_3^2{\rtimes}\textrm{SL}_2(3)$ & 216 & & 7, 44 & & 49.57 & $349^*$& & & 100 & 37\\ 9T23 & $8$ & & $[-1, 2]$ & $11.29_{p}$ & 23.39 & $547^{4}$ & 10 & $0.75_{\phantom{\text{$\bullet$}}}$ & 31.62 & 3\\ 24T569 & $8$ & $\sqrt{-3}$ & $[-2, 1]$ & $18.99_{g}$ & 38.84 & $349^{5}$ & 1 & $0.94_{\phantom{\text{$\bullet$}}}$ & 74.99 & 20\\ \hline
& 324 & & 17, -- & & 30.64 & $2^*3^*11^*$& & & 100 & 1816\\ 18T129 & $6^{\rlap{\scriptsize{3}}}$ & & $[-3, 3]$ & $\mathit{9.34_{p}}$ & \textit{22.88} & $2^{9} 23^{4}$ & 16 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 9T24 & $6^{\rlap{\scriptsize{3}}}$ & & $[-3, 3]$ & $9.70_{p}$ & 15.84 & $3^{7} 5^{2} 17^{2}$ & 399 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ \end{tabular} \end{table} \begin{table}[htbp] \centering \caption{\label{tablelabel8}Artin $L$-functions of small conductor from nonic groups}
\begin{tabular}{l@{\;}r@{\;}c@{\;}c@{\;}|@{\;}r@{\;}r@{\;}c@{}r@{\;}|@{\;}c@{\;}r@{\;}r} $G$& $n_1$ & $z$ & $\Imnaught$ & \multicolumn{1}{c}{$\mathfrak{d}$} & \multicolumn{1}{c}{$\delta_1$} & $\Delta_1$ & pos'n & $\beta$ & $B^\beta$ & \# \\ \hline
& 324 & & 9, 116 & & 29.96 & $2^*3^*$& & & 100 & 107\\ 9T25 & $6$ & & $[-3, 3]$ & $12.73_{p}$ & 22.25 & $2^{6} 3^{8} 17^{2}$ & 59 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ 18T141 & $6$ & & $[-3, 3]$ & $\mathit{9.34_{p}}$ & \textit{30.81} & $3^{8} 19^{4}$ & 9 & $0.50_{\phantom{\text{$\bullet$}}}$ & 10.00 & 0\\ 12T133 & $4$ & $\sqrt{-3}$ & $[-4, 2]$ & $11.15_{g}$ & 19.34 & $2^{6} 3^{7}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 31.62 & 10\\ 12T132 & $4^{\rlap{\scriptsize{2}}}$ & $\sqrt{-3}$ & $[-4, 2]$ & $\mathit{11.15_{g}}$ & \textit{33.50} & $2^{6} 3^{9}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 31.62 & 0\\ 18T142 & $12$ & & $[-3, 3]$ & $19.68_{p}$ & 30.57 & $2^{18} 3^{26}$ & 3 & $0.75_{\phantom{\text{$\bullet$}}}$ & 31.62 & 2\\ \hline $C_3^2{\rtimes}\textrm{GL}_2(3)$ & 432 & & 10, 206 & & 27.88 & $3^*11^*$& & & 76 & 453\\ 9T26 & $8$ & & $[-1, 2]$ & $11.54_{g}$ & 17.59 & $2^{6} 523^{3}$ & 14 & $0.75_{\phantom{\text{$\bullet$}}}$ & 25.74 & 17\\ 18T157 & $8$ & & $[-2, 2]$ & $\mathit{12.14_{p}}$ & 19.04 & $3^{7} 53^{4}$ & 16 & $0.75_{\phantom{\text{$\bullet$}}}$ & 25.74 & 3\\ 24T1334 & $16$ & & $[-2, 1]$ & $21.27_{g}$ & 26.68 & $3^{26} 11^{10}$ & 1 & $0.94_{\phantom{\text{$\bullet$}}}$ & 57.98 & 134\\ \hline $S_3\wr C_3$ & 648 & & 14, 3706 & & 33.56 & $2^*5^*13^*$& & & 150 & 1677\\ 18T197 & $6^{\rlap{\scriptsize{2}}}$ & & $[-3, 3]$ & $\mathit{9.70_{v}}$ & \textit{19.71} & $7^{4} 29^{3}$ & 1 & $0.50_{\phantom{\text{$\bullet$}}}$ & 12.25 & 0\\ 18T202 & $6$ & & $[-4, 3]$ & $\mathit{9.70_{v}}$ & \textit{27.73} & $2^{6} 3^{9} 19^{2}$ & 49 & $0.50_{\phantom{\text{$\bullet$}}}$ & 12.25 & 0\\ 9T28 & $6$ & & $[-3, 4]$ & $9.70_{p}$ & 12.20 & $3^{8} 503$ & 335 & $0.33_{\phantom{\text{$\bullet$}}}$ & 5.31 & 0\\ 12T176 & $8$ & & $[-4, 2]$ & $12.05_{g}$ & 21.75 & $11^{4} 43^{4}$ & 4 & $0.75_{\phantom{\text{$\bullet$}}}$ & 42.86 & 57\\ 36T1102 & $12$ & & $[-4, 3]$ & $\mathit{15.58_{v}}$ & 29.90 & $2^{6} 5^{10} 13^{8}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 42.86 
& 2\\ 18T206 & $12$ & & $[-3, 4]$ & $15.23_{v}$ & 21.99 & $2^{6} 7^{10} 29^{4}$ & 10 & $0.67_{\phantom{\text{$\bullet$}}}$ & 28.23 & 8\\ 24T1539 & $8$ & $\sqrt{-3}$ & $[-8, 4]$ & $\mathit{17.07_{v}}$ & \textit{45.71} & $2^{14} 3^{19}$ & 3 & $0.75_{\phantom{\text{$\bullet$}}}$ & 42.86 & 0\\ \hline
& 648 & & 13, 2206 & & 40.81 & $2^*3^*17^*$& & & 200 & 838\\ 9T29 & $6$ & & $[-3, 3]$ & $12.73_{p}$ & 16.62 & $2^{8} 7^{2} 41^{2}$ & 31 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 18T223 & $6$ & & $[-3, 3]$ & $\mathit{9.70_{v}}$ & \textit{30.14} & $2^{4} 5^{2} 37^{4}$ & 71 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 24T1527 & $4$ & $\sqrt{-3}$ & $[-4, 2]$ & $12.05_{g}$ & 32.34 & $2^{2} 3^{7} 5^{3}$ & 18 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 43\\ 12T175 & $4$ & $\sqrt{-3}$ & $[-4, 4]$ & $7.60_{p}$ & 9.23 & $11\!\cdot\! 659$ & 164 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 14\\ 36T1131 & $6$ & $\sqrt{-3}$ & $[-6, 6]$ & $\mathit{11.95_{v}}$ & \textit{36.04} & $2^{2} 3^{8} 17^{4}$ & 6 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 36T1237 & $12$ & & $[-3, 3]$ & $\mathit{17.78_{p}}$ & 44.72 & $2^{22} 3^{7} 17^{8}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 6\\ 18T219 & $12$ & & $[-3, 3]$ & $\mathit{14.05_{v}}$ & 33.23 & $3^{17} 107^{5}$ & 5 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 65\\ 24T1540 & $8$ & $\sqrt{-3}$ & $[-8, 4]$ & $\mathit{17.07_{v}}$ & 49.37 & $2^{10} 3^{15} 7^{4}$ & 2 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 1\\ \hline
& 648 & & 13, 1322 & & 30.37 & $2^*269^*$& & & 200 & 4001\\ 9T30 & $6$ & & $[-3, 3]$ & $9.70_{p}$ & 10.67 & $11^{2} 23^{3}$ & 3 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 1\\ 18T222 & $6$ & & $[-3, 3]$ & $\mathit{9.70_{v}}$ & \textit{23.27} & $31^{3} 73^{2}$ & 57 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 12T178 & $8$ & & $[-4, 2]$ & $12.05_{g}$ & 18.27 & $2^{10} 59^{4}$ & 10 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 327\\ 12T177 & $8^{\rlap{\scriptsize{2}}}$ & & $[-4, 2]$ & $\mathit{12.05_{g}}$ & 24.98 & $2^{10} 23^{6}$ & 22 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 173\\ 36T1121 & $6$ & $\sqrt{3}$ & $[-6, 6]$ & $\mathit{11.95_{v}}$ & \textit{31.61} & $2^{8} 7^{2} 43^{3}$ & 91 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 36T1123 & $12$ & & $[-3, 3]$ & $\mathit{17.78_{p}}$ & 28.87 & $2^{35} 5^{10}$ & 2 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 71\\ 18T218 & $12$ & & $[-3, 3]$ & $14.05_{v}$ & 18.33 & $2^{10} 269^{5}$ & 1 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 453\\ \hline $S_3 \wr S_3$ & 1296 & & 22, -- & & 36.26 & $2^*3^*$& & & 200 & 12152\\ 18T320 & $6$ & & $[-3, 4]$ & $\mathit{8.80_{p}}$ & \textit{14.80} & $5\!\cdot\! 
23^{3} 173$ & 8562 & $0.33_{\phantom{\text{$\bullet$}}}$ & 5.85 & 0\\ 18T312 & $6$ & & $[-4, 3]$ & $\mathit{8.79_{p}}$ & \textit{17.45} & $3^{5} 11^{2} 31^{2}$ & 343 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 9T31 & $6$ & & $[-3, 4]$ & $9.70_{p}$ & 10.38 & $31^{2} 1303$ & 10036 & $0.33_{\phantom{\text{$\bullet$}}}$ & 5.85 & 0\\ 18T303 & $6$ & & $[-4, 3]$ & $\mathit{8.79_{p}}$ & \textit{18.34} & $5^{5} 23^{3}$ & 4 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 12T213 & $8$ & & $[-4, 4]$ & $11.29_{p}$ & 13.38 & $5^{2} 7^{4} 131^{2}$ & 397 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 3\\ 24T2895 & $8$ & & $[-4, 2]$ & $\mathit{12.79_{g}}$ & 30.65 & $2^{8} 3^{4} 5^{6} 7^{4}$ & 77 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 230\\ 18T315 & $12$ & & $[-3, 4]$ & $13.59_{p}$ & 22.81 & $2^{10} 23^{4} 37^{5}$ & 72 & $0.67_{\phantom{\text{$\bullet$}}}$ & 34.20 & 105\\ 36T2216 & $12$ & & $[-3, 4]$ & $\mathit{13.79_{p}}$ & 29.13 & $2^{10} 3^{17} 41^{4}$ & 78 & $0.67_{\phantom{\text{$\bullet$}}}$ & 34.20 & 11\\ 36T2305 & $12$ & & $[-4, 3]$ & $\mathit{15.14_{p}}$ & 31.73 & $2^{18} 331^{5}$ & 58 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 151\\ 36T2211 & $12$ & & $[-6, 6]$ & $\mathit{14.64_{p}}$ & \textit{37.29} & $2^{16} 5^{8} 7^{10}$ & 55 & $0.50_{\phantom{\text{$\bullet$}}}$ & 14.14 & 0\\ 36T2214 & $12$ & & $[-4, 3]$ & $\mathit{15.14_{p}}$ & 32.07 & $2^{22} 3^{24}$ & 11 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 136\\ 24T2912 & $16$ & & $[-8, 4]$ & $\mathit{18.82_{p}}$ & 35.12 & $5^{12} 23^{12}$ & 4 & $0.75_{\phantom{\text{$\bullet$}}}$ & 53.18 & 40\\ \end{tabular} \end{table}
\subsubsection{Top row of the $G$-block.} The top row in the $G$-block is different from the other rows, as it gives information corresponding to the abstract group $G$. Instead of referring to a faithful irreducible character, as the other lines do, many of its entries are the corresponding quantities for the regular character $\phi_G$. The first four entries are a common name for the group $G$ (if there is one), the order
$\phi_G(e) = |G|$, the symbol TW if $G$ is known to have the universal tame-wild property as defined in \cite{jr-tame-wild}, and finally $k,N$. Here, $k$ is the size of the rational character table, and $N$ is number of vertices of the polytope $P_G$ discussed in \S\ref{polytope-pg}, or a dash if we did not compute $N$. The last four entries are the smallest root discriminant of a Galois $G$ field, the factored form of the corresponding discriminant, a cutoff $B$ for which the set $\cK(G;B)$ is known, and the
size $|\cK(G;B)|$.
\subsubsection{Remaining rows of the $G$-block}
\label{remainingrows}
Each remaining line of the $G$-block
corresponds to a type $(G,\chi)$.
However the number of rows in the $G$-block is typically substantially less than the number of faithful irreducible characters of $G$, as we list only one representative of each $\Gal(\overline{\Q}/\Q) \times \mbox{Out}(G)$ orbit of such characters. As an example, $S_6$ has eleven characters, all rational. Of the nine which are faithful, there are three which are fixed by the nontrivial element of $\mbox{Out}(S_6)$ and the others form three two-element orbits. Thus the $S_6$-block has six rows. In general, the information on a $(G,\chi)$ row comes in three parts, which we now describe in turn.
{\em First four columns.} The first column gives the lexicographically first permutation group $mTj$ for which the corresponding permutation character has $\chi$ as a rational constituent. Then $n_1=\chi_1(e)$ is the degree of an absolutely irreducible character $\chi_1$ such that $\chi$ is the sum of its conjugates. The number $n_1$ is superscripted by the size of the $\Out(G)$ orbit of $\chi$, in the unusual case when this orbit size is not $1$. Next, the complex number $z$ is a generator for the field generated by the values of the character $\chi_1$, with no number printed in the common case that $\chi_1$ is rational-valued. The last entry gives the interval $\Imnaught$, where $\widecheck{\chi}$ and $\widehat{\chi}$ are the numbers introduced in the beginning of Section~\ref{choices}.
In the range presented, the data of $mTj$, $n_1$, $z$, and $\Imnaught$
suffice to distinguish Galois types $(G,\chi)$ from each other.
{\em Middle four columns.} The next four columns focus on
minimal root conductors. In the first entry, $\frak d$ is the best
conditional lower bound we obtained for root conductors, and the subscript $i \in \{\ell,s,q,g,p,v\}$ gives information on
the corresponding auxiliary character $\phi$. The first four possibilities refer to the methods of
Section~\ref{choices}, namely {\em linear}, {\em square}, {\em quadratic},
and {\em Galois}. The last two, $p$ and $v$, indicate a {\em permutation}
character and a character coming from a {\em vertex} of the polytope $P_G$.
The best $\phi$ of the ones
we inspect is always at a vertex, except in the three cases on
Table~\ref{tablelabel2}
where $*$ is appended to the subscript.
Capital letters
$S$, $Q$, $G$, $P$, and $V$ also appear as subscripts.
These occur only for groups marked with TW, and indicate
that the tame-wild principle improved the lower bound.
For most groups with fifteen or more classes, it was prohibitive
to calculate all vertices, and the best of the other methods
is indicated.
When the second entry is in roman type, it is the minimal root conductor and the third entry is the minimal conductor in factored form. When the second entry is in italic type, then it is the smallest currently known root conductor. The fourth entry gives the position of the source number field on the complete list ordered by Galois root discriminant. This information lets readers obtain further information from \cite{jr-global-database}, such as a defining polynomial and details on ramification.
{\em Last three columns.} The quantity $\beta$ is
the exponent we are using to pass from Galois number fields to Artin representations.
Writing $\walp = \walp(G,\chi,\phi_G)$
and $\alp = \alp(G,\chi,\phi_G)$,
one has the universal relation $\walp \leq \alp$.
When equality holds then the common number is printed.
To indicate that inequality holds, an extra symbol is printed.
When we know that $G$ satisfies TW then we can use larger
exponent and $\alp_\bullet$ is printed.
Otherwise we use the smaller exponent and $\walp_\circ$ is printed.
The column
$B^\beta$ gives the corresponding upper bound on our
complete list of root conductors. Finally
the column $\#$ gives $|\cL(G,\chi; B^\beta)|$,
the length of the complete list of Artin $L$-functions
we have identified. For the $L$-functions themselves,
we refer to \cite{LMFDB}.
\section{Discussion of tables}
\label{discussion}
In this section, we discuss four topics, each of which makes
specific reference to parts of the tables of the
previous section. Each of the topics also serves the
general purpose of making the
tables more readily understandable.
\subsection{Comparison of first Galois root discriminants and root conductors}
Suppose first, for notational simplicity, that $G$ is a group for which
all irreducible complex characters take rational values only.
When one fixes $K^{\rm gal}$ with $\Gal(K^{\rm gal}/\Q) \cong G$
and lets $\chi$ run over all the irreducible characters of $G$, the root discriminant $\delta_{\rm Gal}$ is just the
weighted multiplicative average $\prod_\chi {\delta_\chi^{\chi(e)^2/|G|}}$.
Deviation of a root conductor $\delta_\chi$ from $\delta_{\rm Gal}$ is
caused by nonzero values of $\chi$.
When $\chi(e)$ is large and $\Imnaught$ is small, $\delta_\chi$
is necessarily close to $\delta_{\rm Gal}$. One
can therefore think generally of $\delta_{\rm Gal}$ as a first
approximation to $\delta_{\rm \chi}$. The general principle
of $\delta_{\rm Gal}$ approximating $\delta_{\rm \chi}$
applies to groups $G$ with irrational characters as well.
Our first example of $S_5$ illustrates both how the principle
$\delta_{\rm Gal} \approx \delta_{\chi}$ is reflected in the tables,
and how it tends to be somewhat off in the direction that
$\delta_{\rm Gal} > \delta_{\chi}$.
For a given $K^{\rm gal}$, the variance of its $\delta_\chi$ about its
$\delta_{\rm Gal}$ is substantial and depends on the details
of the ramification in $K^{\rm gal}$. There are many $K^{\rm gal}$
with root discriminant near the minimal root discriminant, all of which
are possible sources of minimal root conductors. It
is therefore expected that the minimal conductors $\delta_1(S_5,\chi) = \min \delta_\chi$
printed in the table, $6.33$, $18.72$, $16.27$, $17.78$, and $18.18$,
are substantially less than the printed
minimal root discriminant $\delta_1(S_5,\phi_{120}) \approx 24.18$. As groups $G$
get larger, one can generally expect tighter clustering
of the $\delta_1(G,\chi)$ about $\delta_1(G,\phi_G)$. One can see
the beginning of this trend in our partial results for
$S_6$ and $S_7$.
\subsection{Known and unknown minimal root conductors} Our method of starting with a complete list of
Galois fields is motivated by the principle from the previous
subsection that the Galois root discriminant $\delta_{\rm Gal}$ is
a natural first approximation to $\delta_\chi$. Indeed, as the tables show via nonzero entries in the $\#$
column, this general method suffices to obtain a non-empty initial segment for most $(G,\chi)$.
As our focus is primarily on the first root conductor $\delta_1 = \delta_1(G,\chi)$, we do not pursue larger
initial segments in these cases.
When the initial segment from our general method is empty, as reported by a $0$ in the $\#$
column, we aim to nonetheless present the minimal root conductor $\delta_1$.
Suppose there are subgroups $H_m \subset H_k \subseteq G$, of the
indicated indices, such that a multiple of the character $\chi$ of interest
is a difference of the corresponding permutation characters: $c \chi = \phi_m - \phi_k$.
Suppose one has the complete list of all degree $m$ fields
corresponding to the permutation representation of $G$ on $G/H_m$ and
root discriminant $\leq B$. Then one can extract the complete
list $\cL(G,\chi;B^{m/(m-k)})$ of desired Artin $L$-functions.
For example, consider $\chi_5$, the absolutely irreducible $5$-dimensional character of $A_6$. The permutation character for a corresponding sextic field decomposes $\phi_6 = 1+\chi_5$, and so the discriminant of the sextic field equals the conductor of $\chi_5$. As an example with $k>1$,
consider the $6$-dimensional character $\chi_6$ for $C_3\wr C_3 = 9T17$, which is the sum of a three-dimensional character and its conjugate. The nonic field has a cubic subfield, and the characters are related by $\phi_9 = \phi_3 + \chi_6$. In terms of conductors, $D_9 = D_3 \cdot D_{\chi_6}$, where $D_9$ and $D_3$ are field discriminants. So, we can determine the minimal conductor of an $L$-function with type $(C_3\wr C_3,\chi_6)$ from a sufficiently long complete list of nonic number fields with Galois group $C_3\wr C_3$.
This method, applied to both old and newer lists presented in \cite{jr-global-database},
accounts for all but one of the $\delta_1$ reported
in Roman type on the same line as a $0$ in the $\#$ column.
The remaining case of an established $\delta_1$ is for the type $(\GL_3(2),\chi_7)$. The
group $\GL_3(2)$ appears on our tables as $7T5$. The permutation
representation $8T37$ has character $\chi_7+1$. Here
the general method says that $\cL(\GL_3(2),\chi_7; 26.12)$ is empty.
It is prohibitive to compute the first octic discriminant by
searching among octic polynomials. In \cite{jr-psl27} we carried out a long search of septic polynomials,
examining all local possibilities giving an octic discriminant at most $30$.
This computation shows that $|\cL(\GL_3(2),\chi_7; 48.76)| = 25$ and in particular identifies
$\delta_1 = 21^{8/7} \approx 32.44$.
The complete lists of Galois fields for a group first appearing in degree $m$ were likewise computed
by searching polynomials in degree $m$, targeting for small $\delta_{\rm Gal}$. This single
search can give many first root conductors at once. For example, the largest groups
on our octic and nonic tables are $S_4 \wr S_2 = 8T47$ and $S_3 \wr S_3 = 9T31$.
In these cases, minimal root conductors were obtained for $5$ of the $10$ and
$7$ of the $12$ faithful $\chi$ respectively. Searches adapted to a particular character
$\chi$ as in the previous paragraph
can be viewed as a refinement of our method, with targeting being
not for small $\delta_{\rm Gal}$ but instead for small $\delta_\chi$.
Many of the italicized entries in the column $\delta_1$ seem
improvable to known minimal root conductors via this refinement.
\subsection{The ratio $\delta_1/{\frak d}$} In all cases on the table, $\delta_1>{\frak d}$. Thus, as expected, we did not encounter a contradiction to the Artin conjecture or the Riemann hypothesis. In some cases on the table, the ratio $\delta_1/{\frak d}$ is quite close to $1$. As two series of examples, consider $S_m$ with its reflection character $\chi_{m-1} = \phi_m-1$, and $D_m$ and the sum $\chi$ of all its faithful $2$-dimensional characters. Then these ratios are as follows:
\[
\begin{array}{r|llllllll}
m & \;\; 2 & \;\; 3 & \; \;4 & \; \; 5 & \; \; 6 & \; \; 7 & \; \;8 & \; \;9 \\
\hline
\delta_1/{\frak d}\mbox{ for } (S_m,\chi_{m-1}) & 1.00 & 1.02 & 1.2 & 1.005 & 1.1 & 1.007 \\ \delta_1/{\frak d} \mbox{ for } (D_m,\chi) & & & 1.09 & 1.02 & 1.23 & 1.006 & 1.07 & 1.45 \\ \end{array}
\]
In the cases with the smallest ratios,
the transition
from no $L$-functions to many $L$-functions is commonly abrupt. For example,
in the case $(S_7,\chi_6)$ the lower bound is ${\frak d} \approx 7.50$ and the first seven
rounded root conductors are $7.55$, $7.60$, $7.61$, $7.62$, $7.64$, $7.66$, and $7.66$.
When the transition from no $L$-functions to many $L$-functions is not
abrupt, but there is an $L$-function with outlyingly small conductor, again
$\delta_1/{\frak d}$ may be quite close to $1$. As an example,
for $(8T25,\chi_7)$, one has ${\frak d} \approx 16.10$ and $\delta_1 = 29^{6/7} \approx 17.93$
yielding $\delta_1/{\frak d} \approx 1.11$. However in this
case the next root conductor is $\delta_2 = 113^{6/7} \approx 57.52$, yielding
$\delta_2/{\frak d} \approx 3.57$. Thus the close agreement is entirely dependent
on the $L$-function with outlyingly small conductor. Even the second
root conductor is somewhat of an outlier as the next three conductors
are $71.70$, $76.39$, and $76.39$, so that already $\delta_3/{\frak d} \approx 4.45$.
There are many $(G,\chi)$ on the table for which the ratio $\delta_1/{\frak d}$ is
around $2$ or $3$.
There is some room for improvement in our analytic lower bounds, for example changing the test function \eqref{Odlyzko}, varying $\phi$ over all of $P_G$, or replacing the exponent $\walp$ with the best possible exponent $b$. However examples like the one in the previous paragraph suggest to us that in many cases the resulting increase in $\frak d$ towards $\delta_1$ would be very small.
\subsection{Multiply minimal fields} Tables~\ref{tablelabel1}--\ref{tablelabel8} make implicit reference to many
Galois number fields, and all necessary complete lists are accessible on
the database \cite{jr-global-database}. Table~\ref{multmin} presents
a small excerpt from this database by giving six polynomials $f(x)$. For each $f(x)$,
Table~\ref{multmin} first gives the Galois group $G$ and the root discriminant $\delta$ of the
splitting field $K^{\rm gal}$. We are highlighting these particular Galois number fields $K^{\rm gal}$ here
because they are {\em multiply minimal}: they each give rise to the minimal root conductor for at least
two different rationally irreducible characters $\chi$. The
degrees of these characters are given in the last column of Table~\ref{multmin}.
\begin{table}[htb]
\[
{\setlength{\arraycolsep}{4pt}
\begin{array}{crclll}
G & & \!\! \delta \!\! & & \mbox{Polynomial} & \chi(e) \\
\hline
A_5 & 2^{3/2} 17^{2/3} & \!\! \approx \!\! & 18.70 & x^5-x^4+2 x^2-2 x+2 &4,6 \\
A_6 & 2^{13/6} 3^{16/9} & \!\! \approx \!\! & 31.66 & x^6-3 x^4-12 x^3-9 x^2+1 &9,10,16 \\
S_6 & 11^{1/2} 41^{2/3} & \approx & 39.44 & x^6 - x^5 - 4x^4 + 6 x^3 - 6x + 5 & 5,10 \\
\SL_2(3) & 163^{2/3} & \!\! \approx \!\! & 29.83 & x^8+9 x^6+23 x^4+14 x^2+1 & 2,4 \\
8T47 & 2^{31/24} 5^{1/2} 41^{1/2} & \!\! \approx \!\! & 35.05 & x^8-2 x^7+6 x^6-2 x^5+26 x^4&12,18\\
& & & & \; -24 x^3-24 x^2+16 x+4\\
9T19 & 3^{37/24} 7^{3/4} & \!\! \approx \!\! & 23.41 & x^9-3 x^8-3 x^7+12 x^6-21 x^5 &8,8\\
& & & & \;+ 36 x^4-48 x^3+45 x^2-24 x+7 \\ \end{array} }
\]
\caption{\label{multmin} Invariants and defining polynomials for Galois
number fields giving rise to minimal root discriminants for
at least two rationally irreducible characters $\chi$ }
\end{table}
Further information on the characters $\chi$ is given in Tables \ref{tablelabel1}--\ref{tablelabel8}.
An interesting point, evident from repeated $1$'s in the $G$-block on these tables,
is that five of the six fields $K^{\rm gal}$ are also first on the list of $G$ fields
ordered by root discriminant. The exception is
the $S_6$ field on Table~\ref{multmin}, which is only sixth on the
list of Galois $S_6$ fields ordered by root discriminant.
\section{Lower bounds in large degrees} \label{asymp} In this section, we continue our practice of assuming the Artin conjecture and Riemann hypothesis for the relevant $L$-functions. For $n$ a positive integer, let $\Delta_1(n)$ be the smallest root discriminant of a degree $n$ field. As illustrated by Figure~\ref{contourM}, one has, \begin{equation} \label{lowfield} \liminf_{n \to \infty} \Delta_1(n) \geq \Omega \approx 44.7632. \end{equation} Now let $\delta_1(n)$ be the smallest root conductor of an absolutely irreducible degree $n$ Artin representation. Theorem~4.2 of \cite{PM} uses the quadratic method to conclude that
$\delta_1(n) \geq 6.59\, e^{-13278.42/n^2}$. If one repeats the argument there without concerns for effectivity, one gets
\begin{equation}
\label{lowrep} \liminf_{n \to \infty} \delta_1(n) \geq \sqrt{\Omega} \approx 6.6905. \end{equation} The contrast between \eqref{lowfield} and \eqref{lowrep} is striking, and raises the question of whether $\sqrt{\Omega}$ in \eqref{lowrep} can be increased at least part way to $\Omega$.
\subsection{The constant $\Omega$ as a limiting lower bound.} The next corollary makes use of the extreme character values $\widecheck{\chi}$ and $\widehat{\chi}$ introduced at the beginning of Section~\ref{choices}. It shows that if one restricts the type, then one can indeed increase $\sqrt{\Omega}$ all the way to $\Omega$. We formulate the corollary in the context of rationally irreducible characters, to stay in the main context we have set up. However via \eqref{discequal}, it can be translated to a statement about absolutely irreducible characters.
\begin{cor}
\label{limitcor} Let $(G_k,\chi_k)$ be a sequence of rationally
irreducible Galois types of degree $n_k = \chi_k(e)$. Suppose that
the number of irreducible constituents $(\chi_k,\chi_k)$ is bounded,
$n_k \rightarrow \infty$, and either \begin{description} \item[A] $\widecheck{\chi}_k/n_k \rightarrow 0$, or \item[B] $\widehat{\chi}_k/n_k \rightarrow 0$. \end{description} Then, assuming the Artin conjecture and Riemann hypothesis for relevant $L$-functions, \begin{equation} \label{limlowerbound} \liminf_{k \to \infty} \delta_1(G_k,\chi_k) \geq \Omega. \end{equation} \end{cor} \begin{proof} For Case A, Theorem~\ref{thm2} using a linear auxiliary character as in \eqref{formlinear} says \[ \delta_1(G_k,\chi_k) \geq M \left( \frac{n_k}{\widecheck{\chi}_k} + 1, \frac{r_k}{\widecheck{\chi}_k} + 1,(\chi_k,\chi_k) \right)^{{1+\widecheck{\chi}_k/{n_k}} }. \] For Case B, Theorem~\ref{thm2} using a Galois auxiliary character as in \eqref{formula3a} says \[
\delta_1(G_k,\chi_k) \geq M \left( |G_k| , 0 ,(\chi_k,\chi_k) \right)^{1 - {\widehat{\chi}_k}/{n_k}}. \] In both cases, the first argument of $M$ tends to infinity, the second argument does not matter, the third argument does not matter either by boundedness, and the exponent tends to $1$. By \eqref{asymptotic1}, these right sides thus have an infimum limit of at least $\Omega$, giving the conclusion \eqref{limlowerbound}. \end{proof}
For the proof of Case~B, the square auxiliary character would work equally well through \eqref{formsquare}. Also~\eqref{lowfield}, \eqref{lowrep}, and Corollary~\ref{limitcor} could all be strengthened by considering the placement of complex conjugation. For example, when restricting to the totally real case $c=e$, the $\Omega$'s in \eqref{lowfield}, \eqref{lowrep}, and \eqref{limlowerbound} are simply replaced by $\Theta \approx 215.3325$.
\subsection{Four contrasting examples.} Many natural sequences of types are covered by either Hypothesis A or Hypothesis B, but some are not.
Table~\ref{fourasymp} summarizes four sequences which we discuss together with some related sequences next.
\begin{table}[htb] { \[
\begin{array}{c| l |cccc|cc}
G_k & \multicolumn{1}{c|}{\chi_k} & \widecheck{\chi}_k & \widehat{\chi}_k & n & |G| & \mbox{A}& \mbox{B} \\ \hline
\PGL_2(k) &\mbox{Steinberg} & 1 & 1 & k & k^3-k & \checkmark & \checkmark \\
S_{k} & \mbox{Reflection} & 1 & k-3 & k-1 & k! & \checkmark \\
2_\epsilon^{1+2k} & \mbox{Spin} & 2^k & 0 & 2^k & 2^{1+2k} & & \checkmark \\
2^k.S_k & \mbox{Reflection} & k & k-2 & k & 2^k k! & &
\end{array} \] } \caption{\label{fourasymp} Four sequences of types, with Corollary~\ref{limitcor} applicable to the first three.} \end{table}
\subsubsection{The group $\PGL_2(k)$ and its characters of degree $k-1$, $k$, and $k+1$.} \label{pglk} In the sequence $(\PGL_2(k),\chi_k)$ from the first line of Table~\ref{fourasymp}, the index $k$ is restricted to be a prime power. The permutation character $\phi_{k+1}$ arising from the natural action of $\PGL_2(k)$ on $\mathbb{P}^1(\mathbb{F}_k)$ decomposes as $1+\chi_k$ where $\chi_k$ is the Steinberg character. Table~\ref{fourasymp} says that the ratios $\widecheck{\chi}_k/n_k$ and $\widehat{\chi}_k/n_k$ are both $1/k$, so Corollary~\ref{limitcor} applies through both Hypotheses A and B.
The conductor of $\chi_k$ is the absolute discriminant of the degree $k+1$ number field with character $\phi_{k+1}$. Thus, in this instance, \eqref{limlowerbound} is already implied by the classical \eqref{lowfield}. However, the other nonabelian irreducible characters $\chi$ of $\PGL_2(k)$ behave very similarly to $\chi_k$. Their dimensions are in $\{k-1,k,k+1\}$ and their values besides $\chi(e)$ are all in $[-2,2]$. So suppose for each $k$, an arbitrary nonabelian rationally irreducible character $\chi_k$ of $\PGL_2(k)$ were chosen, in such a way that the sequence $(\chi_k,\chi_k)$ is bounded. Then Corollary~\ref{limitcor} would again apply through both Hypotheses A and B. But now the $\chi_k$ are not particularly closely related to permutation characters.
\subsubsection{The group $S_{k}$ and its canonical characters.} \label{sk} As with the last example, the permutation character $\phi_{k}$ arising from the natural action of $S_{k}$ on $\{1,\dots,k\}$ decomposes as $1 + \chi_k$ where $\chi_k$ is the reflection character with degree $k-1$. The second line of Table~\ref{fourasymp} shows that Corollary~\ref{limitcor} applies through Hypothesis A. In fact, using the linear auxiliary character underlying Hypothesis A here is essential; the limiting lower bound coming from the square or quadratic auxiliary characters is $\sqrt{\Omega}$, and this lower bound is just $1$ from the Galois auxiliary character.
Again in parallel to the previous example, the familiar sequence $(S_k,\chi_k)$ of types needs to be modified to make it a good illustration of the applicability of Corollary~\ref{limitcor}.
Characters of $S_k$ are most commonly indexed by partitions of $k$, with $\chi_{(k)} =1$, $\chi_{(k-1,1)}$ being the reflection character, and $\chi_{(1,1,\dots,1,1)}$ being the sign character. However an alternative convention is to include explicit reference to the degree $k$ and then omit the largest part of the partition, so that the above three characters have the alternative names $\chi_{k,()}$, $\chi_{k,(1)}$, and $\chi_{k,(1,\dots,1,1)}$. With this convention, one can prove that for any fixed partition $\mu$ of a positive integer $m$, the sequence of types $(G_k,\chi_{k,\mu})$ satisfies Hypothesis A but not B.
The case of general $\mu$ is well represented by the two cases where $m=2$. In these two cases, information in the same format as Table~\ref{fourasymp} is \[ {\renewcommand{\arraystretch}{1.3}
\begin{array}{c| l |cll}
G_k & \multicolumn{1}{c|}{\chi_k} & \multicolumn{1}{c}{\widecheck{\chi}_k} & \multicolumn{1}{c}{\widehat{\chi}_k} & \multicolumn{1}{c}{n} \\ \hline
S_{k} & \chi_{k,(1,1)} & {\lfloor \frac{k-1}{2} \rfloor }& { \frac{1}{2} (k-2)(k-5)} & {\frac{1}{2} (k-1)(k-2)} \\
S_{k} & \chi_{k,(2)} & 1 & { \frac{1}{2} (k-2)(k-5)} + 1& {\frac{1}{2} (k-1)(k-2)} -1 \\
\end{array}.
} \] Let $X_{k,m}$ be the $S_k$-set consisting of $m$-tuples of distinct elements of $\{1,\dots,k\}$. Then its permutation character $\phi_{k,m}$ decomposes into $\chi_{k,\mu}$ with $\mu$ a partition of an integer $\leq m$. These formulas are uniform in $k$, as in \[ \phi_{k,2} = \chi_{k,(1,1)} + \chi_{k,(2)} + 2 \chi_{k,(1)} + \chi_{k,()}. \] For $\mu$ running over partitions of a large integer $m$, the characters $\chi_{k,\mu}$ can be reasonably regarded as quite far from permutation characters, and they thus serve as a better illustration of Corollary~\ref{limitcor}. The sequences $(S_k,\chi_{k,\mu})$ satisfy Hypothesis A but not B, because $n_k$ and $\widehat{\chi}_k$ grow polynomially as $k^m$, while $\widecheck{\chi}_k$ grows polynomially with degree $<m$.
\subsubsection{The extra-special group $2_\epsilon^{1+2k}$ and its degree $2^k$ character} \label{esk} Fix $\epsilon \in \{+,-\}$. Let $G_k$ be the extra-special $2$-group of type $\epsilon$ and order $2^{1+2k}$, so that $2^{1+2}_+$ and $2^{1+2}_{-}$ are the dihedral and quaternion groups respectively. These groups each have exactly one irreducible character of degree larger than $1$, this degree being $2^k$. There are just three character values, $-2^k$, $0$, and $2^k$. For these two sequences, Corollary~\ref{limitcor} again applies, but now only through Hypothesis B.
\subsubsection{The Weyl group $2^k.S_k$ and its degree $k$ reflection character} The Weyl group $W(B_k) \cong 2^k.S_k$ of signed permutation matrices comes with its defining degree $k$ character $\chi_k$. Here, as indicated by the fourth line of Table~\ref{fourasymp}, neither hypothesis of Corollary~\ref{limitcor} applies.
However the conclusion \eqref{limlowerbound} of Corollary~\ref{limitcor} continues to hold as follows. Relate the character $\chi_k$ in question to the two standard permutation characters of $2^k.S_k$ via $\phi_{2k} = \phi_k + \chi_k$. For a given $2^k.S_k$ field, $D_{\Phi_{2k}}=D_{\Phi_k}D_{\Chi_k}$. But, since $\Phi_k$ corresponds to an index $2$ subfield of the degree $2k$ number field for $\Phi_{2k}$, we have $D_{\Phi_k}^2\mid D_{\Phi_{2k}}$. Combining these we get $D_{\Phi_k} \mid D_{\Chi_k}$ and hence $\delta_{\Phi_k} < \delta_{\Chi_k}$. So \eqref{lowfield} implies \eqref{limlowerbound}.
\subsection{Concluding speculation} \label{speculation} As we have illustrated in \S\ref{pglk}--\ref{esk}, both Hypothesis A and Hypothesis B are quite broad. This breadth, together with the fact that the conclusion \eqref{limlowerbound} still holds for our last sequence, raises the question of whether \eqref{limlowerbound} can be formulated more universally. While the evidence is far from definitive, we expect a positive answer. Thus we expect that the first accumulation point of the numbers $\delta_1(G,\chi)$ is at least $\Omega$, where $(G,\chi)$ runs over all types with $\chi$ irreducible. Phrased differently, we expect that the first accumulation point of the root conductors of all irreducible Artin $L$-functions is at least $\Omega$.
\end{document} |
\begin{document}
\title{Tilting modules in category $\mathcal{O}$ and sheaves on moment graphs}
\author{Johannes K\"ubel*}
\address{Department of Mathematics, University of Erlangen, Germany}
\curraddr{Cauerstr. 11, 91058 Erlangen, Germany} \email{[email protected]}
\thanks{*supported by the DFG priority program 1388}
\date{\today}
\keywords{representation theory, category $\mathcal{O}$}
\begin{abstract} We describe tilting modules of the deformed category $\mathcal{O}$ over a semisimple Lie algebra as certain sheaves on a moment graph associated to the corresponding block of $\mathcal{O}$. We prove that they map to Braden-MacPherson sheaves constructed along the reversed Bruhat order under Fiebig's localization functor. By this means, we get character formulas for tilting modules and explain how Soergel's result about the Andersen filtration gives a Koszul dual proof of the semisimplicity of subquotients of the Jantzen filtration. \end{abstract}
\maketitle
\section{Introduction}
Let $\mathfrak{g} \supset \mathfrak{b} \supset \mathfrak{h}$ be a complex semisimple Lie algebra with a Borel and a Cartan subalgebra. Let $A$ be the localization of the symmetric algebra $S=S(\mathfrak{h})$ at the maximal ideal of zero. The deformed category $\mathcal{O}_A$ is the full subcategory of $\mathfrak{g}$-$A$-bimodules that are finitely generated over $\mathfrak{g} \otimes_\mathbb{C} A$, semisimple over $\mathfrak{h}$ and locally finite over $\mathfrak{b}$. $\mathcal{O}_A$ decomposes into blocks which are parameterized by antidominant weights. For a given antidominant weight ${\lambda} \in \mathfrak{h}^*$ the weights involved in the corresponding block are given by the orbit $\mathcal{W}_{\lambda} \cdot {\lambda}$ of ${\lambda}$ under the dot-action of the integral Weyl group corresponding to ${\lambda}$. This combinatorial data defines a graph with $\mathcal{W}_{\lambda} \cdot {\lambda}$ the set of vertices being partially ordered by the Bruhat order on $\mathcal{W}_{\lambda}$ divided by the stabilizer of ${\lambda}$. Two different vertices are linked by an edge if there is a reflection of $\mathcal{W}_{\lambda}$ mapping one vertex to the other. In addition, every edge has a labeling given by the coroot corresponding to the positive root of the according reflection. Denote by $\mathcal{M}_{A,{\lambda}}$ the subcategory of the block corresponding to the antidominant weight ${\lambda}$ consisting of modules which have a Verma flag, i.e., a filtration with subquotients isomorphic to deformed Verma modules. Now the usual duality on category $\mathcal{O}$ extends to the deformed version $\mathcal{O}_A$. The modules which are self-dual and admit a Verma flag are called deformed tilting modules. 
The indecomposable tilting modules are parameterized by their highest weight and $\mathcal{M}_{A,{\lambda}}$ contains those with a highest weight lying in the orbit $\mathcal{W}_{\lambda} \cdot {\lambda}$.\\ While Fiebig shows that indecomposable deformed projective modules of $\mathcal{M}_{A,{\lambda}}$ correspond to Braden-MacPherson sheaves constructed along the Bruhat order of the associated moment graph, we prove in a very similar way that indecomposable deformed tilting modules correspond to Braden-MacPherson sheaves constructed along the reversed order on the moment graph. This approach implies a character formula for tilting modules which was already discovered in \cite{A}. There, Soergel uses a tilting equivalence to trace back character formulas for tilting modules to the known ones for projective modules. Our approach, however, doesn't use the tilting functor but has the disadvantage that it doesn't generalize to Kac-Moody algebras without using the tilting functor.\\ Another application of our result about tilting modules as sheaves on a moment graph is the connection between the Andersen and Jantzen filtration. The Jantzen filtration on a Verma module induces a filtration on the space of homomorphisms from a projective to the Verma module. The Andersen filtration on the space of homomorphisms from a Verma module to a tilting module is constructed in a very similar way as the Jantzen filtration. In \cite{11} we already proved that there is an isomorphism between both spaces of homomorphisms which interchanges the mentioned filtrations.\\ In this paper we describe both Hom-spaces on the level of sheaves on moment graphs. Since the construction of those involves the symmetric algebra $S(\mathfrak{h})$ we discover an inherited grading on both Hom-spaces. 
Now the advantage of this approach compared to \cite{11} is that we are able to construct an isomorphism which respects the grading and lifts to an isomorphism on the Hom-spaces which also interchanges both filtrations. In \cite{14} it is proved that the Andersen filtration coincides with the grading filtration on this Hom-space. Soergel's approach, however, is Koszul dual to \cite{1} and in combination with our result, leads to another proof of the semisimplicity of the Jantzen filtration layers. \section{Preliminaries}
In this section we repeat results of \cite{7} and \cite{3} about certain sheaves on moment graphs. We mostly follow the lecture notes \cite{10} and \cite{15}, which give a more introductory account of this subject.
\subsection{Moment graphs}
For a vector space $V$ we denote by $S:= S(V)$ the symmetric algebra of $V$ with the usual grading doubled, i.e., $\mathrm{deg} V=2$. Let $\mathcal{V}$ be the set of vertices and $\mathcal{E}$ the set of edges of a finite graph $(\mathcal{V},\mathcal{E})$. I.e., $\mathcal{V}$ is a finite set and $\mathcal{E} \subset \mathcal{P}(\mathcal{V})$ a subset of the power set of $\mathcal{V}$ with the following property:\\ If $E$ is an element of $\mathcal{E}$, then the cardinality of $E$ is two.
\begin{definition} An unordered $V$-moment graph $\mathcal{G}=(\mathcal{V},\mathcal{E}, \alpha)$ is a finite graph $(\mathcal{V},\mathcal{E})$ without loops and double edges, which is equipped with a map $\alpha: \mathcal{E} \rightarrow \mathbb{P}(V)$ that associates to any edge $E$ a line $\alpha_E :=\alpha(E)$ in $V$. \end{definition}
\begin{remark} The subsets $Y \subset \mathcal{V} \cup \mathcal{E}$ with the property:
$$x \in Y \cap \mathcal{V} \Rightarrow \{ E \in \mathcal{E} \,| \, x \in E\} \subset Y$$ form the open sets of a topology on $\mathcal{V} \cup \mathcal{E}$. By this means, we view $\mathcal{G}$ as a topological space. \end{remark}
\subsection{Sheaves on moment graphs}
Let $A$ be a commutative $S$-algebra. For $x \in \mathcal{V}$ define $x^{\circ}:= \{x\} \cup \{E \in \mathcal{E} \,| \, x\in E\}$. For a sheaf $\mathscr{M}$ of $A$-modules on the topological space $\mathcal{G}$ the stalks are given by\\ $\mathscr{M}_x = \mathscr{M}(x^{\circ})$ for $x\in \mathcal{V}$ and $\mathscr{M}_E = \mathscr{M}(\{E\})$ for $E \in \mathcal{E}$.\\ We denote by $\rho_{x,E}: \mathscr{M}_x \rightarrow \mathscr{M}_E$ the restriction map for $x \in E$. The sheaf $\mathscr{M}$ is uniquely determined by this data and we can define a sheaf of rings $\mathscr{A}$, namely the \textit{structure sheaf} of $\mathcal{G}$ over $A$, by setting \begin{itemize} \item $\mathscr{A}_x =A$ $\forall x \in \mathcal{V}$ \item $\mathscr{A}_E = A / \alpha_E A$ $\forall E \in \mathcal{E}$ \item $\rho_{x,E}: \mathscr{A}_x \rightarrow \mathscr{A}_E$ the quotient map for $x \in E$. \end{itemize}
By \cite{3} Proposition 1.1, an $\mathscr{A}$-module $\mathscr{M}$ is characterized by a tuple \\ $(\{\mathscr{M}_x\},\{\mathscr{M}_E\},\{\rho_{x,E}\})$ with the properties \begin{itemize} \item $\mathscr{M}_x$ is an $A$-module for any $x \in \mathcal{V}$ \item $\mathscr{M}_E$ is an $A$-module for all $E \in \mathcal{E}$ with $\alpha_E \mathscr{M}_E =0$ \item $\rho_{x,E}: \mathscr{M}_x \rightarrow \mathscr{M}_E$ is a homomorphism of $A$-modules for $x \in \mathcal{V}$, $E \in \mathcal{E}$ with $x \in E$. \end{itemize}
\begin{remark} In what follows, we will always work with this characterization of sheaves on the moment graph. If the $S$-algebra $A$ is $S$ itself, we consider all modules as graded $S$-modules and all maps between them as graded homomorphisms of degree zero.\\ To distinguish between the $S$-algebras we are working with we sometimes call the sheaf $\mathscr{M}$ an $A$-sheaf. \end{remark}
\subsection{Global sections}
Now let $A$ be a localization of $S$ at a prime ideal $\mathfrak{p} \subset S$. Denote by $\mathcal{SH}_A(\mathcal{G})^{f}$ the subcategory of $\mathscr{A}$-modules, such that $\mathscr{M}_x$ is torsion free and finitely generated over $A$ for all $x \in \mathcal{V}$. We denote by $\mathcal{Z} = \mathcal{Z}_A (\mathcal{G})$ the global sections $\Gamma(\mathscr{A})$ of the structure sheaf and call it the \textit{structure algebra of $\mathcal{G}$ over $A$}. By \cite{7}, section 2.5, we get $\mathcal{Z} :=\mathcal{Z}_A(\mathcal{G}) = \{(z_x) \in \prod_{x \in \mathcal{V}} A \, | \, z_x \equiv z_y \, \mathrm{mod} \, \alpha_E \ \text{for all} \ E = \{x,y\} \in \mathcal{E}\}$.
\begin{remark} In case $A=S$, $\mathcal{Z}_S(\mathcal{G})$ carries a grading induced by $S$. In this case we consider all $\mathcal{Z}$-modules as graded modules. \end{remark}
The functor of global sections
\begin{equation*} \Gamma: \mathscr{A}- \mathrm{mod} \longrightarrow \mathcal{Z}-\mathrm{mod} \end{equation*} has a left adjoint, namely the localization functor $\mathcal{L}$. Denote by $\mathcal{Z}_A-\mathrm{mod}^f$ the subcategory of $\mathcal{Z}$-modules that are finitely generated and torsion free over $A$.
\begin{lemma}(\cite{7}, Proposition 3.5.) \label{adj} The functors $\Gamma$ and $\mathcal{L}$ induce a pair of adjoint functors
\[ \begin{xy} \xymatrix{
\mathcal{SH}_A(\mathcal{G})^f \ar@<2pt>[r]^{\Gamma} & \mathcal{Z}_A-\mathrm{mod}^f \ar@<2pt>[l]^{\mathcal{L}} \\ } \end{xy} \] and the canonical maps $\Gamma(\mathscr{M}) \rightarrow \Gamma \mathcal{L} \Gamma (\mathscr{M})$ and $\mathcal{L}(M) \rightarrow \mathcal{L} \Gamma \mathcal{L} (M)$ are isomorphisms. \end{lemma}
\begin{remark} Lemma \ref{adj} implies that we get a pair of mutually inverse equivalences between the images of both functors
\[ \begin{xy} \xymatrix{
\mathcal{L}(\mathcal{Z}_A-\mathrm{mod}^f ) \ar@<2pt>[r]^{\Gamma} & \Gamma(\mathcal{SH}_A(\mathcal{G})^f) \ar@<2pt>[l]^{\mathcal{L}} \\ } \end{xy} \] \end{remark}
We now follow \cite{10} to give a concrete description of $\mathcal{L}$. Let $M \in \mathcal{Z}_A-\mathrm{mod}^f$ and denote by $Q$ the quotient field of $A$. Since $M$ is torsion free over $A$ we get an inclusion $M \hookrightarrow M \otimes_A Q$.\\ Let $\sum_{x \in \mathcal{V}} {e_x} = 1 \otimes 1 \in \mathcal{Z} \otimes_A Q \cong \prod_{x \in \mathcal{V}} Q$ be a decomposition of $1 \in \mathcal{Z} \otimes_A Q$ into idempotents. For $x \in \mathcal{V}$ set $$\mathcal{L}(M)_x = e_xM \subset M \otimes_A Q$$ For an edge $E=\{x,y\}$ with $\alpha:= \alpha(E)$ we set $$M(E) = (e_x + e_y) M + \alpha e_x M \subset e_x(M\otimes_A Q) \oplus e_y (M \otimes_A Q)$$ and form the push-out diagram
\begin{eqnarray*}
\begin{CD}
M(E) @>\pi_x >> \mathcal{L}(M)_x\\
@VV\pi_y V @VV\rho_{x,E} V\\
\mathcal{L}(M)_y @>\rho_{y,E} >> \mathcal{L}(M)_E
\end{CD} \end{eqnarray*}
where $\pi_x$, $\pi_y$ are defined by $\pi_x(z)=e_x z$ and $\pi_y(z) = e_y z$.
This gives the sought after stalk $\mathcal{L}(M)_E$ with restriction maps $\rho_{x,E}$, $\rho_{y,E}$ coming from the push-out diagram.
\subsection{Sheaves on ordered moment graphs}
\begin{definition} An ordered moment graph $\mathcal{G}=(\mathcal{V},\mathcal{E},\alpha, \leq)$ is a moment graph $(\mathcal{V},\mathcal{E},\alpha)$ with a partial order $\leq$ on the set $\mathcal{V}$ of vertices, such that for any $E = \{x,y\}$ the vertices $x, y \in \mathcal{V}$ are comparable. \end{definition}
\begin{definition} An F-open subgraph $\mathcal{H} = (\mathcal{V}',\mathcal{E}', \alpha',\leq')$ of $\mathcal{G}$ is a subgraph with $\alpha'$ and $\leq'$ the restrictions of $\alpha$ and $\leq$, respectively, such that \begin{itemize} \item If $E= \{x,y\}$ and $x,y \in \mathcal{V}'$, then $E \in \mathcal{E}'$ \item If $x \in \mathcal{V}'$ and $y \in \mathcal{V}$ with $y \leq x$, then $y \in \mathcal{V}'$ \end{itemize} \end{definition}
\begin{definition}
An $A$-sheaf $\mathscr{M}$ on $\mathcal{G}$ is called F-flabby if for any F-open subgraph $\mathcal{H}$ of $\mathcal{G}$ the restriction map $\Gamma(\mathscr{M}) \rightarrow \Gamma(\mathcal{H}, \mathscr{M})$ is surjective, where $\Gamma(\mathcal{H}, \mathscr{M}):= \{(m_x) \in \prod_{x \in \mathcal{V}'} \mathscr{M}_x \, | \, \rho_{x,E} (m_x) = \rho_{y,E}(m_y) \,\, \mathrm{for}\,\, E=\{x,y\} \in \mathcal{E}'\}$. \end{definition}
\begin{definition} A moment graph $(\mathcal{G}, \alpha, \leq)$ has the GKM-property if for every pair $E, E' \in \mathcal{E}$ with $E \neq E'$ and $E \cap E' \neq \emptyset$ we have $\alpha(E) \neq \alpha(E')$. \end{definition}
Recall that we decomposed $1 \in \mathcal{Z} \otimes_A Q \cong \prod_{x \in \mathcal{V}} Q$ into idempotents $1= \sum_{x \in \mathcal{V}}e_x$. For an F-open subgraph $\mathcal{H}$ of $\mathcal{G}$ with vertices $\mathcal{V}'$, we set $e_{\mathcal{H}} := \sum_{x \in \mathcal{V}'} e_x$.
\begin{prop}[\cite{10}, Proposition 3.14] \label{B} Suppose that $\mathcal{G}$ is a GKM-graph. Let $M$ be a finitely generated $\mathcal{Z}$-module that is torsion free over $A$. Suppose, in addition, that $e_{\mathcal{H}}M$ is a reflexive $A$-module for any F-open subgraph $\mathcal{H}$ of $\mathcal{G}$. Then $\mathcal{L}(M)$ is F-flabby on $\mathcal{G}$ and we have an isomorphism $$e_{\mathcal{H}}M \stackrel{\sim}{\longrightarrow} \Gamma({\mathcal{H}}, \mathcal{L}(M))$$ \end{prop}
\subsection{Braden-MacPherson sheaves}
In this section we will repeat the notion of F-projective sheaves on a moment graph and introduce Braden-MacPherson sheaves which form the indecomposable F-projective sheaves. \begin{definition} A sheaf $\mathscr{M}$ on $\mathcal{G}$ is \textit{generated by global sections} if the map $\Gamma(\mathscr{M}) \rightarrow \mathscr{M}_x$ is surjective for every $x \in \mathcal{V}$. \end{definition}
\begin{notation} \label{not1}
For any $x \in \mathcal{V}$ set $D_x := \{E \in \mathcal{E} \,| \, E=\{x,y\}$, $y \in \mathcal{V}$, $y\leq x\}$ and $U_x := \{E \in \mathcal{E} \,|\, E=\{x,y\}$, $y\in \mathcal{V}$, $x \leq y\}$. \end{notation}
\begin{definition} An $A$-sheaf $\mathscr{P}$ on $\mathcal{G}$ is called F-projective if \begin{itemize} \item $\mathscr{P}$ is F-flabby and generated by global sections \item Each $\mathscr{P}_x$ with $x \in \mathcal{V}$ is a free (graded free for $A=S$) $A$-module \item Any $\rho_{x,E}$ with $x \in \mathcal{V}$, $E \in U_x$ induces an isomorphism $\mathscr{P}_x / \alpha_E\mathscr{P}_x \rightarrow \mathscr{P}_E$ of (graded) $A$-modules. \end{itemize} \end{definition}
Next, we cite some results about Braden-MacPherson (BMP) sheaves from \cite{B} and \cite{10}. For this we take $A=S_{(0)}$ to be the localization of $S$ at the maximal ideal generated by $V$.
\begin{thm}[\cite{10}, section 3.5 and \cite{B}, Theorem 6.3] \begin{enumerate} \item For any $x \in \mathcal{V}$ there is an up to isomorphism unique graded $S$-sheaf $\mathscr{B}(x)$ on $\mathcal{G}$ with the following properties:
\begin{itemize}
\item $\mathscr{B}(x)$ is F-projective
\item $\mathscr{B}(x)$ is indecomposable (even as a non-graded sheaf)
\item $\mathscr{B}(x)_x \cong S$ and $\mathscr{B}(x)_y = 0$ unless $ x \leq y$
\end{itemize}
\item Let $\mathscr{P}$ be an F-projective $A$-sheaf of finite type on $\mathcal{G}$. Then there exists an isomorphism of $A$-sheaves
$$\mathscr{P} \cong \mathscr{B}(z_1) \otimes_S A \oplus ... \oplus \mathscr{B}(z_n) \otimes_S A$$
with suitable vertices $z_1,..., z_n$.
\item Let $\mathscr{P}$ be a graded F-projective $S$-sheaf of finite type on $\mathcal{G}$. Then there exists an isomorphism of graded $S$-sheaves
$$\mathscr{P} \cong \mathscr{B}(z_1)[l_1] \oplus ... \oplus \mathscr{B}(z_n)[l_n]$$
with suitable vertices $z_1,..., z_n$ and suitable shifts $l_1,...,l_n$. \end{enumerate} \end{thm}
\section{Deformed category $\mathcal{O}$}
In this section we recall results about the deformed category $\mathcal{O}$ of a semisimple complex Lie algebra $\mathfrak{g}$ with Borel $\mathfrak{b}$ and Cartan $\mathfrak{h}$, which one can find in \cite{5}, \cite{14} and \cite{11}. Let $S$ denote the universal enveloping algebra of the Cartan subalgebra $\mathfrak{h}$, which is equal to the ring of polynomial functions $\mathbb{C}[\mathfrak{h}^{*}]$. We call a commutative, associative, noetherian, unital, local $S$-algebra $A$ with structure morphism $\tau: S \rightarrow A$ a \textit{local deformation algebra}.\\
Let $A$ be a local deformation algebra with structure morphism $\tau: S\rightarrow A$ and let $M \in \mathfrak{g}$-mod-$A$. For ${\lambda} \in \mathfrak{h}^{*}$ we set
$$M_{\lambda} = \{m \in M| hm=({\lambda} + \tau)(h)m \, \, \forall h \in \mathfrak{h}\}$$ where $({\lambda}+\tau)(h)$ is meant to be an element of $A$. We call the $A$-submodule $M_{\lambda}$ the \textit{deformed ${\lambda}$-weight space} of $M$.\\
The \textit{deformed category} $\mathcal{O}_{A}$ is the full subcategory of all bimodules $M \in \mathfrak{g}$-mod-$A$ with the properties \begin{itemize} \item $M= \bigoplus\limits_{{\lambda} \in \mathfrak{h}^{*}} M_{\lambda}$, \item for every $m \in M$ the $\mathfrak{b}$-$A$-sub-bimodule generated by $m$ is finitely generated as an $A$-module, \item $M$ is finitely generated as a $\mathfrak{g}$-$A$-bimodule. \end{itemize}
Taking $A=S/S\mathfrak{h} \cong \mathbb{C}$, $\mathcal{O}_{A}$ is just the usual BGG-category $\mathcal{O}$.\\ For ${\lambda} \in \mathfrak{h}^{*}$ the \textit{deformed Verma module} is defined by $$\Delta_A({\lambda}) = U(\mathfrak{g}) \otimes_{U(\mathfrak{b})} A_{\lambda}$$ where $A_{\lambda}$ denotes the $U(\mathfrak{b})$-$A$-bimodule $A$ with $\mathfrak{b}$-structure given by the composition $U(\mathfrak{b}) \rightarrow S \stackrel{{\lambda} + \tau}{\longrightarrow} A$.\\
The Lie algebra $\mathfrak{g}$ possesses an involutive anti-automorphism $\sigma:\mathfrak{g} \rightarrow \mathfrak{g}$ with $\sigma|_{\mathfrak{h}} = -\mathrm{id}$. This gives the $A$-module $\mathrm{Hom}_A(M,A)$ a $\sigma$-twisted $\mathfrak{g}$-module structure. Denoting by $dM$ the sum of all deformed weight spaces in $\mathrm{Hom}_A(M,A)$, we get a functor
$$d=d_\sigma : \mathcal{O}_{A} \longrightarrow \mathcal{O}_{A}$$
which is a duality on $\mathfrak{g}$-$A$-bimodules which are free over $A$. We now set $\nabla_A({\lambda})=d\Delta_A({\lambda})$ for ${\lambda} \in \mathfrak{h}^{*}$ and call this the \textit{deformed nabla module}.
\begin{prop}[\cite{14}, Proposition 2.12.]\label{Soe1} \begin{enumerate} \item For all ${\lambda}$ the restriction to the deformed weight space of ${\lambda}$ together with the two canonical identifications $\Delta_A({\lambda})_{\lambda} \stackrel{\sim}{\rightarrow} A$ and $\nabla_A({\lambda})_{\lambda} \stackrel{\sim}{\rightarrow} A$ induces an isomorphism $$\mathrm{Hom}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),\nabla_A({\lambda})) \stackrel{\sim}{\longrightarrow}A$$
\item For ${\lambda} \neq \mu$ in $\mathfrak{h}^{*}$ we have $\mathrm{Hom}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),\nabla_A(\mu))=0$.
\item For all ${\lambda},\mu \in \mathfrak{h}^{*}$ we have $\mathrm{Ext}^{1}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),\nabla_A(\mu))=0$. \end{enumerate} \end{prop}
\begin{cor}[\cite{14}, Corollary 2.13.]\label{Soe2} Let $M,N \in \mathcal{O}_{A}$. If $M$ has a $\Delta_A$-flag and $N$ a $\nabla_A$-flag, then the space of homomorphisms $\mathrm{Hom}_{\mathcal{O}_{A}}(M,N)$ is a finitely generated free $A$-module and for any homomorphism $A\rightarrow A'$ of local deformation algebras the obvious map defines an isomorphism $$\mathrm{Hom}_{\mathcal{O}_{A}}(M,N)\otimes_{A} A' \stackrel{\sim}{\longrightarrow} \mathrm{Hom}_{\mathcal{O}_{A'}}(M\otimes_{A} A',N\otimes_{A} A')$$ \end{cor}
\begin{proof} This follows from Proposition \ref{Soe1} by induction on the length of the $\Delta_A$- and $\nabla_A$-flag. \end{proof}
If $\mathfrak{m}\subset A$ is the unique maximal ideal in the local deformation algebra $A$ we set $\mathbb{K}=A/\mathfrak{m}A$ for its residue field.
\begin{thm}[\cite{5}, Propositions 2.1 and 2.6] \label{Fie1}
\begin{enumerate} \item The base change $\cdot \otimes_A \mathbb{K}$ gives a bijection\\ \begin{displaymath}
\begin{array}{ccc}
\left\{\begin{array}{c}
\textrm{simple isomorphism}\\
\textrm{classes of $\mathcal{O}_{A}$}
\end{array}\right\}
&
\longleftrightarrow
&
\left\{\begin{array}{c}
\textrm{simple isomorphism}\\
\textrm{classes of $\mathcal{O}_{\mathbb{K}}$}
\end{array}\right\}
\end{array}
\end{displaymath}
\item The base change $\cdot \otimes_A \mathbb{K}$ gives a bijection\\ \begin{displaymath}
\begin{array}{ccc}
\left\{\begin{array}{c}
\textrm{projective isomorphism}\\
\textrm{classes of $\mathcal{O}_{A}$}
\end{array}\right\}
&
\longleftrightarrow
&
\left\{\begin{array}{c}
\textrm{projective isomorphism}\\
\textrm{classes of $\mathcal{O}_{\mathbb{K}}$}
\end{array}\right\}
\end{array}
\end{displaymath} \end{enumerate} \end{thm}
The category $\mathcal{O}_{\mathbb{K}}$ is a direct summand of the category $\mathcal{O}$ over the Lie algebra $\mathfrak{g} \otimes \mathbb{K}$. It consists of all objects whose weights lie in the complex affine subspace $\tau + \mathfrak{h}^{*} = \tau + \mathrm{Hom}_\mathbb{C}(\mathfrak{h},\mathbb{C}) \subset \mathrm{Hom}_\mathbb{K}(\mathfrak{h}\otimes \mathbb{K},\mathbb{K})$, for $\tau$ the restriction to $\mathfrak{h}$ of the map that makes $\mathbb{K}$ into an $S$-algebra. Thus the simple objects of $\mathcal{O}_{A}$ are parameterized by their highest weight in $\mathfrak{h}^{*}$. Denote by $L_A({\lambda})$ the simple object with highest weight ${\lambda}$. We also use the usual partial order on $\mathfrak{h}^{*}$ to partially order $\tau + \mathfrak{h}^{*}$.
\begin{thm}[\cite{5}, Proposition 2.4 and Theorem 2.7] \label{Fie2} Let $A$ be a local deformation algebra and $\mathbb{K}$ its residue field. Let $L_A({\lambda})$ be a simple object in $\mathcal{O}_{A}$.
\begin{enumerate} \item There is a projective cover $P_A({\lambda})$ of $L_A({\lambda})$ in $\mathcal{O}_{A}$ and every projective object in $\mathcal{O}_{A}$ is isomorphic to a direct sum of projective covers. \item $P_A({\lambda})$ has a Verma flag, i.e., a finite filtration with subquotients isomorphic to Verma modules, and for the multiplicities we have the BGG-reciprocity formula $$(P_A({\lambda}):\Delta_A(\mu)) = [\Delta_{\mathbb{K}}(\mu):L_{\mathbb{K}}({\lambda})]$$ for all Verma modules $\Delta_A(\mu)$ in $\mathcal{O}_{A}$. \item Let $A \rightarrow A'$ be a homomorphism of local deformation algebras and $P$ projective in $\mathcal{O}_{A}$. Then $P\otimes_A A'$ is projective in $\mathcal{O}_{A'}$ and the natural transformation $$\mathrm{Hom}_{\mathcal{O}_{A}}(P,\cdot) \otimes_A A' \longrightarrow \mathrm{Hom}_{\mathcal{O}_{A'}}(P \otimes_A A', \cdot \otimes_A A')$$ is an isomorphism of functors from $\mathcal{O}_{A}$ to $A'$-mod. \end{enumerate} \end{thm}
\subsection{Block decomposition}
Let $A$ again denote a local deformation algebra and $\mathbb{K}$ its residue field.
\begin{definition} Let $\sim_A$ be the equivalence relation on $\mathfrak{h}^{*}$ generated by ${\lambda} \sim_A \mu$ if $[\Delta_{\mathbb{K}}({\lambda}):L_{\mathbb{K}}(\mu)] \neq 0$. \end{definition}
\begin{definition} Let $\Lambda \in \mathfrak{h}^{*}/\sim_{A}$ be an equivalence class. Let $\mathcal{O}_{A,\Lambda}$ be the full subcategory of $\mathcal{O}_{A}$ consisting of all modules $M$ such that every highest weight of a subquotient of $M$ lies in $\Lambda$. \end{definition}
\begin{prop}[\cite{5}, Proposition 2.8] (Block decomposition)\label{Fie3} The functor \begin{eqnarray*}
\begin{array}{ccc}
\bigoplus\limits_{\Lambda \in \mathfrak{h}^{*}/\sim_A} \mathcal{O}_{A,\Lambda} &\longrightarrow& \mathcal{O}_{A}\\
(M_\Lambda)_{\Lambda \in \mathfrak{h}^{*}/\sim_A}& \longmapsto & \bigoplus\limits_{\Lambda \in \mathfrak{h}^{*}/\sim_A} M_\Lambda
\end{array}
\end{eqnarray*} is an equivalence of categories. \end{prop}
\begin{remark} For $R=S_{(0)}$ the localization of $S$ at the maximal ideal generated by $\mathfrak{h}$, the block decomposition of $\mathcal{O}_R$ corresponds to the block decomposition of the BGG-category $\mathcal{O}$ over $\mathfrak{g}$.\\ \end{remark}
Let $\tau:S \rightarrow \mathbb{K}$ be the induced map that makes $\mathbb{K}$ into an $S$-algebra. Restricting to $\mathfrak{h}$ and extending with $\mathbb{K}$ yields a $\mathbb{K}$-linear map $\mathfrak{h} \otimes \mathbb{K} \rightarrow \mathbb{K}$ which we will also call $\tau$. Let $\mathcal{R} \supset \mathcal{R}^{+}$ be the root system with positive roots according to our data $\mathfrak{g}\supset \mathfrak{b} \supset \mathfrak{h}$. For ${\lambda} \in \mathfrak{h}_{\mathbb{K}}^{*}=\mathrm{Hom}_\mathbb{K}(\mathfrak{h} \otimes \mathbb{K},\mathbb{K})$ and $\check{\alpha} \in \mathfrak{h}$ the dual root of a root $\alpha \in \mathcal{R}$ we set $\left\langle {\lambda}, \check{\alpha}\right\rangle_{\mathbb{K}} = {\lambda}(\check{\alpha}) \in \mathbb{K}$. Let $\mathcal{W}$ be the Weyl group of $(\mathfrak{g},\mathfrak{h})$ and denote by $s_\alpha$ the reflection corresponding to $\alpha \in \mathcal{R}$.
\begin{definition} For $\mathcal{R}$ the root system of $\mathfrak{g}$ and $\Lambda \in \mathfrak{h}^{*}/\sim_A$ we define
$$\mathcal{R}_A(\Lambda)=\{\alpha \in \mathcal{R} | \left\langle {\lambda} + \tau, \check{\alpha}\right\rangle_{\mathbb{K}} \in \mathbb{Z} \subset \mathbb{K} \text{ for some } {\lambda} \in \Lambda\}$$
and call it the integral roots corresponding to $\Lambda$. Let $\mathcal{R}_A^{+}(\Lambda)$ denote the positive roots in $\mathcal{R}_A(\Lambda)$ and set $$\mathcal{W}_A(\Lambda)=\langle \{s_\alpha \in \mathcal{W} | \alpha \in \mathcal{R}_A^{+}(\Lambda)\}\rangle \subset \mathcal{W}$$ We call it the integral Weyl group with respect to $\Lambda$. \end{definition} From \cite{5} Corollary 3.3 it follows that $$\Lambda=\mathcal{W}_A(\Lambda)\cdot {\lambda} \text{ for any } {\lambda} \in \Lambda$$ where we denote by $\cdot$ the $\rho$-shifted dot-action of the Weyl group.\\ Since most of the following constructions commute with base change, we are particularly interested in the case when $A=R_{\mathfrak{p}}$ is a localization of $R$ at a prime ideal $\mathfrak{p}$ of height one. The functor $\cdot \otimes_R R_{\mathfrak{p}}$ will split the deformed category $\mathcal{O}_{A}$ into generic and subgeneric blocks:
\begin{lemma}[\cite{6}, Lemma 3] \label{Fie4} Let $\Lambda \in \mathfrak{h}^{*}/\sim_{R}$ and let $\mathfrak{p} \in R$ be a prime ideal. \begin{enumerate} \item If $\check{\alpha} \notin \mathfrak{p}$ for all roots $\alpha \in \mathcal{R}_{R}(\Lambda)$, then $\Lambda$ splits under $\sim_{R_\mathfrak{p}}$ into generic equivalence classes. \item If $\mathfrak{p} = R\check{\alpha}$ for a root $\alpha \in \mathcal{R}_{R}(\Lambda)$, then $\Lambda$ splits under $\sim_{R_\mathfrak{p}}$ into subgeneric equivalence classes of the form $\{{\lambda},s_{\alpha}\cdot {\lambda}\}$. \end{enumerate} \end{lemma} We recall that we denote by $P_A({\lambda})$ the projective cover of the simple object $L_A({\lambda})$. It is indecomposable and up to isomorphism uniquely determined. For an equivalence class $\Lambda \in \mathfrak{h}^{*}/\sim_A$ which contains ${\lambda}$ and is generic, i.e., $\Lambda=\{{\lambda}\}$, we get $P_A({\lambda})=\Delta_A({\lambda})$. If $\Lambda=\{{\lambda},\mu\}$ and $\mu< {\lambda}$, we have $P_A({\lambda})=\Delta_A({\lambda})$ and there is a non-split short exact sequence in $\mathcal{O}_{A}$ $$0\rightarrow \Delta_A({\lambda}) \rightarrow P_A(\mu) \rightarrow \Delta_A(\mu)\rightarrow 0$$ In this case, every endomorphism $f: P_A(\mu) \rightarrow P_A(\mu)$ maps $\Delta_A({\lambda})$ to $\Delta_A({\lambda})$ since ${\lambda}>\mu$. So $f$ induces a commutative diagram \begin{eqnarray*} \begin{CD}
0 @>>> \Delta_{A}({\lambda}) @>>> P_{A}(\mu) @>>> \Delta_{A}(\mu) @>>> 0\\
@VVV @V f_{\lambda} VV @VV f V @VV f_\mu V @VVV\\
0 @>>> \Delta_{A}({\lambda}) @>>> P_{A}(\mu) @>>> \Delta_{A}(\mu) @>>> 0 \end{CD}\end{eqnarray*} Since endomorphisms of Verma modules correspond to elements of $A$, we get a map \begin{displaymath}
\begin{array}{ccc}
\chi: \mathrm{End}_{\mathcal{O}_{A}}(P_{A}(\mu))& \longrightarrow & A \oplus A \\
f & \longmapsto &(f_{\lambda} , f_\mu)
\end{array} \end{displaymath}
For $\mathfrak{p}=R\check{\alpha}$ we define $R_\alpha :=R_\mathfrak{p}$ to be the localization of $R$ at the prime ideal $\mathfrak{p}$.
\begin{prop}[\cite{5}, Corollary 3.5] \label{Fie5} Let $\Lambda \in \mathfrak{h}^{*}/\sim_{R_\alpha}$. If $\Lambda=\{{\lambda},\mu\}$ and ${\lambda}=s_{\alpha}\cdot \mu >\mu$, the map $\chi$ from above induces an isomorphism of $R_\alpha$-modules
$$\mathrm{End}_{\mathcal{O}_{R_\alpha}}(P_{R_\alpha}(\mu)) \cong \left\{(t_{\lambda},t_\mu) \in {R_\alpha}\oplus {R_\alpha} \middle| t_{\lambda} \equiv t_\mu \text{ mod } \check{\alpha}\right\}$$ \end{prop}
\subsection{Deformed tilting modules}
In this section, $A$ will be a localization of $R=S_{(0)}$ at a prime ideal $\mathfrak{p} \subset R$ and $\mathbb{K}$ its residue field. Let ${\lambda} \in \mathfrak{h}^{*}$ be such that $\Delta_{\mathbb{K}}({\lambda})$ is a simple object in $\mathcal{O}_{\mathbb{K}}$. Thus, we have $\Delta_{\mathbb{K}}({\lambda}) \cong \nabla_{\mathbb{K}}({\lambda})$ and the canonical inclusion $\Delta_A({\lambda}) \hookrightarrow \nabla_A({\lambda})$ becomes an isomorphism after applying $\cdot \otimes_A \mathbb{K}$. So by Nakayama's lemma, we conclude that this inclusion is bijective.
\begin{definition} By $\mathcal{K}_A$ we denote the full subcategory of $\mathcal{O}_{A}$ which \begin{enumerate} \item includes the self-dual deformed Verma modules, \item is stable under tensoring with finite dimensional $\mathfrak{g}$-modules, \item is stable under forming direct sums and summands. \end{enumerate} \end{definition}
\begin{prop}(\cite{11}, Proposition 3.2.) \label{tilt1} The base change $\cdot \otimes_A \mathbb{K}$ gives a bijection\\ \begin{displaymath}
\begin{array}{ccc}
\left\{\begin{array}{c}
\textrm{isomorphism classes}\\
\textrm{of $\mathcal{K}_A$}
\end{array}\right\}
&
\longleftrightarrow
&
\left\{\begin{array}{c}
\textrm{isomorphism classes}\\
\textrm{of $\mathcal{K}_\mathbb{K}$}
\end{array}\right\}
\end{array}
\end{displaymath} \end{prop}
\begin{remark} For $A=S/S\mathfrak{h}=\mathbb{C}$ the category $\mathcal{K}_A$ is just the usual subcategory of tilting modules of the category $\mathcal{O}$ over $\mathfrak{g}$. The definition also implies that deformed tilting modules have a Verma flag and are self-dual. Furthermore, the indecomposable tilting modules are classified by their highest weight and we denote by $K_A({\lambda})$ the deformed tilting module with highest weight ${\lambda} \in \mathfrak{h}^*$. \end{remark}
\section{Tilting modules as sheaves on moment graphs}
In this section we repeat the connection between representation theory and sheaves on moment graphs via the structure functor $\mathbb{V}$ as it is described in \cite{7}. We prove without using the tilting functor that tilting modules of a deformed block in category $\mathcal{O}$ become certain BMP-sheaves on a certain moment graph associated to this block. As a corollary of this we get character formulas of tilting modules without using the tilting functor.
\subsection{The functor $\mathbb{V}$}
Again, $A$ denotes a localization of $R$ at a prime ideal. We first want to get a functor from a block of deformed category $\mathcal{O}$ to sheaves on a certain moment graph. Given an antidominant weight ${\lambda} \in \mathfrak{h}^*$ denote by $\Lambda \in \mathfrak{h}^* /\sim_A$ its equivalence class. We now set $\mathcal{W}_{\lambda} = \mathcal{W}_A (\Lambda)$ and $\mathcal{W}':= \mathrm{Stab}_{\mathcal{W}_{\lambda}} ({\lambda})$. We then get a bijection $$\mathcal{W}_{\lambda} \cdot {\lambda} \cong \mathcal{W}_{\lambda} / \mathcal{W}'$$ Now we define the ordered $\mathfrak{h}^*$-moment graph $\mathcal{G}=(\mathcal{V},\mathcal{E},\alpha, \leq)$ associated to the block $\mathcal{W}_{\lambda} \cdot {\lambda}$ by letting
$$\mathcal{V} := \mathcal{W}_{\lambda} / \mathcal{W}'$$
and two different vertices $x,y \in \mathcal{V}$ are joined by an edge $E=\{x,y\}$ if there is a positive root $\alpha \in \mathcal{R}_A^+(\Lambda)$ with $x=s_\alpha \cdot y$. The labeling of $E$ is $\alpha(E)= \check{\alpha}$. For $w,w' \in \mathcal{V}$ we define the order $\leq$ by $$w \leq w' \Leftrightarrow w \cdot {\lambda} \leq w'\cdot {\lambda}$$ This ordered moment graph has the GKM-property. Note that this order is not the Bruhat order in general. But for two adjacent vertices both orderings coincide.
\begin{thm}[\cite{5}, Theorem 3.6.] Let $\mathcal{Z}= \mathcal{Z}_A (\mathcal{G})$ be the structure algebra of the above moment graph. Then there is an isomorphism $$\mathcal{Z} \cong \mathrm{End}_{\mathcal{O}_A}(P_A({\lambda}))$$ \end{thm}
We now get a functor
$$\mathbb{V} := \mathrm{Hom}_{\mathcal{O}_A}(P_A({\lambda}), \cdot) : \mathcal{O}_{A, {\lambda}} \longrightarrow \mathcal{Z}-mod$$
\begin{definition} Denote by $\mathscr{V}_A(w)$ the skyscraper sheaf on $\mathcal{G}$ at the vertex $w \in \mathcal{V}$, i.e., the $A$-sheaf with $\mathscr{V}_A(w)_w \cong A$ and whose stalks at every other vertex and at every edge are zero. \end{definition}
\begin{prop} \begin{enumerate} \item Let $w \in \mathcal{V}$. Then $\mathcal{L}(\mathbb{V} \Delta_A(w \cdot {\lambda})) \cong \mathcal{L}(\mathbb{V} \nabla_A(w \cdot {\lambda})) \cong \mathscr{V}_A(w)$.
\item Let $M \in \mathcal{O}_{A,{\lambda}}$ admit a Verma- or a Nabla-flag. Then $\mathbb{V} M$ is a free $A$-module of finite rank. \end{enumerate} \end{prop}
\begin{proof} The proof is analogous to \cite{10} Proposition 4.13. \end{proof}
\subsection{Deformed tilting modules and BMP-sheaves}\label{BMP}
Following \cite{10} section 4.14., we construct certain submodules and quotients of a module $M \in \mathcal{O}_{A}$. Let $D$ be a subset of $\mathfrak{h}^*$ with the property:\\ If ${\lambda} \in D$ and $\mu \in \mathfrak{h}^*$ with $\mu \leq {\lambda}$, then $\mu \in D$.\\ Set for $M \in \mathcal{O}_A$ $$ O^D M:= \sum_{\mu \notin D} U(\mathfrak{g}_A) M_\mu \quad \text{and} \quad M[D] := M/O^D M$$
\begin{prop} [\cite{10}, section 4.14.]
\begin{enumerate}
\item If $M$ has a Verma flag, so do $O^D M$ and $M[D]$
\item
\begin{displaymath}
\begin{array}{ccc}
O^D\Delta_A({\lambda}) \cong
\left\{ \begin{array}{c}
\Delta_A ({\lambda}) \, \textrm{if}\, {\lambda} \notin D\\
0 \,\,\,\,\,\,\,\,\, \textrm{else}
\end{array}\right.
&
\text{and}
&
\Delta_A({\lambda})[D] \cong
\left \{\begin{array}{c}
0 \,\,\,\,\,\,\,\, \textrm{if}\, {\lambda} \notin D\\
\Delta_A({\lambda}) \,\,\,\textrm{else}
\end{array}\right.
\end{array}
\end{displaymath}
\end{enumerate} \end{prop}
We now change notation: For the moment graph $\mathcal{G}=(\mathcal{V}, \mathcal{E}, \alpha , \leq)$ associated to the block $\mathcal{O}_{A,{\lambda}}$ we write $\uparrow$-open for an F-open subgraph. For a subgraph which is F-open according to the moment graph with the reversed order, we write $\downarrow$-open. A subgraph $\mathcal{H}$ with set of vertices $\mathcal{V}'$ is $\uparrow$-open if and only if $\mathcal{H}^c$ is $\downarrow$-open, where $\mathcal{H}^c$ is the full subgraph of $\mathcal{G}$ with vertices $\mathcal{V}^c:= \mathcal{V} - \mathcal{V}'$. For $\mathcal{H}$ a $\downarrow$-open subgraph, set $D$ equal to the set of all $\nu \in \mathfrak{h}^*$, such that there exists $x \in \mathcal{V}^c$ with $\nu \leq x \cdot {\lambda}$.\\ Set $O^{\mathcal{H}^c} M := O^D M$ and $M[\mathcal{H}^c] =M [D]$.
\begin{lemma} \label{A} Let $M$ be a tilting module. There is a natural isomorphism $$e_{\mathcal{H}} \mathbb{V} M \cong \mathbb{V}(d(O^{\mathcal{H}^c} M))$$ \end{lemma}
\begin{proof} Dualising the short exact sequence $$O^{\mathcal{H}^c} M \hookrightarrow M \twoheadrightarrow M[\mathcal{H}^c]$$
we get by self-duality of $M$ and the freeness of all modules over $A$ a short exact sequence
$$d(O^{\mathcal{H}^c} M) \twoheadleftarrow M \hookleftarrow d(M[\mathcal{H}^c])$$
Since $\mathbb{V}$ is exact we get a short exact sequence
$$\mathbb{V}(d(O^{\mathcal{H}^c} M)) \twoheadleftarrow \mathbb{V}(M) \hookleftarrow \mathbb{V}(d(M[\mathcal{H}^c]))$$
Since $d(M[\mathcal{H}^c])\otimes_A Q$ is the direct sum of Verma modules of the form $\Delta_Q (x \cdot {\lambda})\cong \nabla_Q (x \cdot {\lambda})$ with $x \notin \mathcal{V}'$ we get $e_{\mathcal{H}} \mathbb{V} M \cong \mathbb{V}(d(O^{\mathcal{H}^c} M))$. \end{proof}
\begin{definition} A sheaf $\mathscr{M}$ on $\mathcal{G}$ is called $\downarrow$-flabby (resp. $\downarrow$-projective) if it is F-flabby (resp. F-projective) according to the moment graph $\mathcal{G}$ with reversed order. \end{definition}
\begin{prop} \label{D} Let $M$ be a tilting module. Then $\mathcal{L}(\mathbb{V} M)$ is $\downarrow$-flabby. \end{prop}
\begin{proof} For any $\downarrow$-open subgraph $\mathcal{H}$ we get that $e_{\mathcal{H}}\mathbb{V} M$ is a free $A$-module by lemma \ref{A}. Now proposition \ref{B} tells us that $\mathcal{L}(\mathbb{V} M)$ is $\downarrow$-flabby. \end{proof}
\begin{notation} For $x \in \mathcal{V}$ we denote by $\mathscr{B}^{\uparrow}(x)$ the BMP-sheaf $\mathscr{B}(x)$ for our moment graph with the original order. For the moment graph with reversed order we denote the BMP-sheaf at the vertex $x$ by $\mathscr{B}^{\downarrow}(x)$. \end{notation}
In the following we want to show that the indecomposable deformed tilting module $K_A(x \cdot {\lambda})$ with highest weight $x\cdot {\lambda}$ corresponds to $\mathscr{B}^{\downarrow}(x)\otimes_S A$ under $\mathcal{L} \circ \mathbb{V}$. For this we need some preparation.
\begin{lemma}(\cite{7}, Lemma 7.4.)\label{inj} Let $M \in \mathcal{O}$ admit a Verma flag, $\mu \in \mathfrak{h}^*$ and $k \in \mathbb{N}$. If $g: (\Delta_\mathbb{C}(\mu))^k \rightarrow M$ is a morphism which induces an injective map $g_\mu : (\Delta_\mathbb{C}(\mu))_\mu ^k \rightarrow M_\mu$ on the $\mu$-weight spaces, then $g$ is injective. \end{lemma}
\begin{lemma} \label{C} Let $K \in \mathcal{O}_{A,{\lambda}}$ be a tilting module. Then for any $w \in \mathcal{V}$ $\mathcal{L}(\mathbb{V} K)_w$ is free over $A$ of rank $r:= (K: \Delta_A (w\cdot {\lambda}))$. \end{lemma}
\begin{proof} We want to construct an isomorphism $e_w (\mathbb{V} K) = \mathcal{L}(\mathbb{V} K)_w \xrightarrow{\sim} \mathbb{V}(\nabla_A (w\cdot {\lambda})^r)$. Using corollary \ref{Soe2} and the fact that over the residue field $\mathbb{K}$ we have $\mathrm{dim}_\mathbb{K} \mathrm{Hom}_{\mathcal{O}_\mathbb{K}}(\Delta_\mathbb{K}(w \cdot {\lambda}) , K \otimes_A \mathbb{K}) = \mathrm{dim}_\mathbb{K} \mathrm{Hom}_{\mathcal{O}_\mathbb{K}}(K \otimes_A \mathbb{K}, \nabla_\mathbb{K}(w\cdot {\lambda})) = (K\otimes_A \mathbb{K} : \Delta_\mathbb{K}(w\cdot {\lambda}))$, we can deduce that $\mathrm{Hom}_{\mathcal{O}_A}(\Delta_A(w\cdot {\lambda}),K)$ is a free $A$- module of rank $r$. Now choose an $A$-basis $f_1,...,f_r$ of $\mathrm{Hom}_{\mathcal{O}_A}(\Delta_A(w\cdot {\lambda}),K)$. Dualising yields a basis $df_1,..., df_r$ of $\mathrm{Hom}_{\mathcal{O}_A}(K,\nabla_A(w\cdot {\lambda}))$. Consider now the map \begin{displaymath}
\begin{array}{cc}
f: \Delta_A(w\cdot {\lambda})^r \longrightarrow K
&
(v_1,...,v_r) \mapsto \sum f_i(v_i)
\end{array} \end{displaymath}
Dualising this map yields by self-duality of $K$
\begin{displaymath}
\begin{array}{cc}
df: K \longrightarrow \nabla_A(w\cdot {\lambda})^r
&
v \mapsto \sum (df_i(v))
\end{array} \end{displaymath}
Applying the functor $\mathbb{V}$ gives a map $$\mathbb{V} df :\mathbb{V} K \longrightarrow \mathbb{V} \nabla_A(w\cdot {\lambda})^r$$ which factors over $e_{w}(\mathbb{V} K)$, since $\mathbb{V} \nabla_A (w \cdot {\lambda})^r$ has support $\{w\}$. So this gives a well-defined map \begin{displaymath}
\begin{array}{cc}
(\mathbb{V} df)^w: e_w \mathbb{V} K \longrightarrow \mathbb{V}\nabla_A(w\cdot {\lambda})^r
\end{array} \end{displaymath} which is injective, since it becomes an isomorphism after applying $\cdot \otimes_A Q$. We now want to prove that $(\mathbb{V} df)^w$ is surjective. For this, by Nakayama's lemma and by exactness of $\mathbb{V}$, it is enough to prove that \begin{displaymath}
\begin{array}{cc}
df\otimes_A \mathrm{id_\mathbb{C}}: K \otimes_A \mathbb{C} \longrightarrow \nabla_A(w\cdot {\lambda})^r \otimes_A \mathbb{C}
\end{array} \end{displaymath} is surjective. But since $f: \Delta_\mathbb{C} (w \cdot {\lambda})^r \rightarrow K\otimes_A \mathbb{C}$ is injective, by lemma \ref{inj} we get the surjectivity of $df: K\otimes_A \mathbb{C} \rightarrow \nabla_\mathbb{C}(w \cdot {\lambda})^r$. Since $\mathbb{V} \nabla_A (w\cdot {\lambda})^r$ is free of rank $r$ over $A$ we get the result. \end{proof}
\begin{thm} Let $K \in \mathcal{O}_{A,{\lambda}}$ be a tilting module. Then $\mathcal{L}(\mathbb{V} K)$ is $\downarrow$-projective as an $A$-sheaf on $\mathcal{G}$. \end{thm}
\begin{proof} By \cite{10} section 2.12.(A) $\mathcal{L}(\mathbb{V} K)$ is generated by global sections and $\downarrow$-flabby by proposition \ref{D}. Furthermore, the lemma above shows that $(\mathcal{L} (\mathbb{V} K))_w$ is free over $A$ for every $w \in \mathcal{V}$.\\ So we only have to prove for any edge $E:=\{x,y\}$ with $x> y$ and $\alpha(E)= \check{\alpha}$, where $x,y \in \mathcal{W}_{\lambda} / \mathcal{W}'$ and $\check{\alpha}$ the coroot of a root $\alpha \in \mathcal{R}_{\lambda}$, that the map $\rho_{x,E} : \mathcal{L} (\mathbb{V} K)_x \rightarrow \mathcal{L} (\mathbb{V} K)_E$ induces an isomorphism $$\mathcal{L} (\mathbb{V} K)_x / \check{\alpha} \mathcal{L} (\mathbb{V} K)_x \stackrel{\sim}\longrightarrow \mathcal{L} (\mathbb{V} K)_E$$
Since $\check{\alpha} \mathcal{L} (\mathbb{V} K)_x \subset \mathrm{ker} \rho_{x,E}$, we have to show $\mathrm{ker} \rho_{x,E} \subset \check{\alpha} \mathcal{L} (\mathbb{V} K)_x$. For this it suffices to show that \begin{equation}\label{E} \mathrm{ker} \rho_{x,E} \subset \check{\alpha} (\mathcal{L} (\mathbb{V} K)_x \otimes_A A_\mathfrak{p}) = \check{\alpha} \cdot e_x(\mathcal{L} (\mathbb{V} K) \otimes_A A_\mathfrak{p}) \end{equation} for every prime ideal $\mathfrak{p} \subset A$ of height 1. For $\check{\alpha} \notin \mathfrak{p}$ (\ref{E}) follows since $\check{\alpha}$ is invertible in $A_\mathfrak{p}$.\\ So we have to prove (\ref{E}) for $\mathfrak{p}=A \check{\alpha}$.
Since $\rho_{x,E}$ is a push-out map, we can identify $\mathrm{ker} \rho_{x,E}$ with the set $\{e_x u | u \in \mathbb{V} K(E), \, e_yu =0\}$. Since an element $u \in \mathbb{V} K(E)$ is of the form $u = (e_x +e_y) v + \check{\alpha} e_x w$ for $v,w \in \mathbb{V} K$, we have to prove
$$\{e_x u| u \in (e_x+e_y) \mathbb{V} K, \, e_y u =0\} \subset \check{\alpha} e_x (\mathbb{V} K \otimes_A A_\mathfrak{p})$$ Since $x > y$, we get $K_{A_\mathfrak{p}}(y \cdot {\lambda}) \cong \Delta_{A_\mathfrak{p}}(y \cdot {\lambda})$ and $K_{A_\mathfrak{p}}(x \cdot {\lambda})\cong P_{A_\mathfrak{p}}( y \cdot {\lambda})$. Now we can identify $(e_x +e_y) (\mathbb{V} K \otimes_A A_\mathfrak{p})$ with a direct sum of $A_\mathfrak{p}$-modules of the form $M:= \mathrm{Hom}_{\mathfrak{g}_{A_\mathfrak{p}}}(P_{A_\mathfrak{p}}(y\cdot {\lambda}) , \Delta_{A_\mathfrak{p}}(y \cdot {\lambda})) \cong A_\mathfrak{p}$ and $N:= \mathrm{Hom}_{\mathfrak{g}_{A_\mathfrak{p}}}(P_{A_\mathfrak{p}}(y\cdot {\lambda}) , K_{A_\mathfrak{p}}(x \cdot{\lambda})) \cong A_\mathfrak{p} (e_x+e_y) + A_\mathfrak{p} \check{\alpha} e_x$ by proposition \ref{Fie5}.\\ If $f \in M$, we get $e_x f =0$ which is equal to $\check{\alpha} e_x f$. If $f \in N$, $e_y f =0$ implies $e_xf \in \check{\alpha} A_\mathfrak{p} e_x$. But with our identification we get $e_x f \in \check{\alpha} e_x (\mathbb{V} K \otimes_A A_\mathfrak{p})$. \end{proof}
\begin{cor} We have $\mathcal{L} (\mathbb{V} K_A (w \cdot {\lambda})) \cong \mathscr{B}^\downarrow(w) \otimes_S A$ for all $w \in \mathcal{W}_{\lambda}$. \end{cor}
\begin{proof} The proof is essentially the same as in \cite{10} Theorem 4.22. and relies on the facts that $\mathcal{L} (\mathbb{V} K_A (w \cdot {\lambda}))$ is $\downarrow$-projective and indecomposable. Now the description of the indecomposable $\downarrow$-projective sheaves by BMP-sheaves gives the claim. \end{proof}
\begin{cor} We have $(K(w \cdot {\lambda}):\Delta(x \cdot {\lambda}))= \mathrm{rk}_S \mathscr{B}^\downarrow(w)_x$. \end{cor}
\begin{proof} Lemma \ref{C} shows that $\textrm{rk}_A (\mathcal{L} (\mathbb{V} K_A (w \cdot {\lambda}))_x) = (K_A(w \cdot {\lambda}):\Delta_A(x \cdot {\lambda}))= (K(w \cdot {\lambda}):\Delta(x \cdot {\lambda}))$. Now apply the above corollary. \end{proof}
Let $w_{\circ} \in \mathcal{W}_{\lambda}$ be the longest element according to the Bruhat order. Then the multiplication from the left $$w_{\circ}: \mathcal{G} \longrightarrow \mathcal{G}^{\circ}, \,\,\, x\mathcal{W}' \mapsto w_{\circ} x \mathcal{W}'$$ is an isomorphism of moment graphs in the sense of \cite{12}. Thus, it induces a pull-back functor (\cite{12} definition 3.3.)
$$w_{\circ}^*: \mathcal{SH}_A(\mathcal{G}^{\circ})^f \longrightarrow \mathcal{SH}_A(\mathcal{G})^f$$
\begin{prop} $w_{\circ} ^*(\mathscr{B}^\downarrow (x)) = \mathscr{B}^\uparrow (w_{\circ}x)$ \end{prop}
\begin{proof} \cite{12} Lemma 5.1. \end{proof}
\begin{cor} $(K(w \cdot {\lambda}):\Delta(x \cdot {\lambda}))= P_{x,w}(1)$ where $P_{x,w}$ denotes the Kazhdan-Lusztig polynomial. \end{cor}
\begin{proof} Since $w_{\circ}^* \mathscr{B}^\downarrow(w) =\mathscr{B}^\uparrow(w_{\circ}w)$ we get $\mathrm{rk}_S \mathscr{B}^\downarrow(w)_x =\mathrm{rk}_S \mathscr{B}^\uparrow(w_{\circ} w)_{w_\circ x}$ and the result follows since the stalks of BMP-sheaves describe the local equivariant intersection cohomology of the according Schubert variety by \cite{3} Theorem 1.6. \end{proof}
\begin{remark} Character formulae for tilting modules were already discovered in \cite{A} for Kac-Moody algebras by using a tilting equivalence on category $\mathcal{O}$ which interchanges projective with tilting modules. Our approach, however, uses sheaves on moment graphs but only works for the finite dimensional case. \end{remark}
\section{The Jantzen and Andersen filtrations}
We fix a deformed tilting module $K \in \mathcal{K}_A$ and let ${\lambda} \in \mathfrak{h}^{*}$. The composition of homomorphisms induces an $A$-bilinear pairing \begin{eqnarray*}
\begin{array}{ccc}
\mathrm{Hom}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),K)\times \mathrm{Hom}_{\mathcal{O}_{A}}(K,\nabla_A({\lambda})) & \longrightarrow & \mathrm{Hom}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),\nabla_A({\lambda}))\cong A\\
\\
(\varphi,\psi) & \longmapsto & \psi \circ \varphi
\end{array} \end{eqnarray*}
For any $A$-module $H$ we denote by $H^{*}$ the $A$-module $\mathrm{Hom}_A(H,A)$. As in \cite{14} Section 4 one shows that for $A$ a localization of $S$ at a prime ideal $\mathfrak{p}$ our pairing is non-degenerate and induces an injective map $$E=E_A^{{\lambda}}(K): \mathrm{Hom}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),K) \longrightarrow \left(\mathrm{Hom}_{\mathcal{O}_{A}}(K,\nabla_A({\lambda}))\right)^{*}$$ of finitely generated free $A$-modules.\\ If we take $A=\mathbb{C} \llbracket v \rrbracket$ the ring of formal power series around the origin on a line $\mathbb{C}\delta \subset \mathfrak{h}^{*}$ not contained in any hyperplane corresponding to a reflection of the Weyl group (e.g. $\mathbb{C} \rho$, where $\rho$ is the half sum of positive roots), we get a filtration on $\mathrm{Hom}_{\mathcal{O}_{A}}(\Delta_A({\lambda}),K)$ by taking the preimages of $\left (\mathrm{Hom}_{\mathcal{O}_{A}}(K,\nabla_A({\lambda})) \right )^{*}\cdot v^{i}$ for $i=0,1,2,...$ under $E$.
\begin{definition}[\cite{14}, Definition 4.2.] Let $K_\mathbb{C} \in \mathcal{K}_\mathbb{C}$ be a tilting module of $\mathcal{O}$ and let $K \in \mathcal{K}_{\mathbb{C} \llbracket v \rrbracket}$ be a preimage of $K_\mathbb{C}$ under the functor $\cdot \otimes_{\mathbb{C} \llbracket v \rrbracket} \mathbb{C}$, which exists by Proposition \ref{tilt1} with $S\rightarrow \mathbb{C} \llbracket v \rrbracket$ the restriction to a formal neighborhood of the origin in the line $\mathbb{C} \rho$. Then the image of the filtration defined above under specialization $\cdot \otimes_{\mathbb{C} \llbracket v \rrbracket} \mathbb{C}$ is called the \textit{Andersen filtration} on $\mathrm{Hom}_\mathfrak{g} (\Delta({\lambda}), K_\mathbb{C})$. \end{definition}
The Jantzen filtration on a Verma module $\Delta({\lambda})$ induces a filtration on the vector space $\mathrm{Hom}_\mathfrak{g}(P , \Delta({\lambda}))$, where $P$ is a projective object in $\mathcal{O}$. Now consider the embedding $\Delta_{\mathbb{C} \llbracket v \rrbracket}({\lambda}) \hookrightarrow \nabla_{\mathbb{C} \llbracket v \rrbracket}({\lambda})$. Let $P_{\mathbb{C} \llbracket v \rrbracket}$ denote the up to isomorphism unique projective object in $\mathcal{O}_{\mathbb{C} \llbracket v \rrbracket}$ that maps to $P$ under $\cdot \otimes_{\mathbb{C} \llbracket v \rrbracket}\mathbb{C}$, which is possible by theorem \ref{Fie1}. We then get the same filtration if we first take the preimages of $\mathrm{Hom}_{\mathcal{O}_{\mathbb{C} \llbracket v \rrbracket}}(P_{\mathbb{C} \llbracket v \rrbracket},\nabla_{\mathbb{C} \llbracket v \rrbracket}({\lambda})) \cdot v^{i}$, $i=0,1,2,...$, under the induced inclusion $$J=J_A^{{\lambda}}(P):\mathrm{Hom}_{\mathcal{O}_{A}}(P_A,\Delta_A({\lambda})) \longrightarrow \mathrm{Hom}_{\mathcal{O}_{A}}(P_A,\nabla_A({\lambda}))$$ for $A=\mathbb{C} \llbracket v \rrbracket$ and then the image of this filtration under the map\\
$\mathrm{Hom}_{\mathcal{O}_{A}}(P_A,\Delta_A({\lambda}))\twoheadrightarrow \mathrm{Hom}_{\mathfrak{g}}(P,\Delta({\lambda}))$ induced by $\cdot \otimes_A \mathbb{C}$.
\subsection{Sheaves with a Verma flag}
For $M \in \mathcal{Z}_A-\mathrm{mod}^f$ and $\mathcal{I} \subset \mathcal{V}$, we define $$M_\mathcal{I} := M \cap \bigoplus_{x\in \mathcal{I}} e_x (M \otimes_A Q)$$ and $$M^{\mathcal{I}}:= \mathrm{Im} \left( M \rightarrow M\otimes_A Q \rightarrow \bigoplus_{x\in \mathcal{I}} e_x (M \otimes_A Q) \right)$$
\begin{definition} We say that $M \in \mathcal{Z}_A -\mathrm{mod}^f$ admits a Verma flag if the module $M^{\mathcal{I}}$ is free (graded free in case $A=S$) for each F-open subset $\mathcal{I}$. \end{definition} Denote the full subcategory of $\mathcal{Z}_A(\mathcal{G})-\mathrm{mod}^f$ consisting of all modules admitting a Verma flag by $\mathcal{Z}_A(\mathcal{G})-\mathrm{mod}^{VF}$. Now for any vertex $x\in \mathcal{V}$ and an $A$-sheaf $\mathscr{M} \in \mathcal{SH}_A(\mathcal{G})^f$ define $$\mathscr{M}^{[x]}:= \mathrm{ker}(\mathscr{M}_x \rightarrow \bigoplus_{E \in U_x} \mathscr{M}_E)$$ where $U_x$ is from Notation \ref{not1}. Furthermore, denote by $$\mathscr{M}^{x}:= \bigcap_E \mathrm{ker}(\rho_{x,E})$$ the \textit{costalk} of $\mathscr{M}$ at the vertex $x$. Here the intersection runs over all edges $E \in \mathcal{E}$ with $x \in E$.\\ Denote the image of $\mathcal{Z}_A(\mathcal{G})-\mathrm{mod}^{VF}$ under $\mathcal{L}$ by $\mathcal{C}_A(\mathcal{G})$. The next proposition gives an explicit description of $\mathcal{C}_A(\mathcal{G})$
\begin{prop}(\cite{C}, Proposition 2.9) For $M \in \mathcal{Z}_A(\mathcal{G})-\mathrm{mod}^f$, set $\mathscr{M}= \mathcal{L}(M)$. Then $M$ admits a Verma flag if and only if $\mathscr{M}$ is flabby and $\mathscr{M}^{[x]}$ is (graded) free for all $x \in \mathcal{V}$. \end{prop}
In \cite{8} section 2.6 Fiebig introduces a duality $D$ on $\mathcal{Z}_S-\mathrm{mod}^f$. For $M \in \mathcal{Z}_S-\mathrm{mod}^f$ we set $$D(M)= \bigoplus_{i\in \mathbb{Z}} \mathrm{Hom}_S^i(M,S)$$ where $\mathrm{Hom}_S^i(M,S) = \mathrm{Hom}_S(M,S[i])$ ($[\cdot]$ grading shift). This induces an equivalence $D : \mathcal{C}_S(\mathcal{G}) \stackrel{\sim}{\rightarrow} \mathcal{C}_S^{op}(\mathcal{G}^{\circ})$, where $\mathcal{G}^{\circ}$ denotes the moment graph $\mathcal{G}$ with reversed order.\\ For $A$ a localization of $S$ at a prime ideal we define $D_A:= \mathrm{Hom}_A(\cdot, A): \mathcal{C}_A(\mathcal{G}) \stackrel{\sim}{\rightarrow} \mathcal{C}_A^{op}(\mathcal{G}^{\circ})$.
\begin{thm}(\cite{8}, Theorem 6.1 and \cite {D}, Proposition 3.20) The BMP-sheaves are self-dual: $D \mathscr{B}^\uparrow (x) \cong \mathscr{B}^\uparrow (x)$ for all $x\in \mathcal{V}$. \end{thm}
Recall the pull-back functor of section \ref{BMP} $$w_{\circ}^*: \mathcal{SH}_A(\mathcal{G}^{\circ})^f \longrightarrow \mathcal{SH}_A(\mathcal{G})^f$$
\begin{lemma} We have $w_{\circ}^*(\mathcal{C}_A(\mathcal{G}^{\circ}))=\mathcal{C}_A(\mathcal{G})$. \end{lemma}
\begin{proof} Let $\mathscr{M} \in \mathcal{C}_A(\mathcal{G}^{\circ})$. We have to show that $w_{\circ}^*(\mathscr{M})$ is $\uparrow$-flabby and $(w_{\circ}^*(\mathscr{M}))^{[x]}$ is free over $A$ for $x\in \mathcal{V}$.\\ Let $\mathcal{I}$ be $\downarrow$-open. Then $w_{\circ}\mathcal{I}$ is $\uparrow$-open and we get that $$\Gamma(\mathscr{M})\cong \Gamma(w_{\circ}^*(\mathscr{M})) \rightarrow \Gamma(\mathcal{I}, w_{\circ}^*(\mathscr{M})) \cong \Gamma(w_{\circ} \mathcal{I}, \mathscr{M})$$ is surjective since $\mathscr{M}$ is flabby.\\ As $(w_{\circ}^*(\mathscr{M}))^{[x]}=\mathscr{M}^{[w_{\circ}x]}$ the claim follows. \end{proof}
We get an equivalence of categories $$w_\circ ^*:\mathcal{C}_A(\mathcal{G}^{\circ}) \longrightarrow \mathcal{C}_A(\mathcal{G})$$ with the properties: $w_\circ ^*(\mathscr{B}^\uparrow (x)\otimes_S A) \cong \mathscr{B}^\downarrow (w_\circ x)\otimes_S A$ and $w_\circ ^*(\mathscr{V}_A (x)) \cong \mathscr{V}_A (w_\circ x)$. Thus the composition $$F_A= (w_\circ ^*)^{op} \circ D_A : \mathcal{C}_A(\mathcal{G}) \longrightarrow \mathcal{C}_A(\mathcal{G})^{op}$$ is an equivalence with $F_A(\mathscr{B}^\uparrow (x)\otimes_S A) \cong \mathscr{B}^\downarrow (w_\circ x)\otimes_S A$ and $F(\mathscr{V}_A (x)) \cong \mathscr{V}_A (w_\circ x)$ which are isomorphisms of graded sheaves if $A=S$.\\
\begin{thm}(\cite{7}, Theorem 7.1.) The functor $\mathbb{V} : \mathcal{M}_{A,{\lambda}} \rightarrow \mathcal{C}_A(\mathcal{G})$ is an equivalence of categories for $A=S_{(0)}$. \end{thm}
Now we can lift the functor $F_A$ via Fiebig's equivalence to a functor $T_A$ on the representation theoretic side such that the following diagram of functors commutes:
\begin{eqnarray*}
\begin{CD}
\mathcal{M}_{A,{\lambda}} @> \mathbb{V} >> \mathcal{C}_A(\mathcal{G})\\
@VT_A VV @VVF_A V\\
\mathcal{M}^{op}_{A,{\lambda}} @> \mathbb{V} ^{op} >> \mathcal{C}_A(\mathcal{G})^{op}
\end{CD} \end{eqnarray*}
\begin{thm} Let ${\lambda}\in \mathfrak{h}^{*}$ be antidominant, $x,y \in \mathcal{W}$ and $w_\circ \in \mathcal{W}$ the longest element. Denote by $A=S_{(0)}$ the localization of $S$ at $0$. There exists an isomorphism $L=L_A(x,y)$ which makes the diagram
{\small \begin{eqnarray*}\label{eqn:DiaS}
\begin{CD}
\mathrm{Hom}_{\mathcal{O}_A}(P_A(x \cdot {\lambda}),\Delta_A(y \cdot {\lambda})) @>J>> \mathrm{Hom}_{\mathcal{O}_A}(P_A(x \cdot {\lambda}),\nabla_A(y \cdot {\lambda}))\\
@VVT V @VVL V\\
\mathrm{Hom}_{\mathcal{O}_A}(\Delta_A(w_\circ y \cdot {\lambda}),K_A(w_\circ x \cdot {\lambda})) @>E >> \left(\mathrm{Hom}_{\mathcal{O}_A}(K_A(w_\circ x \cdot {\lambda}),\nabla_A(w_\circ y \cdot {\lambda}))\right)^{*}
\end{CD} \end{eqnarray*}} commutative. Here $J=J_A^{y\cdot {\lambda}}(P_A(x \cdot{\lambda}))$ and $E=E_A^{w_\circ y \cdot {\lambda}}(K_A(w_\circ x \cdot {\lambda}))$ denote the inclusions defined above and $T=T_A$ denotes the isomorphism induced by the functor $T_A$ from above. \end{thm}
\begin{proof} The proof is essentially the same as the one for Theorem 4.2. in \cite{11}, where we prove a similar result for $T_A$ the tilting functor. \end{proof} Denote by $T_\mathbb{C}:\mathrm{Hom}_\mathfrak{g}(P(x \cdot {\lambda}),\Delta(y \cdot {\lambda})) \stackrel{\sim}{\rightarrow} \mathrm{Hom}_\mathfrak{g}(\Delta(w_\circ y \cdot {\lambda}),K(w_\circ x \cdot {\lambda}))$ the isomorphism we get from $T_A \otimes_A \mathrm{id}_\mathbb{C}$ after base change. The next corollary now follows in the same way as Corollary 4.3. in \cite{11}. \begin{cor} The isomorphism $$T_\mathbb{C}:\mathrm{Hom}_\mathfrak{g}(P(x \cdot {\lambda}),\Delta(y \cdot {\lambda})) \stackrel{\sim}{\rightarrow} \mathrm{Hom}_\mathfrak{g}(\Delta(w_\circ y \cdot {\lambda}),K(w_\circ x \cdot {\lambda}))$$ identifies the filtration induced by the Jantzen filtration with the Andersen filtration. \end{cor}
Now we consider $\mathbb{C} \cong S/S\mathfrak{h}$ as a simple graded $S$-module living in degree $0$. The map $$T_\mathbb{C}:\mathrm{Hom}_\mathfrak{g}(P(x \cdot {\lambda}),\Delta(y \cdot {\lambda})) \stackrel{\sim}{\rightarrow} \mathrm{Hom}_\mathfrak{g}(\Delta(w_\circ y \cdot {\lambda}),K(w_\circ x \cdot {\lambda}))$$ can then be identified with $$F_S \otimes \mathrm{id}_\mathbb{C}: \mathrm{Hom}_{\mathcal{C}_S(\mathcal{G})}(\mathscr{B}^\uparrow (x), \mathscr{V}_S (y))\otimes_S \mathbb{C} \stackrel{\sim}{\rightarrow} \mathrm{Hom}_{\mathcal{C}_S(\mathcal{G})} (\mathscr{V}_S (w_{\circ} y),\mathscr{B}^\downarrow (w_{\circ} x))\otimes_S \mathbb{C}$$ which is now an isomorphism of graded $\mathbb{C}$-vector spaces. But using the proof of proposition 7.1. (3) in \cite{6} this isomorphism becomes a graded isomorphism between certain costalks of the Braden-MacPherson sheaves, namely an isomorphism $$\varphi: \mathscr{B}^\uparrow (x)^y \otimes_S \mathbb{C} \stackrel{\sim}{\rightarrow} \mathscr{B}^\downarrow (w_{\circ} x)^{w_{\circ}y}\otimes_S \mathbb{C}$$
In \cite{14} Soergel shows that the filtration on $\mathscr{B}^\downarrow (w_{\circ} x)^{w_{\circ}y}\otimes_S \mathbb{C}$ induced by the Andersen filtration coincides with the grading filtration we get from the grading on the Braden-MacPherson sheaf $\mathscr{B}^\downarrow (w_{\circ} x)$. Since the graded isomorphism $\varphi$ interchanges the filtration on $\mathscr{B}^\uparrow (x)^y \otimes_S \mathbb{C}$ induced by the Jantzen filtration with the filtration on $\mathscr{B}^\downarrow (w_{\circ} x)^{w_{\circ}y}\otimes_S \mathbb{C}$ induced by the Andersen filtration, we get that the Jantzen filtration coincides with the grading filtration coming from the grading on the Braden-MacPherson sheaf $\mathscr{B}^\uparrow (x)$.
\end{document} |
\begin{document}
\thispagestyle{empty} \title{On colouring point visibility graphs}
\begin{abstract} In this paper we show that it can be decided in polynomial time whether or not the visibility graph of a given point set is $4$-colourable, and such a $4$-colouring, if it exists, can also be constructed in polynomial time. We show that the problem of deciding whether the visibility graph of a point set is $5$-colourable, is NP-complete. We give an example of a point visibility graph that has chromatic number $6$ while its clique number is only $4$. \end{abstract} \section{Introduction} The visibility graph is a fundamental structure studied in the field of computational geometry and geometric graph theory \cite{bcko-cgaa-08,g-vap-07}.
Some of the early applications of visibility graphs included computing Euclidean shortest paths in the presence of obstacles \cite{lw-apcf-79} and decomposing two-dimensional shapes into clusters \cite{sh-ddsg-79}. Here, we consider problems concerning the colouring of visibility graphs. $\\ \\$ Let $P$ be a set of points $\{p_1, p_2, \ldots, p_n \}$ in the plane. Two points $p_i$ and $p_j$ of $P$ are said to be \emph{mutually visible} if there is no third point $p_k$ on the line segment joining $p_i$ and $p_j$. Otherwise, $p_i$ and $p_j$ are said to be mutually \emph{invisible}. The \emph {point visibility graph} (denoted as PVG) $G(V,E)$ of $P$ is defined as follows. The set $V$ of vertices contains a vertex $v_i$ for every
point $p_i$ in $P$. The set $E$ contains an undirected edge $v_iv_j$ if and only if the corresponding points $p_i$ and $p_j$ are mutually visible \cite{prob-ghosh}. Point visibility graphs have been studied in the contexts of construction \cite{cgl-pgd-85,Edelsbrunner:1986:CAL}, recognition \cite{pvg-card,prob-ghosh,pvg-tcs,pvg-np-hard}, partitioning \cite{pvg_part}, connectivity \cite{viscon-wood-2012}, chromatic number and clique number \cite{penta-2013,kpw-ocnv-2005,p-vgps-2008}. $\\ \\$ A graph is said to be \emph{$k$-colourable} if each vertex of the graph can be assigned a colour, so that no two adjacent vertices are assigned the same colour, and the total number of distinct colours assigned to the vertices is at most $k$. K\'{a}ra et al characterized PVGs that are $2$-colourable and $3$-colourable \cite{kpw-ocnv-2005}.
It was not known whether the
chromatic number of a PVG can be found in polynomial time.
In Section \ref{sec5col} we show that the
problem of deciding whether a PVG is $k$-colourable, for $k \geq 5$, is NP-complete. $\\ \\$ K\'{a}ra et al also asked whether there is a function $f$ such that for every point visibility graph $G$, $\chi (G) \leq f(\omega (G))$ \cite{kpw-ocnv-2005}? \label{prob3} They presented a family of PVGs that have their chromatic number lower bounded by an exponential function of their clique number. Their question was answered by Pfender, showing that for a PVG with $\omega (G) = 6$, $\chi (G)$ can be arbitrarily large \cite{p-vgps-2008}. However, it is not known whether the chromatic number of a PVG is bounded, if its clique number is only $4$ or $5$. In another related paper, Cibulka et al showed that PVGs of point sets $S$ such that there is no convex pentagon with vertices in $S$ and no other point of $S$ lying in the pentagon, might have arbitrarily large clique numbers \cite{penta-2013}. In this direction, K\'{a}ra et al showed that there is a PVG $G$ with $\omega (G) = 4$ and $\chi (G) = 5$ \cite{kpw-ocnv-2005}. In Section \ref{secexample2} we construct a PVG $G'$ with $\omega (G') = 4$ and $\chi (G') = 6$. \section{Four-colouring} \label{sec4col} In this section, we provide a polynomial-time algorithm to decide if the PVG of a given point set is 4-colourable, and construct a 4-colouring if it exists. Consider a finite set $P$ of $n$ points in the Euclidean plane. We start with a brief overview of our algorithm: \begin{enumerate} [(i)]
\item Check if $P$ is 3-colourable. If $P$ is 3-colourable then construct the 3-colouring and terminate.
\item Find a convex hull vertex of $P$ that forms a $K_4$ with three other vertices. Delete this convex hull vertex from $P$.
Repeat this step until there is no such convex hull vertex.
\item There are at most eight possible colourings of the reduced $P$. Check if any of these colourings is valid. If none of
them are valid, then output ``NO'' and terminate.
\item Consider each of the valid colourings and progressively add the deleted points to $P$, in the reversed order of their deletion.
With each addition, colour the added point and check if the colouring is valid. If it is not valid then output ``NO'' and terminate. \end{enumerate}
In the next section we provide a proof of correctness and analysis of our algorithm. \subsection{Correctness of the algorithm} The algorithm begins with checking for 3-colourability. This can be done in polynomial time due to K\'{a}ra et al \cite{kpw-ocnv-2005}. We present the following lemma and theorem from K\'{a}ra et al verbatim without proof. Note that in Lemma \ref{kara1}, $V(P)$ being planar actually means that $V(P)$ drawn on $P$ is plane. \begin{figure}
\caption{(a) }
\label{figocta}
\end{figure}
\lemma \label{kara1} \textbf{(K\'{a}ra et al \cite{kpw-ocnv-2005})}
Let $P$ be a point set. Then $V(P )$ is planar if and only if at least one of the following conditions hold: \begin{enumerate} [(a)] \item all the points in $P$ are collinear, \item all the points in $P$ , except for one, are collinear, \item all the points in $P$ are collinear, except for two non-visible points, \item all the points in $P$ are collinear, except for two points $v$, $w \in P$ , such that the line-segment $vw$ does not intersect the line-segment that contains $P \setminus \{ v, w \}$, \item $V(P)$ is the drawing of the octahedron shown in Figure \ref{figocta}. \end{enumerate}
\theorem
\label{karatheo} \textbf{(K\'{a}ra et al \cite{kpw-ocnv-2005})}
Let $P$ be a finite point set. Then the following are equivalent:
\begin{enumerate} [(i)] \item $\chi (V(P )) \leq 3$, \item $P$ satisfies conditions (a), (b), (c) or (e) in Lemma \ref{kara1}, \item $V(P )$ has no $K_4$ subgraph. \end{enumerate}
If the algorithm finds $P$ to be 3-colourable, then it produces a 3-colouring and terminates. Suppose that the algorithm finds that $P$ is not 3-colourable. Then the algorithm proceeds to the next step. It deletes any convex hull vertex that sees three mutually visible points in the rest of $P$. It continues this process till no such convex hull vertex is left. We call the resultant point set the \emph{reduced set} $P_r$. The set $P_r$ can be obtained from $P$ in $O(n^4)$ time. We have the following lemma. $\\ \\$
\lemma \label{lemrestr}
$P$ is 4-colourable only if $P_r$ is 4-colourable, and given a 4-colouring of $P_r$, it can be decided in polynomial time
whether it extends to a 4-colouring of $P$.
\proof The contrapositive of the first part is easy to see, since the PVG of $P_r$ is an induced subgraph of the PVG of $P$. $\\ \\$ Consider the deleted points of $P$ in the reverse order of their deletion. Since each deleted point sees a $K_3$ in the remaining points of $P$, its colour must be uniquely determined by the remaining points of $P$. Given a 4-colouring of $P_r$, we can add the deleted points in the reverse order of their deletion, colour them and check if the colouring is valid. It takes $O(n^3)$ time for each point to locate its corresponding $K_3$, and $O(n^2)$ time to check if the colouring is valid. So, the total procedure takes $O(n^4)$ time. \qed $\\ \\$ Now the algorithm checks if $P_r$ is 4-colourable. First it checks if $P_r$ is 3-colourable. According to the characterization in Theorem \ref{karatheo}, this can be achieved in polynomial time. Furthermore, we have the following lemmas.
\lemma \label{k3}
A reduced set must contain a $K_3$.
\proof A reduced set is obtained only after progressively deleting all convex hull vertices which see a $K_3$. So, after the final deletion step, the $K_3$ which the deleted point saw must remain. \qed
\lemma \label{unq3col}
If a reduced set is 3-colourable, then it requires three colours, and has a unique 3-colouring.
\proof By Theorem \ref{karatheo}, any 3-colourable PVG must be of the forms (a), (b), (c) or (e) of Lemma \ref{kara1}. Among these, the PVGs of the forms (b), (c) or (e), require 3-colours and have unique 3-colourings. A reduced set can never be of the form (a), i.e. all collinear points, because by Lemma \ref{k3} every reduced set must contain a $K_3$. \qed \begin{figure}
\caption{Three 4-colourings of point $p$ added to reduced set of type (b) in Lemma \ref{kara1}. }
\label{figcol1}
\end{figure} \begin{figure}
\caption{Four colourings of point $p$ added to reduced set of type (c) in Lemma \ref{kara1}. }
\label{figcol2}
\end{figure}
\lemma \label{cont4col}
A reduced 3-colourable set is no longer 3-colourable if its last deleted point is added to it. It then requires four colours and can have no more than a constant number of
4-colourings.
\proof Suppose that the last deleted point $p$ is added back to the reduced set. Due to Lemma \ref{unq3col} the reduced set can only be of three types. If the reduced set is of type (b) in Lemma \ref{kara1}, then the three colourings in Figure \ref{figcol1} are the only possibilities. If the reduced set is of type (c) in Lemma \ref{kara1}, then the four colourings in Figure \ref{figcol2} are the only possibilities. If the reduced set is of type (e) in Lemma \ref{kara1}, then if has only a constant number of points and hence a constant number of 4-colourings. \qed $\\ \\$ If the algorithm finds $P_r$ to be 3-colourable, then it adds the last deleted point to $P_r$, and due to Lemma \ref{cont4col} constructs a constant number of 4-colourings in polynomial time. For each 4-colouring of $P_r$, the algorithm then reintroduces the deleted points of $P$ progressively in the reversed order of their deletion, and by Lemma \ref{lemrestr}, constructs a 4-colouring of $P$ if it exists, in $O(n^3)$ time. Suppose that the algorithm finds that $P_r$ is not 3-colourable. Then it checks whether $P_r$ is 4-colourable. Now, we describe the structure of reduced sets that are 4-colourable but not 3-colourable. $\\ \\$ Let $p_x$ be a vertex of the convex hull of $P_r$. All the other points of $P_r$ lie on rays emanating from $p_x$. Here we consider only \emph{open rays} emanating from $p_x$, i.e. rays that do not contain their initial point $p_x$. Let $r_1, r_2, \ldots r_k$ be the rays emanating from $p_x$ in the clockwise order, with the rays $r_1$ and $r_k$ respectively being tangents from $p_x$ to the convex hull of $P \setminus \{ p_x \}$. Let $q_i$ be the closest point on $r_i$ to $p_x$. We call the path $(q_1, q_2, \ldots, q_k)$ the \emph{frontier} of $p_x$. The points $q_1, q_2, \ldots, q_k$ are called the \emph{frontier points} of $p_x$. The points $q_2, q_3, \ldots, q_{k-1}$ are called the \emph{internal frontier points} of $p_x$. 
A continuous subpath $(q_i, q_{i+1}, q_{i+2})$ of the frontier is said to be a \emph{convex triple} (or, \emph{concave}) when $q_{i+2}$
lies to the right (respectively, left) of the ray $\overrightarrow{q_iq_{i+1}}$. If the continuous subpath is a straight line-segment then it is said to be a \emph{straight triple}. The frontier might have concave, straight and convex triples. We have the following lemmas. \begin{figure}
\caption{ (a) The convex hull points $p_x$ and $p_y$ have more than one point between them. (b) The convex hull points $p_x$ and $p_y$ have exactly one point $q_x$ between them. }
\label{fignopoint}
\end{figure}
\lemma \label{noconv}
The following holds for each convex hull vertex of $P_r$:
\begin{enumerate} [(a)]
\item There is no concave triple in its frontier.
\item If there is a convex triple in its frontier then the ray containing the convex vertex has at least two points.
\end{enumerate}
\proof If three consecutive points of its frontier are concave with respect to $p_x$, then they together form a $K_4$. If three consecutive points of its frontier are convex, then they do not form a $K_3$ if and only if there is a blocker on the ray containing the convex vertex that prevents the first points of its two neighbouring rays from seeing each other. \qed
\lemma \label{lemintfront}
If a reduced set is not 3-colourable then each of its convex hull vertices has an internal frontier point.
\proof Suppose that a convex hull vertex $p_x$ of the reduced set $P_r$ does not have an internal frontier point. Then the rest of the points of $P_r$ must be lying on only two rays emanating from $p_x$. Denote these two rays as $r_1$ and $r_2$ and the points of $P_r$ on them farthest from $p_x$ as $p_1$ and $p_2$ respectively. If both $r_1$ and $r_2$ each have two or more points excluding $p_x$, then $p_1$, $p_2$ and the two points preceding them form a $K_4$. This is a contradiction, since $P_r$ is reduced and both $p_1$ and $p_2$ are convex hull vertices of $P_r$. If at least one of $r_1$ and $r_2$ has only one point excluding $p_x$ then $P_r$ is of the form (b) in Lemma \ref{kara1} and hence 3-colourable, a contradiction. \qed
\lemma \label{lemtype} If a reduced set is not 3-colourable, then all of its convex hull points are vertices.
\proof Assume on the contrary that the point set is not 3-colourable and not all the convex hull points are convex hull vertices. This means that there are two consecutive convex hull vertices $p_x$ and $p_y$ such that there is at least one convex hull point in the interior of the line segment joining them. Wlog assume that $p_x$ precedes $p_y$ in the clockwise order of points on the convex hull. There can be either more than one or exactly one point in the interior of $\overline{p_xp_y}$. $\\ \\$ First consider the case where there is more than one point in the interior of $\overline{p_xp_y}$ (Figure \ref{fignopoint}(a)). Let $q_x$ and $q_y$ be the points among them closest to $p_x$ and $p_y$ respectively. By Lemma \ref{lemintfront}, $p_x$ has an internal frontier point. Let $q_z$ be the first internal frontier point of $p_x$ following $q_x$. Suppose that $q_z$ is not an internal frontier point of $p_y$. Then there must be a point (say, $q_u$) on $\overline{p_yq_z}$ that is an internal frontier point of $p_y$. But then there must also be an internal frontier point $q_w$ of $p_x$ on $\overline{p_xq_u}$, contradicting the assumption that $q_z$ is the first internal frontier point of $p_x$. So, $q_z$ must be an internal frontier point of $p_y$. By Lemma \ref{noconv}, the frontier of $p_x$ cannot have concave triples, so that the rest of the frontier points of $p_x$ must lie on or to the left of $\overline{q_xq_z}$. Similarly, the frontier of $p_y$ cannot have concave triples, so that the rest of the frontier points of $p_y$ must lie on or to the right of $\overline{q_yq_z}$. But if there is a frontier point $q_t$ of $p_x$ on or to the left of $\overline{q_xq_z}$, then $p_y$ must have a frontier point on $\overline{p_yq_t}$ that is to the right of $\overline{q_yq_z}$, a contradiction. $\\ \\$ Now consider the case where there is exactly one point in the interior of $\overline{p_xp_y}$ and denote it as $q_x$ (Figure \ref{fignopoint}(b)). 
As before, we consider $q_z$ which is a frontier point of both $p_x$ and $p_y$. As before, by Lemma \ref{noconv}, the frontier of $p_x$ cannot have concave triples, so that the rest of the frontier points of $p_x$ must lie on or to the left of $\overline{q_xq_z}$. Similarly, the frontier of $p_y$ cannot have concave triples, so that the rest of the frontier points of $p_y$ must lie on or to the right of $\overline{q_xq_z}$. This means that all of the points of $P_r$ other than $p_x$ and $p_y$ must lie on $\overline{q_xq_z}$. But then $P_r$ is of the form (c) in Lemma \ref{kara1} and hence 3-colourable, a contradiction. \qed \cor \label{cor1} If a reduced set is not 3-colourable, and its convex hull has at least four vertices, then the interior of its convex hull is not empty.
\proof Suppose on the contrary the interior of the convex hull of such a reduced set $P_r$ is empty. By Lemma \ref{lemtype} $P_r$ has no convex hull points that are not convex hull vertices. So, all the convex hull vertices of $P_r$ see each other. Since $P_r$ has at least four convex hull points, all of them are a part of a $K_4$. Hence, $P_r$ is not reduced, a contradiction. \qed \begin{figure}
\caption{(a) The intersection of $\overline {p_1p_7}$ and $\overline {p_4p_5}$ forces another point on $\overline {p_4p_5}$. (b) A non 3-colourable reduced set with only three convex hull vertices}
\label{figstruct}
\end{figure}
\lemma \label{lemstruct}
If a reduced set is not 3-colourable, then
its convex hull has only three vertices.
\proof Suppose on the contrary that a reduced set $P_r$ is not 3-colourable and its convex hull has at least four vertices. Consider the lowest vertex of the convex hull of $P_r$, and its two adjacent vertices on the convex hull of $P_r$. Call these three points $p_1$, $p_2$ and $p_3$ in the clockwise order (Figure \ref{figstruct}(a)). Denote as $P'$ the set of points other than the convex hull vertices of $P_r$. $\\ \\$ Since by our assumption the convex hull of $P_r$ has at least four vertices, by Corollary \ref{cor1}, $P'$ must be nonempty. Suppose that all the points of $P'$ lie on $\overline {p_1p_3}$. If there are at least two convex hull vertices above $\overline {p_1p_3}$, then they can see two mutually visible points of $P'$, and hence $P_r$ is not reduced, a contradiction. If there is only one convex hull vertex above $\overline {p_1p_3}$, then either it sees $p_2$ and two mutually visible points of $P'$, which means $P_r$ is not reduced, or it is blocked from $p_2$, thereby making $P_r$ 3-colourable, a contradiction. So, not all the points of $P'$ lie on $\overline {p_1p_3}$. $\\ \\$ If all the points of $P'$ lie above $\overline {p_1p_3}$, then the lowermost point of $P'$ forms a $K_4$ with $p_1$, $p_2$ and $p_3$, a contradiction. Similarly, not all the points of $P'$ can lie below $\overline {p_1p_3}$ as well. So, the convex hull of $P'$ must intersect $\overline {p_1p_3}$. Suppose that the convex hull of $P'$ intersects $\overline {p_1p_3}$ at only one point, say $p_i$. If there are points of $P'$ to the left or right of the ray $\overrightarrow{p_2p_i}$, then among such points let $p_j$ be a point closest to $\overline {p_1p_3}$. Wlog if $p_j$ lies to the right of $\overrightarrow{p_2p_i}$ then it sees both $p_i$ and $p_1$. But $p_2$ sees $p_1$, $p_i$ and $p_j$, so $P_r$ is not reduced, a contradiction. Otherwise, all the points of $P'$ lie on the ray $\overrightarrow{p_2p_i}$. If a fourth convex hull vertex of $P_r$ above $\overline {p_1p_3}$
lies on $\overrightarrow{p_2p_i}$, then $P_r$ is 3-colourable, a contradiction. Otherwise, wlog let this fourth convex hull vertex lie to the left of $\overrightarrow{p_2p_i}$. Let $p_j$ be the point of $P'$ immediately after $p_i$ on $\overrightarrow{p_2p_i}$. Then $p_3$ forms a $K_4$ with $p_i$, $p_j$ and the fourth convex hull vertex, so $P_r$ is not reduced, a contradiction. So, the convex hull of $P'$ must intersect $\overline {p_1p_3}$ at two points. $\\ \\$ Let $\overline {p_4p_5}$ and $\overline {p_6p_7}$ be the segments of the convex hull of $P'$ intersecting $\overline {p_1p_3}$, where $p_4$ and $p_5$ (respectively, $p_6$ and $p_7$) are consecutive points on the convex hull of $P'$. Also assume that $p_4$, $p_5$, $p_6$ and $p_7$ are in the clockwise order on the convex hull of $P'$, with none of the segments $\overline {p_1p_4}$ $\overline {p_1p_5}$ $\overline {p_3p_6}$ and $\overline {p_3p_7}$ intersecting the convex hull of $P'$. $\\ \\$ Consider the segment $\overline {p_3p_4}$. Suppose that $\overline {p_3p_4}$ and $\overline {p_6p_7}$ intersect. To prevent a concave frontier of $p_3$ from forming, there must be a point of $P'$ on the intersection of $\overline {p_3p_4}$ and $\overline {p_6p_7}$. But that is not possible because $p_6$ and $p_7$ are consecutive points on the convex hull of $P'$. Thus, $\overline {p_6p_7}$ must lie to the right of $\overrightarrow {p_3p_4}$. But then, $\overline {p_4p_5}$ and $\overline {p_1p_7}$ must intersect. So, there must be another point of $P'$ on $\overline {p_4p_5}$, which is a contradiction to $p_4$ and $p_5$ being consecutive points on the convex hull of $P'$ (Figure \ref{figstruct}(a)). Hence, the convex hull of $P$ can have at most three points (Figure \ref{figstruct}(b)). \qed $\\ \\$ Let $P_r$ be a reduced set and $p_x$ be a convex hull vertex of $P_r$. We will henceforth refer to the four colours as red, blue, yellow and green. 
If a ray emanating from a convex hull vertex of a reduced set has only one point, we call it a \emph{small ray}. Otherwise, we call it a \emph{big ray}. On a ray, the point closest to $p_x$ is called its \emph{first point}, next closest point to $p_x$ is called its \emph{second point} and so on. We have the following lemmas.
\lemma \label{noblock}
The first point of any ray emanating from a convex hull vertex of a reduced set can block only first points of other rays from each other.
\proof Consider three rays $r_i$, $r_j$ and $r_k$ emanating from a convex hull vertex $p_x$ lying in clockwise order around it. Let $p_2$ be the first point of $r_j$. Suppose that $p_2$ blocks $p_1$ and $p_3$ lying on $r_i$ and $r_k$ respectively, and wlog $p_1$ is not the first point of $r_i$. Let $p_i$ be the first point of $r_i$. If both of the triangles $\bigtriangleup p_ip_2p_x$ and $\bigtriangleup p_2p_3p_x$ are empty, then $p_x$ forms a $K_4$ with $p_i$, $p_2$ and $p_3$, hence $P_r$ is not reduced. So, at least one of the two triangles must be nonempty. Wlog suppose that $\bigtriangleup p_ip_2p_x$ is nonempty. Let $p_4$ be a point contained in $\bigtriangleup p_ip_2p_x$ such that no other point contained in $\bigtriangleup p_ip_2p_x$ is closer to $\overline {p_1p_2}$. Then if $\bigtriangleup p_2p_3p_x$ is empty then $p_4$ forms a $K_4$ with $p_x$, $p_2$ and $p_3$. If $\bigtriangleup p_2p_3p_x$ too is nonempty then let analogously $p_5$ be a point contained in $\bigtriangleup p_2p_3p_x$ that is a point closest to $\overline {p_1p_2}$. Again, $p_3$, $p_4$, $p_5$ and $p_i$ form a $K_4$, so $P_r$ is not reduced, a contradiction. \qed
\begin{figure}
\caption{ (a) The break is even and $r_x$ has a red point. (b) The break is even and $r_x$ does not have a red point. (c) The break is odd and $r_x$ has a red point. (d) The break is odd and $r_x$ does not have a red point.}
\label{fig1col}
\end{figure}
\lemma \label{2must} A reduced set that is not 3-colourable must have at least two big rays emanating from each of its convex hull vertices.
\proof Consider a reduced set $P_r$ that is not 3-colourable.
Suppose that there is a convex hull vertex $p_x$ of $P_r$ such that only one big ray $r_x$ emanates from it. First suppose that $r_x$ has only two points. By Lemma \ref{noconv}, the frontier of $p_x$ is either convex or a straight line. If the frontier of $p_x$ is a straight line, then $P_r$ is 3-colourable, a contradiction. So, the frontier of $p_x$ must be convex. Suppose the first point of some small ray is a convex point in the frontier of $p_x$. Then, the last and second-last points of the frontier of $p_x$ in the same side of $r_x$, form a $K_4$ with the two points of $r_x$. Among the four points forming the $K_4$, the last point of the frontier or the second point of $r_x$ is a convex hull vertex of $P_r$. Then $P_r$ is not reduced, a contradiction. So, the first point of $r_x$ must be the only convex point in the frontier of $p_x$ Denote the two neighours of the first point of $r_x$ on the frontier of $p_a$ as $p_1$ and $p_b$. If $p_a$ and $p_b$ see each other then they form a $K_4$ with $p_x$ and the first point of $r_x$. Then since $p_x$ sees a $K_3$, $P_r$ is not reduced, a contradiction. Hence, $p_a$ and $p_b$ must be blocked from each other by the second point of $r_x$. The frontier of $p_x$ must have more points, for otherwise $P_r$ is 3-colourable. But now, each point of the frontier sees both the points of $r_x$. One of the end points of the frontier must be a convex hull vertex. This end point forms a $K_4$ with both points of $r_x$ and the second-last point of the frontier in the same side. Hence $P_r$ is not reduced, a contradiction. $\\ \\$ Now suppose that $r_x$ has at least three points. If the frontier of $p_x$ has two or more points in the same side of $r_x$, then the last and second-last points of the frontier in that side form a $K_4$ with the last and second-last points of $r_x$. Since the last point of the frontier or the last point of $r_x$ is a convex hull vertex, $P_r$ is not a reduced set, a contradiction. 
Then the frontier of $p_x$ can have at most one point on each side of $r_x$. If there is no point on one side of $r_x$, then $P_r$ is 3-colourable. So the frontier of $p_x$ must have exactly one point on each side of $r_x$. If these two end points of the frontier do not see each other then again $P_r$ is 3-colourable. So, the two end points of the frontier see each other. But they also see the last and second-last points of $r_x$, which means that $P_r$ is not reduced, a contradiction.
\qed
\lemma \label{lem3colray} In any 4-colouring of a reduced set that is not 3-colourable, any big ray emanating from a convex hull vertex has exactly two colours. \proof Let $p_x$ be a convex hull vertex and $\{r_1, r_2, \ldots, r_k \}$ be the open rays emanating from it in the clockwise angular order. Suppose that a big ray $r_x$ emanating from $p_x$ has three points that are assigned three different colours. By Lemma \ref{2must}, there is another big ray with respect to $p_x$. Consider a big ray $r_y$ such that there is no other big ray between $r_x$ and $r_y$, and $y<x$, without loss of generality. If $r_x$ and $r_y$ are neighbouring rays, i.e. $y=x-1$, then both of the first and second points of $r_y$ must be assigned the fourth colour, which is not possible. Suppose that there is at least one small ray between $r_x$ and $r_y$. Then the first point of the ray $r_{y+1}$ forms a $K_5$ with the first two points of $r_y$ and the second and third points of $r_x$, a contradiction. \qed $\\ \\$ We call the occurrence of a small ray after a big ray, or vice versa, a \emph{break}. If there are a consecutive odd number of small rays before or after the break, it is called an \emph{odd break}. If there are a consecutive even number of small rays before or after the break, it is called an \emph{even break}. We have the following lemmas.
\lemma \label{fixcol}
A reduced set that is not 3-colourable can have at most a constant number of 4-colourings. \proof By Lemma \ref{2must}, in such sets, at least two big rays emanate from every convex hull vertex. We consider a reduced set $P_r$. Suppose that $P_r$ is 4-colourable. Consider a convex hull vertex $p_x$ of $P_r$. By our assumption, at least two big rays must emanate from $p_x$. Wlog assign red to $p_x$. Consider any big ray $r_x$ and suppose that it contains the colour red. Due to Lemma \ref{lem3colray}, each ray can be assigned at most two colours. Wlog let the other colour of $r_x$ be blue. Observe that since $p_x$ is red, the first point of $r_x$ must be blue, the second point red and so on. If a neighbouring ray of $r_x$ is also big, then it must have yellow and green alternatingly assigned to its points, but either of yellow and green can be assigned to its first point. In general, till a break occurs, every alternate big ray must contain red points. Furthermore, the second point of every such big ray must be red and red points should occur alternatingly on it. Hence, our initial choice of $r_x$ as a ray containing red points fixes the assignment of red till a break occurs. $\\ \\$ Suppose that we have one or more consecutive big rays in which the assignment of red is fixed, and a break occurs after a big ray $r_x$. Suppose that this break is even. Call the first big ray occuring after the break $r_y$. Suppose that $r_x$ contains red points. Wlog let the other colour of $r_x$ be blue. (Figure \ref{fig1col} (a)). Then the second point of $r_y$ must be assigned either yellow or green. Suppose that it is yellow. Then the colour of the only point in the first small ray occuring in the break is determined by the first and second points of $r_x$ and the second point of $r_y$, and it must be green. Similarly, the next small ray gets blue. The first point of $r_y$ gets green. 
Thus, yellow and green alternate throughout $r_y$ and if a neighbouring ray of $r_y$ is big, then it must contain a red point. $\\ \\$ Suppose that $r_x$ does not contain any red point (Figure \ref{fig1col} (b)), and wlog the first and second points of $r_x$ are yellow and green respectively. Then the only points of the small rays between $r_x$ and $r_y$ must be assigned blue and yellow alternately. The first and second points of $r_y$ must be assigned blue and red respectively. So, the assigment of red to points in rays containing an even break depends only on whether or not $r_x$ contains a red point. $\\ \\$ Suppose that a break is odd. Suppose that the break starts after $r_x$, and ends at $r_y$, both being big rays. Supose that $r_x$ contains a red point (Figure \ref{fig1col} (c)). Wlog let the other colour of $r_x$ be blue. Then the first and second points of $r_x$ must be blue and red respectively. Then the second point of $r_y$ can be either yellow or green. Wlog let it be yellow. Then green and blue must be alternately assigned to the only points of the small rays in between, and the first point of $r_y$ must be blue. Thus, $r_y$ does not contain a red point. $\\ \\$ Now suppose that $r_x$ does not contain any red point (Figure \ref{fig1col} (d)). Wlog suppose that the first and second points of $r_x$ are yellow and green respectively. Then the second point of $r_y$ must be red, and the first points of all rays till $r_y$ must be assigned blue and yellow alternately. $\\ \\$ In all cases, the points that are assigned red are fixed. Thus, the assignment of red has only two possibilities, which depend on our initial choice of whether or not $r_x$ contains a red point. Now, by Lemma \ref{lemtype}, all the convex hull points of a reduced set are also its convex hull vertices, and by Lemma \ref{lemstruct} it can have at most three convex hull vertices. In a 4-colouring, these three convex hull vertices must be assigned three distinct colours. 
For each of these three colours, there are at most two possible assignments to the rest of the points of $P_r$. Each assignment of three colours also fixes the assignment of the fourth colour. This means that there are at most eight possible 4-colourings. \qed $\\ \\$ Thus, our algorithm checks if any of the eight colourings are valid 4-colourings, and then adds the deleted points back to $P_r$, assigning them their unique colours. Finally, we sum up our algorithm in the following theorem. \theorem It can be determined in polynomial time if a point set has a 4-colouring. Such a 4-colouring, if it exists, can be constructed in polynomial time.
\proof If the given point set $P$ is 3-colourable then it can be identified and 3-coloured in $O(n^2)$ time due to Theorem \ref{karatheo}. If $P$ is not 3-colourable then it is reduced to $P_r$ in $O(n^4)$ time by the method of Lemma \ref{lemrestr}. $\\ \\$ If the reduced set $P_r$ is 3-colourable by Lemma \ref{unq3col}, then the last deleted point is added to it and a constant number of possible 4-colourings are constructed and checked due to Lemma \ref{cont4col}. Each 4-colouring is considered one by one, and each of the deleted points is added in the reverse order of its deletion, and assigned the unique colour determined by the $K_3$ it sees in the remaining point set. At each step, it is checked whether the 4-colouring is valid or not. If at any step two mutually visible points are forced to have the same colour, then $P$ does not have a 4-colouring, with the chosen 4-colouring of $P_r$. Otherwise, after adding all the deleted points, a 4-colouring is obtained in $O(n^3)$ time. $\\ \\$ If the reduced set $P_r$ is not 3-colourable, then three distinct colours are assigned to its convex hull vertices, and all eight possible 4-colourings are found in $O(n^2)$ time due to Lemma \ref{fixcol}. It is checked whether any of these colourings is a valid 4-colouring or not. If some valid 4-colourings are obtained, then each of them is considered one by one, and as before, the deleted points are added and coloured. If at any step two mutually visible points are forced to have the same colour, then $P$ does not have a 4-colouring, with the chosen 4-colouring of $P_r$. Otherwise, after adding all the deleted points, a 4-colouring is obtained in $O(n^3)$ time. The whole algorithm takes $O(n^4)$ time. $\\ \\$ \qed \section{$5$-colouring point visibility graphs} \label{sec5col}
In this section we prove that deciding whether a PVG with a given embedding is $5$-colourable, is NP-hard. We provide a reduction of 3-SAT to the PVG $5$-colouring problem. We use the reduction of 3-SAT to the $3$-colouring problem of general graphs. \subsection{$3$-colouring a general graph} \begin{figure}
\caption{(a) A variable gadget. (b) A clause gadget.}
\label{gadgets}
\end{figure} For convenience, we first briefly describe the reduction for the $3$-colouring of general graphs, considering the graph as an embedding $\xi$ of points and line segments in the plane \cite{cai-79}. Consider a 3-SAT formula $\theta$ with variables $x_1, x_2, \ldots, x_n$ and clauses $C_1, C_2, \ldots, C_m$. Suppose the corresponding graph is to be coloured with red, green and blue. Consider Figure \ref{gadgets}(a). It shows the variable-gadgets. The points representing a variable $x_i$ and its negation (say, $p(x_i)$ and $p(\overline{x_i})$, respectively) are adjacent to each other, making $n$ pairs altogether. No two points in different pairs are adjacent to each other. A separate point $p_b$ is wlog assumed to be blue and made adjacent to all the other points in the variable gadgets. So, each variable pair must have exactly one red and one green point. For variable points, let green and red represent an assignment of true and false, respectively. The point $p_b$ is also adjacent to a separate point $p_r$ assumed to be red. $\\ \\$ Now consider Figure \ref{gadgets}(b). It shows a clause gadget. Suppose that the points $p_1$, $p_2$ and $p_6$ can be coloured only with green and red. Then $p_9$ can be coloured with green if and only if at least one of $p_1$, $p_2$ and $p_6$ is coloured with green. To prevent $p_9$ from being coloured red or blue, $p_9$ is made adjacent to $p_r$ and $p_b$. $\\ \\$ The whole embedding corresponding to the 3-SAT formula is shown in Figure \ref{gengraph}. The points $p_r$ and $p_b$ are the same for all variables and clauses. For each clause gadget, the points corresponding to $p_1$, $p_2$ and $p_6$ are in the respective variable gadgets. Thus, for $n$ variables and $m$ clauses, $\xi$ has $2n + 6m + 2$ points in total. \begin{figure}
\caption{The full embedding for a given 3-SAT formula.}
\label{gengraph}
\end{figure} \subsection{Transformation to a point visibility graph} Consider the following transformation of $\xi$ into a new embedding $\zeta$ (Figure \ref{finalemb}(a)) for a given 3-SAT formula $\theta$. We use some extra points called \emph{dummy points} to act as blockers during the transformation. \begin{enumerate} [(a)]
\item All points of $\xi$ are embedded on two vertical lines $l_1$ and $l_3$.
\item Two points $p_r$ and $p_b$ are placed on $l_3$ above all other points of $\xi$, followed by a dummy point.
\item Each pair of variable gadget points are embedded as consecutive points on $l_1$.
\item Separating a variable gadget from the next variable gadget is a dummy point.
\item For every clause gadget, the points corresponding to $p_5$ and $p_9$ are on $l_1$,
separated by a dummy point.
\item For every clause gadget, the points corresponding to $p_3$, $p_4$, $p_7$ and $p_8$
are on $l_3$, in the vertical order from top to bottom. The points $p_3$ and $p_4$ are consecutive.
The points $p_7$ and $p_8$ are consecutive. There is a dummy point between $p_4$ and $p_7$.
\item The points of consecutive clause gadgets are separated by a dummy point each.
\item Let $l_2$ be a vertical line lying between $l_1$ and $l_3$. On $l_2$, embed points to block all visibility relationships other than those corresponding to edges in $\xi$.
Perturb the points of $l_1$ and $l_3$ so that each point in $l_2$ blocks exactly one pair of points. \end{enumerate} The total number of points needed in the new embedding is as follows: \begin{itemize}
\item $p_r$ and $p_b$ are $2$ points.
\item Variable gadgets are $2n$ points.
\item Clause gadgets are $2m$ points on $l_1$ and $4m$ points on $l_3$.
\item There are $n + 2m - 1$ dummy points on $l_1$ and $2m$ dummy points on $l_3$.
\item Thus, there are $3n + 4m - 1$ points on $l_1$ and $6m +2$ points on $l_3$.
\item There are $9m + 2n$ edges from $\xi$ between $l_1$ and $l_3$.
\item Thus, there are $(3n + 4m - 1)(6m +2) - (9m + 2n)$ points on $l_2$ to block the visibility of the rest of the pairs. \end{itemize} \begin{figure}
\caption{(a) The new embedding $\zeta$ on three vertical lines. The dummy points are shown in gray. (b) A 3-colouring of $\zeta$ representing $x_1, x_2, \overline{x_3}$ and $\overline{x_4}$ assigned $1$ in $\theta$.}
\label{finalemb}
\end{figure}
\lemma \label{poly}
The above construction can be achieved in polynomial time.
\proof
As shown above, the number of points used is polynomial. All the points of $l_1$ and $l_3$ are embedded on lattice points. The intersections of the line segments joining points of $l_1$ and $l_3$ are computed, and $l_2$ is chosen such that none of these intersection points lies on $l_2$.
To block visibilities,
the intersections of the line segments joining points of $l_1$ and $l_3$ with $l_2$ are computed and blockers are placed on the intersection
points. All of this is achievable in polynomial time. \qed
\lemma \label{iff}
The PVG of $\zeta$ can be $5$-coloured if and only if $\theta$ has a satisfying assignment.
\proof Suppose that $\theta$ has a satisfying assignment. Then the points of $\zeta$ obtained from $\xi$ can be coloured with red, blue
and green.
The two neighbours of a dummy point on $l_1$ or $l_3$ can have at most two colours, so the dummy point can always be assigned the third colour.
The points on $l_2$ are coloured alternately with two different colours
(Figure \ref{finalemb}(b)).
$\\ \\$
Now suppose that $\zeta$ has a 5-colouring. All points of $l_2$ are visible from all points of $l_1$ and $l_3$. The points of
$l_2$ must be coloured with at least two colours. This means that the points of the graph induced by $\xi$ are coloured with
at most $3$ colours, which is possible only when $\theta$ is satisfiable. \qed We have the following theorem. \theorem The problem of deciding whether the
visibility graph of a given point set is $5$-colourable, is NP-complete.
\proof
A $5$-colouring of a point visibility graph can be verified in polynomial time. Thus, the problem is in NP.
On the other hand, 3-SAT can be reduced to the problem.
Given a 3-SAT formula $\theta$, by Lemmas \ref{poly} and \ref{iff}, a point set $\zeta$ can be constructed in time polynomial in the size of $\theta$
such that $\zeta$ can be $5$-coloured if and only if $\theta$ has a satisfying assignment. Thus, the problem is NP-complete. \qed
\section{Colouring a point set with small clique number} In general, graphs with small clique numbers can have arbitrarily large chromatic numbers. In fact, there exist triangle free graphs with arbitrarily high chromatic numbers due to the construction of Mycielski \cite{myc}. Pfender showed that for a PVG with $\omega (G) = 6$, $\chi (G)$ can be arbitrarily large \cite{p-vgps-2008}. But it is not known whether the chromatic number of a PVG is bounded, if its clique number is only $4$ or $5$. Here, we address this question. \subsection{A graph with $\omega (G) = 4$ and $\chi (G) = 6$} \label{secexample2}
K\'{a}ra et al \cite{kpw-ocnv-2005} showed that a PVG with clique number $4$ can have chromatic number $5$. They then generalized their example to prove that there is an exponential function $f$ such that for a family of PVGs, the identity $\chi (G) = f(\omega (G))$ holds for all graphs in the family.
The main question remaining is whether PVGs with maximum clique size $4$ have bounded chromatic number. Here we construct a visibility graph $G'$ with $\omega (G') = 4$ and $\chi (G') = 6$ (Figure \ref{6graph}). We construct $G'$ directly as a visibility embedding, as follows. \begin{enumerate}
\item Consider three horizontal lines $l_1$, $l_2$ and $l_3$ parallel to each other. \item On the first line, embed ten points $\{ p_1, p_2, \ldots, p_{10}\}$ from left to right. \item From left to right embed the points $q_1, \ldots, q_4$ on $l_3$. \item Join with line segments the pairs $(q_1,p_1)$, $(q_1,p_4)$, $(q_2,p_2)$, $(q_2,p_5)$. \label{step1} \item Join with line segments the pairs $(q_3,p_6)$, $(q_3,p_9)$, $(q_4,p_7)$, $(q_4,p_{10})$. \item Starting from the right of $q_4$, embed the points $r_1, b_1, r_2, b_2, \ldots, r_{10}$ on $l_3$. \item Join each $r_i$ with $1 \leq i \leq 5$ with $p_1$, $p_3$ and $p_{i+5}$. \item Join each $r_i$ with $6 \leq i \leq 10$ with $p_1$, $p_4$ and $p_{i}$. \label{step2} \item Embed points on $l_2$ such that only the adjacencies described from steps \ref{step1} to \ref{step2} hold. \end{enumerate} We have the following lemmas.
\lemma
The clique number of $G'$ is $4$.
\proof By construction, the points on $l_1$ and $l_3$ together induce a triangle free graph. The points of $l_2$ can contribute at most two more points to cliques induced by the points on $l_1$ and $l_3$. So, $G'$ has cliques of size at most four. \qed
\lemma
The chromatic number of $G'$ is $6$.
\proof
Each point on $l_2$ is adjacent to every point on $l_1$ and $l_3$, so the points on $l_2$ require two colours which are absent from the points
of $l_1$ and $l_3$. So, it suffices to show that the graph induced by points on $l_1$ and $l_3$ is not three colourable. Suppose that the points
$p_1, \ldots p_5$ have only two colours (say, $C_1$ and $C_2$). This means that they are coloured with $C_1$ and $C_2$ alternately,
and $q_1$ and $q_2$ must be coloured with two extra colours, $C_3$ and $C_4$ respectively.
$\\ \\$
Now suppose that all three of $C_1$, $C_2$ and $C_3$ occur among $p_1, \ldots p_5$. Similarly,
all three of $C_1$, $C_2$ and $C_3$ occur among $p_6, \ldots p_{10}$, for otherwise the previous argument is applicable to $q_3$, $q_4$
and $p_6, \ldots p_{10}$. Also, $p_1$ and $p_3$, or $p_1$ and $p_4$ must have two distinct colours. On the other hand, three distinct colours
must also occur among $p_6, \ldots p_{10}$. But for every $p_i$, $6 \leq i \leq 10$, there are two points among $r_1, r_2, \ldots, r_{10}$
that are adjacent to $p_i$ and one pair among $\{p_1, p_3\}$ and $\{p_1, p_4\}$. So, at least one of the points among
$r_1, r_2, \ldots, r_{10}$ must have the fourth colour. \qed
\begin{figure}
\caption{A point visibility graph with clique number four but chromatic number six.}
\label{6graph}
\end{figure}
\section{Concluding Remarks} We have settled the question of colouring PVGs, showing that the $4$-colour problem on them is solvable in polynomial time while the $5$-colour problem is NP-complete. We have shown that there is a PVG with clique number four but chromatic number six. However, it is still open to show whether a PVG with clique number four can have a greater chromatic number or not.
\end{document} |
\begin{document}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}\setcounter{footnote}{0}
\begin{center} {\Large Construction of Locally Conservative Fluxes for High Order Continuous Galerkin Finite Element Methods } \end{center}
\renewcommand{\thefootnote}{\fnsymbol{footnote}} \renewcommand{\thefootnote}{\arabic{footnote}}
\begin{center} Q.~Deng and V.~Ginting\\ Department of Mathematics, University of Wyoming, Laramie, Wyoming 82071, USA\footnote{Complete mailing address: Department of Mathematics, 1000 E. University Ave. Dept. 3036, Laramie, WY 82071} \end{center}
\renewcommand{\baselinestretch}{1.5} \begin{abstract}
We propose a simple post-processing technique for linear and high order continuous Galerkin Finite Element Methods (CGFEMs) to obtain a locally conservative flux field. The post-processing technique requires solving an auxiliary problem on each element independently, which results in solving a linear algebra system whose size is $\frac{1}{2}(k+1)(k+2)$ for $k^\text{th}$ order CGFEM. The post-processing could have been done directly from the finite element solution, which results in a locally conservative flux on the element. However, the normal flux is not continuous at the element's boundary. To construct a locally conservative flux field whose normal component is also continuous, we propose to do the post-processing on the nodal-centered control volumes which are constructed from the original finite element mesh. We show that the post-processed solution converges in an optimal fashion to the true solution in the $H^1$ semi-norm. We present various numerical examples to demonstrate the performance of the post-processing technique.
\end{abstract}
\paragraph*{Keywords} CGFEM; FVEM; conservative flux; post-processing
\section{Introduction} \label{sec:intro}
\renewcommand{\baselinestretch}{1.5}
Both finite volume element method (FVEM) and continuous Galerkin finite element method (CGFEM) are widely used for solving partial differential equations and they have both advantages and disadvantages. Both methods share a property in how the approximate solutions are represented through linear combinations of the finite element basis functions. They have a main advantage of the ability to solve the partial differential equations posed in complicated geometries. However, the methods differ in the variational formulations governing the approximate solutions. CGFEMs are defined as a global variational formulation while FVEM relies on a local variational formulation, namely one that imposes local conservation of fluxes. In the case of linear finite element, it is well-known that the bilinear form of FVEM is closely related to its CGFEM counterpart, and this closeness is exploited to carry out the error analysis of FVEM \cite{bank1987some, hackbusch1989first}. In 2002, Ewing \textit{et al.} showed that the stiffness matrix derived from the linear FVEM is a small perturbation of that of the linear CGFEM for sufficiently small mesh size of the triangulation \cite{ewing2002accuracy}. In 2009, an identity between the stiffness matrix of linear FVEM and the matrix of linear CGFEM was established; see Xu \textit{et al.} \cite{xu2009analysis}. A significant amount of work has been done to investigate the closeness of linear FVEM and linear CGFEM. However, the current understanding and implementation of higher order FVEMs are still in their infancy and are not as satisfactory as linear FVEM. For one-dimensional elliptic equations, high order FVEMs have been developed in \cite{plexousakis2004construction}. Other relevant high order FVEM work can be found in \cite{liebau1996finite, li2000generalized, cai2003development, chen2012higher, chen2015construction}.
As mentioned, FVEM produces locally conservative fluxes while, due to the global formulation, CGFEMs do not. Robustness of the CGFEMs for any order has been established through extensive and rigorous error analysis, while this is not the case for FVEM. Development of linear algebra solvers for CGFEMs has reached an advanced stage, mainly driven from a solid understanding of the variational formulations and their properties, such as coercivity (and symmetry) of the bilinear form in the Galerkin formulation. On the other hand, the resulting linear algebra systems derived from FVEMs, especially high order FVEMs, are not that easy to solve. Typically, the matrices resulting from FVEMs are not symmetric even if the original boundary value problem is. Furthermore, at most FVEM discretization with linear finite element basis yields M-matrix, while with quadratic finite element basis it is not (see \cite{liebau1996finite}).
Preservation of the numerical local conservation property of approximate solutions is imperative in simulations of many physical problems, especially those that are derived from a law of conservation. In order to maintain the advantages of CGFEM as well as to obtain locally conservative fluxes, post-processing techniques are developed; see \cite{arbogast1995characteristics, bush2015locally, bush2013application, chou2000conservative, cockburn2007locally, cordes1992continuous,
deng2015construction, gmeiner2014local, hughes2000continuous,
kees2008locally, larson2004conservative, loula1995higher, nithiarasu2004simple, srivastava1992three, sun2009locally, thomas2008element, thomas2008locally, toledo1989mixed, zhang2013locally}. The post-processing techniques proposed in the aforementioned references are mainly techniques for post-processing linear finite element related methods and they include finite element methods for solving pure elliptic equations, advection diffusion equations, advection dominated diffusion equations, elasticity problems, Stokes problem, etc. Among them, some of the proposed post-processing techniques require solving global systems. We will focus on a brief review on the post-processing techniques for high order CGFEMs. Generally, those post-processing techniques that work for high order CGFEMs also work for lower order CGFEMs, but not necessarily vice versa.
There is very limited work on the post-processing for high order CGFEMs to obtain locally conservative fluxes. An interesting work on post-processing for high order CGFEMs is in Zhang \textit{et al} \cite{zhang2012flux}. In their work, they showed that the elemental fluxes directly calculated from any order of CGFEM solutions converge to the true fluxes in an optimal order but the fluxes are not naturally locally conservative. They proposed two post-processing techniques to obtain the locally conservative fluxes at the boundaries of each element. The post-processed solutions are of optimal convergence order in both the $L^2$ norm and the $H^1$ semi-norm. Very interestingly, one of the post-processed solutions still satisfies the original finite element equations. Other work on post-processing to obtain locally conservative fluxes that includes high order finite elements is recorded in \cite{cockburn2007locally}. The post-processing involves two steps: solving a set of local systems followed by solving a global system.
A uniform approach to local reconstruction of the local fluxes from various finite element method (FEM) solutions was presented in \cite{becker2015robust}. These methods include any order of conforming, nonconforming, and discontinuous FEMs. They proposed a hybrid formulation by utilizing Lagrange multipliers, which can be computed locally. However, the reconstructed fluxes are not locally conservative. They used the reconstructed fluxes to derive an a posteriori error estimator \cite{becker2015stopping}.
In this paper, we propose a post-processing technique for any order of CGFEMs to obtain fluxes that are locally conservative on a dual mesh consisting of control volumes. The dual mesh is constructed from the original mesh in a different way for different order of CGFEMs. The technique requires solving an auxiliary problem which results in a low dimensional linear algebra system on each element independently. Thus, the technique can be implemented in a parallel environment and it produces locally conservative fluxes wherever it is needed. The technique is developed on triangular meshes and it can be naturally extended to rectangular meshes.
The rest of the paper is organized as follows. The CGFEM formulation of the model problem is presented in Section \ref{sec:cgfem} followed by the description of the methodology of the post-processing technique in Section \ref{sec:pp}. Analysis of the post-processing technique is presented in Section \ref{sec:ana} and numerical examples are presented to demonstrate the performance of the technique in Section \ref{sec:num}.
\section{Continuous Galerkin Finite Element Method} \label{sec:cgfem}
For simplicity, we consider the elliptic boundary value problem \begin{equation}\label{pde} \begin{cases} \begin{aligned} - \nabla \cdot ( \kappa \nabla u ) & =f \quad \text{in} \quad \Omega, \\ u&= g \quad \text{on} \quad \partial\Omega, \\ \end{aligned} \end{cases} \end{equation} where $\Omega$ is a bounded open domain in $\mathbb{R}^2$ with Lipschitz boundary $\partial\Omega$, $\kappa = \kappa(\boldsymbol{x})$ is the elliptic coefficient, $u=u(\boldsymbol{x})$ is the solution to be found, $f=f(\boldsymbol{x})$ is a forcing function. Assuming $0 < \kappa_{\min} \leq \kappa(\boldsymbol{x}) \leq \kappa_{\max} < \infty $ for all $\boldsymbol{x} \in\Omega$ and $f \in L^2(\Omega)$, Lax-Milgram Theorem guarantees a unique weak solution to \eqref{pde}. For the polygonal domain $\Omega$, we consider a partition $\mathcal{T}_h$ consisting of triangular elements $\tau$ such that $\overline\Omega = \bigcup_{\tau\in\mathcal{T}_h} \tau.$ We set $h=\max_{\tau\in\mathcal{T}_h} h_\tau$ where $h_\tau$ is defined as the diameter of $\tau$. The continuous Galerkin finite element space is defined as $$
V^k_h = \big\{w_h\in C(\overline\Omega): w_h|_\tau \in P^k(\tau), \ \forall \ \tau\in\mathcal{T}_h \ \text{and} \ w_h|_{\partial\Omega} = 0 \big\}, $$ where $P^k(\tau)$ is a space of polynomials with degree at most $k$ on $\tau$. The CGFEM formulation for \eqref{pde} is to find $u_h$ with $(u_h - g_h) \in V^k_h$, such that \begin{equation} \label{eq:fem} a(u_h, w_h) = \ell(w_h) \quad \forall \ w_h \in V^k_h, \end{equation} where \begin{equation*} a(v, w) = \int_\Omega \kappa \nabla v \cdot \nabla w \ \text{d} \boldsymbol{x}, \qquad \text{and} \qquad \ell(w) = \int_\Omega f w \ \text{d} \boldsymbol{x}, \end{equation*} and $g_h \in V_h^k$ can be thought of as the interpolant of $g$ using the usual finite element basis.
\begin{figure}
\caption{Setting for $V_h^1$: the dots represent the degrees of freedom and $\Omega^z = \cup_{i=1}^5 \tau_i^z$ is the support of $\phi_z$.}
\caption{Setting for $V_h^2$: The dots represent the degrees of freedom. $\Omega^z= \cup_{i=1}^5 \tau_i^z$ (left) is support of $\phi_z$ and $\Omega^y = \cup_{i=1}^2 \tau_i^y$ (right) is the support of $\phi_y$.}
\caption{Setting for $V_h^3$: The dots represent the degrees of freedom. $\Omega^z= \cup_{i=1}^5 \tau_i^z$ (left) is the support of $\phi_z$, and $\Omega^y = \cup_{i=1}^2 \tau_i^y$ (middle) is the support of $\phi_y$, and $\Omega^x = \tau_1^x$ is the support of $\phi_x$.}
\label{fig:lelemsupp}
\label{fig:qelemsupp}
\label{fig:celemsupp}
\end{figure}
We now present a fundamental but obvious fact for this CGFEM formulation. To make it as general as possible, let $Z$ be the set of nodes in $\Omega$ resulting from the partition $\mathcal{T}_h$ and placing the degrees of freedom owned by $V_h^k$. In particular, $Z$ consists of vertices for $V_h^1$ (see Figure \ref{fig:lelemsupp}), vertices and degrees of freedom on the edges for $V_h^2$ (see Figure \ref{fig:qelemsupp}), vertices and degrees of freedom on the edges and in the elements' barycenters for $V_h^3$ (see Figure \ref{fig:celemsupp}). Furthermore, $Z = Z_{\text{in}} \cup Z_\text{d}$, where $Z_{\text{in}}$ is the set of interior degrees of freedom and $Z_\text{d}$ is the set of corresponding points on $\partial\Omega$. Denoting the usual Lagrange nodal basis of $V^k_h$ as $\{ \phi_\xi \}_{\xi \in Z_\text{in}}$, \eqref{eq:fem} yields \begin{equation} \label{eq:femphi} a(u_h, \phi_\xi) = \ell (\phi_\xi) \quad \forall \ \xi \in Z_{\text{in}}. \end{equation}
For a $\xi \in Z_{\text{in}}$, let $\Omega^\xi$ be the support of the basis function $\phi_\xi$. Then in the partition $\mathcal{T}_h$, $\Omega^\xi = \cup_{i=1}^{N_\xi} \tau_i^\xi$, where $\tau_i^\xi$ is an element that has $\xi$ as one of its degrees of freedom, and
$N_\xi$ is the total number of such elements. In the linear case, $N_\xi$ is the number of elements sharing vertex $\xi$ (see Figure \ref{fig:lelemsupp}); in the quadratic case, $N_\xi$ is the number of elements sharing vertex $\xi$ or a middle point on an edge in which case $N_\xi = 2$ (see Figure \ref{fig:qelemsupp}); in the cubic case, $N_\xi$ can be those in quadratic case plus that it can be one if $\xi$ is inside the element (see Figure \ref{fig:celemsupp}). With this in mind, \eqref{eq:femphi} is expressed as
\begin{equation} \label{eq:localfem} \sum_{i=1}^{N_\xi} a_{\tau^\xi_i}(u_h, \phi_\xi) = \sum_{i=1}^{N_\xi} \ell_{\tau^\xi_i} (\phi_\xi), \end{equation} where $a_\tau(v_h, w_h)$ is $a(v_h, w_h)$ restricted to element $\tau$ and $\ell_\tau (w_h)$ is $\ell(w_h)$ restricted to element $\tau$.
Equation \eqref{eq:localfem} is fundamental and we will use this fact to derive the post-processing technique in Section \ref{sec:pp}.
\section{A Post-processing Technique} \label{sec:pp}
A naive derivative calculation of $u_h$ does not yield locally conservative fluxes. For this reason, in this section we propose a post-processing technique to construct locally conservative fluxes over control volumes from CGFEM solutions. We will focus on the construction for $k^{\text{th}}$ order CGFEM, where $k=1, 2, 3$, i.e., linear, quadratic, and cubic CGFEMs. Construction for orders higher than these CGFEMs can be conducted in a similar fashion.
\subsection{Auxiliary Elemental Problem} \label{sec:bvp}
Based upon the original finite element mesh and $V_h^k$, a dual mesh that consists of control volumes is generated over which the post-processed fluxes are to satisfy the local conservation. For $V_h^1$, we connect the barycenter and middle points of edges of a triangular element; see Figure \ref{fig:lelemcv}. For $V_h^2$, we firstly discretize the triangular element into four sub-triangles and then connect the barycenters and middle points of each sub-triangle; see plots in Figure \ref{fig:qelemcv}, and similarly for $V_h^3$, see plots in Figure \ref{fig:celemcv}. We can also see the construction of the dual mesh on a single element in Figure \ref{fig:elem}. Each control volume corresponds to a degree of freedom in CGFEMs. We post-process the CGFEM solution $u_h$ to obtain $\widetilde {\boldsymbol{\nu}}_h = -\kappa \nabla \widetilde{u}_h $ such that it is continuous at the boundaries of each control volume and satisfies the local conservation property in the sense \begin{equation} \label{eq:cvconservation} \int_{\partial C^\xi} \widetilde {\boldsymbol{\nu}}_h \cdot \boldsymbol{n} \ \text{d} l = \int_{C^\xi} f \ \text{d} \boldsymbol{x}, \end{equation} where $C^\xi$ can be a control volume surrounding a vertex as $C^z$ in Figure \ref{fig:lelemcv}, \ref{fig:qelemcv}, and \ref{fig:celemcv}, or a control volume surrounding a degree of freedom on an edge as $C^y$ in Figure \ref{fig:qelemcv} and \ref{fig:celemcv}, or $C^x$ in Figure \ref{fig:celemcv}.
\begin{figure}
\caption{ $C^z$ is the control volume corresponding to $\phi_z$ in $V_h^1$. }
\caption{ $C^z, C^y$ are control volumes corresponding to $\phi_z$ (left) and $\phi_y$ (right), respectively, in $V_h^2$. }
\caption{ $C^z, C^y, C^x$ are control volumes corresponding to $\phi_z$ (left), $\phi_y$ (middle), and $\phi_x$ (right), respectively, in $V_h^3$. }
\label{fig:lelemcv}
\label{fig:qelemcv}
\label{fig:celemcv}
\end{figure}
In order to obtain the locally conservative fluxes on each control volume, we set and solve an elemental/local problem on $\tau$. Let $N_k = \frac{1}{2}(k+1)(k+2)$ be the total number of degrees of freedom on a triangular element for $V_h^k$. We denote the collection of those degrees of freedom by $s(\tau, k) = \{ z_j \}_{j=1}^{N_k}$; see Figure \ref{fig:elem}. We partition each element $\tau$ into $N_k$ non-overlapping polygonals $\{ t_{z_j} \}_{j=1}^{N_k}$; see Figure \ref{fig:elem}. For $t_\xi$ with $\xi \in s(\tau,k)$, we make decomposition $\partial t_\xi = ( \partial \tau \cap \partial t_\xi ) \cup ( \partial C^\xi \cap \partial t_\xi ).$ We also define the average on an edge or part of the edge which is the intersection of two elements $\tau_1$ and $\tau_2$ for vector $\boldsymbol{v}$ as \begin{equation} \label{def:ave}
\{ \boldsymbol{v} \} = \frac{\boldsymbol{v}|_{\tau_1} + \boldsymbol{v}|_{\tau_2} }{2}. \end{equation}
\begin{figure}
\caption{ Control volume construction and degrees of freedom on an element for $V_h^k$: $k=1$ (left), $k=2$ (middle), and $k=3$ (right). }
\label{fig:elem}
\end{figure}
Let $V^0(\tau)$ be the space of piecewise constant functions on element $\tau$ such that $V^0(\tau) = \text{span} \{ \psi_\eta \}_{\eta \in s(\tau,k)}$, where $\psi_\eta$ is the characteristic function of the polygonal $t_\eta$, i.e., \begin{equation} \psi_\eta(\boldsymbol{x}) = \Big\lbrace\begin{array}{c} 1 \quad \text{if} \quad \boldsymbol{x} \in t_\eta \\ 0 \quad \text{if} \quad \boldsymbol{x} \notin t_\eta \end{array}. \end{equation} We define a map $I_\tau: H^1(\tau) \rightarrow V^0(\tau)$ with $I_\tau w = \displaystyle \sum_{\xi \in s(\tau,k)} w_\xi \psi_\xi$, where $w_\xi = w(\xi)$ for $w\in H^1(\tau)$. We define the following bilinear forms \begin{equation} \label{eq:bebiforms} b_\tau( v, w) = -\sum_{\xi \in s(\tau,k)} \int_{\partial C^\xi \cap \partial t_\xi} \kappa \nabla v \cdot \boldsymbol{n} I_\tau w \ \text{d} l, \qquad e_\tau(v, w) = \int_{\partial \tau} \{ \kappa \nabla v \} \cdot \boldsymbol{n} w \ \text{d} l. \end{equation}
Let $ V^k_h(\tau) = \text{span}\{ \phi_\eta \}_{ \eta \in s(\tau,k) }$ where $ \phi_\eta$ can be thought as the usual nodal $\eta$ basis function restricted to element $\tau$. The elemental calculation for the post-processing is to find $\widetilde u_{\tau, h} \in V^k_h(\tau) $ satisfying \begin{equation} \label{eq:bvpvf} b_\tau(\widetilde u_{\tau, h}, w) = \ell_\tau ( I_\tau w - w ) + a_\tau(u_h, w) + e_\tau(u_h, I_\tau w - w), \quad \forall \ w \in V^k_h(\tau). \end{equation}
\newtheorem{lem}{Lemma}[section] \begin{lem} \label{lem:ebvp} The variational formulation \eqref{eq:bvpvf} has a unique solution up to a constant. \end{lem}
\begin{proof} We have $V_h^k(\tau) = \text{span}\{ \phi_\xi \}_{ \xi \in s(\tau,k) }$, where $\phi_\xi(\eta) = \delta_{\xi \eta}$ with $\delta_{\xi \eta}$ being the Kronecker delta, for all $\xi, \eta \in s(\tau,k)$. By replacing the test function $w$ with $\phi_\xi$ for all $\xi \in s(\tau,k)$,
\eqref{eq:bvpvf} is reduced to \begin{equation} \label{eq:cz10} - \int_{\partial C^\xi \cap \partial t_\xi } \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \int_{t_\xi} f \ \text{d} \boldsymbol{x} - \ell_\tau ( \phi_\xi ) + a_\tau(u_h, \phi_\xi ) + e_\tau( u_h, I_\tau \phi_\xi - \phi_\xi ), \quad \forall \ \xi \in s(\tau,k). \end{equation} This is a fully Neumann boundary value problem in $\tau$ with boundary condition satisfying \begin{equation} \label{eq:bvpb} - \int_{\partial \tau \cap \partial t_\xi } \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \ell_\tau ( \phi_\xi ) - a_\tau(u_h, \phi_\xi ) - e_\tau( u_h, I_\tau \phi_\xi - \phi_\xi ), \quad \forall \ \xi \in s(\tau,k). \end{equation} To establish the existence and uniqueness of the solution, one needs to verify the compatibility condition \cite{evans2010partial}. We calculate \begin{equation*} -\int_{\partial \tau} \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \sum_{\xi \in s(\tau,k)} \Big( \ell_\tau ( \phi_\xi ) - a_\tau(u_h, \phi_\xi ) - e_\tau( u_h, I_\tau \phi_\xi - \phi_\xi ) \Big). \end{equation*} Using the fact that $\sum_{\xi \in s(\tau,k)} \phi_\xi = 1$ and linearity, we obtain \begin{equation*} \sum_{\xi\in s(\tau,k)} \ell_\tau( \phi_\xi) = \sum_{\xi\in s(\tau,k)} \int_\tau f \phi_\xi \ \text{d} \boldsymbol{x} = \int_\tau f \sum_{\xi\in s(\tau,k)} \phi_\xi \ \text{d} \boldsymbol{x} = \int_\tau f \ \text{d} \boldsymbol{x}. \end{equation*} Using the fact that $\nabla \big( \sum_{\xi\in s(\tau,k)} \phi_\xi \big) = \boldsymbol{0} $ and linearity, we obtain \begin{equation*} \sum_{\xi\in s(\tau,k)} a_\tau(u_h, \phi_\xi) = \sum_{\xi\in s(\tau,k)} \int_\tau \kappa \nabla u_h \cdot \nabla \phi_\xi \ \text{d} \boldsymbol{x} = \int_\tau \kappa \nabla u_h \cdot \nabla \big( \sum_{\xi\in s(\tau,k)} \phi_\xi \big) \text{d} \boldsymbol{x} = 0. 
\end{equation*} Also, we notice that \begin{equation*} \sum_{\xi\in s(\tau,k)} e_\tau(u_h, I_\tau \phi_\xi - \phi_\xi ) = \sum_{\xi\in s(\tau,k)} \int_{\partial \tau \cap \partial t_\xi } \{ \kappa \nabla u_h \} \cdot \boldsymbol{n} \ \text{d} l - \int_{\partial \tau} \{ \kappa \nabla u_h \} \cdot \boldsymbol{n} \sum_{\xi\in s(\tau,k)} \phi_\xi \ \text{d} l = 0. \end{equation*} Combining these equalities, compatibility condition $\int_{\partial \tau} - \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \int_\tau f \ \text{d} \boldsymbol{x}$ is verified. This completes the proof. \end{proof}
\begin{remark} The technique proposed here can be naturally generalized to rectangular elements. In the proof of Lemma \ref{lem:ebvp}, for $\xi = z_{10}$ in cubic CGFEM, \eqref{eq:cz10} is naturally reduced to a local conservation equation $- \int_{\partial C^\xi } \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \int_{C^\xi} f \ \text{d} \boldsymbol{x}$. This can be proved by using \eqref{eq:localfem}, $\partial \tau \cap \partial t_\xi = \emptyset$, and $\phi_{z_{10}} = 0$ on $\partial \tau.$ \end{remark}
\begin{lem} \label{lem:elemlc} The piecewise boundary fluxes defined in \eqref{eq:bvpb} satisfy local conservation on each element, i.e., \begin{equation} \label{eq:elemlc} - \int_{\partial\tau } \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \int_\tau f \ \text{d} \boldsymbol{x}, \end{equation} and \begin{equation} \label{eq:fluxed} - \int_{\partial\tau } \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = - \int_{\partial\tau } \kappa \nabla u \cdot \boldsymbol{n} \ \text{d} l. \end{equation} \end{lem}
\begin{proof} Equation \eqref{eq:elemlc} is established in the proof of Lemma \ref{lem:ebvp}. Identity \eqref{eq:fluxed} is verified by using \eqref{pde} and Divergence theorem: \begin{equation*} \begin{aligned} \int_{\partial \tau} - \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \int_\tau f \ \text{d} \boldsymbol{x} = \int_\tau \nabla \cdot ( -\kappa \nabla u) \ \text{d} \boldsymbol{x} = \int_{\partial\tau } -\kappa \nabla u \cdot \boldsymbol{n} \ \text{d} l. \end{aligned} \end{equation*}
\end{proof}
\begin{remark} Lemma \ref{lem:elemlc} implies that the proposed way of imposing elemental boundary condition in \eqref{eq:bvpb} is a rather simple post-processing technique: we set flux at $\partial \tau \cap \partial t_\xi $ as $\ell_\tau ( \phi_\xi ) - a_\tau(u_h, \phi_\xi ) - e_\tau( u_h, I_\tau \phi_\xi - \phi_\xi )$. It does not require solving any linear system but provides locally conservative fluxes at the element boundaries. In the two-dimensional case, $\partial \tau \cap \partial t_\xi $ consists of two segments in the setting as shown in Figure \ref{fig:elem}. If we want to provide a flux approximation on each segment, we can for example set the flux for each segment $\Gamma_\xi$ as \begin{equation} \frac{\ell_\tau ( \phi_\xi ) - a_\tau(u_h, \phi_\xi ) + e_\tau( u_h, \phi_\xi )}{2} - \int_{\Gamma_\xi} \{ \kappa \nabla u_h \} \cdot \boldsymbol{n} \ \text{d} l. \end{equation} If $\partial \tau \cap \partial t_\xi $ consists of more segments, we can set the flux in a similar way. For instance, we assign weights to $\ell_\tau ( \phi_\xi ) - a_\tau(u_h, \phi_\xi ) + e_\tau( u_h, \phi_\xi )$ according to the length ratio of the segment over $\partial \tau \cap \partial t_\xi $. A drawback of this technique, however, is that the post-processed fluxes are in general not continuous at the element boundaries, except when $\ell_\tau ( \phi_\xi ) - a_\tau(u_h, \phi_\xi )$ from two neighboring elements are equal. This reveals the merits of the main post-processing technique proposed in this paper. The main post-processing technique provides a way to obtain locally conservative fluxes on control volumes and the fluxes are continuous at the boundaries of each control volume.
\end{remark}
\begin{lem} \label{lem:ppsol} The true solution $u$ of \eqref{pde} satisfies \begin{equation} \label{eq:bvpvft} b_\tau(u, w) = \ell_\tau ( I_\tau w - w ) + a_\tau(u, w) + e_\tau(u, I_\tau w - w), \quad \forall \ w \in H^1(\tau), \end{equation} and this further implies \begin{equation} \label{eq:bvpvfe} b_\tau(u - \widetilde u_{\tau, h}, w) = a_\tau(u - u_h, w) + e_\tau(u - u_h, I_\tau w - w), \quad \forall \ w \in V^k_h(\tau). \end{equation} \end{lem}
\begin{proof} This can be easily proved by simple calculations. \end{proof}
\begin{remark}
The first result in Lemma \ref{lem:ppsol} tells us that accurate boundary data gives us a chance to obtain accurate fluxes. This is the very reason that the post-processing technique proposed in \cite{bush2013application} and extended in \cite{bush2014application, bush2015locally, deng2015construction} could not be generalized to a post-processing technique for high order CGFEMs. The boundary condition imposed in \cite{bush2013application} is \begin{equation}
\int_{\partial \tau \cap \partial t_\xi } - \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \ell_\tau( \phi_\xi) - a_\tau(u_h, \phi_\xi), \end{equation} and clearly $\ell_\tau(w) - a_\tau(u, w) \ne 0$ for the true solution $u$ of \eqref{pde}. Specifically for high order CGFEM solutions, only imposing $\ell_\tau(w) - a_\tau(u_h, w)$ as a boundary condition for $\widetilde{u}_{\tau,h}$ is not sufficient to guarantee optimal accuracy. The convergence order and accuracy of the post-processed solution strongly depend on the boundary data. By imposing the boundary data in the way shown in \eqref{eq:bvpb}, we can get optimal convergence order for the post-processed solution $\widetilde u_h$ for any high order CGFEMs. \end{remark}
\begin{remark} Equation \eqref{eq:bvpvfe} in Lemma \ref{lem:ppsol} resembles Galerkin orthogonality and plays a crucial role in establishing the post-processing error.
\end{remark}
\subsection{Elemental Linear System } \label{sec:llas} We note that the dimension of $V^k_h(\tau)$ is $N_k$ and hence the variational formulation \eqref{eq:bvpvf} yields an $N_k$-by-$N_k$ linear algebra system. Since $\widetilde u_{\tau, h} \in V^k_h(\tau)$, \begin{equation} \label{eq:ppsol} \widetilde u_{\tau, h} = \sum_{\eta \in s(\tau,k)} \alpha_\eta \phi_\eta, \end{equation} so inserting this representation into \eqref{eq:bvpvf} and replacing the test function by $\phi_\xi$ gives us the linear algebra system \begin{equation} \label{eq:axb} A \boldsymbol{\alpha} = \boldsymbol{\beta}, \end{equation} where $\boldsymbol{\alpha} \in \mathbb{R}^{N_k} $ whose entries are the nodal solutions in \eqref{eq:ppsol}, $\boldsymbol{\beta} \in \mathbb{R}^{N_k} $ with entries \begin{equation} \beta_\xi = \ell_\tau ( I_\tau \phi_\xi - \phi_\xi ) + a_\tau(u_h, \phi_\xi) + e_\tau (u_h, I_\tau \phi_\xi - \phi_\xi ), \quad \forall \ \xi \in s(\tau,k), \end{equation} and \begin{equation} A_{\xi \eta} = b_\tau( \phi_\eta, \phi_\xi), \quad \forall \ \xi, \eta \in s(\tau,k). \end{equation}
The linear system \eqref{eq:axb} is singular and there are infinitely many solutions since the solution to \eqref{eq:bvpvf} is unique up to a constant by Lemma \ref{lem:ebvp}. However, this does not cause any issue since
to obtain locally conservative fluxes, the desired quantity from the post-processing is $\nabla \widetilde{u}_{\tau,h}$, which is unique.
\subsection{Local Conservation} \label{sec:loccons}
At this stage, we verify the local conservation property \eqref{eq:cvconservation} on control volumes for the post-processed solution. It is stated in the following lemma.
\begin{lem} \label{lem:localconserv} The desired local conservation property \eqref{eq:cvconservation} is satisfied on the control volume $C^\xi$ where $\xi \in Z_{\text{in}}$. \end{lem}
\begin{proof} Obviously, for $\xi = z_{10}$ in the case of $V_h^3$, the polygonal $t_{z_{10}} = C^{z_{10}}$, so \eqref{eq:cvconservation} is directly satisfied from solving \eqref{eq:bvpvf}. A similar situation occurs for $k>3$ in $V_h^k$. Thus, we only need to prove this lemma in the case that a control volume is associated with $\xi$ that is either on an edge of $\tau$ or at a vertex of $\tau$. For a basis function $\phi_\xi$, let $\Omega^\xi = \cup_{i=1}^{N_\xi} \tau_i^\xi$ be its support. Noting that the gradient component is averaged, it is obvious that \begin{equation*} \sum_{j=1}^{N_\xi} \int_{\partial \tau_j^\xi} \{ \kappa \nabla u_{\tau_j, h} \} \cdot \boldsymbol{n} \phi_\xi \ \text{d} l = 0 \qquad \text{and} \qquad \sum_{j=1}^{N_\xi} \int_{\partial \tau_j^\xi \cap \partial t_\xi } \{ \kappa \nabla u_{\tau_j, h} \} \cdot \boldsymbol{n} \ \text{d} l = 0. \end{equation*} This implies that $\sum_{j=1}^{N_\xi} e_{\tau_j^\xi}(u_h, \phi_\xi) = 0.$ Straightforward calculation and \eqref{eq:localfem} give \begin{equation*}
\int_{ \partial C^\xi } - \kappa \nabla \widetilde u_{\tau, h} \cdot \boldsymbol{n} \ \text{d} l = \int_{C^\xi} f \ \text{d} \boldsymbol{x} + \sum_{j=1}^{N_\xi} \Big( a_{\tau_j^\xi}(u_h, \phi_\xi) - \ell_{\tau_j^\xi}(\phi_\xi) - e_{\tau_j^\xi}(u_h, \phi_\xi) \Big) = \int_{C^\xi} f \ \text{d} \boldsymbol{x}, \end{equation*} which completes the proof. \end{proof}
\section{An Error Analysis for the Post-processing} \label{sec:ana}
In this section, we focus on establishing an optimal convergence property of the post-processed solution $\widetilde u_{\tau, h}$ in the $H^1$ semi-norm. We denote by $\| \cdot \|_{L^2}$ the usual $L^2$ norm and by $| \cdot |_{W}$ the usual semi-norm in a Sobolev space $W$. We start by proving a property of $I_\tau$ defined in Section \ref{sec:bvp}.
\begin{lem} \label{lem:map} Let $I_\tau$ be as defined in Section \ref{sec:bvp}. Then \begin{equation} \label{eq:interr}
\| w - I_\tau w \|_{L^2(\tau)} \leq Ch^2_\tau | w |_{H^2(\tau)} + C h_\tau | w |_{H^1(\tau)}, \quad \text{for} \quad w \in H^2(\tau). \end{equation} \end{lem}
\begin{proof} For $w \in H^2(\tau)$, suppose $\Pi w \in V_h^1(\tau)$ is the standard linear interpolation of $w$. Then we have $I_\tau w = I_\tau (\Pi w)$. By adding and subtracting $\Pi w$, and invoking triangle inequality we get \begin{equation*} \begin{aligned}
\| w - I_\tau w \|_{L^2(\tau)} \leq \| w - \Pi w \|_{L^2(\tau)} + \| \Pi w - I_\tau (\Pi w) \|_{L^2(\tau)}. \end{aligned} \end{equation*} Standard interpolation theory (see for example Theorem 4.2 in \cite{johnson2009numerical}) states that \begin{equation}
\| w - \Pi w \|_{L^2(\tau)} \leq Ch^2_\tau | w |_{H^2(\tau)}. \end{equation} Since $I_\tau w = \displaystyle \sum_{\xi \in s(\tau,k)} w_\xi \psi_\xi$, we divide $\tau$ equally into $k^2$ sub-triangles $\tau_j, j=1, \cdots, k^2$. By Lemma 6.1 in \cite{chatzipantelidis2002finite}, we have \begin{equation}
\| \Pi w - I_\tau (\Pi w) \|^2_{L^2(\tau)} = \sum_{j=1}^{k^2} \| \Pi w - I_\tau (\Pi w) \|^2_{L^2(\tau_j)} \leq \sum_{j=1}^{k^2} C h^2_{\tau_j} | \Pi w |^2_{H^1(\tau_j)} \leq Ch^2_\tau | \Pi w |^2_{H^1(\tau)}. \end{equation} Taking square root gives \begin{equation}
\| \Pi w - I_\tau (\Pi w) \|_{L^2(\tau)} \leq Ch_\tau | \Pi w |_{H^1(\tau)}. \end{equation}
By using triangle inequality and interpolation theory again, we have $$
| \Pi w |_{H^1(\tau)} \leq | w |_{H^1(\tau)} + | w - \Pi w |_{H^1(\tau)} \leq | w |_{H^1(\tau)} + Ch_\tau | w |_{H^2(\tau)}. $$ Combining these inequalities gives the desired result. \end{proof}
\begin{lem} \label{lem:loccoe} The bilinear form defined in \eqref{eq:bvpvf} is bounded, i.e., for all $w \in H^2(\tau), v \in V^k_h(\tau),$ \begin{equation} \label{eq:bbounded}
b_\tau(w, v) \leq C |w|_{H^1(\tau)} |v|_{H^1(\tau)}. \end{equation} Furthermore, for $v\in V^k_h(\tau)$ with $k=1, 2$, $b_\tau(\cdot,\cdot)$ is coercive, namely, \begin{equation} \label{eq:coerc}
b_\tau(v, v) \geq C_b |v|^2_{H^1(\tau)}, \end{equation} for some positive constant $C_b$.
\end{lem}
\begin{proof} The boundedness of $b_\tau(\cdot, \cdot)$ has been established in Theorem 1 in \cite{xu2009analysis}. The local coercivity is also established for linear (Theorem 2) and quadratic (Theorem 5) CGFEM in \cite{xu2009analysis}. \end{proof}
\begin{lem} \label{lem:eb} Fix a triangle $\tau = \tau_0.$ Suppose $\{ \tau_i \}_{i=1}^3$ are the neighbors (sharing edges) of $\tau$, i.e., $\partial \tau \cap \partial \tau_i \neq \emptyset.$ Then for $w, v\in H^2(\tau)$ \begin{equation} \label{eq:ebounded}
e_\tau(w, I_\tau v - v) \leq C h_\tau^{1/2} \Big( |v|_{H^1(\tau)} + h_\tau |v|_{H^2(\tau)} \Big) \sum_{i=0}^3 \Big( h_{\tau_i}^{-1/2} | w |_{H^1(\tau_i)} + h_{\tau_i}^{1/2} | w |_{H^2(\tau_i)} \Big), \end{equation} where $C$ is a constant independent of $h_\tau$ and $h_{\tau_i}.$ \end{lem}
\begin{proof} By definition and Cauchy-Schwarz inequality, \begin{equation} \label{eq:ede0} \begin{aligned} e_\tau(w, I_\tau v - v) & = \int_{\partial \tau} \{ \kappa \nabla w \} \cdot \boldsymbol{n} ( I_\tau v - v ) \ \text{d} l \\
& \leq \Big( \int_{\partial \tau} | \{ \kappa \nabla w \} \cdot \boldsymbol{n} |^2 \ \text{d} l \Big)^{1/2} \Big( \int_{\partial \tau} | I_\tau v - v |^2 \ \text{d} l \Big)^{1/2} \\
& \leq \kappa_{\tau, \text{max}} \| \{ \nabla w \} \cdot \boldsymbol{n} \|_{L^2(\partial \tau)} \| I_\tau v - v \|_{L^2(\partial \tau)}, \end{aligned} \end{equation} where $\kappa_{\tau, \text{max}}$ is the maximum of $\kappa$ on $\tau.$
By trace inequality, we have \begin{equation} \label{eq:ede1}
\| \{ \nabla w \} \cdot \boldsymbol{n} \|_{L^2(\partial \tau)} \leq \frac{1}{2} \sum_{i=0}^3 \| \nabla w \cdot \boldsymbol{n} \|_{L^2(\partial \tau_i)} \leq \frac{1}{2} \sum_{i=0}^3 \Big( C h_{\tau_i}^{-1/2} | w |_{H^1(\tau_i)} + C h_{\tau_i}^{1/2} | w |_{H^2(\tau_i)} \Big). \end{equation}
Similarly by trace inequality and Lemma \ref{lem:map}, we have \begin{equation} \label{eq:ede2} \begin{aligned}
\| I_\tau v - v \|_{L^2(\partial \tau)} & \leq \sum_{\xi \in s(\tau,k)} \| I_\tau v - v \|_{L^2(\partial t_\xi)} \\
& \leq \sum_{\xi \in s(\tau,k)} C h_\tau^{-1/2} || I_\tau v - v ||_{L^2(t_\xi)} + C h_\tau^{1/2} | v |_{H^1(t_\xi)} \\
& \leq C \Big( h_\tau^{-1/2} || I_\tau v - v ||_{L^2(\tau)} + h_\tau^{1/2} | v |_{H^1(\tau)} \Big) \\
& \leq C h_\tau^{-1/2} \Big( C h^2_\tau |v|_{H^2(\tau)} + C h_\tau |v|_{H^1(\tau)} \Big)+ C h_\tau^{1/2} | v |_{H^1(\tau)} \\
& \leq C h_\tau^{1/2} \Big( |v|_{H^1(\tau)} + h_\tau |v|_{H^2(\tau)} \Big), \end{aligned} \end{equation} where $t_\xi$ are the polygonals defined in Figure \ref{fig:elem}. Putting \eqref{eq:ede1} and \eqref{eq:ede2} into \eqref{eq:ede0} gives us the desired result.
\end{proof}
\begin{lem} \label{lem:locerr} We have the following local error estimate \begin{equation}
|u - \widetilde u_{\tau, h} |_{H^1{(\tau)}} \leq C | u - u_h |_{H^1{(\tau)}} + Ch_\tau^{1/2} \sum_{i=0}^3 \Big( h_{\tau_i}^{-1/2} | u - u_h |_{H^1(\tau_i)} + h_{\tau_i}^{1/2} | u - u_h |_{H^2(\tau_i)} \Big). \end{equation} \end{lem}
\begin{proof} Triangle inequality gives \begin{equation} \label{eq:j1}
| u - \widetilde u_{\tau, h} |_{H^1{(\tau)}} \leq | u - u_h |_{H^1{(\tau)}} + | u_h - \widetilde u_{\tau, h} |_{H^1{(\tau)}}. \end{equation}
By Lemma \ref{lem:loccoe}, we have \begin{equation} \label{eq:j2} \begin{aligned}
C_b | u_h - \widetilde u_{\tau, h} |^2_{H^1{(\tau)}}& \leq b_\tau(u_h - \widetilde u_{\tau, h}, u_h - \widetilde u_{\tau, h} ) \\ & = b_\tau(u_h - u, u_h - \widetilde u_{\tau, h} ) + b_\tau(u - \widetilde u_{\tau, h}, u_h - \widetilde u_{\tau, h} ) \\
& \leq C | u - u_h |_{H^1{(\tau)}} | u_h - \widetilde u_{\tau, h} |_{H^1{(\tau)}} + b_\tau(u - \widetilde u_{\tau, h}, u_h - \widetilde u_{\tau, h} ). \\ \end{aligned} \end{equation}
For simplicity, we set $\delta_{\tau, h} = u_h - \widetilde u_{\tau, h}$. By \eqref{eq:bvpvfe}, we have \begin{equation}\label{eq:j3} b_\tau(u - \widetilde u_{\tau, h}, \delta_{\tau, h} ) = a_\tau(u - u_h, \delta_{\tau, h} ) + e_\tau(u - u_h, I_\tau \delta_{\tau, h} - \delta_{\tau, h} ). \end{equation}
Now, by boundedness of the bilinear form of $a_\tau(\cdot, \cdot)$, \begin{equation}\label{eq:j4}
a_\tau(u - u_h, \delta_{\tau, h}) \leq \kappa_{\tau, \text{max}} | u - u_h |_{H^1{(\tau)}} | \delta_{\tau, h} |_{H^1{(\tau)}}, \end{equation} where $\kappa_{\tau, \text{max}}$ is the maximum of $\kappa$ on $\tau.$ By Lemma \ref{lem:eb} and inverse inequality, we obtain \begin{equation} \label{eq:j5} \begin{aligned}
e_\tau(u - u_h, \delta_{\tau, h} ) & \leq C h_\tau^{1/2} \Big( |\delta_{\tau, h}|_{H^1(\tau)} + h_\tau |\delta_{\tau, h}|_{H^2(\tau)} \Big) \sum_{i=0}^3 \Big( h_{\tau_i}^{-1/2} | u - u_h |_{H^1(\tau_i)} + h_{\tau_i}^{1/2} | u - u_h |_{H^2(\tau_i)} \Big) \\
& \leq Ch_\tau^{1/2} | \delta_{\tau, h} |_{H^1(\tau)} \sum_{i=0}^3 \Big( h_{\tau_i}^{-1/2} | u - u_h |_{H^1(\tau_i)} + h_{\tau_i}^{1/2} | u - u_h |_{H^2(\tau_i)} \Big).
\end{aligned} \end{equation}
Combining \eqref{eq:j2}, \eqref{eq:j3}, \eqref{eq:j4}, and \eqref{eq:j5}, dividing both sides by $ | u_h - \widetilde u_{\tau, h} |_{H^1{(\tau)}}$, and then substituting the result into \eqref{eq:j1} gives the desired result. \end{proof}
\newtheorem{thm}{Theorem}[section] \begin{thm} \label{thm:perr} Assume $u$ is the solution of \eqref{pde} and it is sufficiently smooth. Let $\widetilde u_h = \sum_{\tau \in \mathcal{T}_h } \widetilde u_{\tau, h} \chi_\tau$, where $\widetilde u_{\tau, h} \in V^k_h(\tau)$ is the post-processed solution \eqref{eq:ppsol} and $\chi_\tau$ is the usual characteristic function for $\tau$. Then we have $$
| u - \widetilde u_h |_{H^1(\Omega)} \leq Ch^k |u|_{H^{k+1}(\Omega)}, $$ where $C$ is a constant independent of $h$. \end{thm}
\begin{proof} Noticing that $h_\tau \leq h$, this can be proved by using Lemma \ref{lem:locerr} and arithmetic-geometric mean inequality: \begin{equation*} \begin{aligned}
| u - \widetilde u_h |^2_{H^1(\Omega)} & = \sum_{\tau} | u - \widetilde u_h |^2_{H^1(\tau)} \\
& \leq C \sum_{\tau} \Big( | u - u_h |^2_{H^1{(\tau)}} + h^2 | u - u_h |^2_{H^2{(\tau)}} \Big) \\
& \leq C | u - u_h |^2_{H^1(\Omega)} + C h^2 | u - u_h |^2_{H^2(\Omega)} . \end{aligned} \end{equation*} By the property of CGFEM approximation (see Theorem 14.3.3 in \cite{brenner2008mathematical} for example), we have \begin{equation}
| u - u_h |_{H^1(\Omega)} \leq C h^k |u|_{H^{k+1}(\Omega)}, \qquad | u - u_h |_{H^2(\Omega)} \leq C h^{k-1} |u|_{H^{k+1}(\Omega)}. \end{equation} Substituting these inequalities back gives the desired result.
\end{proof}
\begin{comment} Lots of work has been done to investigate the closeness of the linear FVEM and linear CGFEM. It is shown in \cite{ewing2002accuracy} that linear FVEM is a small perturbation of linear CGFEM. An identity between linear FVEM and linear CGFEM was established in \cite{xu2009analysis}. However, for higher order FVEM and CGFEM, closeness are not investigated as satisfactory as in the linear case. Now, as a by-product of our post-processing technique, we present and prove that for higher order case, the local FVEM bilinear form is close to its corresponding local CGFEM bilinear form plus an extra term for sufficiently small mesh discretization size and this extra term vanishes in the linear FVEM case.
\begin{thm} \label{thm:fvem2fem} When $\kappa$ is sufficiently smooth, we have \begin{equation} \label{eq:localfvem2fem}
\Big| b_\tau(v, w) - a_\tau(v, w) - \int_\tau \kappa \Delta v (w - I_\tau w) \text{d} \boldsymbol{x} \Big| \leq C h_\tau | v |_{H^1(\tau)} | w |_{H^1(\tau)}, \end{equation} for $v, w \in V^k_h(\tau)$. \end{thm}
\begin{proof} Using definition and Divergence theorem, we have \begin{equation} b_\tau(v, w) = -\sum_{\xi\in v(\tau)} \int_{\partial C^\xi \cap \partial t_\xi} \kappa \nabla v \cdot \boldsymbol{n} I_\tau w \ \text{d} l = - \int_\tau \nabla \cdot ( \kappa \nabla v ) I_\tau w \ \text{d} \boldsymbol{x} + \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} I_\tau w \ \text{d} l. \end{equation} Integration by parts gives \begin{equation} a_\tau(v, w) = - \int_\tau \nabla \cdot ( \kappa \nabla v ) w \ \text{d} \boldsymbol{x} + \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} w \ \text{d} l. \end{equation} Taking the difference of the two bilinear form yields \begin{equation} \begin{aligned} b_\tau(v, w) & - a_\tau(v, w) = \int_\tau \nabla \cdot (\kappa \nabla v ) (w - I_\tau w) \ \text{d} \boldsymbol{x} + \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} ( I_\tau w - w ) \ \text{d} l \\ & = \int_\tau \nabla \kappa \cdot \nabla v (w - I_\tau w) \ \text{d} \boldsymbol{x} + \int_\tau \kappa \Delta v (w - I_\tau w) \ \text{d} \boldsymbol{x} + \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} ( I_\tau w - w ) \ \text{d} l, \\ \end{aligned} \end{equation} which in turn gives
\begin{equation} b_\tau(v, I_\tau w) - a_\tau(v, w) - \int_\tau\kappa \Delta v (w - I_\tau w) \text{d} \boldsymbol{x} = I_1 + I_2, \end{equation}
where
\begin{equation} I_1 = \int_\tau \nabla \kappa \cdot \nabla v (w - I_\tau w) \ \text{d} \boldsymbol{x} \hspace*{0.3cm} \text{and} \hspace*{0.3cm} I_2 = \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} ( I_\tau w - w ) \ \text{d} l. \end{equation}
By Cauchy-Schwarz inequality followed by application of Lemma \ref{lem:map}, $I_1$ is estimated as
\begin{equation}
|I_1| \le \| \nabla \kappa \|_{L^\infty(\tau)} \, | v |_{H^1(\tau)} \| w - I_\tau w \|_{L^2(\tau)}
\le C \| \nabla \kappa \|_{L^\infty(\tau)} h_\tau | v |_{H^1(\tau)} \, | w |_{H^1(\tau)}. \end{equation}
\begin{equation} \begin{aligned}
\Big| b_\tau(v, I_\tau w) & - a_\tau(v, w) - \int_\tau\kappa \Delta v (w - I_\tau w) \text{d} \boldsymbol{x} \Big| \\
& = \Big| \int_\tau \nabla \kappa \cdot \nabla v (w - I_\tau w) \ \text{d} \boldsymbol{x} + \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} ( I_\tau w - w ) \ \text{d} l \Big| \\
& \leq \Big| \int_\tau \nabla \kappa \cdot \nabla v (w - I_\tau w) \ \text{d} \boldsymbol{x} \Big| + \Big| \int_{\partial \tau} \kappa \nabla v \cdot \boldsymbol{n} ( I_\tau w - w ) \ \text{d} l \Big| \\
& \leq \| \nabla \kappa \|_{L^2(\tau)} | v |_{H^1(\tau)} \| w - I_\tau w \|_{L^2(\tau)} + C h_\tau | v |_{H^1(\tau)} | w |_{H^1(\tau)} \\
& \leq C h_\tau | v |_{H^1(\tau)} | w |_{H^1(\tau)}. \end{aligned} \end{equation}
Here, we used the same argument in the proof of Lemma \ref{lem:eb} and inverse inequality for approximation of the second integration, and used Lemma \ref{lem:map} for the approximation of $\| w - I_\tau w \|_{L^2(\tau)}$. This completes the proof. \end{proof}
\begin{remark} For linear FVEM, $\Delta v = 0$, \eqref{eq:localfvem2fem} tells us that for sufficiently small $h_\tau$, the local bilinear forms of FVEM and CGFEM are equivalent in the sense that the error is of higher order.
\end{remark}
\end{comment}
\section{Numerical Experiments}\label{sec:num}
In this section we present various numerical examples to illustrate the performance of the proposed post-processing technique for CGFEM using $V_h^k$, $k=1,2,3$. For the numerical examples, we consider mainly the local conservation property of the post-processed fluxes and the convergence behavior of the post-processed solutions. For these purposes, we consider the following test problems in the unit domain $[0, 1]\times[0, 1]$.
\textbf{Example 1.} Elliptic equation \eqref{pde} with $\kappa =1, u = (x-x^2) (y-y^2)$ with fully Dirichlet boundary condition $g = 0$. $f$ is the function derived from \eqref{pde}.
\textbf{Example 2.} Elliptic equation \eqref{pde} with $\kappa =e^{2x-y^2}, f = -e^{x}, u = e^{-x + y^2}$ with the fully Dirichlet boundary condition $g$ satisfying the true solution.
\textbf{Example 3.} Elliptic equation \eqref{pde} with $$ \kappa = \frac{1}{1-0.8\sin(6\pi x)} \cdot \frac{1}{1-0.8\sin(6\pi y)}, $$ $$ u = 1 - \frac{2\cos(6\pi x) + 15\pi x - 2}{15 \pi}, $$ and $f = 0$. We impose boundary conditions as Dirichlet $1$ at the left boundary and $0$ at the right boundary with homogeneous Neumann boundary conditions at the top and bottom boundaries.
For each test problem, we will present the numerical results of linear, quadratic, and cubic CGFEMs. Now we start with a study of local conservation property of the post-processed fluxes.
\subsection{Conservation Study} \label{sec:cns} To numerically illustrate the behavior of the post-processed fluxes, we run the examples by verifying that the post-processed fluxes satisfy the desired local conservation property \eqref{eq:cvconservation}. For this purpose, we define a local conservation error (LCE) as \begin{equation} \text{LCE}_z = \int_{\partial C^z} - \kappa \nabla \hat u_h \cdot \boldsymbol{n} \ \text{d} l - \int_{C^z} f \ \text{d} \boldsymbol{x}, \end{equation} where $\hat u_h = u_h$ for the CGFEM solution and $\hat u_h = \widetilde u_h$ for the post-processed solution. Naturally, $\text{LCE}_z=0$ means the local conservation property \eqref{eq:cvconservation} is satisfied, while $\text{LCE}_z \ne 0$ means the local conservation property \eqref{eq:cvconservation} is not satisfied on the control volume $C^z$.
Without post-processing, for instance, LCEs of $u_h$ solved by quadratic CGFEMs are shown by red plots in the left column for Example 1 and right column for Example 2 in Figure \ref{fig:ex12lce}, respectively. We see that these errors are nonzero, which means that the local conservation property \eqref{eq:cvconservation} is not satisfied. The control volume indices in the figures are arranged as follows: firstly indices from vertices of the mesh, secondly the indices of the degrees of freedom on edges of elements, and lastly (for the cubic case) indices of degrees of freedom inside the elements. Now with the post-processing, LCEs of $\widetilde u_h$ in the quadratic case are shown by dotted green plots in the left column for Example 1 and right column for Example 2 in Figure \ref{fig:ex12lce}, respectively. These errors are practically negligible, which is mainly attributed to the errors in the application of numerical integration and the machine precision. Theoretically, these errors should be zero as discussed in Section \ref{sec:loccons}. The LCEs for both $u_h$ and $\widetilde u_h$ for Example 3 are shown in Figure \ref{fig:ex3lce}.
\begin{figure}
\caption{ LCEs for $u_h$ (red plots) and for $\widetilde u_h$ (dotted green plots) for Example 1 (left column) and Example 2 (right column) using CGFEM with $V_h^2$.}
\label{fig:ex12lce}
\end{figure}
\begin{figure}
\caption{ LCEs for $u_h$ (red plots) and for $\widetilde u_h$ (dotted green plots) for Example 3 using $V_h^k$ with $k=1$ (top), $k=2$ (middle), and $k=3$ (bottom). }
\label{fig:ex3lce}
\end{figure}
\subsection{Convergence Study} \label{sec:cnv}
Now we show the numerical convergence rates for Examples 1, 2, and 3. We collect the $H^1$ semi-norm errors of the CGFEM solution, post-processed solution, and also the difference between these two solutions defined as $| u_h - \widetilde u_h |_{H^1(\Omega)}$. The results for Examples 1, 2, and 3 are shown in Figure \ref{fig:ex123h1err}. The $H^1$ semi-norm errors of $k^{\text{th}}$ order CGFEM solutions and post-processed solutions are of optimal convergence orders, which confirms the convergence analysis in Theorem \ref{thm:perr} in Section \ref{sec:ana}.
From Figure \ref{fig:ex123h1err}, we can also see that for quadratic and cubic CGFEM, $| u_h - \widetilde u_h |_{H^1(\Omega)}$ tends to be a good error estimator. In the linear case, $| u_h - \widetilde u_h |_{H^1(\Omega)}$ is of order 2, which is higher than the optimal error convergence order of the CGFEM solution.
\begin{figure}\label{fig:ex123h1err}
\end{figure}
\section{Conclusion}
In this work, we proposed a post-processing technique for any order CGFEM for elliptic problems. This technique builds a bridge between any order of finite volume element method (FVEM) and any order of CGFEM. FVEM has the advantage of its local conservation property but the disadvantages in analysis especially for higher order FVEMs, while CGFEM has its fully established analysis but it lacks the local conservation property. This technique is naturally proposed in this less than ideal situation to serve as a great tool if one would like to use CGFEM to solve PDEs while maintaining a local conservation property, for instance in two-phase flow simulations.
Since the problems that the post-processing technique requires to solve are localized, they are independent of each other. For linear CGFEM, the technique requires solving a 3-by-3 system for each element while for quadratic and cubic CGFEMs, the technique requires solving a 6-by-6 system and 10-by-10 system for each element, respectively. It thus can be easily, naturally, and efficiently implemented in a parallel computing environment.
As for future work, one can use this technique for other differential equations, such as advection diffusion equations, two-phase flow problems, and elasticity models; also one can apply this technique for other numerical methods, such as SUPG. One interesting direction we would like to work on is to develop a post-processing technique which requires solving only 3-by-3 systems for each element and for any order of CGFEMs.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Cancellability and Regularity of Operator Connections}
\author{Pattrawut Chansangiam} \ead{[email protected]}
\address{Department of Mathematics, Faculty of Science,
King Mongkut's Institute of Technology Ladkrabang,
Bangkok 10520, Thailand.}
\begin{abstract}
An operator connection is a binary operation assigned to each pair of positive operators satisfying monotonicity, continuity from above and the transformer inequality. In this paper, we introduce and characterize the concepts of cancellability and regularity of operator connections with respect to operator monotone functions, Borel measures and certain operator equations. In addition, we investigate the existence and the uniqueness of solutions for such operator equations.
\end{abstract}
\begin{keyword} operator connection \sep operator monotone function \sep operator mean \MSC[2010]47A63 \sep 47A64 \end{keyword}
\end{frontmatter}
\section{Introduction}
A general theory of connections and means for positive operators was given by Kubo and Ando \cite{Kubo-Ando}. Let $B(\mathcal{H})$ be the algebra of bounded linear operators on a Hilbert space $\mathcal{H}$. The set of positive operators on $\mathcal{H}$ is denoted by $B(\mathcal{H})^+$. Denote the spectrum of an operator $X$ by $\Sp(X)$. For Hermitian operators $A,B \in B(\mathcal{H})$, the partial order $A \leqslant B$ means that $B-A \in B(\mathcal{H})^+$. The notation $A>0$ suggests that $A$ is a strictly positive operator. A \emph{connection} is a binary operation $\,\sigma\,$ on $B(\mathcal{H})^+$ such that for all positive operators $A,B,C,D$: \begin{enumerate}
\item[(M1)] \emph{monotonicity}: $A \leqslant C, B \leqslant D \implies A \,\sigma\, B \leqslant C \,\sigma\, D$
\item[(M2)] \emph{transformer inequality}: $C(A \,\sigma\, B)C \leqslant (CAC) \,\sigma\, (CBC)$
\item[(M3)] \emph{continuity from above}: for $A_n,B_n \in B(\mathcal{H})^+$,
if $A_n \downarrow A$ and $B_n \downarrow B$,
then $A_n \,\sigma\, B_n \downarrow A \,\sigma\, B$.
Here, $A_n \downarrow A$ indicates that $A_n$ is a decreasing sequence
and $A_n$ converges strongly to $A$. \end{enumerate} Two trivial examples are the left-trivial mean $\omega_l : (A,B) \mapsto A$ and the right-trivial mean $\omega_r: (A,B) \mapsto B$. Typical examples of a connection are the sum $(A,B) \mapsto A+B$ and the parallel sum \begin{align*}
A \,:\,B = (A^{-1}+B^{-1})^{-1}, \quad A,B>0, \end{align*} the latter being introduced by Anderson and Duffin \cite{Anderson-Duffin}. From the transformer inequality, every connection is \emph{congruence invariant} in the sense that for each $A,B \geqslant 0$ and $C>0$ we have \begin{align*}
C(A \,\sigma\, B)C \:=\: (CAC) \,\sigma\, (CBC). \end{align*}
A \emph{mean} is a connection $\sigma$ with the normalization condition $I \,\sigma\, I = I$ or, equivalently, the fixed-point property $A \,\sigma\, A =A$ for all $A \geqslant 0$. The class of Kubo-Ando means covers many well-known operator means in practice, e.g.
\begin{itemize}
\item $\alpha$-weighted arithmetic means: $A \triangledown_{\alpha} B = (1-\alpha)A + \alpha B$
\item $\alpha$-weighted geometric means:
$A \#_{\alpha} B = A^{1/2}
({A}^{-1/2} B {A}^{-1/2})^{\alpha} {A}^{1/2}$
\item $\alpha$-weighted harmonic means: $A \,!_{\alpha}\,
B = [(1-\alpha)A^{-1} + \alpha B^{-1}]^{-1}$
\item logarithmic mean: $(A,B) \mapsto A^{1/2}f(A^{-1/2}BA^{-1/2})A^{1/2}$
where $f: \mathbb{R}^+ \to \mathbb{R}^+$, $f(x)=(x-1)/\log{x}$, $f(0) \equiv 0$ and $f(1) \equiv 1$.
Here, $\mathbb{R}^+=[0, \infty)$.
\end{itemize}
It is a fundamental fact that there are one-to-one correspondences between the following objects: \begin{enumerate}
\item[(1)] operator connections on $B(\mathcal{H})^+$
\item[(2)] operator monotone functions from $\mathbb{R}^+$ to $\mathbb{R}^+$
\item[(3)] finite (positive) Borel measures on $[0,1]$
\item[(4)] monotone (Riemannian) metrics on the smooth manifold of positive definite matrices. \end{enumerate}
Recall that a function $f: \mathbb{R}^+ \to \mathbb{R}^+$ is said to be \emph{operator monotone} if \begin{align*}
A \leqslant B \implies f(A) \leqslant f(B) \end{align*} for all positive operators $A,B \in B(\mathcal{H})$ and for all Hilbert spaces $\mathcal{H}$. This concept was introduced in \cite{Lowner}; see also \cite{Bhatia,Hiai,Hiai-Yanagi}. A remarkable fact is that (see \cite{Hansen-Pedersen}) a function $f: \mathbb{R}^+ \to \mathbb{R}^+$ is operator monotone if and only if it is \emph{operator concave}, i.e. \begin{align*}
f((1-\alpha)A + \alpha B) \:\geqslant\: (1-\alpha) f(A) + \alpha f(B), \quad \alpha \in (0,1) \end{align*} holds for all positive operators $A,B \in B(\mathcal{H})$ and for all Hilbert spaces $\mathcal{H}$.
A connection $\sigma$ on $B(\mathcal{H})^+$ can be characterized via operator monotone functions as follows:
\begin{thm}[\cite{Kubo-Ando}] \label{thm: Kubo-Ando f and sigma}
Given a connection $\sigma$, there is a unique operator monotone function $f: \mathbb{R}^+ \to \mathbb{R}^+$ satisfying
\begin{align*}
f(x)I = I \,\sigma\, (xI), \quad x \geqslant 0.
\end{align*}
Moreover, the map $\sigma \mapsto f$ is a bijection.
\end{thm}
We call $f$ the \emph{representing function} of $\sigma$. A connection also has a canonical characterization with respect to a Borel measure via a meaningful integral representation as follows.
\begin{thm}[\cite{Pattrawut}] \label{thm: 1-1 conn and measure} Given a finite Borel measure $\mu$ on $[0,1]$, the binary operation
\begin{align}
A \,\sigma\, B = \int_{[0,1]} A \,!_t\, B \,d \mu(t), \quad A,B \geqslant 0
\label{eq: int rep connection}
\end{align}
is a connection on $B(\mathcal{H})^+$.
Moreover, the map $\mu \mapsto \sigma$ is bijective,
in which case the representing function of $\sigma$ is given by
\begin{align}
f(x) = \int_{[0,1]} (1 \,!_t\, x) \,d \mu(t), \quad x \geqslant 0. \label{int rep of OMF}
\end{align} \end{thm} We call $\mu$ the \emph{associated measure} of $\sigma$.
A connection is a mean if and only if $f(1)=1$ or its associated measure is a probability measure. Hence every mean can be regarded as an average of weighted harmonic means.
From \eqref{eq: int rep connection} and \eqref{int rep of OMF}, $\sigma$ and $f$ are related by \begin{align}
f(A) \:=\: I \,\sigma\, A, \quad A \geqslant 0. \label{eq: f(A) = I sm A} \end{align} A connection $\sigma$ is said to be \emph{symmetric} if $A \,\sigma\, B = B \,\sigma\, A$ for all $A,B \geqslant 0$.
The notion of monotone metrics arises naturally in quantum mechanics. A metric on the differentiable manifold of $n$-by-$n$ positive definite matrices is a continuous family of positive definite sesquilinear forms assigned to each invertible density matrix in the manifold. A monotone metric is a metric with the contraction property under stochastic maps. It was shown in \cite{Petz} that there is a one-to-one correspondence between operator connections and monotone metrics. Moreover, symmetric metrics correspond to symmetric means.
In \cite{Hansen}, the author defined a symmetric metric to be \emph{nonregular} if $f(0)=0$ where $f$ is the associated operator monotone function. In \cite{Gibilisco}, $f$ is said to be \emph{nonregular} if $f(0) = 0$, otherwise $f$ is \emph{regular}. It turns out that the regularity of the associated operator monotone function guarantees the extendability of this metric to the complex projective space generated by the pure states (see \cite{Petz-Sudar}).
In the present paper, we introduce the concept of cancellability for operator connections in a natural way. Various characterizations of cancellability with respect to operator monotone functions, Borel measures and certain operator equations are provided. It is shown that a connection is cancellable if and only if it is not a scalar multiple of trivial means.
Applications of this concept go to certain operator equations involving operator means. We investigate the existence and the uniqueness of solutions of such equations. It is shown that such equations are always solvable if and only if $f$ is unbounded and $f(0)=0$ where $f$ is the associated operator monotone function. We also characterize the condition $f(0)=0$ for arbitrary connections without assuming symmetry. Such a connection is said to be nonregular.
\section{Cancellability of connections}
Since each connection is a binary operation, we can define the concept of cancellability as follows.
\begin{defn}
A connection $\sigma$ is said to be
\begin{itemize}
\item \emph{left cancellable} if for each $A>0, B \geqslant 0$ and $C \geqslant 0$,
\begin{align*}
A \,\sigma\, B = A \,\sigma\, C \implies B=C
\end{align*}
\item \emph{right cancellable} if for each $A>0, B \geqslant 0$ and $C \geqslant 0$,
\begin{align*}
B \,\sigma\, A = C \,\sigma\, A \implies B=C
\end{align*}
\item \emph{cancellable} if it is both left and right cancellable.
\end{itemize} \end{defn}
\begin{lem} \label{lem: nonconstant OM}
Every nonconstant operator monotone function from $\mathbb{R}^+$ to $\mathbb{R}^+$ is injective. \end{lem} \begin{proof}
Let $f: \mathbb{R}^+ \to \mathbb{R}^+$ be a nonconstant operator monotone function.
Suppose there exist $b>a \geqslant 0$ such that $f(a)=f(b)$.
Since $f$ is monotone increasing (in usual sense),
$f(x)=f(a)$ for all $a \leqslant x \leqslant b$ and
$f(y) \geqslant f(b)$ for all $y \geqslant b$.
Since $f$ is operator concave,
$f$ is concave in usual sense and hence $f(x)=f(b)$ for all $x \geqslant b$.
The case $a=0$ contradicts the fact that $f$ is nonconstant.
Consider the case $a>0$.
The monotonicity of $f$ yields $f(x) \leqslant f(a)$ for all $0 \leqslant x \leqslant a$.
The differentiability of $f$ implies that $f(x)=f(a)$ for all $x \geqslant 0$,
again a contradiction. \end{proof}
A similar result for this lemma under the restriction that $f(0)=0$ was obtained in \cite{Molnar}. The left-cancellability of connections is characterized as follows.
\begin{thm} \label{thm: cancallability 1}
Let $\sigma$ be a connection with representing function $f$ and associated measure $\mu$.
Then the following statements are equivalent:
\begin{enumerate}
\item[(1)] $\sigma$ is left cancellable ;
\item[(2)] for each $A \geqslant 0$ and $B \geqslant 0$, $I \sigma A = I \sigma B \implies A=B$ ;
\item[(3)] $\sigma$ is not a scalar multiple of the left-trivial mean ;
\item[(4)] $f$ is injective, i.e., $f$ is left cancellable in the sense that
\begin{align*}
f \circ g = f \circ h \:\implies\: g=h \quad ;
\end{align*}
\item[(5)] $f$ is a nonconstant function ;
\item[(6)] $\mu$ is not a scalar multiple of the Dirac measure $\delta_0$ at $0$.
\end{enumerate} \end{thm} \begin{proof}
Clearly, (1) $\Rightarrow$ (2) $\Rightarrow$ (3) and (4) $\Rightarrow$ (5).
For each $k \geqslant 0$, it is straightforward to show that
the representing function of the connection
\begin{align*}
k \omega_l \;:\; (A,B) \mapsto kA
\end{align*}
is the constant function $f \equiv k$
and its associated measure is given by $k\delta_0$.
Hence, we have the implications (3) $\Leftrightarrow$ (5) $\Leftrightarrow$ (6).
By Lemma \ref{lem: nonconstant OM}, we have (5) $\Rightarrow$ (4).
(4) $\Rightarrow$ (2): Assume that $f$ is injective.
Consider $A \geqslant 0$ and $B \geqslant 0$ such that
$I \sigma A = I \sigma B$. Then $f(A) = f(B)$ by \eqref{eq: f(A) = I sm A}.
Since $f^{-1} \circ f (x) =x$
for all $x \in \mathbb{R}^+$, we have $A=B$.
(2) $\Rightarrow$ (1): Let $A>0, B\geqslant 0$ and $C \geqslant 0$ be such that $A \,\sigma\, B = A \,\sigma\, C$.
By the congruence invariance of $\sigma$, we have
\begin{align*}
A^{\frac{1}{2}} (I \,\sigma\, A^{-\frac{1}{2}} B A^{-\frac{1}{2}}) A^{\frac{1}{2}}
\:=\: A^{\frac{1}{2}} (I \,\sigma\, A^{-\frac{1}{2}} C A^{-\frac{1}{2}}) A^{\frac{1}{2}}
\end{align*}
and thus
$I \,\sigma\, A^{-\frac{1}{2}} B A^{-\frac{1}{2}} = I \,\sigma\, A^{-\frac{1}{2}} C A^{-\frac{1}{2}} $.
Now, the assumption (2) implies
$$ A^{-\frac{1}{2}} B A^{-\frac{1}{2}} \:=\: A^{-\frac{1}{2}} C A^{-\frac{1}{2}}, $$ i.e., $B=C$. \end{proof}
Recall that the \emph{transpose} of a connection $\sigma$ is the connection \begin{align*}
(A,B) \;\mapsto B \,\sigma\, A. \end{align*} If $f$ is the representing function of $\sigma$, then the representing function of the transpose of $\sigma$ is given by the \emph{transpose} of $f$ (see \cite{Kubo-Ando}), defined by \begin{align*}
x \mapsto xf(1/x), \quad x>0. \end{align*} A connection is \emph{symmetric} if it coincides with its transpose.
\begin{thm}
Let $\sigma$ be a connection with representing function $f$ and associated measure $\mu$.
Then the following statements are equivalent:
\begin{enumerate}
\item[(1)] $\sigma$ is right cancellable ;
\item[(2)] for each $A \geqslant 0$ and $B \geqslant 0$, $A \sigma I = B \sigma I \implies A=B$ ;
\item[(3)] $\sigma$ is not a scalar multiple of the right-trivial mean ;
\item[(4)] the transpose of $f$ is injective ;
\item[(5)] $f$ is not a scalar multiple of the identity function $x \mapsto x$ ;
\item[(6)] $\mu$ is not a scalar multiple of the Dirac measure $\delta_1$ at $1$.
\end{enumerate} \end{thm} \begin{proof}
It is straightforward to see that, for each $k\geqslant 0$,
the representing function of the connection
\begin{align*}
k \omega_r \;:\; (A,B) \mapsto kB
\end{align*}
is the function $x \mapsto kx$ and its associated measure is given by
$k\delta_1$.
The proof is done by replacing $\sigma$ with its transpose in Theorem \ref{thm: cancallability 1}. \end{proof}
\begin{remk}
The injectivity of the transpose of $f$ does not imply the surjectivity of $f$.
To see that, take $f(x)=(1+x)/2$. Then the transpose of $f$ is $f$ itself. \end{remk}
The following results are characterizations of cancellability of connections.
\begin{cor}
Let $\sigma$ be a connection with representing function $f$ and associated measure $\mu$.
Then the following statements are equivalent:
\begin{enumerate}
\item[(1)] $\sigma$ is cancellable ;
\item[(2)] $\sigma$ is not a scalar multiple of the left/right-trivial mean ;
\item[(3)] $f$ and its transpose are injective ;
\item[(4)] $f$ is neither a constant function nor a scalar multiple of the identity function;
\item[(5)] $\mu$ is not a scalar multiple of $\delta_0$ or $\delta_1$.
\end{enumerate}
In particular, every nontrivial mean is cancellable. \end{cor}
\begin{remk}
The ``order cancellability'' does not hold for general connections,
even if we restrict them to the class of means.
For each $A,B>0$, it is not true that the condition $I \,\sigma\, A \leqslant I \,\sigma\, B$
or the condition $A \,\sigma\, I \leqslant B \,\sigma\, I$ implies $A \leqslant B$.
To see this, take $\sigma$ to be the geometric mean.
It is not true that $A^{1/2} \leqslant B^{1/2}$ implies $A \leqslant B$ in general. \end{remk}
\section{Applications to certain operator equations}
Cancellability of connections can be restated in terms of the uniqueness of certain operator equations as follows. A connection $\sigma$ is left cancellable if and only if \begin{quote}
for each given $A>0$ and $B \geqslant 0$, if the equation $A \,\sigma\, X =B$ has a solution, then it has a unique solution. \end{quote} The similar statement for right-cancellability holds. In this section, we investigate the existence and the uniqueness of the operator equation $A \,\sigma\, X =B$.
\begin{thm} \label{thm: operator eq 1}
Let $\sigma$ be a connection which is not a scalar multiple of the left-trivial mean.
Let $f$ be its representing function.
Given $A>0$ and $B \geqslant 0$, the operator equation
\begin{align*}
A \,\sigma\, X \:=\: B
\end{align*}
has a (positive) solution if and only if $\Sp (A^{-\frac{1}{2}} B A^{-\frac{1}{2}})
\subseteq \Range (f)$.
In fact, such a solution is unique and given by
\begin{align*}
X \:=\: A^{\frac{1}{2}} f^{-1}(A^{-\frac{1}{2}} B A^{-\frac{1}{2}}) A^{\frac{1}{2}}.
\end{align*} \end{thm} \begin{proof}
Suppose that there is a positive operator $X$ such that $A \,\sigma\, X =B$.
The congruence invariance of $\sigma$ yields
\begin{align*}
A^{\frac{1}{2}} (I \,\sigma\, A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) A^{\frac{1}{2}} \:=\: B.
\end{align*}
The property \eqref{eq: f(A) = I sm A} now implies
\begin{align*}
f(A^{-\frac{1}{2}} X A^{-\frac{1}{2}})
\:=\: I \,\sigma\, A^{-\frac{1}{2}} X A^{-\frac{1}{2}}\:=\: A^{-\frac{1}{2}} B A^{-\frac{1}{2}}.
\end{align*}
By spectral mapping theorem,
\begin{align*}
\Sp (A^{-\frac{1}{2}} B A^{-\frac{1}{2}} )
=\Sp\left(f(A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) \right)
= f\left(\Sp(A^{-\frac{1}{2}} X A^{-\frac{1}{2}})\right)
\subseteq \Range (f).
\end{align*}
Conversely, suppose that $\Sp (A^{-\frac{1}{2}} B A^{-\frac{1}{2}})
\subseteq \Range (f)$.
Since $\sigma \neq k \omega_l$ for all $k \geqslant 0$, we have that $f$ is nonconstant
by Theorem \ref{thm: cancallability 1}.
It follows that $f$ is injective by Lemma \ref{lem: nonconstant OM}.
The assumption yields the existence of the operator
$X \equiv A^{\frac{1}{2}} f^{-1}(A^{-\frac{1}{2}} B A^{-\frac{1}{2}}) A^{\frac{1}{2}}$.
We obtain from the property \eqref{eq: f(A) = I sm A} that
\begin{align*}
A \,\sigma\, X
\:&=\: A \,\sigma\, A^{\frac{1}{2}} f^{-1}(A^{-\frac{1}{2}} B A^{-\frac{1}{2}}) A^{\frac{1}{2}} \\
\:&=\: A^{\frac{1}{2}} \left[I \,\sigma\, f^{-1}(A^{-\frac{1}{2}} B A^{-\frac{1}{2}}) \right]
A^{\frac{1}{2}} \\
\:&=\: A^{\frac{1}{2}} f\left(f^{-1}(A^{-\frac{1}{2}} B A^{-\frac{1}{2}})\right)
A^{\frac{1}{2}} \\
\:&=\: B.
\end{align*}
The uniqueness of a solution follows from the left-cancellability of $\sigma$.
\end{proof}
Similarly, we have the following theorem.
\begin{thm}
Let $\sigma$ be a connection which is not a scalar multiple of the right-trivial mean.
Given $A>0$ and $B \geqslant 0$, the operator equation
\begin{align*}
X \,\sigma\, A \:=\: B
\end{align*}
has a (positive) solution if and only if $\Sp (A^{-1/2} B A^{-1/2}) \subseteq \Range (g)$,
where $g$ is the representing function of the transpose of $\sigma$.
In fact, such a solution is unique and given by
\begin{align*}
X \:=\: A^{1/2} g^{-1}(A^{-1/2}BA^{-1/2}) A^{1/2}.
\end{align*}
\end{thm}
\begin{thm} \label{cor: f is unbounded and f(0)=0}
Let $\sigma$ be a connection
with representing function $f$.
Then the following statements are equivalent:
\begin{enumerate}
\item[(1)] $f$ is unbounded and $f(0)=0$ ;
\item[(2)] $f$ is surjective, i.e., $f$ is right cancellable in the sense that
\begin{align*}
g \circ f = h \circ f \:\implies\: g=h \quad ;
\end{align*}
\item[(3)] the operator equation
\begin{align}
A \,\sigma\, X \:=\: B \label{eq: A sm X =B}
\end{align}
has a unique solution for any given $A>0$ and $B \geqslant 0$.
\end{enumerate}
Moreover, if (1) holds, then
the solution of \eqref{eq: A sm X =B} varies continuously in each given $A>0$
and $B \geqslant 0$, i.e. the map $(A,B) \mapsto X$ is separately continuous with respect to
the strong-operator topology. \end{thm} \begin{proof}
(1) $\Rightarrow$ (2): This follows directly from the intermediate value theorem.
(2) $\Rightarrow$ (3): It is immediate from Theorem \ref{thm: operator eq 1}.
(3) $\Rightarrow$ (1): Assume (3). The uniqueness of solution for the equation $A \,\sigma\, X =B$
implies the left-cancellability of $\sigma$.
By Theorem \ref{thm: cancallability 1}, $f$ is injective.
The assumption (3) implies the existence of a positive operator $X$ such that
\begin{align*}
f(X) \:=\: I \,\sigma\, X \:=\: 0.
\end{align*}
The spectral mapping theorem implies that $f(\ld)=0$ for all $\lambda\in \Sp(X)$.
Since $f$ is injective, we have $\Sp(X) =\{\ld\}$ for some $\lambda\in \mathbb{R}^+$.
Suppose that $\ld>0$. Then there is $\epsilon >0$ such that $X > \epsilon I >0$.
It follows from the monotonicity of $\sigma$ that
\begin{align*}
0 \:=\: I \,\sigma\, X \:\geqslant\: I \,\sigma\, \epsilon I \:=\: f(\epsilon)I,
\end{align*}
i.e. $f(\epsilon)=0$. Hence, $\epsilon = \ld$. Similarly, since $X > (\epsilon/2) I >0$, we have
$\epsilon = 2 \ld$, a contradiction. Thus, $\ld=0$ and $f(0)=0$.
Now, let $k>0$. The assumption (3) implies the existence of $X \geqslant 0$ such that
$I \,\sigma\, X =kI$. Since $f(X)=kI$, we have $f(\ld)=k$ for all $\lambda\in \Sp(X)$.
Since $\Sp(X)$ is nonempty, there is $\lambda\in \Sp(X)$ such that $f(\ld)=k$.
Therefore, $f$ is unbounded.
Assume that (1) holds. Then the map $(A,B) \mapsto X$ is well-defined.
Recall that if $A_n \in B(\mathcal{H})^+$ converges strongly to $A$,
then $\phi(A_n)$ converges strongly to $\phi(A)$
for any continuous function $\phi$.
It follows that the map
$$(A,B) \mapsto X = A^{\frac{1}{2}} f^{-1}(A^{-\frac{1}{2}} B A^{-\frac{1}{2}}) A^{\frac{1}{2}} $$
is separately continuous in each variable. \end{proof}
\begin{ex}
Consider the quasi-arithmetic power mean $\#_{p,\alpha}$ with exponent $p \in [-1,1]$ and weight $\alpha \in (0,1)$, defined by \begin{align*}
A \,\#_{p,\alpha}\, B \:=\: \left[(1-\alpha)A^p + \alpha B^p \right]^{1/p}. \end{align*} Its representing function of this mean is given by \begin{align*}
f_{p,\alpha}(x) \:=\: (1-\alpha+\alpha x^p)^{1/p}. \end{align*} The special cases $p=1$ and $p=-1$ are the $\alpha$-weighted arithmetic mean and the $\alpha$-weighted harmonic mean, respectively. The case $p=0$ is defined by continuity and, in fact, $\#_{0,\alpha} = \#_{\alpha}$ and $f_{0,\alpha}(x)=x^{\alpha}$. Given $A>0$ and $B \geqslant 0$, consider the operator equation
\begin{align}
A \,\#_{p,\alpha}\, X \:=\: B. \label{eq weighted GM}
\end{align}
\underline{The case $p=0$ :}
Since the range of $f_{0,\alpha}(x)=x^{\alpha}$ is $\mathbb{R}^+$,
the equation \eqref{eq weighted GM} always has a unique solution given by
\begin{align*}
X
\:=\: A^{1/2} (A^{-1/2}BA^{-1/2})^{1/\alpha} A^{1/2}
\:\equiv \: A \,\#_{1/\alpha}\, B.
\end{align*}
\underline{The case $0<p\leqslant 1$ :} The range of $f_{p,\alpha}$ is the interval $[(1-\alpha)^{1/p}, \infty)$. Hence, the equation \eqref{eq weighted GM} is solvable if and only if $\Sp(A^{-1/2} B A^{-1/2}) \subseteq [(1-\alpha)^{1/p}, \infty)$, i.e., $B \geqslant (1-\alpha)^{1/p} A$.
\underline{The case $-1 \leqslant p< 0$ :}
The range of $f_{p,\alpha}$ is the interval $[0, (1-\alpha)^{1/p})$. Hence, the equation \eqref{eq weighted GM} is solvable if and only if $\Sp(A^{-1/2} B A^{-1/2}) \subseteq [0, (1-\alpha)^{1/p})$, i.e., $B < (1-\alpha)^{1/p} A$.
For each $p \in [-1,0) \cup (0,1]$ and $\alpha \in (0,1)$, we have \begin{align*}
f_{p,\alpha}^{-1} (x) \:=\: \left(1-\frac{1}{\alpha} + \frac{1}{\alpha} x^p \right)^{1/p}. \end{align*} Hence, the solution of \eqref{eq weighted GM} is given by \begin{align*}
X \:=\: \left[(1-\frac{1}{\alpha})A^p + \frac{1}{\alpha} B^p \right]^{1/p}
\:\equiv\: A \;\#_{p, \frac{1}{\alpha}}\; B. \end{align*} \end{ex}
\begin{ex}
Let $\sigma$ be the logarithmic mean with representing function
\begin{align*}
f(x) \:=\: \frac{x-1}{\log x}, \quad x>0.
\end{align*}
Here, $f(0) \equiv 0$ by continuity. We see that $f$ is unbounded.
Thus, the operator equation $A \,\sigma\, X = B$ is solvable for all $A>0$ and $B \geqslant 0$. \end{ex}
\begin{ex}
Let $\eta$ be the dual of the logarithmic mean, i.e.,
\begin{align*}
\eta \,:\, (A,B) \mapsto \left(A^{-1} \, \sigma \, B^{-1}\right)^{-1}
\end{align*}
where $\sigma$ denotes the logarithmic mean.
The representing function of $\eta$ is given by
\begin{align*}
f(x) \:=\: \frac{x}{x-1} \log{x}, \quad x>0.
\end{align*}
We have that $f(0) \equiv 0$ by continuity and $f$ is unbounded.
Therefore, the operator equation $A \,\eta\, X = B$ is solvable for all $A>0$ and $B \geqslant 0$. \end{ex}
\section{Regularity of connections}
In this section, we consider the regularity of connections.
\begin{thm} \label{thm: regularity}
Let $\sigma$ be a connection with representing function $f$ and associated measure $\mu$.
Then the following statements are equivalent.
\begin{enumerate}
\item[(1)] $f(0)=0$ ;
\item[(2)] $\mu(\{0\}) = 0$ ;
\item[(3)] $I \,\sigma\, 0 = 0$ ;
\item[(4)] $A \,\sigma\, 0 =0$ for all $A \geqslant 0$ ;
\item[(5)] for each $A \geqslant 0$, $0 \in \Sp(A) \Longrightarrow 0 \in \Sp(I \,\sigma\, A)$ ;
\item[(6)] for each $A,X \geqslant 0$, $0 \in \Sp(A) \Longrightarrow 0 \in \Sp(X \,\sigma\, A)$.
\end{enumerate} \end{thm} \begin{proof}
From the integral representation \eqref{int rep of OMF}, we have
\begin{align}
f(x) \:=\: \mu(\{0\}) + \mu(\{1\})x + \int_{(0,1)} (1\,!_t\, x)\,d\mu(t), \quad x\geqslant 0,
\label{eq: int rep of OMR expand}
\end{align}
i.e. $f(0)=\mu(\{0\})$. From the property \eqref{eq: f(A) = I sm A}, we have
$I \,\sigma\, 0 =f(0)I$. Hence, (1)-(3) are equivalent.
It is clear that (4) $\Rightarrow$ (3) and (6) $\Rightarrow$ (5).
(3) $\Rightarrow$ (4): Assume that $I \,\sigma\, 0=0$. For any $A>0$,
we have by the congruence invariance that
\begin{align*}
A \,\sigma\, 0 \:=\: A^{\frac{1}{2}} (I \,\sigma\, 0) A^{\frac{1}{2}} \:=\: 0.
\end{align*}
For general $A \geqslant 0$,
we have $(A + \epsilon I) \,\sigma\, 0 =0$ for all $\epsilon>0$ by the previous claim
and hence $A \,\sigma\, 0 =0$ by the continuity from above.
(5) $\Rightarrow$ (1): We have $0 \in \Sp(I \,\sigma\, 0) = \Sp(f(0)I) = \{f(0)\}$, i.e. $f(0)=0$.
(1) $\Rightarrow$ (6): Assume $f(0)=0$. Consider $A \geqslant 0$ such that $0 \in \Sp(A)$,
i.e. $A$ is not invertible.
Assume first that $X>0$. Then
\begin{align*}
X \,\sigma\, A
\:=\: X^{\frac{1}{2}} (I \,\sigma\, X^{-\frac{1}{2}} A X^{-\frac{1}{2}}) X^{\frac{1}{2}}
\:=\: X^{\frac{1}{2}} f(X^{-\frac{1}{2}} A X^{-\frac{1}{2}}) X^{\frac{1}{2}}.
\end{align*}
Since $X^{-\frac{1}{2}} A X^{-\frac{1}{2}}$ is not invertible, we have
$0 \in \Sp(X^{-\frac{1}{2}} A X^{-\frac{1}{2}})$ and hence by spectral mapping theorem
\begin{align*}
0 \:=\: f(0)
\in f \left(\Sp(X^{-\frac{1}{2}} A X^{-\frac{1}{2}}) \right)
\:=\: \Sp\left(f(X^{-\frac{1}{2}} A X^{-\frac{1}{2}}) \right).
\end{align*}
This implies that $X \,\sigma\, A$ is not invertible.
Now, consider $X \geqslant 0$. The previous claim shows that $(X+I)\,\sigma\, A$ is not invertible.
Since $X \,\sigma\, A \leqslant (X+I) \,\sigma\, A$, we conclude that $X \,\sigma\, A$ is not invertible. \end{proof}
We say that a connection $\sigma$ is \emph{nonregular} if one (thus, all) of the conditions in Theorem \ref{thm: regularity} holds, otherwise $\sigma$ is \emph{regular}. Hence, regular connections correspond to regular operator monotone functions and regular monotone metrics.
\begin{remk} Recall from \cite{Pattrawut} that every connection $\sigma$ can be written as the sum of three connections.
The \emph{singularly discrete part} is a countable sum of weighted harmonic means with nonnegative coefficients. The \emph{absolutely continuous part} is a connection admitting an integral representation with respect to Lebesgue measure $m$ on $[0,1]$. The \emph{singularly continuous part} is associated to a continuous measure mutually singular to $m$. Hence, a connection whose associated measure has no singularly discrete part is nonregular. \end{remk}
\begin{remk}
Let $\sigma$ be a connection with representing function $f$ and associated measure $\mu$.
Let $g$ be the representing function of the transpose of $\sigma$.
From \eqref{eq: int rep of OMR expand},
\begin{align*}
g(0) = \lim_{x \to 0^+} xf(\dfrac{1}{x}) = \lim_{x \to \infty} \frac{f(x)}{x}
= \mu(\{1\}).
\end{align*}
Thus, the transpose of $\sigma$ is nonregular if and only if $\mu(\{1\})=0$. \end{remk}
\begin{thm} \label{thm: regularity of mean}
The following statements are equivalent for a mean $\sigma$.
\begin{enumerate}
\item[(1)] $\sigma$ is nonregular ;
\item[(2)] $I \,\sigma\, P = P$ for each projection $P$.
\end{enumerate} \end{thm} \begin{proof}
(1) $\Rightarrow$ (2): Assume that $f(0)=0$ and consider a projection $P$.
Since $f(1)=1$, we have $f(x)=x$ for all $x \in \{0,1\}\supseteq \Sp(P)$.
Thus $I \,\sigma\, P = f(P)=P$.
(2) $\Rightarrow$ (1): We have $0=I \,\sigma\, 0=f(0)I$, i.e. $f(0)=0$. \end{proof}
To prove the next result, recall the following lemma.
\begin{lem}[\cite{Kubo-Ando}]
If $f: \mathbb{R}^+ \to \mathbb{R}^+$ is an operator monotone function such that $f(1)=1$
and $f$ is neither the constant function $1$ nor the identity function, then
\begin{enumerate}
\item[1)] $0<x<1 \implies x<f(x)<1$
\item[2)] $1<x \implies 1<f(x)<x$.
\end{enumerate} \end{lem}
\begin{thm}
Let $\sigma$ be a nontrivial mean.
For each $A \geqslant 0$, if $I \,\sigma\, A =A$, then $A$ is a projection.
Hence, the following statements are equivalent: \begin{enumerate}
\item[(1)] $\sigma$ is nonregular ;
\item[(2)] for each $A \geqslant 0$, $A$ is a projection if and only if $I \,\sigma\, A = A$. \end{enumerate} \end{thm} \begin{proof}
Since $I \,\sigma\, A=A$, we have $f(A)=A$ by \eqref{eq: f(A) = I sm A}.
Hence $f(x)=x$ for all $x \in \Sp(A)$ by the injectivity of the continuous functional calculus.
Since $\sigma$ is a nontrivial mean, the previous lemma forces that $\Sp(A) \subseteq \{0,1\}$,
i.e. $A$ is a projection. \end{proof}
\begin{thm}
Under the condition that $\sigma$ is a left-cancellable connection
with representing function $f$, the following
statements are equivalent:
\begin{enumerate}
\item[(1)] $\sigma$ is nonregular.
\item[(2)] The equation $f(x)=0$ has a solution $x$.
\item[(3)] The equation $f(x)=0$ has a unique solution $x$.
\item[(4)] The only solution to $f(x)=0$ is $x=0$.
\item[(5)] For each $A>0$, the equation $A \,\sigma\, X =0$ has a solution $X$.
\item[(6)] For each $A>0$, the equation $A \,\sigma\, X =0$ has a unique solution $X$.
\item[(7)] For each $A>0$, the only solution to the equation $A \,\sigma\, X =0$ is $X=0$.
\item[(8)] The equation $I \,\sigma\, X =0$ has a solution $X$.
\item[(9)] The equation $I \,\sigma\, X =0$ has a unique solution $X$.
\item[(10)] The only solution to the equation $I \,\sigma\, X =0$ is $X=0$.
\end{enumerate}
Similar results for the case of right-cancellability hold. \end{thm} \begin{proof}
It is clear that (1) $\Rightarrow$ (2), (6) $\Rightarrow$ (5) and (10) $\Rightarrow$ (8).
Since $f$ is injective by Theorem \ref{thm: cancallability 1},
we have (2) $\Rightarrow$ (3) $\Rightarrow$ (4).
(4) $\Rightarrow$ (10): Let $X \geqslant 0$ be such that $I \,\sigma\, X =0$.
Then $f(X)=0$ by \eqref{eq: f(A) = I sm A}.
By spectral mapping theorem, $f(\Sp(X)) =\{0\}$. Hence, $\Sp(X)=\{0\}$, i.e. $X=0$.
(8) $\Rightarrow$ (9): Consider $X \geqslant 0$ such that $I \,\sigma\, X =0$. Then $f(X)=0$.
Since $f$ is injective with continuous inverse, we have $X=f^{-1}(0)$.
(9) $\Rightarrow$ (6): Use congruence invariance.
(5) $\Rightarrow$ (7): Let $A>0$ and consider $X \geqslant 0$ such that $A \,\sigma\, X=0$.
Then $A^{\frac{1}{2}} (I \,\sigma\, A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) A^{\frac{1}{2}} =0$,
i.e. $f(A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) = I \,\sigma\, A^{-\frac{1}{2}} X A^{-\frac{1}{2}} =0$.
Hence,
\begin{align*}
f\left( \Sp(A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) \right)
= \Sp\left( f(A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) \right)
=\{0\}.
\end{align*}
Suppose there exists $\lambda\in \Sp(A^{-\frac{1}{2}} X A^{-\frac{1}{2}})$
such that $\ld>0$.
Then $f(0)<f(\ld)=0$, a contradiction.
Hence, $\Sp(A^{-\frac{1}{2}} X A^{-\frac{1}{2}}) =\{0\}$, i.e.
$A^{-\frac{1}{2}} X A^{-\frac{1}{2}} =0$ or $X=0$.
(7) $\Rightarrow$ (1): We have $f(0)I = I \,\sigma\, 0 =0$, i.e. $f(0)=0$. \end{proof}
\end{document} |
\begin{document}
\title {Entropy and the Combinatorial Dimension} \author {S. Mendelson\footnote{Research School of Information Sciences and Engineering, The Australian National University, Canberra, ACT 0200, Australia, e-mail: [email protected]} \and
R. Vershynin\footnote{
Department of Mathematical Sciences,
University of Alberta,
Edmonton, Alberta T6G 2G1, Canada,
e-mail: [email protected]}} \date{}
\maketitle
\begin{abstract} We solve Talagrand's entropy problem: the $L_2$-covering numbers of every uniformly bounded class of functions are exponential in its shattering dimension. This extends Dudley's theorem on classes of $\{0,1\}$-valued functions, for which the shattering dimension is the Vapnik-Chervonenkis dimension.
In convex geometry, the solution means that the entropy of a convex body $K$ is controlled by the maximal dimension of a cube of a fixed side contained in the coordinate projections of $K$. This has a number of consequences, including the optimal Elton's Theorem and estimates on the uniform central limit theorem in the real valued case. \end{abstract}
\section{Introduction}
The fact that the covering numbers of a set are exponential in its linear algebraic dimension is fundamental and simple. Let $A$ be a class of functions bounded by $1$, defined on a set $\Omega$. If $A$ is a finite dimensional class then for every probability measure $\mu$ on $\Omega$, \begin{equation} \label{volumetric}
N(A, t, L_2(\mu)) \le \Big( \frac{3}{t} \Big)^{\dim(A)},
\ \ \ \ 0 < t < 1, \end{equation} where $\dim(A)$ is the linear algebraic dimension of $A$ and the left-hand side of \eqref{volumetric} is the covering number of $A$, the minimal number of functions needed to approximate any function in $A$ within an error $t$ in the $L_2(\mu)$-norm. This inequality follows by a simple volumetric argument (see e.g. \cite{Pi} Lemma 4.10) and is, in a sense, optimal: the dependence both on $t$ and on the dimension is sharp (except, perhaps, for the constant $3$).
The linear algebraic dimension of $A$ is often too large for \eqref{volumetric} to be useful, as it does not capture the ``size'' of $A$ in different directions but only determines in how many directions $A$ does not vanish. The aim of this paper is to replace the linear algebraic dimension by a combinatorial dimension originated from the classical works of Vapnik and Chervonenkis \cite{VC 71}, \cite{VC 81}.
We say that a subset $\s$ of $\Omega$ is $t$-shattered by a class $A$ if there exists a level function $h$ on $\s$ such that, given any subset $\s'$ of $\s$, one can find a function $f \in A$ with $f(x) \le h(x)$ if $x \in \s'$ and $f(x) \ge h(x) + t$ if $x \in \s \setminus \s'$. The {\em shattering dimension} of $A$, denoted by $\vc(A,t)$ after Vapnik and Chervonenkis, is the maximal cardinality of a set $t$-shattered by $A$. Clearly, the shattering dimension does not exceed the linear algebraic dimension, and is often much smaller. Our main result states that the linear algebraic dimension in \eqref{volumetric} can be essentially replaced by the shattering dimension.
\begin{theorem} \label{main}
Let $A$ be a class of functions bounded by $1$,
defined on a set $\Omega$.
Then for every probability measure $\mu$ on $\Omega$,
\begin{equation} \label{combinatorial}
N(A, t, L_2(\mu))
\le \Big( \frac{2}{t} \Big)^{K \cdot \vc(A,\, c t)},
\ \ \ \ 0 < t < 1,
\end{equation}
where $K$ and $c$ are positive absolute constants. \end{theorem} There also exists a (simple) reverse inequality complementing \eqref{combinatorial}: for some measure $\mu$, one has $N(A, t, L_2(\mu)) \ge 2^{K \cdot \vc(A,\, c t)}$, where $K$ and $c$ are some absolute constants, see e.g. \cite{T 02}.
The origins of Theorem \ref{main} are rooted in the work of Vapnik and Chervonenkis, who first understood that entropy estimates are essential in determining whether a class of functions obeys the uniform law of large numbers. The subsequent fundamental works of Koltchinskii \cite{K} and Gin\'{e} and Zinn \cite{GZ} enhanced the link between entropy estimates and uniform limit theorems (see also \cite{T 96}).
In 1978, R.~Dudley proved Theorem \ref{main} for classes of $\{0,1\}$-valued functions (\cite{Du}, see \cite{LT} 14.3). This yielded that a $\{0,1\}$-class obeys the uniform law of large numbers (and even the uniform Central Limit Theorem) if and only if its shattering dimension is finite for $0 < t < 1$. The main difficulty in proving such limit theorems for general classes has been the absence of a uniform entropy estimate of the nature of Theorem \ref{main} (\cite{T 88}, \cite{T 92}, \cite{T 96}, \cite{ABCH}, \cite{BL}, \cite{T 02}). However, proving Dudley's result for general classes is considerably more difficult due to the lack of the obvious property of the $\{0,1\}$-valued classes, namely that if a set $\s$ is $t$-shattered for some $0 < t < 1$ then it is automatically $1$-shattered.
In 1992, M.~Talagrand proved a weaker version of Theorem \ref{main}: under some mild regularity assumptions, $\log N(A, t, L_2(\mu)) \le K \cdot \vc(A, \,ct) \log^M (\frac{2}{t})$, where $K$, $c$ and $M$ are some absolute constants (\cite{T 92}, \cite{T 02}). Theorem \ref{main} is Talagrand's inequality with the best possible exponent $M = 1$ (and without regularity assumptions).
Talagrand's inequality was motivated not only by limit theorems in probability, but to a great extent by applications to convex geometry. A subset $B$ of $\R^n$ can be viewed as a class of real valued functions on $\{1, \ldots, n\}$. If $B$ is convex and, for simplicity, symmetric, then its shattering dimension $\vc(B,t)$ is the maximal cardinality of a subset $\s$ of $\{1, \ldots, n\}$ such that $P_\s (B) \supset [-\frac{t}{2}, \frac{t}{2}]^\s$, where $P_\s$ denotes the orthogonal projection in $\R^n$ onto $\R^\s$. In the general, non-symmetric, case we allow translations of the cube $[-\frac{t}{2}, \frac{t}{2}]^\s$ by a vector in $\R^\s$.
The following entropy bound for convex bodies is then an immediate consequence of Theorem \ref{main}. Recall that $N(B,D)$ is the covering number of $B$ by a set $D$ in $\R^n$, the minimal number of translates of $D$ needed to cover $B$.
\begin{corollary} \label{main corollary}
There exist positive absolute constants $K$ and $c$ such that
the following holds.
Let $B$ be a convex body contained in $[0,1]^n$,
and $D_n$ be the unit Euclidean ball in $\R^n$.
Then for $0 < t < 1$
$$
N(B, t \sqrt{n} D_n)
\le \Big( \frac{2}{t} \Big)^{K d},
$$
where $d$ is the maximal cardinality of a subset $\s$
of $\{1, \ldots, n\}$ such that
$$
P_\s (B) \supseteq h + [0, ct]^\s
\ \ \ \text{for some vector $h$ in $\R^\s$.}
$$ \end{corollary}
As M.~Talagrand notices in \cite{T 02}, Theorem \ref{main} is a ``concentration of pathology'' phenomenon. Assume one knows that a covering number of the class $A$ is large. All this means is that $A$ contains many well separated functions, but it tells nothing about the structure these functions form. The conclusion of \eqref{combinatorial} is that $A$ must shatter a large set $\s$, which detects a very accurate pattern: one can find functions in
$A$ oscillating on $\s$ in all possible $2^{|\s|}$ ways around fixed levels. The ``largeness'' of $A$, {\em a priori} diffused, is {\em a fortiori} concentrated on the set $\s$.
The same phenomenon is seen in Corollary \ref{main corollary}: given a convex body $B$ with large entropy, one can find an entire cube in a coordinate projection of $B$, the cube that certainly {\em witnesses} the entropy's largeness.
When dualized, Corollary \ref{main corollary} solves the problem of finding the best asymptotics in Elton's Theorem. Let $x_1, \ldots, x_n$ be vectors in the unit ball of a Banach space, and $\e_1, \ldots, \e_n$ be Rademacher random variables (independent Bernoulli random variables taking values $1$ and $-1$ with probability $1/2$). By the triangle inequality, the expectation
$\E \| \sum_{i=1}^n \e_i x_i\|$ is at most $n$, and assume that
$\E \| \sum_{i=1}^n \e_i x_i \| \ge \d n$ for some number $\d > 0$.
In 1983, J.~Elton \cite{E} proved an important result that there exists a subset $\s$ of $\{1, \ldots, n\}$ of size proportional to $n$ such that the set of vectors $(x_i)_{i \in \s}$ is equivalent to the $\ell_1$ unit-vector basis. Specifically, there exist numbers $s, t > 0$, depending only on $\d$, such that \begin{equation} \label{st}
|\s| \ge s^2 n \ \ \ \text{and} \ \ \ \Big\| \sum_{i \in \s} a_i x_i \Big\|
\ge t \sum_{i \in \s} |a_i| \ \ \text{for all real numbers $(a_i)$}. \end{equation} Several steps have been made towards finding the best possible $s$ and $t$ in Elton's Theorem. A trivial upper bound is $s, t \le \d$ which follows from the example of identical vectors and by shrinking the usual $\ell_1$ unit-vector basis. As for the lower bounds, J.~Elton proved \eqref{st} with $s \sim \d / \log(1/\d)$ and $t \sim \d^3$. A.~Pajor \cite{Pa} removed the logarithmic factor from $s$. M.~Talagrand \cite{T 92}, using his inequality discussed above, improved $t$ to $\d / \log^M (1/\d)$. In the present paper, we use Corollary \ref{main corollary} to solve this problem by proving the optimal asymptotics: $s, t \sim \d$.
\begin{theorem} \label{intro elton}
Let $x_1, \ldots, x_n$ be vectors in the unit
ball of a Banach space, satisfying
$$
\E \Big\| \sum_{i=1}^n \e_i x_i \Big\| \ge \d n
\ \ \ \text{for some number $\d > 0$}.
$$
Then there exists a subset $\s \subset \{ 1, \ldots, n \}$
of cardinality $|\s| \ge c \d^2 n$ such that
$$
\Big\| \sum_{i \in \s} a_i x_i \Big\|
\ge c \d \sum_{i \in \s} |a_i|
\ \ \text{for all real numbers $(a_i)$},
$$
where $c$ is a positive absolute constant. \end{theorem} Furthermore, there is an interplay between the size of $\s$ and the isomorphism constant -- they cannot attain their worst possible values together. Namely, we prove that $s$ and $t$ in \eqref{st} satisfy in addition to $s,t \gtrsim \d$ also the lower bound $s \cdot t \log^{1.6}(2/t) \gtrsim \d$, which, as an easy example shows, is optimal for all $\d$ within the logarithmic factor. The power 1.6 can be replaced by any number greater than 1.5. This estimate improves one of the main results of the paper \cite{T 92} where this phenomenon in Elton's Theorem was discovered and proved with a constant (unspecified) power of logarithm.
The paper is organized as follows. In the remaining part of the introduction we sketch the proof of Theorem \ref{main}; the complete proof will occupy Section \ref{s:proof}. Section \ref{s:convexity} is devoted to applications to Elton's Theorem and to empirical processes.
Here is a sketch of the proof of Theorem \ref{main}. Starting with a set $A$ which is separated with respect to the $L_2(\mu)$-norm, it is possible to find a coordinate $\omega \in \Omega$ (selected randomly) on which $A$ is diffused, i.e. the values $\{f(\omega),
\; f \in A\}$ are spread in the interval $[-1,1]$. Then there exist two nontrivial subsets $A_1$ and $A_2$ of $A$ with their set of values $\{f(\omega), \; f \in A_1\}$ and $\{f(\omega), \; f \in A_2\}$ well separated from each other on the line. Continuing this process of separation for $A_1$ and $A_2$, etc., one can construct a dyadic tree of subsets of $A$, called a separating tree, with at least $|A|^{1/2}$ leaves. The ``largeness'' of the class $A$ is thus captured by its separating tree.
The next step stems from a beautiful idea in \cite{ABCH}. First, there is no loss of generality in discretizing the class: one can assume that $\Omega$ is finite (say $|\Omega|=n$) and that the functions in $A$ take values in $\frac{t}{6}\Z \cap [-1,1]$. Then, instead of producing a large set $\s$ shattered by $A$ with a certain level function $h$, one can count the number of different pairs $(\s,h)$ for which $\s$ is shattered by $A$ with the level function $h$. If this number exceeds $\sum_{k=0}^d
\binom{n}{k}(\frac{12}{t})^k$ then there must exist a set $\s$ of size $|\s|>d$ shattered by $A$ (because there are $\binom{n}{k}$ possible sets $\s$ of cardinality $k$, and for such a set there are at most $(\frac{12}{t})^k$ possible level functions).
The only thing remaining is to bound below the number of pairs $(\s,h)$ for which $\s$ is shattered by $A$ with a level function
$h$. One can show that this number is bounded below by the number of the leaves in the separating tree of $A$, which is $|A|^{1/2}$. This implies that $|A|^{1/2} \leq\sum_{k=0}^d \binom{n}{k}(\frac{12}{t})^k \sim (\frac{n}{td})^d$, where $d=\vc(A,ct)$. The ratio $\frac{n}{d}$ can be eliminated from this estimate by a probabilistic extraction principle which reduces the cardinality of $\Omega$.
\noindent ACKNOWLEDGEMENTS
\noindent The first author was supported by an Australian Research Council Discovery grant. The second author thanks Nicole Tomczak-Jaegermann for her constant support. He also acknowledges support from the Pacific Institute of Mathematical Sciences, and thanks the Department of Mathematical Sciences of the University of Alberta for its hospitality. Finally, we would like to thank the referee for his valuable comments and suggestions.
\section{The Proof of Theorem \ref{main}} \label{s:proof}
For $t > 0$, a pair of functions $f$ and $g$ on $\Omega$ is {\em
$t$-separated in $L_2(\mu)$} if $\|f - g\|_{L_2(\mu)} > t$. A set of functions is called $t$-separated if every pair of distinct points in the set is $t$-separated. Let $N_{\rm sep} (A, t, L_2(\mu))$ denote the maximal cardinality of a $t$-separated subset of $A$. It is standard and easily seen that \begin{equation*}
N (A, t, L_2(\mu))
\le N_{\rm sep} (A, t, L_2(\mu))
\le N (A, \frac{t}{2}, L_2(\mu)). \end{equation*} This inequality shows that in the proof of Theorem \ref{main} we may assume that $A$ is $t$-separated in the $L_2(\mu)$ norm, and replace its covering number by its cardinality.
We will need two probabilistic results, the first of which is straightforward.
\begin{lemma} \label{l:xx'}
Let $X$ be a random variable
and $X'$ be an independent copy of $X$.
Then
$$
\E |X - X'|^2 = 2 \E |X - \E X|^2 = 2 \inf_a \E |X - a|^2.
$$ \end{lemma}
The next lemma is a small deviation principle. Denote by $\s(X)^2
= \E |X - \E X|^2$ the variance of the random variable $X$.
\begin{lemma} \label{l:small deviation}
Let $X$ be a random variable with nonzero variance.
Then there exist numbers $a \in \R$ and
$0 < \b \le \frac{1}{2}$, so that letting
\begin{align*}
p_1 &= \P \{ X > a + {\textstyle \frac{1}{6}} \s(X) \}
\ \ \ \text{and} \\
p_2 &= \P \{ X < a - {\textstyle \frac{1}{6}} \s(X) \},
\end{align*}
one has either $p_1 \ge 1-\b$ and $p_2 \ge \frac{\b}{2}$,
or $p_2 \ge 1-\b$ and $p_1 \ge \frac{\b}{2}$. \end{lemma}
\proof Recall that a median of $X$ is a number $M_X$ such that $\P \{ X \ge M_X \} \ge 1/2$ and $\P \{ X \le M_X \} \ge 1/2$; without loss of generality we may assume that $M_X = 0$. Therefore $\P \{ X > 0 \} = 1 - \P \{ X \le 0 \} \le 1/2$ and similarly $\P \{ X < 0 \} \le 1/2$.
By Lemma \ref{l:xx'}, \begin{align} \label{integrals} \s(X)^2
&\le \E |X|^2
= \int_0^\infty \P \{ |X| > \l \} \; d\l^2 \nonumber \\
&= \int_0^\infty \P \{ X > \l \} \; d\l^2
+ \int_0^\infty \P \{ X < -\l \} \; d\l^2 \end{align} where $d\lambda^2 = 2\lambda \;d\lambda$.
Assume that the conclusion of the lemma fails, and let $c$ be any number satisfying $\frac{1}{3} < c < \frac{1}{\sqrt{8}}$. Divide $\R_+$ into intervals $I_k$ of length $c \s(X)$ by setting $$ I_k = \Big( c \s(X) k, \; c \s(X) (k + 1) \Big], \ \ \ k = 0, 1, 2, \ldots $$ and let $\b_0, \b_1, \b_2, \ldots$ be the non-negative numbers defined by $$ \P \{ X > 0 \} = \b_0 \le 1/2, \ \ \ \ \ \P \{X \in I_k \} = \b_k - \b_{k+1}, \ \ \ k = 0, 1, 2, \ldots $$ We claim that \begin{equation} \label{bk}
\text{for all $k \ge 0$,} \ \ \
\b_{k+1} \le \frac{1}{2} \b_k. \end{equation} Indeed, assume that $\b_{k+1} > \frac{1}{2} \b_k$ for some $k$ and consider the intervals $J_1 = \left(-\infty, c \s(X) k \right]$ and $J_2 = \left(c \s(X) (k+1), \infty \right)$. Then $J_1 = (-\infty, 0] \cup ( \bigcup_{0 \le l \le k-1} I_l)$, so $$ \P \{ X \in J_1 \} = (1 - \b_0) + \sum_{0 \le l \le k-1} (\b_l - \b_{l+1}) = 1- \b_k. $$ Similarly, $J_2 = \bigcup_{l \ge k+1} I_l$ and thus $$ \P \{ X \in J_2 \} = \sum_{l \ge k+1} (\b_l - \b_{l+1}) = \b_{k+1} > \frac{1}{2} \b_k. $$ Moreover, since the sequence $(\b_k)$ is non-increasing by its definition, then $\b_k \ge \b_{k+1} > \frac{1}{2} \b_k \ge 0$ and $\b_k \le \b_0 \le \frac{1}{2}$. Then the conclusion of the lemma would hold with $a$ being the middle point between the intervals $J_1$ and $J_2$ and with $\b = \b_k$, which contradicts the assumption that the conclusion of the lemma fails. This proves \eqref{bk}.
Now, one can apply \eqref{bk} to estimate the first integral in \eqref{integrals}. Note that whenever $\l \in I_k$, $$ \P \{ X > \l \} \le \P \{ X > c \s(X) k \} = \P \Big( \bigcup_{l \ge k} I_l \Big) = \b_k. $$ Then \begin{align} \int_0^\infty \P \{ X > \l \} \; d\l^2
&\le \sum_{k \ge 0} \int_{I_k} \b_k \cdot 2 \l \;d\l \nonumber\\
&\le \sum_{k \ge 0} \b_k \cdot
2 c \s(X) (k+1) \;{\rm length}(I_k). \label{series} \end{align} Applying \eqref{bk} inductively, it is evident that $\b_k \le (\frac{1}{2})^k \b_0 \le \frac{1}{2^{k+1}}$, and since ${\rm length }(I_k) = c \s(X)$, \eqref{series} is bounded by $$ 2 c^2 \s(X)^2 \sum_{k \ge 0} \frac{k+1}{2^{k+1}} = 4 c^2 \s(X)^2 < \frac{1}{2} \s(X)^2. $$
By an identical argument one can show that the second integral in \eqref{integrals} is also bounded by $\frac{1}{2} \s(X)^2$. Therefore $$ \s(X)^2 < \frac{1}{2} \s(X)^2 + \frac{1}{2} \s(X)^2 = \s(X)^2, $$ and this contradiction completes the proof. \endproof
\subsection*{Constructing a separating tree}
Let $A$ be a finite class of functions on a probability space
$(\Omega, \mu)$, which is $t$-separated in $L_2 (\mu)$. Throughout the proof we will assume that $|A| > 1$. One can think of the class $A$ itself as a (finite) probability space with the uniform measure on it, that is, each element $x$ in $A$ is assigned probability $\frac{1}{|A|}$.
\begin{lemma} \label{separationlemma}
Let $A$ be a $t$-separated subset of $L_2(\mu)$. Then, there
exist a coordinate $i$ in $\Omega$
and numbers $a \in \R$ and $0<\beta \leq 1/2$, so that setting
\begin{align*}
N_1 &= | \{ x \in A : \; x(i) > a + {\textstyle \frac{1}{12}} t \}
|
\ \ \ \text{and} \\
N_2 &= | \{ x \in A : \; x(i) < a - {\textstyle \frac{1}{12}} t \}
|,
\end{align*}
one has either $N_1 \ge (1 - \b) |A|$
and $N_2 \ge \frac{\b}{2} |A|$, or vice versa. \end{lemma}
\begin{proof} Let $x, x'$ be random points in $A$ selected independently according to the uniform (counting) measure on $A$. By Lemma \ref{l:xx'}, \begin{align} \label{exys}
\E \|x - x'\|_{L_2(\mu)}^2
&= \E \int_\Omega |x(i) - x'(i)|^2 \; d\mu(i)
= \int_\Omega \E |x(i) - x'(i)|^2 \; d\mu(i) \nonumber \\
&= 2 \int_\Omega \E |x(i) - \E x(i)|^2 \; d\mu(i) \\ \nonumber
&= 2 \int_\Omega \s(x(i))^2 \; d\mu(i) \end{align} where $\s(x(i))^2$ is the variance of the random variable $x(i)$ with respect to the uniform measure on $A$.
On the other hand, with probability $1 - \frac{1}{|A|}$ we have $x
\ne x'$ and, whenever this event occurs, the separation assumption on $A$ implies that $\|x - x'\|_{L_2(\mu)} \ge t$. Therefore $$
\E \|x - x'\|^2_{L_2(\mu)} \ge \Big( 1 - \frac{1}{|A|} \Big) t^2 \ge \frac{t^2}{2} $$
provided that $|A| > 1$.
Together with \eqref{exys} this proves the existence of a coordinate $i \in \Omega$, on which \begin{equation} \label{sxi}
\s(x(i)) \ge \frac{t}{2}, \end{equation} and the claim follows from Lemma \ref{l:small deviation} applied to the random variable $x(i)$. \end{proof}
This lemma should be interpreted as a separation lemma for the set $A$. It means that one can always find two nontrivial subsets of $A$ and a coordinate in $\Omega$, on which the two subsets are separated with a ``gap" proportional to $t$.
Based on Lemma \ref{separationlemma}, one can construct a large separating tree in $A$. Recall that a {\em tree of subsets} of a set $A$ is a finite collection $T$ of subsets of $A$ such that, for every pair $B, D \in T$ either $B$ and $D$ are disjoint or one of them contains the other. We call $D$ a {\em son} of $B$ if $D$ is a maximal (with respect to inclusion) proper subset of $B$ that belongs to $T$. An element of $T$ with no sons is called a {\em leaf}.
\begin{definition}
Let $A$ be a class of functions on $\Omega$ and $t > 0$.
A {\em $t$-separating tree $T$ of $A$} is a tree of subsets of $A$
such that every element $B \in T$ which is not a leaf has
exactly two sons $B_+$ and $B_{-}$ and, for some
coordinate $i \in \Omega$,
$$
f(i) > g(i) + t
\ \ \ \text{for all $f \in B_+$, $g \in B_-$.}
$$ \end{definition}
\begin{proposition} \label{thm:separatingtree} Let $A$ be a finite class of functions on a probability space $(\Omega,\mu)$. If $A$ is $t$-separated with respect to the
$L_2(\mu)$ norm, then there exists a $\frac{1}{6} t$-separating tree of $A$ with at least $|A|^{1/2}$ leaves. \end{proposition}
\begin{proof} By Lemma \ref{separationlemma}, any finite class $A$ which is $t$-separated with respect to the $L_2(\mu)$ norm has two subsets $A_+$ and $A_{-}$ and a coordinate $i \in \Omega$ for which $f(i) > g(i) + \frac{1}{6} t$ for every $f \in A_+$ and $g \in A_{-}$. Moreover, there exists some number $0<\beta \leq 1/2$ such that \begin{equation*}
|A_+|\geq (1-\beta)|A| \ \ \ \text{and} \ \ \ |A_{-}| \geq \frac{\beta}{2}|A|, \ \ \ \text{or vice versa}. \end{equation*} Thus, $A_+$ and $A_{-}$ are sons of $A$ which are both large and well separated on the coordinate $i$.
The conclusion of the proposition will now follow by induction on the cardinality of $A$. The proposition clearly holds for $|A|=2$. Assume it holds for every $t$-separated class of cardinality bounded by $N$, and let $A$ be a $t$-separated class of cardinality $N+1$. Let $A_+$ and $A_{-}$ be the sons of $A$ as above; since $\b > 0$, we have $|A_+|,|A_{-}| \leq N$. Moreover, if $A_+$ has a $\frac{1}{6} t$-separating tree with $N_+$ leaves and $A_{-}$ has a $\frac{1}{6} t$-separating tree with $N_{-}$ leaves then, by joining these trees, $A$ has a $\frac{1}{6}
t$-separating tree with $N_+ + N_{-}$ leaves, the number bounded below by $|A_+|^{1/2}+|A_-|^{1/2}$ by the induction hypothesis. Since $\b \le 1/2$, \begin{align*}
|A_+|^{\frac{1}{2}}+|A_{-}|^{\frac{1}{2}}
&\geq \bigl((1-\beta)|A|\bigr)^{\frac{1}{2}}
+\bigl(\frac{\beta}{2}|A|\bigr)^{\frac{1}{2}}\\
&= \Bigl[(1-\beta)^{\frac{1}{2}}
+\bigl(\frac{\beta}{2}\bigr)^{\frac{1}{2}}\Bigr]
|A|^{\frac{1}{2}}
\geq |A|^{\frac{1}{2}} \end{align*} as claimed. \end{proof}
The exponent $1/2$ has no special meaning in Proposition \ref{thm:separatingtree}. It can be improved to any number smaller than $1$ at the cost of reducing the constant $\frac{1}{6}$.
\subsection*{Counting shattered sets}
As explained in the introduction, our aim is to construct a large set shattered by a given class. We will first try to do this for classes of integer-valued functions.
Let $A$ be a class of integer-valued functions on a set $\Omega$. We say that a couple $(\s, h)$ is a {\em center} if $\s$ is a finite subset of $\Omega$ and $h$ is an integer-valued function on $\s$. We call the cardinality of $\s$ the dimension of the center. For convenience, we introduce (the only) $0$-dimensional center $(\emptyset, \emptyset)$, which is the {\em trivial center}.
\begin{definition} \label{shatteredcenter} The {\em set $A$ shatters a center $(\s, h)$} if the following holds: \begin{itemize}
\item{either $(\s, h)$ is trivial and $A$ is nonempty,}
\item{or, otherwise, for every choice of signs
$\theta \in \{-1,1\}^\s$
there exists a function $f \in A$ such that for $i \in \s$
\begin{equation} \label{discrete shatter}
\begin{cases}
f(i) > h(i) & \text{when $\theta(i) = 1$},\\
f(i) < h(i) & \text{when $\theta(i) = -1$}.
\end{cases}
\end{equation}
} \end{itemize} \end{definition} It is crucial that both inequalities in \eqref{discrete shatter} are strict: they ensure that whenever a $d$-dimensional center is shattered by $A$, one has $\vc(A, 2) \ge d$. In fact, it is evident that $\vc(A,2)$ is the maximal dimension of a center shattered by $A$.
\begin{proposition} \label{centers vs leaves}
The number of centers shattered by $A$ is
at least the number of leaves in any $1$-separating tree of $A$. \end{proposition}
\proof Given a class $B$ of integer-valued functions, denote by $s(B)$ the number of centers shattered by $B$. It is enough to prove that if $B_+$ and $B_-$ are the sons of an element $B$ of a $1$-separating tree in $A$ then \begin{equation} \label{HHH}
s(B) \ge s(B_+) + s(B_-). \end{equation} By the definition of the $1$-separating tree, there is a coordinate $i_0 \in \Omega$, such that $f(i_0) > g(i_0) + 1$ for all $f \in B_+$ and $g \in B_-$. Since the functions are integer-valued, there exists an integer $t$ such that $$ f(i_0) > t \ \ \text{for $f \in B_+$} \ \ \ \text{and} \ \ \ g(i_0) < t \ \ \text{for $g \in B_-$}. $$
If a center $(\s, h)$ is shattered either by $B_+$ or by $B_-$, it is also shattered by $B$. Next, assume that $(\s, h)$ is shattered by both $B_+$ and $B_-$. Note that in this case $i_0 \not\in \s$. Indeed, if the converse holds then $\s$ contains $i_0$ and hence is nonempty. Thus the center $(\s, h)$ is nontrivial and there exist $f \in B_+$ and $g \in B_-$ such that $t < f(i_0) < h(i_0)$ (by \eqref{discrete shatter} with $\theta(i_0) = -1$) and $t > g(i_0) > h(i_0)$ (by \eqref{discrete shatter} with $\theta(i_0) = 1$), which is impossible. Consider the center $(\s', h') = (\s \cup \{i_0\}, h \oplus t)$, where $h \oplus t$ is the extension of the function $h$ onto the set $\s \cup \{i_0\}$ defined by $(h \oplus t) (i_0) = t$.
Observe that $(\s', h')$ is shattered by $B$. Indeed, since $B_+$ shatters $(\s, h)$, then for every $\theta \in \{-1,1\}^\s \times \{1\}^{\{i_0\}}$ there exists a function $f \in B_+$ such that \eqref{discrete shatter} holds for $i \in \s$. Also, since $f \in B_+$, then automatically $f(i_0) > t = h'(i_0)$. Similarly, for every $\theta \in \{-1,1\}^\s \times \{-1\}^{\{i_0\}}$, there exists a function $f \in B_-$ such that \eqref{discrete shatter} holds for $i \in \s$ and automatically $f(i_0) < t = h'(i_0)$.
Clearly, $(\s', h')$ is shattered by neither $B_+$ nor by $B_-$, because $f(i_0) > t = h'(i_0)$ for all $f \in B_+$, so \eqref{discrete shatter} fails if $\theta(i_0) = -1$; a similar argument holds for $B_-$.
Summarizing, $(\s, h) \to (\s', h')$ is an injective mapping from the set of centers shattered by both $B_+$ and $B_-$ into the set of centers shattered by $B$ but not by $B_+$ or $B_-$, which proves our claim. \endproof
Combining Propositions \ref{thm:separatingtree} and \ref{centers vs leaves}, one bounds from below the number of shattered centers.
\begin{corollary} \label{c: lowerbound}
Let $A$ be a finite class of integer-valued functions on
a probability space $(\Omega,\mu)$.
If $A$ is $6$-separated with respect to the $L_2(\mu)$ norm
then it shatters at least $|A|^{1/2}$ centers. \end{corollary}
To show that there exists a large dimensional center shattered by $A$, one must assume that the class $A$ is bounded in some sense, otherwise one could have infinitely many low dimensional centers shattered by the class. A natural assumption is the uniform boundedness of $A$, under which we conclude a preliminary version of Theorem \ref{main}.
\begin{proposition} \label{p: upperbound}
Let $(\Omega,\mu)$ be a probability space,
where $\Omega$ is a finite set of cardinality $n$.
Assume that $A$ is a class of functions on
$\Omega$ into $\{0, 1, \ldots, p \}$,
which is $6$-separated in $L_2(\mu)$.
Set $d$ to be the maximal dimension of a center
shattered by $A$.
Then
\begin{equation} \label{A discrete}
|A| \le \Big( \frac{p n}{d} \Big)^{C d},
\end{equation}
where $C$ is an absolute constant.
In particular, the same assertion holds for $d=\vc(A,2)$. \end{proposition}
\proof By Corollary \ref{c: lowerbound}, $A$ shatters at least
$|A|^{1/2}$ centers. On the other hand, the total number of centers whose dimension is at most $d$ that a class of $\{0, 1,
\ldots, p \}$-valued functions on $\Omega$ can shatter is bounded by $\sum_{k=0}^d \binom{n}{k} p^k$. Indeed, for every $k$ there exist at most $\binom{n}{k}$ subsets $\s \subset \Omega$ of cardinality $k$ and, for each $\s$ with $|\s| = k$ there are at most $p^k$ level functions $h$ for which the center $(\s, h)$ can be shattered by such a class. Therefore $|A|^{1/2} \le \sum_{k=0}^d \binom{n}{k} p^k$ (otherwise there would exist a center of dimension larger than $d$ shattered by $A$, contradicting the maximality of $d$). The proof is completed by approximating the binomial coefficients using Stirling's formula. \endproof
Actually, the ratio $n/d$ can be eliminated from \eqref{A discrete} (perhaps at the cost of increasing the separation parameter $6$). To this end, one needs to reduce the size of $\Omega$ without changing the assumption that the class is ``well separated". This is achieved by the following probabilistic extraction principle.
\begin{lemma} \label{extraction}
There is a positive absolute constant $c$ such that the
following holds.
Let $\Omega$ be a finite set with the uniform probability measure
$\mu$ on it.
Let $A$ be a class of functions bounded by $1$, defined
on $\Omega$.
Assume that for some $0 < t < 1$
$$
\text{$A$ is $t$-separated with respect to the $L_2(\mu)$ norm.}
$$
If $|A| \leq \frac{1}{2} \exp(c t^4 k)$
for some positive number $k$,
there exists a subset $\s \subset \Omega$
of cardinality at most $k$ such that
$$
\text{$A$ is $\frac{t}{2}$-separated with respect to the $L_2(\mu_\s)$
norm,}
$$
where $\mu_\s$ is the uniform probability measure on $\s$. \end{lemma}
As the reader guesses, the set $\s$ will be chosen randomly in $\Omega$. We will estimate probabilities using a version of Bernstein's inequality (see e.g. \cite{VW}, or \cite{LT} 6.3 for stronger inequalities).
\begin{lemma}[Bernstein's inequality] \label{thm:bernstein} Let $X_1, \ldots, X_n$ be independent random variables with zero mean. Then, for every $u>0$, \begin{equation*}
\P \Big\{ \big| \sum_{i=1}^n X_i \big| > u \Big\}
\leq 2 \exp \Big(-\frac{u^2}{2(b^2 + a u/3)} \Big), \end{equation*}
where $a=\sup_i \|X_i\|_\infty$ and $b^2=\sum_{i=1}^n \E |X_i|^2$. \end{lemma}
\noindent {\bf Proof of Lemma \ref{extraction}. } For the sake of simplicity we identify $\Omega$ with $\{1,2, \ldots, n\}$. The difference set $S = \{f - g |\; f \not = g, \ f,g \in A\}$ has cardinality $|S| \leq |A|^2$. For each $x \in S$ we have $|x(i)|
\leq 2$ for all $i \in \{1,...,n\}$ and $\sum_{i=1}^n |x(i)|^2 \geq t^2 n$. Fix an integer $k$ satisfying the assumptions of the lemma and let $\delta_1, \ldots, \d_n$ be independent $\{0,1\}$-valued random variables with $\E \delta_i = \frac{k}{2n} =: \delta$. Then for every $z \in S$ \begin{align*}
\P \Big\{\sum_{i=1}^n \delta_i |x(i)|^2 \leq \frac{t^2\delta n}{2} \Big\}
& \leq \P \Big\{ \Big| \sum_{i=1}^n \delta_i |x(i)|^2
- \delta \sum_{i=1}^n |x(i)|^2 \Big |
> \frac{t^2 \delta n}{2} \Big\} \\
&= \P \Big\{ \Big|\sum_{i=1}^n (\delta_i -\delta)|x(i)|^2 \Big|
> \frac{t^2 \d n}{2} \Big\} \\
&\le 2 \exp \Big(-\frac{c t^4 \d n}{1+t^2} \Big)
\le 2 \exp (-c t^4 k), \end{align*} where the last line follows from Bernstein's inequality for
$a=\sup_i \|X_i\|_\infty \leq 4$ and $$
b^2 = \sum_{i=1}^n \E |X_i|^2
= \sum_{i=1}^n |x(i)|^4 \; \E(\delta_i - \delta)^2 \leq 16 \d n. $$ Therefore, by the assumption on $k$ $$ \P \Big\{ \exists x \in S :
\Big( \frac{1}{k} \sum_{i = 1}^n \d_i |x(i)|^2 \Big)^{1/2}
\leq \frac{t}{2} \Big\}
\le |S| \cdot 2 \exp(- c t^4 k) < 1/2. $$
Moreover, if $\s$ is the random set $\{i \,|\; \delta_i =1\}$ then by Chebyshev's inequality, $$
\P \{ |\s| > k \} = \P \Big\{ \sum_{i=1}^n \d_i > k \Big\} \le 1/2, $$ which implies that $$
\P \big \{ \exists x \in S : \|x\|_{L_2(\mu_\s)} \le \frac{t}{2} \big\} < 1. $$ This translates into the fact that with positive probability the class $A$ is $\frac{t}{2}$-separated with respect to the $L_2(\mu_\s)$ norm. \endproof
\qquad
\noindent {\bf Proof of Theorem \ref{main}. } One may clearly assume that $|A| > 1$ and that the functions in $A$ are defined on a finite domain $\Omega$, so that the probability measure $\mu$ on $\Omega$ is supported on a finite number of atoms. Next, by splitting these atoms (by replacing an atom $\w$ by, say, two atoms $\w_1$ and $\w_2$, each carrying measure $\frac{1}{2} \mu(\w)$ and by defining $f(\w_1) = f(\w_2) = f(\w)$ for $f \in A$), one can make the measure $\mu$ almost uniform without changing either the covering numbers or the shattering dimension of $A$. Therefore, assume that the domain $\Omega$ is $\{1, 2, \ldots, n\}$ for some integer $n$, and that $\mu$ is the uniform measure on $\Omega$.
Fix $0 < t \leq 1/2$ and let $A$ be $2t$-separated in the
$L_2(\mu)$ norm. By Lemma \ref{extraction}, there is a set of coordinates $\s \subset \{1,...,n\}$ of size $|\s| \leq
\frac{C\log|A|}{t^4}$ such that $A$ is $t$-separated in $L_2(\mu_\s)$, where $\mu_\s$ is the uniform probability measure on $\s$.
Let $p = \lfloor 7/t \rfloor$, define $\tilde{A} \subset \{0,1,...,p\}^\s$ by $$ \tilde{A}=\Bigl\{ \Bigl( \Bigl \lfloor\frac{7f(i)}{t} \Big \rfloor
\Bigr)_{i \in \s} \, | \; f \in A \Bigr\}, $$ and observe that $\tilde{A}$ is $6$-separated in $L_2(\mu_\s)$. By Proposition \ref{p: upperbound},
$$
|A|=|\tilde{A}| \le \Big( \frac{p |\sigma|}{d} \Big)^{C d}
$$ where $d=\vc(\tilde{A},2)$, implying that
$$
|A| \leq \Big(\frac{C\log |A|}{dt^5}\Big)^{Cd}.
$$ By a straightforward computation,
$$
|A| \leq \Bigl(\frac{1}{t}\Bigr)^{Cd},
$$ and our claim follows from the fact that $\vc(\tilde{A},2) \leq \vc(A, t/7)$. \endproof
\qquad
\remark Theorem \ref{main} also holds for the $L_p(\mu)$ covering numbers for all $0 < p < \infty$, with constants $K$ and $c$ depending only on $p$. The only minor modification of the proof is in Lemma \ref{l:xx'}, where the equations would be replaced by appropriate inequalities.
\qquad
\section{Applications: Gaussian Processes and Convexity} \label{s:convexity}
The first application is a bound on the expectation of the supremum of a Gaussian process indexed by a set $A$. Such a bound is provided by Dudley's integral in terms of the $L_2$ entropy of $A$; the entropy, in turn, can be majorized through Theorem \ref{main} by the shattering dimension of $A$. The resulting integral inequality improves the main result of M.~Talagrand in \cite{T 92}.
If $A$ is a class of functions on the finite set $I$, then a natural Gaussian process $(X_a)_{a \in A}$ indexed by elements of $A$ is $$ X_a = \sum_{i \in I} g_i \, a(i) $$ where $g_i$ are independent standard Gaussian random variables.
\begin{theorem} \label{thm:talagrand}
Let $A$ be a class of functions bounded by $1$,
defined on a finite set $I$ of cardinality $n$.
Then $E = \E \sup_{a \in A} X_a$
is bounded as
$$
E \le K \sqrt{n}
\int_{cE/n}^{1} \sqrt{\vc(A,t) \cdot \log (2/t)}\; dt,
$$
where $K$ and $c$ are absolute positive constants. \end{theorem}
The nonzero lower limit in the integral will play an important role in the application to Elton's Theorem.
The first step in the proof is to view $A$ as a subset of $\R^n$. Dudley's integral inequality can be stated as $$ E \le K \int_0^\infty \sqrt{\log N(A, t D_n)} \; dt, $$ where $D_n$ is the unit Euclidean ball in $\R^n$, see \cite{Pi} Theorem 5.6. The lower limit in this integral can be improved by a standard argument. This fact was first noticed by A. Pajor.
\begin{lemma} \label{dudley}
Let $A$ be a subset of $\R^n$.
Then $E = \E \sup_{a \in A} X_a$
is bounded as
$$
E \le K \int_{cE/\sqrt{n}}^\infty \sqrt{\log N(A, t D_n)} \; dt,
$$
where $K$ is an absolute constant. \end{lemma}
\proof Fix positive absolute constants $c_1, c_2$ whose values will be specified later. There exists a subset $\NN$ of $A$, which is a $(\frac{c_1 E}{\sqrt{n}})$-net of $A$ with respect to the Euclidean norm and has cardinality $|\NN| \le N(A, \frac{c_1 E}{2 \sqrt{n}} D_n)$. Then $A \subset \NN + \frac{c_1 E}{2 \sqrt{n}} D_n$, and one can write \begin{equation} \label{E}
E = \E \sup_{a \in A} X_a
\le \E \max_{a \in \NN} X_a
+ \E \sup_{a \in \frac{c_1 E}{\sqrt{n}} D_n} X_a. \end{equation} The first summand is estimated by Dudley's integral as \begin{equation} \label{first NN}
\E \max_{a \in \NN} X_a
\le K \int_0^\infty \sqrt{\log N(\NN, t D_n)} \; dt. \end{equation} On the interval $(0, \frac{c_2 E}{\sqrt{n}})$, \begin{align*} K \int_0^{ \frac{c_2 E}{\sqrt{n}} } \sqrt{\log N(\NN, t D_n)} \; dt
&\le K \frac{c_2 E}{\sqrt{n}} \cdot \sqrt{\log|\NN|} \\
&\le K \frac{c_2 E}{\sqrt{n}} \cdot
\sqrt{\log N(A, {\textstyle \frac{c_1 E}{2 \sqrt{n}} } D_n)}. \end{align*} The latter can be estimated using Sudakov's inequality \cite{D,Pi}, which states that $\e \sqrt{\log N(A, \e D_n)} \le K \,\E \sup_{a \in A} X_a$ for all $\e > 0$. Indeed, $$ K \frac{c_2 E}{\sqrt{n}} \cdot
\sqrt{\log N(A, {\textstyle \frac{c_1 E}{2 \sqrt{n}} } D_n)}
\leq K_1 (2c_2 / c_1) \,\E \sup_{a \in A} X_a
= K_1 (2c_2 / c_1) E
\le \frac{1}{4} E, $$ if we select $c_2$ as $c_2 = c_1 / 8 K_1$. Combining this with \eqref{first NN} implies that \begin{equation} \label{first sum}
\E \max_{a \in \NN} X_a
\le \frac{1}{4} E + K \int_{ \frac{c_2 E}{\sqrt{n}} }^\infty
\sqrt{\log N(A, t D_n)} \; dt \end{equation} because $\NN$ is a subset of $A$.
To bound the second summand in \eqref{E}, we apply the Cauchy-Schwarz inequality to obtain that for any $t>0$, $$ \E \sup_{a \in tD_n} X_a \le t \cdot \E \Big( \sum_{i \in I} g_i^2 \Big)^{1/2} \le t \sqrt{n}. $$
In particular, if $c_1 < 1/4$ then $$ \E \sup_{a \in \frac{c_1E}{\sqrt{n}}D_n} X_a \leq c_1E \leq \frac{1}{4}E. $$ This, \eqref{E} and \eqref{first sum} imply that $$ E \le K_2 \int_{ \frac{c_2 E}{\sqrt{n}} }^\infty
\sqrt{\log N(A, t D_n)} \; dt, $$ where $K_2$ is an absolute constant. \endproof
\qquad
\noindent{\bf Proof of Theorem \ref{thm:talagrand}. } By Lemma \ref{dudley}, $$ E \le K \int_{cE/\sqrt{n}}^\infty \sqrt{\log N(A, t D_n)} \; dt. $$ Since $A \subset [-1,1]^n \subset \sqrt{n} D_n$, the integrand vanishes for $t \ge \sqrt{n}$. Hence, by Theorem \ref{main} \begin{align*} E &\le K \int_{cE/\sqrt{n}}^{\sqrt{n}}
\sqrt{\log N(A, t D_n)} \; dt \\
&= K \sqrt{n} \int_{cE/n}^1
\sqrt{\log N(A, t \sqrt{n} D_n)} \; dt \\
&\le K_1\sqrt{n} \int_{cE/n}^1
\sqrt{\vc(A, c_1 t) \cdot \log (2/t)} \; dt. \end{align*} The absolute constant $0 < c_1 < 1/2$ can be made $1$ by a further change of variable. \endproof
\qquad
The main consequence of Theorem \ref{thm:talagrand} is Elton's Theorem with the optimal dependence on $\d$.
\begin{theorem} \label{thm:elton}
There is an absolute constant $c$ for which the following
holds.
Let $x_1, \ldots, x_n$ be vectors in the unit
ball of a Banach space.
Assume that
$$
\E \Big\| \sum_{i = 1}^n g_i x_i \Big\| \ge \d n
\ \ \ \text{for some number $\d > 0$}.
$$
Then there exist numbers $s, t \in (c \d, 1)$,
and a subset $\s \subset \{ 1, \ldots, n \}$
of cardinality $|\s| \ge s^2 n$,
such that
\begin{equation} \label{lower l1}
\Big\| \sum_{i \in \s} a_i x_i \Big\|
\ge t \sum_{i \in \s} |a_i|
\ \ \ \text{for all real numbers $(a_i)$}.
\end{equation}
In addition, the numbers $s$ and $t$ satisfy the inequality
$s \cdot t \log^{1.6} (2 / t) \ge c\d$. \end{theorem}
Before the proof, recall the interpretation of the shattering dimension of convex bodies. If a set $B \subset \R^n$ is convex and symmetric then $\vc(B,t)$ is the maximal cardinality of a subset $\s$ of $\{1, \ldots, n\}$ such that $P_\s(B) \supset [-\frac{t}{2}, \frac{t}{2}]^\s$. Indeed, every convex symmetric set in $\R^n$ can be viewed as a class of functions on $\{1,...,n\}$. If $\s$ is $t$-shattered with a level function $h$ then for every $\s' \subset \s$ there is some $f_{\s'}$ such that $f_{\s'}(i) \geq h(i)+t$ if $i \in \s'$ and $f_{\s'} \leq h$ on $\s \backslash \s'$. By selecting for every such $\s'$ the function $(f_{\s'}-f_{\s \backslash \s'})/2$ and since the class is convex and symmetric, it follows that $P_\s(B) \supset [-\frac{t}{2},\frac{t}{2}]^\s$, as claimed.
Taking the polars, this inclusion can be written as $\frac{t}{2}
(B^\circ \cap \R^\s) \subset B_1^n$, where $B_1^n$ is the unit ball of $\ell_1^n$. Denoting by $\|\cdot\|_{B^\circ}$ the Minkowski functional (the norm) induced by the body $B^\circ$, one can rewrite this inclusion as the inequality $$
\Big\| \sum_{i \in \s} a_i e_i \Big\|_{B^\circ}
\ge \frac{t}{2} \sum_{i \in \s} |a_i|
\ \ \ \text{for all real numbers $(a_i)$}, $$ where $(e_i)$ is the standard basis of $\R^n$. Therefore, to prove Theorem \ref{thm:elton}, one needs to bound below the shattering dimension of the dual ball of a given Banach space.
\qquad
\noindent {\bf Proof of Theorem \ref{thm:elton}.} By a perturbation argument, one may assume that the vectors $(x_i)_{i
\le n}$ are linearly independent. Hence, using an appropriate linear transformation one can assume that $X = (\R^n, \|\cdot\|)$ and that $(x_i)_{i \le n}$ are the unit coordinate vectors
$(e_i)_{i \le n}$ in $\R^n$. Let $B=(B_X)^\circ$ and note that the assumption $\|e_i\|_X \leq 1$ implies that $B \subset [-1,1]^n$.
Set $$
E = \E \Big\|\sum_{i=1}^n g_i x_i \Big\|_X
= \E \sup_{b \in B} \sum_{i=1}^n g_i \,b(i). $$ By Theorem \ref{thm:talagrand}, \begin{equation*} \d n \leq E \leq K\sqrt{n} \int_{c \delta}^{1}
\sqrt{\vc(B,t) \cdot \log(2/t)} \; dt. \end{equation*} Consider the function $$ h(t) = \frac{c_0}{t \log^{1.1} (2 / t)} $$ where the absolute constant $c_0 > 0$ is chosen so that $\int_0^{1} h(t) \; dt = 1$. It follows that there exists some $c\d \le t \le 1$ such that $$ \sqrt{\vc(B, t) / n \cdot \log(2 / t)} \ge \d h(t). $$ Hence $$ \vc(B, t) \ge \frac{c_0 \d^2}{t^2 \log^{3.2} (2 / t)} n. $$ Therefore, letting $s^2 = \vc(B, t) / n$, it follows that $s \cdot t \log^{1.6} (2 / t) \ge \sqrt{c_0} \d$ as required, and by the discussion preceding the proof there exists a subset $\s$ of $\{1,
\ldots, n\}$ of cardinality $|\s| \ge s^2 n$ such that \eqref{lower l1} holds with $t/2$ instead of $t$. The only thing remaining is to check that $s \gtrsim \d$. Indeed, $s \ge \frac{\sqrt{c_0} \d}{t \log^{1.6} (2 / t)} \ge c_1 \d$, because $t \le 1$. \endproof
\noindent {\bf Remarks. } 1. As the proof shows, the exponent $1.6$ can be reduced to any number larger than $3/2$.
2. The relation between $s$ and $t$ in Theorem \ref{thm:elton} is optimal up to a logarithmic factor for all $0 < \d < 1$. This is seen from the following example, shown to us by Mark Rudelson. For $0 < \d < 1 / \sqrt{n}$, the constant vectors $x_i = \d \sqrt{n} \cdot e_1$ in $X = \R$ show that $s t$ in Theorem \ref{thm:elton} cannot exceed $\d$. For $1 / \sqrt{n} \le \d \le 1$, we consider the body $D = \conv( B_1^n \cup \frac{1}{\d
\sqrt{n}} D_n )$ and let $X = (\R^n, \|\cdot\|_D)$ and $x_i =
e_i$, $i = 1, \ldots, n$. Clearly, $\E \| \sum g_i x_i \|_X \ge
\E \| \sum \e_i e_i \|_D = \d n$. Let $0 < s, t < 1$ be so that \eqref{lower l1} holds for some subset $\s \subset \{ 1, \ldots, n
\}$ of cardinality $|\s| \ge s^2 n$. This means that $\|x\|_D
\ge t \|x\|_1$ for all $x \in \R^\s$. Dualizing, $\frac{t}{\d
\sqrt{n}} \|x\|_2 \le t \|x\|_{D^\circ} \le \|x\|_\infty$ for all $x \in \R^\s$. Testing this inequality for $x = \sum_{i \in
\s} e_i$, it is evident that $\frac{t}{\d \sqrt{n}} \sqrt{|\s|} \le 1$ and thus $s t \le \d$.
\qquad
We end this article with an application to empirical processes. A key question is when a class of functions satisfies the central limit theorem uniformly in some sense. Such classes of functions are called {\it uniform Donsker classes}. We will not define these classes formally but rather refer the reader to \cite{D,VW} for an introduction on the subject. It turns out that the uniform Donsker property is related to uniform estimates on covering numbers via the Koltchinskii-Pollard entropy integral.
\begin{theorem} \cite{D} \label{dud} Let $F$ be a class of functions bounded by $1$. If \begin{equation*}
\int_0^\infty \sup_{n}\sup_{\mu_n} \sqrt{ \log N \bigl(F,L_2(\mu_n),\e\bigr) } \;d\e < \infty, \end{equation*} then $F$ is a uniform Donsker class. \end{theorem} Having this condition in mind, it is natural to seek entropy estimates which are ``dimension free'', that is, do not depend on the size of the sample. In the $\{0,1\}$-valued case, such bounds were first obtained by Dudley who proved Theorem \ref{main} for these classes (see \cite{LT} Theorem 14.13) which implied through Theorem \ref{dud} that every VC class is a uniform Donsker class.
Theorem \ref{main} solves the general case: the following corollary extends Dudley's result on the uniform Donsker property from $\{0,1\}$ classes to classes of real valued functions.
\begin{corollary}
Let $F$ be a class of functions bounded by $1$
and assume that the integral
$$
\int_0^1 \sqrt{ \vc(F,t)\log \frac{2}{t}} \; dt
$$
converges.
Then $F$ is a uniform Donsker class. \end{corollary}
In particular this shows that if $\vc(F,t)$ is ``slightly better" than $1/t^2$, then $F$ is a uniform Donsker class.
This result has an advantage over Theorem \ref{dud} because in many cases it is easier to compute the shattering dimension of the class rather than its entropy (see, e.g. \cite{AB}).
{\small
}
\end{document} |
\begin{document}
\begin{abstract} The number of standard Young tableaux possible of shape corresponding to a partition $\lambda$ is called the dimension of the partition and is denoted by $f^{\lambda}$. Partitions with odd dimensions were enumerated by McKay and were further classified by Macdonald. Let $a_i(n)$ be the number of partitions of $n$ with dimension congruent to $i$ modulo 4. In this paper, we refine Macdonald's and McKay's results by calculating $a_1(n)$ and $a_3(n)$ when $n$ has no consecutive 1s in its binary expansion or when the sum of binary digits of $n$ is 2 and providing values for $a_2(n)$ for all $n$. We also present similar results for irreducible representations of alternating groups. \end{abstract} \title{Enumeration of partitions modulo 4}
\section{Introduction} \subsection{Foreword} A partition of $n$ is a tuple of natural numbers, $\lambda:= (\lambda_1, \ldots, \lambda_k)$, with non-increasing entries that add up to $n$. The \textit{dimension of the partition $\lambda$}, $f^\lambda$, is the number of standard Young tableaux of shape $\lambda$ (ref. \cref{introsec}) and can be calculated by the famous hook-length formula (\cref{HLF}). Denote by $m_p(n)$, the number of partitions whose dimensions are not divisible by a given natural number, $p$.\\ Macdonald, in his landmark paper \cite{mcd}, gave a complete answer for $m_p(n)$ which can be understood quite elegantly using the $p$-core tower approach (ref. \cref{sec:2mod4}). The case for $p=2$ has a wonderful form: for $n = 2^{k_1}+\ldots + 2^{k_\ell}$ with $k_1 > \ldots > k_\ell$, we have, \[m_2(n) = 2^{k_1 + \ldots + k_\ell},\] which is entirely dependent on the binary expansion of $n$.\\ Note that $m_2(n)$ counts all partitions of $n$ which have odd dimension. We call these \textit{odd partitions} and will denote their count by $a(n) := m_2(n)$.
Recently, there has been some interest in extending Macdonald's results. In their unpublished notes, Amrutha P and T. Geetha \cite{geetha} tackle the problem of computing $m_{2^k}(n)$. They provide general recursive results and in the case of ``hook-partitions'' of $2^\ell$, they find $m_4(2^\ell)$ and $m_8(2^\ell)$. As we shall see, the enumeration results for $m_4(n)$ in the preprint follow from our analysis of ``sparse numbers'', i.e., numbers with no consecutive 1s in their binary expansions.\\ This study of partitions has a representation theoretic context as partitions of $n$ index the irreducible representations of the symmetric group, $S_n$. There are results regarding the behaviour of irreducible representations corresponding to odd partitions under restriction (\cite{gian}, \cite{aps}). There are results (\cite{evensarah}, \cite{evensteven}) which show that the density of odd character values (not just degrees) goes to zero. Although they add to the literature of odd partitions, none specifically focus on analysing the dimensions further. Some papers (\cite{rosa}, \cite{lass}) give explicit results for character values of symmetric groups but the formulae do not aid us in enumeration.\\ We aim to fill this gap by providing explicit enumeration results which extend the odd dimensional partition enumeration formula of Macdonald.
\subsection{Main Results} Let $a_i(n)$ denote the number of partitions of $n$ with dimension congruent to $i$ modulo 4. Then, the number of odd partitions, $a(n)= m_2(n) = a_1(n) + a_3(n)$. We compute the values of $a_1(n)$ and $a_3(n)$ for some specific values of $n$. The case where $n$ has more than two 1s in its binary expansion and the leftmost two digits are 1 still remains unsolved.\\ Define $\delta(n) := a_1(n) - a_3(n)$. In this paper, we present the following main theorem: \begin{thm}\label{mainthm} Let $n, m, R \in \mathbb{N}$ with $R\geq 2$ and $m > 0$. Suppose, $n = 2^{R} + m$ with $2^{R-1} > m$. Then, we have \[ \delta(n) = \begin{cases} 0, & \text{if }n \text{ is even}\\ 4 \delta(m), &\text{if } n \text{ is odd}. \end{cases} \] Equivalently, for $k_1 > \ldots > k_\ell$, we have \[ a_1(2^{k_1}+\ldots + 2^{k_\ell}) = \begin{cases} 2^{k_1+\ldots + k_\ell - 1}, & \text{if }k_\ell >0 \text{}\\ 4a_1(\sum\limits_{i=2}^{l} 2^{k_i}) + (2^{k_1-1}-2)2^{k_2+\ldots + k_\ell}, &\text{if } k_\ell = 0 \text{}, \end{cases} \] and \[ a_3(2^{k_1}+\ldots + 2^{k_\ell}) = \begin{cases} 2^{k_1+\ldots + k_\ell - 1}, & \text{if }k_\ell >0 \text{}\\ 4a_3(\sum\limits_{i=2}^{l} 2^{k_i}) + (2^{k_1-1}-2)2^{k_2+\ldots + k_\ell}, &\text{if } k_\ell = 0 \text{} \end{cases} \] \end{thm}
We call a positive integer \textit{sparse} if it does not have any consecutive ones in its binary expansion, i.e., $k_i > k_{i+1} + 1$ in our notation. For instance, $(42)_{10}= (101010)_2$ is sparse. This case has the following pleasing corollary: \begin{cor}\label{maincor} If $n$ is a sparse number, then \[ \delta(n) = \begin{cases} 2\text{,} & \text{if }n = 2\\ 0, &\text{if } n > 2 \text{ is even}\\ 4^{\nu(n) - 1}, & \text{if } n\text{ is odd}, \end{cases} \] where $\nu(n)$ denotes the number of 1s (or equivalently the sum of digits) in the binary expansion of $n$. For $k_1 > \ldots > k_\ell\geq 0$ with $k_i > k_{i+1}+1$ such that $\ell \geq 2$, we have \[ a_1(2^{k_1}+\ldots + 2^{k_\ell}) = \begin{cases} 2^{k_1+\ldots + k_\ell - 1}, &\text{if } k_\ell > 0\\ 2^{k_1+\ldots + k_\ell - 1} + 4^{\ell - 2}, & \text{if }k_\ell = 0\text{}, \end{cases} \] and \[ a_3(2^{k_1}+\ldots + 2^{k_\ell}) = \begin{cases} 2^{k_1+\ldots + k_\ell - 1}, &\text{if } k_\ell > 0\\ 2^{k_1+\ldots + k_\ell - 1} - 4^{\ell - 2}, & \text{if }k_\ell = 0\text{}, \end{cases} \] \end{cor} Note that we recover explicit and recursion formulae for $a_1(n)$ and $a_3(n)$ by using $\delta(n) = a_1(n) - a_3(n)$ and $a(n) = a_1(n) + a_3(n)$.
In the case where the binary expansion starts with $\mathtt{11}$, we have the following partial result: \begin{thm}\label{11thm} Let $n = 2^R + 2^{R-1}$ with $R\geq 1$, then \[ \delta(n) = \begin{cases} 2, & \text{if }n = 3\\ 8, & \text{if } n = 6\\ 0, & else. \end{cases} \] Explicitly, we have\[ a_1(2^R + 2^{R-1}) = \begin{cases} 2, & \text{if }R = 1\\ 8, & \text{if } R = 2\\ 4^{R-1}, & else, \end{cases} \] and \[ a_3(2^R + 2^{R-1}) = \begin{cases} 0, & \text{if }R = 1\\ 0, & \text{if } R = 2\\ 4^{R-1}, & else. \end{cases} \] \end{thm}
For $a_2(n)$, which counts even dimensional partitions with $f^{\lambda}$ not divisible by 4, we have recursive results for all natural numbers and closed form results for sparse $n$: \begin{thm}\label{2mod4thm} Let $n = 2^R + m$ such that $m < 2^R$. Then we have \[a_2(n) = \begin{cases} 2^R\cdot a_2(m) + \binom{2^{R-1}}{2}\cdot a(m), & \text{ if }m < 2^{R-1}\\ 2^R\cdot a_2(m) + \left(\binom{2^{R-1}}{3} + 2^{R-1}\right) \cdot \displaystyle{\frac{a(m)}{2^{R-1}}}, & \text{ if } 2^{R-1} \leq m < 2^R. \end{cases}\] \end{thm} \begin{cor}\label{2mod4cor} When $n$ is sparse, we have \[a_2(n) = \begin{cases} \displaystyle{\frac{a(n)}{8}(n - 2 \nu(n))},& \text{if }n \text{ is even}\\ a_2(n-1), &\text{if } n \text{ is odd}. \end{cases}\] \end{cor}
\subsection{Structure of the paper} In \cref{sec:notation}, we recall the basic notions relating to partitions such as tableaux, cores and quotients, and the hook-length formula. We present a characterization of odd partitions due to Macdonald \cite{mcd} and introduce the notion of parents. We also define a function, $\Od$, which extracts the odd part of the input and returns it modulo 4. In \cref{sec:workhorse}, we prove ``the workhorse formula" that relates the $\Od$ values of the cores and parents which allows us to enumerate $\delta(n)$ recursively. In \cref{sec:proofmain} and \cref{sec:proof11}, we analyse the workhorse formula to obtain enumeration results and prove \cref{mainthm} and \cref{11thm} respectively. In \cref{sec:2mod4}, we use the theory of 2-core towers to prove \cref{2mod4thm}. In \cref{sec:problems}, we present some relevant problems that remain unsolved.
\section{Notations and Definitions}\label{sec:notation} We denote by $\mathbb{N}$ the set of natural numbers beginning at 1. \subsection{Partitions, Ferrers diagrams and Young Tableaux}\label{introsec} Let \\${\Lambda \subset \bigcup\limits_{i=1}^\infty \mathbb{N}^i}$ denote the set of all tuples $\lambda = (\lambda_1, \ldots, \lambda_k)$ such that \\$\lambda_1 \geq \ldots \geq \lambda_k$. \begin{defn}[Partition] An element of $\Lambda$ is known as a \textit{partition}. We call $\lambda \in \Lambda$ a \textit{partition of $n$} if $\sum\limits_{i=1}^k \lambda_i = n$. \end{defn} \begin{notation}
We use the notation $\lambda\vdash n$ to denote $\lambda$ is a partition of $n$. Denote the \textit{size of $\lambda$} by $|\lambda| := \sum\limits_{i=1}^k \lambda_i = n$. \end{notation} We can represent a partition in the Cartesian plane by constructing a top-left justified array of boxes with the \nth{i} row containing $\lambda_i$ many boxes. \begin{example}The partition $\lambda = (4, 3, 3, 1)$ can be represented as \begin{center} \ydiagram{4, 3, 3, 1}. \end{center}\end{example} This is called the \textit{Ferrers diagram} of $\lambda$ and will be denoted by $\sh(\lambda)$, short for shape on $\lambda$.
The boxes of the Ferrers diagram $\sh(\lambda)$ can be filled with distinct numbers from $1$ to $n:= |\lambda|$. We impose the constraint that the numbers increase from top to bottom and from left to right. Such a filling is called a \textit{standard Young tableau (SYT)} on the shape $\sh(\lambda)$. \begin{example}Continuing with the above example, the following filling \begin{center} \ytableaushort{1 2 7 8, 3 5 {10}, 4 9 {11}, 6} \end{center} is an SYT on $\sh((4, 3, 3, 1))$. \end{example}
We can have multiple such SYT of a given shape, and the total number of SYT on $\sh(\lambda)$ is denoted by $f^{\lambda}$. We call this number, the \textit{dimension of the partition $\lambda$}.
\subsection{Hooks and hook-lengths} We call the boxes in Ferrers diagrams \textit{cells}. We label the cell in the \nth{i} row from top and \nth{j} column from left by $(i,j)$. Write $(i,j) \in \sh(\lambda)$ if the $(i,j)$ cell exists.\\ A very useful tool in the study of partitions is the notion of a \textit{hook}. For a cell at $(i,j)$ consider all the cells to the right, to the bottom of it and the cell itself. This constitutes the \textit{hook at $(i,j)$}. The number of cells is known as its \textit{hook-length} and is denoted by $h_{i,j}$. \begin{example} In the previous example, the cell at $(1,2)$ has the hook-length, $h_{1,2} = 5$. The hook is denoted in pink. \begin{center} \ydiagram{4, 3, 3, 1}*[*(pink)]{1 + 3, 1 +1, 1+ 1} \end{center} \end{example}
\subsection{Hook removal and cores} A hook of length $t$ is called a \textit{$t$-hook}. If the Ferrers diagram has a $t$-hook, then remove all the cells contained in the hook. This gives us two (possibly empty) Ferrers diagrams. Slide the bottom diagram to the left and then upwards to reconnect and form a Ferrers diagram.
\begin{example} Consider the partition $(5,5, 5, 4, 2)$ and its Ferrers diagram \begin{center} \ydiagram{5, 5, 5, 4, 2}. \end{center} In our case, let $t = 5$, that is, we wish to remove a 5-hook from the diagram. We undergo the following procedure: \begin{tiny} \begin{center} \ytableausetup{boxsize = 0.3cm, aligntableaux = center} \ydiagram[*(cyan)]{5, 2, 2, 2, 2}*[*(pink)]{0, 0, 3 + 2, 3 + 1}*[*(lightgray)]{0, 2 + 3, 2 + 1, 2 + 1}$\rightarrow$ \ydiagram[*(cyan)]{5, 2, 2, 2, 2}*[*(pink)]{0, 0, 3 + 2, 3 + 1}$\rightarrow$ \ytableaushort{\none, \none, \none \none {\none[\leftarrow]} , \none \none {\none[\leftarrow]}, \none}*[*(cyan)]{5, 2, 2, 2, 2}*[*(pink)]{0, 0, 3 + 2, 3 + 1} \quad $\rightarrow$ \ytableaushort{\none, \none, \none \none \none , \none \none \none, \none}*[*(cyan)]{5, 2, 2, 2, 2}*[*(pink)]{0, 0, 2+ 2, 2 + 1}$\rightarrow$ \ytableaushort{\none, \none\none {\none[\uparrow]}{\none[\uparrow]}, \none \none \none , \none \none \none, \none}*[*(cyan)]{5, 2, 2, 2, 2}*[*(pink)]{0, 0, 2+ 2, 2 + 1}$\rightarrow$ \ytableaushort{\none, \none, \none \none \none , \none \none \none, \none}*[*(cyan)]{5, 2, 2, 2, 2}*[*(pink)]{0, 2+ 2, 2 + 1} \ytableausetup{boxsize = normal, aligntableaux = top} \end{center} \end{tiny}
to obtain the partition $(5, 4, 3, 2, 2)$ \begin{center} \ydiagram{5, 4, 3, 2, 2}. \end{center} \end{example} \begin{defn}[$t$-core of a partition] If $\sh(\mu)$ does not have a $t$-hook, then we call $\mu$ a \textit{$t$-core}. \end{defn} For a partition $\lambda$, if we keep applying this $t$-hook removal process on the subsequent partitions we obtain, we will eventually reach a $t$-core. The $t$-core of $\lambda$ is independent of the order of hook-removal and is thus unique (cf. Example 8(c) \cite{SymmF} p. 12). We denote it by $\core_t(\lambda)$.
\begin{notation} The set of all $t$-cores is denoted by $\widetilde{\Lambda}_t$. \end{notation}
\subsection{\texorpdfstring{$\beta$}-sets} The set of first column hook-lengths of $\sh(\lambda)$ for $\lambda = (\lambda_1, \ldots, \lambda_k)$ will be denoted by \[H(\lambda) = \{h_{i, 1}\mid 1\leq i \leq k\},\] which we conventionally order in a decreasing fashion. Also, we have the relation, $h_{i,1} = \lambda_i + k - i$.
For any finite $X\subset \N$, define the \textit{$r$-shift of $X$} to be \[X^{+r} = \{x + r\mid x\in X\} \cup \{0, \ldots, r-1\}.\] We fix $X^{+0} = X$. \begin{defn}[$\beta$-set] For a partition $\lambda$, all sets of the form $H(\lambda)^{+r}$ for $r\geq 0$ are known as the \textit{$\beta$-sets of $\lambda$}. \end{defn} We can impose the relation, $\sim_{\beta}$, on finite sets $X, Y\subset \mathbb{N}$ such that $X\sim_{\beta} Y$ if and only if $X = Y^{+r}$ or $Y = X^{+r}$, for some $r \in \N$. This is an equivalence relation on the set of $\beta$-sets, i.e., $\N$.
There is a natural way to understand $t$-hooks and $t$-cores through $\beta$-sets. \begin{prop}\label{hookremprop} Let $\lambda$ be a partition and $X$ be a $\beta$-set of $\lambda$. We have that $\sh(\lambda)$ contains a $t$-hook if and only if there exists an $h$ in $X$ such that $h >t$ and $h - t$ is not in $X$. Furthermore, if $\mu$ is the partition obtained after removing the $t$-hook, then $H(\mu) \sim_{\beta} (H(\lambda)\cup \{h-t\})\backslash \{h\}$. \end{prop} \begin{proof} The proof can be found in Example 8(a) of \cite{SymmF} and Corollary 1.5 on page 7 of \cite{olsson}. \end{proof} \begin{remark}\label{remark:aff} The element $h$, as above, will be referred to as the \textit{affected hook-length}. With the above notation, we write $h^\lambda_\mu:= h$. \end{remark} The recipe for hook removal is quite clear. Pick an element $h$ of the $\beta$-set, $X$, and subtract $t$ from it. If $h-t\not\in X$, then replace $h$ by $h-t$; otherwise choose another element from $X$. If no ``$x \rightarrow x-t$" replacements can be performed, declare the partition as a $t$-core. \begin{example} Let $H(\lambda) = \{10, 8, 7, 5, 2\}$. In this case, we have that $8 \in H(\lambda)$ but $3 \not\in H(\lambda)$. Thus, there exists a 5-hook in $\sh(\lambda)$ which we can remove to obtain $\lambda'$. Following the above rules, we get $H(\lambda') = \{10, 7, 5, 3, 2\}$. Now, we can further replace $5$ by 0 to obtain $H(\lambda'') = \{10, 7, 5, 3, 2, 0\} \sim_{\beta} \{9,6,4,2,1\}$. No further replacements can be performed. Thus, $\lambda'' = (5,3,2,1,1)$ is the 5-core of $\lambda$. \end{example}
We can reconstruct the original partition from a $\beta$-set by setting $\lambda_i = h_i + i - k$ and considering only the values where $\lambda_i >0$. \begin{notation} If $X$ is a $\beta$-set of $\lambda$, then we define $\Part(X)= \lambda$. \end{notation}
\subsection{Odd dimensional partitions}\label{hlfsec} Using the notation, $f^{\lambda}$, for the dimension of a partition $\lambda$ (refer \cref{introsec}), we have the following proposition due to Frobenius: \begin{prop}[Hook-length formula]\label{HLF} Let $\lambda\in \Lambda$ and $X$ be a $\beta$-set of $\lambda$. If explicitly, $X = \{h_1, \ldots, h_k\}$ such that $h_1 > \ldots > h_k$, then we have \[
f^{\lambda} = \dfrac{|\lambda|!\prod\limits_{1\leq i<j\leq k} (h_i - h_j)}{\prod\limits_{i=1}^k h_i!}. \] \end{prop}
\begin{remark}\label{frthlf} The more famous hook-length formula \cite{frthook} \[ f^\lambda = \dfrac{n!}{\prod\limits_{h} h} \] has a product of all hook-lengths of $\lambda$ in the denominator. This can be used to derive the formula in \cref{HLF}. \end{remark}
We call $\lambda$ an \textit{odd partition} if $f^{\lambda}$ is odd. Macdonald~\cite{mcd} gave the following characterization of odd partitions which forms the backbone of our analysis:
\begin{prop}\label{mcdprop} If $\lambda$ is a partition of $n = 2^R + m$ with $m < 2^R$, then $\lambda$ is an odd partition if and only if $\lambda$ contains exactly one $2^R$-hook and $\core_{2^R}(\lambda)$ is also an odd partition. \end{prop}
\begin{defn}\label{parentsrem} If $\core_{2^R}(\lambda) = \mu$, then we call $\lambda$ a \textit{$2^R$-parent of $\mu$}. \end{defn}
We can now give an explicit form for the $2^R$-parents of odd partitions: \begin{prop}\label{parents} Let $2^R>m$ and $\mu$ be a partition of $m$. If $\lambda$ is a partition such that $\core_{2^R}(\lambda) = \mu$, then exactly one of the following holds: \begin{enumerate} \item[Type I.] $H(\lambda) = \left(H(\mu)\cup \{x+2^R\}\right)\backslash \{x\}$ for some $x\in H(\mu)$, \item[Type II.] $H(\lambda) = \left(H(\mu)^{+r}\cup \{2^R\}\right)\backslash\{0\}$ for some $1\leq r\leq 2^R$ such that $2^R\not\in H(\mu)^{+r}$. \end{enumerate} \end{prop}
\begin{proof} By \cref{hookremprop}, we get $(H(\lambda)\cup\{h - 2^R\})\backslash \{h\}\sim_{\beta}H(\mu)$ which implies $(H(\lambda)\cup\{h - 2^R\})\backslash \{h\} = H(\mu)^{+r}$.\\ The reader can show that for non-empty finite sets $A$, $B$, $C \subset \N$, we have \begin{enumerate} \item $A\backslash B = C \implies A = B \cup C$ if and only if $B \subset A$ \item $ A = B \cup C \implies A\backslash B = C$ if and only if $B\cap C = \varnothing$. \end{enumerate} Using this, we get \textit{Type I} by letting $r = 0$, putting $h - 2^R = x$. Similarly, we get \textit{Type II} by putting $h = 2^R$. \end{proof} \begin{example} Let $\mu = (2,2,2)$ with $2^R = 8$. We have $H(\mu) = \{4,3,2\}$. If we let $x = 3$, then we obtain a Type I parent, $\lambda$, with $H(\lambda) = \{11, 4, 2\}$. For a Type II parent, we choose $r= 2$, which gives us $\lambda'$ with $H(\lambda') = \{8,6,5,4,1\}$. The corresponding Ferrers diagrams for $\lambda$ (left) and $\lambda'$ (right) with hooks added to $\mu$ are: \begin{center} \ydiagram{2,2,2}*[*(pink)]{2 + 7, 2 + 1} \qquad \ydiagram{2,2,2}*[*(pink)]{2 + 2, 2 + 1, 2 + 1, 3, 1}. \end{center} \end{example} Using this, we can recover the enumeration result for odd partitions: \begin{prop}\label{count} Let $n = 2^{k_1} + \ldots + 2^{k_r}$ with $k_1 > \ldots > k_r$. Then, the number of odd partitions of $n$ is given by $a(n) := 2^{k_1 + \ldots + k_r}$. \end{prop}
\begin{proof} Let $n = 2^{k_1} + m$ and $\mu\vdash m$ be an odd partition. There are exactly $2^{k_1}$ many $2^{k_1}$-parents of $\mu$ and all of them are odd partitions. This gives us the recursion, $a(n) = 2^{k_1}a(m)$. \end{proof} \subsection{\textnormal{Od} function}\label{odsubs}
\begin{notation} For $n\in \mathbb{N}$, let $v_2(n)$ denote the largest power of 2 that divides $n$. \end{notation} \begin{defn} Let $\Od:\mathbb{N} \rightarrow \{\pm1\}$ be defined as follows \[ \Od(n) = \begin{cases} 1, & \text{if }n/2^{v_2(n)}\equiv 1 \mod 4\\ -1, &\text{if } n/2^{v_2(n)}\equiv 3 \mod 4. \end{cases} \] \end{defn} \begin{lem} For all $m, n\geq 1$, we have $\Od(mn) = \Od(m)\Od(n)$. \end{lem} \begin{proof} The proof is elementary and follows from $o(n):= n/2^{v_2(n)}$ being multiplicative. \end{proof}
\begin{notation}[Binary expansion]\label{not:binary} Let $n\in \N$. \begin{enumerate} \item Let $n = \sum\limits_{i=0}^k b_i 2^i$ such that $b_i \in \{0,1\}$. We write $n = b_k\ldots b_0$ if $b_k = 1$ and $b_i =0$ for $i>k$. This is the \textit{binary expansion of $n$.} \item Let $\bin(n) :=\{i \mid b_i = 1\}$, i.e., the ``positions'' of 1s in the binary expansion of $n$. \item Denote the sum of digits by $\nu(n):= \sum\limits_{i\geq 0} b_i$. \item Let $s_2(n) := b_k + b_{k-1}$, i.e., the sum of the leftmost two digits of $n$. \end{enumerate}
\end{notation}
\begin{remark} Let $n$ be as above. Denote $j := \min(\bin(n))$. Then, $\Od(n) \equiv b_{j+1}b_{j}\mod 4$. This corresponds to taking the binary expansion of $n$, removing all the rightmost zeros and returning the rightmost two digits of the newly obtained string modulo 4. \end{remark}
\section{The Workhorse Formula}\label{sec:workhorse} In this section, we construct a relationship between partitions and their $2^R$-parents, which will allow us to analyse their dimensional behaviour modulo 4. We first define a statistic of consequence inspired by \cite{Bin4}: \begin{defn} For a natural number $n$ (with binary expansion $b_k\ldots b_0$), let the number of pairs of consecutive 1s (disjoint or overlapping) be denoted by $D(n)$. Notationally, $D(n)$ is the number of $i$ such that the product $b_i\cdot b_{i+1} = 1$. \end{defn} \begin{example} We have $D(7) = 2$, $D(42) =0$ and $D(367) = 4$. \end{example} We start with the following lemma: \begin{lem}\label{lem:odnfac} For any natural number $n$, we have \[\Od(n!) = (-1)^{D(n) + \nu(\lfloor n/4 \rfloor)},\] where $\nu(n)$ is the number of 1s in the binary expansion of $n$ as in \cref{not:binary} and $\lfloor \cdot \rfloor$ denotes the integral part. \end{lem} \begin{proof} As $\Od$ is multiplicative, we have \[ \Od(n!) = \prod\limits_{r=1}^n \Od(r) = \prod\limits_{\substack{1 \leq r \leq n\\ r \text{ odd}}} \Od(r) \prod\limits_{\substack{1 \leq r \leq n\\ r \text{ even}}} \Od(r). \] The above rearrangement allows us to use the following two facts to obtain a recursion. Firstly, $\Od(2r) = \Od(r)$ which gives \[\prod\limits_{\substack{1 \leq r \leq n\\ r \text{ even}}} \Od(r) = \Od(\lfloor n/2 \rfloor!).\] We have \begin{align*} \prod\limits_{\substack{1 \leq r \leq n\\ r \text{ odd}}} \Od(r) &= \prod\limits_{\substack{1 \leq r \leq n\\ r \in 4\mathbb{N}+1}} \Od(r) \prod\limits_{\substack{1 \leq r \leq n\\ r \in 4\mathbb{N}-1}} \Od(r)\\ &= \prod\limits_{\substack{1 \leq r \leq n\\ r \in 4\mathbb{N}-1}} (-1). \end{align*}
As there are exactly $\lfloor (n+1)/4\rfloor$ terms of the arithmetic progression $3, 7, 11, \ldots$ less than or equal to $n$, we get
\[ \Od(n!) = (-1)^{\lfloor (n+1)/4\rfloor}\Od(\lfloor n/2 \rfloor!). \] Let $n$ have the binary expansion $b_k\ldots b_0$ as in \cref{not:binary}. By using $\lfloor \frac{b_\ell\ldots b_0}{2}\rfloor = b_\ell\ldots b_1$, we can rewrite our expression as \[ \Od(n!) = (-1)^{\lfloor \frac{b_k\ldots b_{0} + 1}{4}\rfloor + \lfloor \frac{b_k\ldots b_{1} + 1}{4}\rfloor + \lfloor \frac{b_k\ldots b_{2} + 1}{4}\rfloor + \ldots+ \lfloor \frac{b_k b_{k-1} + 1}{4}\rfloor} \Od(b_k!). \]
Notice that $\lfloor \frac{b_k\ldots b_{j+1}b_j + 1}{4}\rfloor = \lfloor \frac{b_k\ldots b_{j+1}b_j}{4}\rfloor + 1$ if and only if both $b_j$ and $b_{j+1}$ are 1. For all other cases, we have the equality, $\lfloor \frac{b_k\ldots b_{j+1}b_j + 1}{4}\rfloor = \lfloor \frac{b_k\ldots b_{j+1}b_j}{4}\rfloor$.\\
The inequality occurs exactly $D(n)$ times, giving us \[ \Od(n!) = (-1)^{D(n)}(-1)^{\lfloor \frac{b_k\ldots b_{0} }{4}\rfloor + \lfloor \frac{b_k\ldots b_{1}}{4}\rfloor + \ldots+ \lfloor \frac{b_kb_{k-1}b_{k-2}}{4}\rfloor + \lfloor \frac{b_k b_{k-1}}{4}\rfloor} \Od(b_k!). \] Using $\lfloor \frac{b_\ell\ldots b_0}{4}\rfloor = b_\ell\ldots b_2$, and the fact that $b_k \in \{0,1\}$, we get \[ \Od(n!) = (-1)^{D(n)}(-1)^{{b_k\ldots b_{2} } + {b_k\ldots b_{3}} + \ldots+ b_k + 0}. \] When $(-1)$ is raised to a number, the parity of the number entirely decides the result, thus $(-1)^{b_k\ldots b_j} = (-1)^{b_j}$. Using this, we obtain, \begin{align*} \Od(n!) &= (-1)^{D(n) + b_2 + b_3 + \ldots + b_k}\\ &= (-1)^{D(n) + \nu(\lfloor n/4 \rfloor)}. \end{align*} \end{proof} Before stating the main formula, we define an important quantity: \begin{defn}\label{defn:inversions}
Let $\lambda$ be a $2^R$-parent of $\mu$ with $|\mu| < 2^R$. Let $h^\lambda_\mu \in H(\lambda)$ be the affected hook-length (\cref{remark:aff}). We define $\eta^\lambda_\mu \in \mathbb{Z}/2\mathbb{Z}$ as follows: \[
(-1)^{\eta^{\lambda}_\mu} = \prod\limits_{\substack{x \in H(\lambda)\\ x \neq h^\lambda_\mu}} \dfrac{\Od(|h^\lambda_\mu- x|)}{\Od(|h^\lambda_\mu - 2^R - x|)}. \] \end{defn}
Now, we have the arsenal to approach the hook-length formula and apply our $\Od$ function to it. This gives us the following proposition: \begin{prop}[Workhorse formula]\label{workhorse} Let $n = 2^R +m > 3$ with $m<2^R$. Let $\mu\vdash m$ and $\lambda$ be a $2^R$-parent of $\mu$. Let $h^\lambda_\mu\in H(\lambda)$ be the affected hook-length. Then, \[ \Od(f^{\lambda}) = (-1)^{s_2(n) + s_2(h^\lambda_\mu) + \eta^\lambda_\mu}\Od(f^{\mu}), \] where $\eta^{\lambda}_{\mu}$ is as defined above and $s_2$ is as in \cref{not:binary}. \end{prop} \begin{proof} By applying $\Od$ on \cref{HLF} for $\lambda$, we get \[
\Od(f^{\lambda}) = (-1)^{D(n) + \nu(\lfloor \frac{n}{4} \rfloor)}\prod\limits_{h\in H(\lambda)} (-1)^{D(h) + \nu(\lfloor \frac{h}{4} \rfloor)} \prod\limits_{\substack{x > y\\ x,y\in H(\lambda)}} \Od(|x-y|). \]
Suppose, $|H(\mu)^{+r}| = |H(\lambda)|$ for some $r$. Then, we obtain, \[
\Od(f^{\mu}) = (-1)^{D(m) + \nu(\lfloor \frac{m}{4} \rfloor)}\prod\limits_{h\in H(\mu)^{+r}} (-1)^{D(h) + \nu(\lfloor \frac{h}{4} \rfloor)} \prod\limits_{\substack{x> y\\ x,y\in H(\mu)^{+r}}} \Od(|x-y|).\] Let $n$ have the binary expansion $b_R\ldots b_0$. Then, $m = n-2^R = b_{R-1}\ldots b_0$. Clearly, $D(m) = D(n)-1$ if and only if $b_{R-1} = 1$. Also, $\nu(\lfloor m/4 \rfloor) = \nu(\lfloor n/4 \rfloor) - 1$. These two facts give us \[(-1)^{D(n) + \nu(\lfloor \frac{n}{4} \rfloor) - D(m) - \nu(\lfloor \frac{m}{4} \rfloor)} = (-1)^{s_2(n)}.\] We have that $h^\lambda_\mu\in H(\lambda)$ and $h^\lambda_\mu-2^R \in H(\mu)^{+r}$. All the other elements of the sets $H(\lambda)$, $H(\mu)^{+r}$ are the same. Using the same analysis as above, we get \begin{align*} &\dfrac{\prod\limits_{h\in H(\lambda)} (-1)^{D(h) + \nu(\lfloor \frac{h}{4} \rfloor)}}{\prod\limits_{h\in H(\mu)^{+r}} (-1)^{D(h) + \nu(\lfloor \frac{h}{4} \rfloor)}}\\ &= (-1)^{D(h^\lambda_\mu) + \nu(\lfloor \frac{h^\lambda_\mu}{4} \rfloor) - {D(h^\lambda_\mu - 2^R) - \nu(\lfloor \frac{h^\lambda_\mu - 2^R}{4}\rfloor)}} \\&= (-1)^{s_2(h^\lambda_\mu)}. \end{align*} Extending this argument to the remaining product and using \cref{defn:inversions} gives us the formula. \end{proof}
Although the above recursion is quite compact, it must be untangled to aid in enumeration. The $s_2$ terms are simple to deal with and so we must investigate the $\eta^\lambda_\mu$ term. \begin{lem} For any natural number $a$, we have: \begin{enumerate}\label{lem:inv} \item for $2^{R} < a < 2^{R+1}$, $\Od(a - 2^R) \neq \Od(a)\iff a = 2^R + 2^{R-1}$, \item for $0<a < 2^R$, $\Od(a +2^R) \neq \Od(a) \iff a = 2^{R-1}$, and \item for $0<a < 2^R$, $\Od(2^R - a) = \Od(a)\iff a = 2^{R-1}$. \end{enumerate} \end{lem} \begin{proof} Let $a$ have the binary expansion $b_R\ldots b_j\ldots 0$. The results can be shown by noticing that $\Od(a) \equiv b_{j+1}b_{j} \mod 4$ and performing elementary algebraic manipulations. \end{proof} \begin{defn}[Indicator function] Let $\mathbb{I}_{X}(x)$ be equal to 1 if $x\in X$ otherwise 0. We call $\mathbb{I}_{X}$, \textit{the indicator function} \textit{on $X$}. \end{defn} \begin{notation} For a partition $\lambda$ of $n = 2^R + m$ with $m < 2^R$ and $h\in H(\lambda)$, let $N_{\lambda}(h)= \#\{y \in H(\lambda)\mid h - 2^R< y < h\}$. \end{notation}
\cref{lem:inv} enables us to prove the following proposition which forms the crux of our enumeration endeavour: \begin{prop}\label{prop:inversions}
Let $\lambda$ be a $2^R$-parent of $\mu$ with $|\mu| < 2^R$. Then, we have \[ \eta^{\lambda}_{\mu} = N_{\lambda}(h^\lambda_\mu) - \mathbb{I}_{H(\lambda)}(h^\lambda_\mu - 2^{R-1}) + \mathbb{I}_{H(\lambda)}(h^\lambda_\mu + 2^{R-1}) + \mathbb{I}_{H(\lambda)}(h^\lambda_\mu - 3\cdot 2^{R-1}). \] \end{prop} \begin{proof} We consider the ratio
\[\rho_h(x) := \Od(|h-x|)/\Od(|h - 2^R - x|). \] In this proof, we will put $h= h^\lambda_\mu$ and $x\in H(\lambda)$ except for $x = h$. The idea of this proof is to find values of $x$ such that $\rho_h(x) = -1$, which allows us to compute \[ (-1)^{\eta^\lambda_\mu} = \prod\limits_{\substack{x\in H(\lambda)\\ x\neq h}} \rho_h(x). \] We split the proof into three cases: \begin{enumerate} \item Let $x < h-2^R < h$. The ratio $\rho_h(x)$ simplifies to $\frac{\Od(h-x)}{\Od(h - 2^R - x)}$. Put $h-x$ as $a$ and use (1) in \cref{lem:inv}, to give $x = h - 2^R - 2^{R-1}$. Thus, \[ \rho_h(h - 3\cdot 2^{R-1}) = -1. \] \item Let $h-2^R < h < x$. Similarly we use (2) from the lemma to get \[ \rho_h(h + 2^{R-1}) = -1. \] \item Let $h -2^R < x < h$. Now, $\rho_h(x)$ becomes $\dfrac{\Od(h-x)}{\Od(- h +2^R + x)}$. Using (3) from the lemma, we get that in this range, $\rho_h(x) = 1$ if and only if $x = h- 2^{R-1}$. Thus, for exactly \[{N_{\lambda}(h) - \mathbb{I}_{H(\lambda)}(h- 2^{R-1})}\] many values, we have $\rho_h(x) = -1$. \end{enumerate} Combining these three together gives us the proposition. \end{proof}
We are now ready to enumerate odd dimensional partitions.
\section{Odd Partitions of Sparse Numbers}\label{sec:proofmain} In this section, we prove \cref{mainthm} and \cref{maincor} through heavy use of \cref{workhorse} and \ref{prop:inversions}. \begin{defn}[Sparse number] A natural number $n$ with the binary expansion $b_k\ldots b_0$ is called \textit{sparse} if for all $0\leq i \leq k-1$, we have the product $b_{i+1}\cdot b_i = 0$. \end{defn} Recall that $a_i(n)$ denotes the number of partitions of $n$ whose dimensions are congruent to $i$ modulo 4. Further, $\delta(n):= a_1(n) - a_3(n)$, and the total number of odd partitions of $n$ is denoted by $a(n) = a_1(n) + a_3(n)$. We start by first defining \textit{hook partitions} and looking into their dimensions, which simplify to binomial coefficients. \begin{defn}\label{def:hooks} A partition $\lambda\vdash n$ is called a \textit{hook partition} if it is of the form $(a+1,1, \ldots, 1)$ for $0 \leq a \leq n-2$ or $(n)$. Equivalently, $H(\lambda) = \{n, b,b-1, \ldots, 1\}$ or $H(\lambda) = \{n\}$ where $a + b + 1 = n$. \end{defn} \begin{lem}\label{lem:hookdims} We have $\delta(1) = 1$, $\delta(2) = 2$ and $\delta(2^R) = 0$ for $R \geq 2$. \end{lem} \begin{proof} The cases of $\delta(1)$ and $\delta(2)$ are easy to see as they have 1 and 2 partitions respectively, all of dimension 1. By \cref{mcdprop}, we deduce that all odd partitions of $2^R$ are hook-partitions. Let $\lambda\vdash 2^R$ such that $H(\lambda) = \{2^R, b, b-1, \ldots, 1\}$. Putting this into the hook-length formula (\cref{HLF}), we get $f^{\lambda} = \binom{2^R-1}{b}$. By using \cref{davisandwebb} (stated below) and noticing that $2^R-1$ contains only 1s in its binary expansion, we get the lemma. \end{proof} \begin{prop}[Davis and Webb, \cite{Bin4}]\label{davisandwebb} Let $n\in \mathbb{N}$ be \textit{not} sparse, then \[ \#\{0 \leq k \leq n\mid\binom{n}{k}\equiv 1 \textnormal{ mod } 4\} = \#\{0 \leq k\leq n\mid \binom{n}{k}\equiv 3\textnormal{ mod } 4\}. 
\] \end{prop} \begin{proof} Refer to Theorem 6 of \cite{Bin4} for the proof. \end{proof}
Let $n = 2^R + m$ with $m < 2^{R-1} < 2^R$. We proceed with the following strategy: consider an odd partition $\mu\vdash m$ as our $2^R$-core. Consider all partitions of $n$ which are the $2^R$-parents of $\mu$. These are all odd partitions by \cref{mcdprop} and by \cref{workhorse}, we can determine $\delta(n)$ in terms of $\delta(m)$. Firstly, we see that \cref{workhorse} simplifies nicely in the $m < 2^{R-1}$ case. \begin{cor} Let $n = 2^R + m$ with $m < 2^{R-1}$. For odd partitions, $\lambda \vdash n$ and $\mu\vdash m$ such that $\lambda$ is a $2^R$-parent of $\mu$, we have \[ \Od(f^{\lambda}) = (-1)^{\eta^\lambda_\mu}\Od(f^\mu). \] \end{cor} \begin{proof} Let $n$ have the binary expansion $b_Rb_{R-1}\ldots b_0$. As $m < 2^{R-1}$, it must be that $b_{R-1} = 0$ and $s_2(n) = b_R + b_{R-1} = 1$. Also, we have $2^R \leq h^\lambda_\mu < n < 2^R + 2^{R-1}$ as all elements of $H(\lambda)$ are less than or equal to $n$. By the same logic as above, we get $s_2(h^\lambda_\mu) = 1$. \end{proof}
This makes finding $\eta^\lambda_\mu$ our primary concern. In our particular case of $m < 2^{R-1}$, even this calculation is simplified. We obtain a corollary of \cref{prop:inversions}: \begin{cor}\label{cor:inversions} Let $n = 2^R + m$ with $m < 2^{R-1}$. For odd partitions, $\lambda \vdash n$ and $\mu\vdash m$ such that $\lambda$ is a $2^R$-parent of $\mu$, we have \[ \eta^{\lambda}_{\mu} = N_{\lambda}(h^\lambda_\mu) - \mathbb{I}_{H(\lambda)}(h^\lambda_\mu - 2^{R-1}) \] with notation as in \cref{prop:inversions}. \end{cor} \begin{proof} We have $2^R \leq h^\lambda_\mu < n < 3\cdot2^{R-1}$. Clearly, $\mathbb{I}_{H(\lambda)}(h^\lambda_\mu - 3\cdot 2^{R-1}) = 0$. Along similar lines, $h^\lambda_\mu + 2^{R-1} \geq 2^R + 2^{R-1} > n$ and so it cannot be an element of $H(\lambda)$. Thus, $\mathbb{I}_{H(\lambda)}(h^\lambda_\mu + 2^{R-1}) = 0$. \end{proof} We enumerate Type I and Type II parents (\cref{parents}) separately. \begin{notation} When $2^R$ is understood, we denote the set of all $2^R$-parents of $\mu$ by $\p(\mu)$. Denote the set of Type I and Type II $2^R$-parents of $\mu$ by $\p_1(\mu)$ and $\p_2(\mu)$ respectively. \end{notation}
Clearly, we have $\p(\mu) = \p_1(\mu) \cup \p_2(\mu)$. Also, $|\p_1(\mu)| = |H(\mu)|$ and $|\p_2(\mu)| = 2^R - |H(\mu)|$. \begin{notation}[Signed-sum] For any finite $\Lambda' \subset \Lambda$, and a partition $\mu$, define the \textit{signed-sum}, $S_{\Lambda'}(\mu) = \frac{1}{\Od(f^\mu)}\sum\limits_{\lambda\in \Lambda'} \Od(f^\lambda)$. \end{notation} We see that $S_{\Lambda'}(\mu)$ counts the number of partitions in $\Lambda'$ with the same value of $\Od$ as $\mu$ minus the number of partitions in $\Lambda'$ with a different value of $\Od$. For the following discussion, we will consider natural numbers $m$ and $R$ such that $m < 2^{R-1}$.\\ We now count Type I parents. \begin{prop}\label{prop:phistuff} Let $\mu$ be an odd partition of $m$. Then, for $\lambda\in \p_1(\mu)$, we have $\mathbb{I}_{H(\lambda)}(h^\lambda_\mu - 2^{R-1}) = 0$. Further, \[ S_{\p_1(\mu)}(\mu) = \begin{cases}
0, & \text{if }|H(\mu)| \text{ is even}\\
1, & \text{if }|H(\mu)| \text{ is odd}.\\ \end{cases} \] \end{prop} \begin{proof} By definition, we have $H(\lambda) = (H(\mu)\cup \{h^{\lambda}_\mu\})\backslash \{h^{\lambda}_\mu - 2^R\}$ where $h^{\lambda}_\mu > 2^R$. Thus, if $h^{\lambda}_\mu - 2^{R-1} \in H(\lambda)$ then $h^{\lambda}_\mu - 2^{R-1} \in H(\mu)$, which is not possible as $h^{\lambda}_\mu - 2^{R-1} > m$. This shows $\mathbb{I}_{H(\lambda)}(h^\lambda_\mu - 2^{R-1}) = 0$. We also have \begin{align*} S_{\p_1(\mu)}(\mu) &= \sum\limits_{\lambda\in \p_1(\mu)} \frac{\Od(f^\lambda)}{\Od(f^\mu)}\\ &= \sum\limits_{\lambda\in \p_1(\mu)}(-1)^{\eta^\lambda_\mu}\\ &= \sum\limits_{\lambda\in \p_1(\mu)}(-1)^{N_{\lambda}(h^\lambda_\mu)}. \end{align*} If, explicitly, $H(\mu) = \{h_1, \ldots, h_k\}$ with $h_1 > \ldots > h_k$, then the map $\phi: H(\mu)\rightarrow \p_1(\mu)$ defined by \[ \phi(h_i) = \Part((H(\mu)\cup\{h_i + 2^R\})\backslash \{h_i\}) \] is a bijection such that $h^{\phi(h_i)}_\mu = h_i + 2^R$. As all elements of $H(\mu)$ are strictly smaller than $2^R$, the element $h^{\phi(h_i)}_\mu$ is the largest element in $H(\phi(h_i))$. Thus, \begin{align*} N_{\phi(h_i)}(h^{\phi(h_i)}_\mu) &= \#\{y\in H(\phi(h_i))\mid h_i + 2^R > y > h_i\} \\ &= \#\{y\in H(\mu)\mid y > h_i\}\\ &= i - 1. \end{align*} This gives us, \[ S_{\p_1(\mu)}(\mu) = \sum\limits_{i=1}^k (-1)^{i-1}. \]
By considering the parity of $k = |H(\mu)| = |H(\lambda)|$, we get the result. \end{proof} The similar calculation for Type II parents is more involved. We will eventually prove the following proposition: \begin{prop}\label{prop:type2} Let $\mu$ be an odd partition of $m$ and $2^{R-1}>m$. Then, \[ S_{\p_2(\mu)}(\mu) = \begin{cases}
2 - 2(-1)^{m}, &\text{if }|H(\mu)| \text{ is even}\\
1 - 2(-1)^{m}, & \text{if }|H(\mu)| \text{ is odd}.\\ \end{cases} \] \end{prop}
Fix $\mu\vdash m$ and $n = 2^R + m$ with $m < 2^{R-1}$ as before. If $\lambda$ is a Type II $2^R$-parent, i.e., $\lambda\in\p_2(\mu)$, we have $h^\lambda_\mu = 2^R$. Thus, \cref{cor:inversions} simplifies to give $\eta^\lambda_\mu = N_{\lambda}(h^\lambda_\mu) - \mathbb{I}_{H(\lambda)}(2^{R-1})$. Although it looks simpler on the surface, this calculation comes with its own caveat that not every $r$-shift gives a $2^R$-parent. \begin{notation}\label{notn:defined} Define \[ \D := \{1\leq r \leq 2^R\mid 2^R \not\in H(\mu)^{+r}\}, \] wherein we assume that $R$ and $\mu$ are understood. For a set $X\subset [1, 2^R]\subset \mathbb{N}$, we write $\D_X := \D\cap X$.\\ For $r\in \D$, let \[\lambda_{[r]} = \Part\left((H(\mu)^{+r}\cup\{2^R\})\backslash \{0\}\right).\] \end{notation}
We define a new quantity, which may seem arbitrary at this point, but will be quite useful in our analysis: \begin{defn}[Parity gap] For a finite $X\subset \mathbb{N}$, define the \textit{parity gap of $X$}, $\g(X)$, to be the number of even elements of $X$ minus the number of odd elements of $X$. In notation,\[ \g(X) = \#\{x\in X\mid x \equiv 0\text{ mod } 2\} - \#\{x\in X\mid x\equiv 1\text{ mod } 2 \}. \] \end{defn} \begin{example} If $X = \{13, 12, 8, 5, 3, 1, 0\}$ then $\g(X) = 3 - 4 = -1$. \end{example} \begin{notation}\label{not:typeii} Let \[\p_2^\uparrow(\mu) := \{ \lambda_{[r]}\mid r\in \D_{[1, 2^{R-1}]}\}\] and \[\p_2^\downarrow(\mu) := \{\lambda_{[r]}\mid r\in \D_{[2^{R-1}+ 1, 2^{R}]}\}.\] \end{notation} \begin{lem}\label{lem:toprow} Following the above notation, we claim that $\D_{[1, 2^{R-1}]} = [1, 2^{R-1}]$. Further, \[
S_{\p_2^\uparrow(\mu)}(\mu) = 2(-1)^{|H(\mu)|}\g(H(\mu)). \] \end{lem} \begin{proof}
The maximum possible element of $H(\mu)^{+r}$ is $\max(H(\mu)) + r$, which itself is \textit{strictly} smaller than $2^R$ as $\max(H(\mu)) \leq m < 2^{R-1}$ and $r \leq 2^{R-1}$. Thus, for $1\leq r \leq 2^{R-1}$, we have $2^R \not\in H(\mu)^{+r}$.\\
For the signed-sum part, it is easy to see that $N_{\lambda_{[r]}}(2^R) = |H(\mu)|+r - 1$. Thus, we get the following summation: \[
S_{\p_2^\uparrow(\mu)}(\mu) = \sum\limits_{r=1}^{2^{R-1}} (-1)^{|H(\mu)| + r - 1}(-1)^{\mathbb{I}_{H(\lambda_{[r]})}(2^{R-1})}. \] For every $h\in H(\mu)$, we can choose an $r_h := 2^{R-1} - h$ which ensures that $2^{R-1} \in H(\mu)^{+r_h}$. Conversely, if $2^{R-1} \in H(\mu)^{+r}$, then as $r\leq 2^{R-1}$, we must have $h + r = 2^{R-1}$ for some $h\in H(\mu)$. Thus, there is an injective map $H(\mu)\rightarrow\p_2^{\uparrow}(\mu)$ given by $h\mapsto \lambda_{[r_h]}$. Further, if $h\in H(\mu)$ and $h+r_h = 2^{R-1}$, then $h$ and $r_h$ have the same parity. We can now compute the signed-sum as follows: \begin{align*}
S_{\p_2^\uparrow(\mu)}(\mu) &= (-1)^{|H(\mu)| - 1}\sum\limits_{r=1}^{2^{R-1}} (-1)^{r +\mathbb{I}_{H(\lambda_{[r]})}(2^{R-1})} \end{align*} We break the sum into two parts depending on whether $2^{R-1}$ belongs to $H(\lambda_{[r]})$. \begin{align*}
&S_{\p_2^\uparrow(\mu)}(\mu)\\ &= (-1)^{|H(\mu)|-1}\left(\sum\limits_{\substack{r_h\\ h\in H(\mu)}} (-1)^{r_h +\mathbb{I}_{H(\lambda_{[r_h]})}(2^{R-1})} +\sum\limits_{\substack{\text{other } r}} (-1)^{r +\mathbb{I}_{H(\lambda_{[r]})}(2^{R-1})}\right)\\
&= (-1)^{|H(\mu)|-1}\left(\sum\limits_{\substack{h\in H(\mu)}} (-1)^{r_h + 1} +\sum\limits_{\substack{\text{other } r}} (-1)^{r}\right) \\
&= (-1)^{|H(\mu)|-1}\left(\sum\limits_{\substack{h\in H(\mu)}} \left((-1)^{r_h + 1} - (-1)^{r_h}\right) +\sum\limits_{r=1}^{2^{R-1}} (-1)^{r}\right)\\
&= 2(-1)^{|H(\mu)|-1}\left(\sum\limits_{\substack{h\in H(\mu)}} \left((-1)^{r_h+1}\right)+0\right)\\
&= 2(-1)^{|H(\mu)|}\left(\sum\limits_{\substack{h\in H(\mu)}} (-1)^{r_h}\right)\\
&= 2(-1)^{|H(\mu)|}\g(H(\mu)). \end{align*} \end{proof}
We now hand the reader the following proposition which explicitly states what values $\g(X)$ can take where $X$ is a $\beta$-set of an odd partition of $n$. \begin{lem}\label{prop:parity} Let $\lambda$ be an odd partition of $n$ and $X$ be its $\beta$-set. Then, we have \[ \g(X) = \begin{cases}
1 - (-1)^n, &\text{if }|X| \text{ is even}\\
(-1)^n, & \text{if }|X| \text{ is odd}.\\ \end{cases} \] \end{lem} \begin{proof} Recall that if $\lambda\in \p(\mu)$, then there exists an integer $0\leq s\leq 2^R$ such that $H(\lambda) = (H(\mu)^{+s}\cup \{h^\lambda_\mu\})\backslash\{h^\lambda_\mu - 2^R\}$ for $h^\lambda_\mu\in H(\lambda)$. The case $s = 0$ corresponds to Type I parents.
Using the fact that $h^\lambda_\mu$ and $h^\lambda_\mu - 2^R$ have the same parity, we can deduce that $\g(H(\lambda)) = \g(H(\mu)^{+s})$. With elementary computations, one can check that $\g(X^{+1}) = 1 - \g(X)$. This shows that \[ \g(X^{+s}) = \begin{cases} \g(X), &\text{if } s \text{ is even}\\ 1-\g(X),&\text{if }s \text{ is odd.} \end{cases} \] We can now work our way down hook-by-hook and calculate $\g(H(\lambda))$ by finding $\g(H(\alpha))$ where $\alpha = \varnothing$ when $n$ is even and $\alpha = (1)$ when $n$ is odd. Applying the above relation repeatedly, we get, \[\g(H(\lambda)) = \begin{cases}
\g(H(\alpha)), & \text{if }|H(\lambda)| - |H(\alpha)| \text{ is even}\\
1 -\g(H(\alpha)),&\text{if }|H(\lambda)| - |H(\alpha)|\text{ is odd.} \end{cases} \]
In a sense, $|H(\lambda)| - |H(\alpha)|$ counts the total number of $r$-shifts required to reach $\lambda$.\\ Trivially, $\g(H(\varnothing)) = 0$. If $n$ is even, we obtain \[\g(H(\lambda)) = \begin{cases}
0 & |H(\lambda)| \text{ is even}\\
1 &|H(\lambda)|\text{ is odd.} \end{cases} \] On the other hand, we have $\g(\{1\}) = -1$ and if $n$ is odd, then \[\g(H(\lambda)) = \begin{cases}
2, &\text{if }|H(\lambda)|\text{ is even.}\\
-1, & \text{if }|H(\lambda)| \text{ is odd}
\end{cases} \] The claim follows immediately from this computation. \end{proof} \begin{example} Let $\lambda$, $\mu$, $\nu$ and $\pi$ be partitions such that \begin{itemize} \item $H(\lambda) = (H(\mu^{+5})\cup\{2^R\})\backslash \{0\}$ \item $H(\mu)$ is a Type I $2^S$-parent of $\nu$ \item $H(\nu) = (H(\pi^{+7})\cup\{2^T\})\backslash \{0\}$ \end{itemize} By above, we have $\g(H(\nu)) = 1 - \g(H(\pi))$. Further, $\g(H(\mu)) = \g(H(\nu))$ and $\g(H(\lambda)) = 1 - \g(H(\mu))$. Thus, $\g(H(\lambda)) = 1 - \g(H(\nu)) = 1 - (1 - \g(H(\pi))) = \g(H(\pi))$. \end{example}
\begin{lem}\label{lem:bottomrow} For $2^{R-1}+ 1 \leq r \leq 2^R$, we have $\lambda_{[r]} \in \p_2^\downarrow(\mu)$ if and only if $2^{R-1}\not\in H(\lambda_{[r-2^{R-1}]})$. Further, \vspace*{-5pt} \[ S_{\p_2^\downarrow(\mu)}(\mu) = \begin{cases}
0, &\text{if }|H(\mu)| \text{ is even}\\
1, &\text{if } |H(\mu)| \text{ is odd}.\\ \end{cases} \] \end{lem} \begin{proof}
It is clear that $2^{R-1}\in H(\lambda_{[r-2^{R-1}]})$ if and only if $2^R \in H(\mu)^{+r}$ for $2^{R-1}+1 \leq r \leq 2^R$. In this range, the values of $r\not\in \D$ are exactly the values of $r$ such that $2^{R-1}\in H(\lambda_{[r-2^{R-1}]})$. By considering these values as $r_h$ (refer \cref{lem:toprow}), we get that $|\p_2^\downarrow(\mu)| = 2^{R-1} - |H(\mu)|$.\\ For $r > 2^{R-1}$, we have $2^{R-1}\in H(\lambda_{[r]})$ and thus, $\eta^{\lambda}_\mu = N_{\lambda_{[r]}}(2^R) + 1$.\\ Suppose, $\lambda_{[r]}, \lambda_{[r+s]}\in \p_2^\downarrow(\mu)$ and $r+1,\ldots,r+s-1\not\in \D$. This gives us that $H(\mu) = \{h_1, \ldots, h_k, h_{k+1}, \ldots, h_{k+s-1}, \ldots, h_{l}\}$ where for $1\leq i \leq s-1$, we have $h_{k + i} = h_{k+1} - i +1$ and $h_{k+i} + r + i = 2^R$. It follows that $h_k + r >2^R$ and $h_{k+s} + r + s < 2^R$.
We have $N_{\lambda_{[r]}}(2^R) = |H(\mu)| + r - 1 - j_r$ where $j_r$ is the number of elements in $H(\lambda_{[r]})$ strictly greater than $2^R$. As $h_{k+i} + r + s > 2^R$ for $1\leq i \leq s-1$, we have $j_{r+s} = j_r + s - 1 $. This gives us $N_{\lambda_{[r+s]}}(2^R) = |H(\mu)| + (r + s - 1) - (j_r + s - 1) = N_{\lambda_{[r]}}(2^R) + 1$.\\
Let $r_1 < \ldots < r_{2^{R-1} - |H(\mu)|}$ such that $\lambda_{[r_1]}, \ldots, \lambda_{[r_{2^{R-1} - |H(\mu)|}]}\in \p_2^\downarrow(\mu)$. By the above discussion, we have $N_{\lambda_{[r_i]}}(2^R) = N_{\lambda_{[r_1]}}(2^R) + i - 1$. Thus, \begin{align*}
S_{\p_2^\downarrow(\mu)}(\mu) &= \sum\limits_{i = 1}^{2^{R-1}-|H(\mu)|} (-1)^{N_{\lambda_{[r_i]}(2^R)} + 1}\\
&= (-1)^{N_{\lambda_{[r_1]}}(2^R) + 1}\sum\limits_{i = 1}^{2^{R-1}-|H(\mu)|}(-1)^{i-1}\\
&= (-1)^{|H(\mu)| + r_1 - j_{r_1}}\sum\limits_{i = 1}^{2^{R-1}-|H(\mu)|}(-1)^{i-1}\\
&= (-1)^{|H(\mu)| + 2^{R-1}}\sum\limits_{i = 1}^{2^{R-1}-|H(\mu)|}(-1)^{i}. \end{align*}
The last equality follows by noticing that if $r_1 = 2^{R-1} + x$, then $j_{r_1} = x-1$ as $2^R \in H(\mu)^{+(2^{R-1} + 1)}, \ldots, H(\mu)^{+(2^{R-1} + x - 1)}$. Putting the proper parity of $|H(\mu)|$ and evaluating the expression gives us our result. \end{proof} We are now ready to prove the result for Type II parents. \begin{proof}[Proof of \cref{prop:type2}] We use the value of $\g(H(\mu))$ from \cref{prop:parity} and apply it to the result in \cref{lem:toprow} to obtain \[ S_{\p_2^\uparrow(\mu)}(\mu) = \begin{cases}
2 - 2(-1)^m, &\text{if }|H(\mu)| \text{ is even}\\
2(-1)^{m+1}, & \text{if }|H(\mu)| \text{ is odd}.\\ \end{cases} \] As $S_{\p_2(\mu)}(\mu) = S_{\p_2^\uparrow(\mu)}(\mu) + S_{\p_2^\downarrow(\mu)}(\mu)$, we add the result of \cref{lem:bottomrow}, to obtain the result of \cref{prop:type2} which is \[ S_{\p_2(\mu)}(\mu) = \begin{cases}
2 - 2(-1)^{m}, &\text{if }|H(\mu)| \text{ is even}\\
1 - 2(-1)^{m}, & \text{if }|H(\mu)| \text{ is odd}.\\ \end{cases} \] \end{proof} We now have all the tools in our hands to give a proof of \cref{mainthm} and \cref{maincor}. \begin{proof}[Proof of \cref{mainthm}] If $\p(\mu)$ denotes the set of $2^R$-parents of $\mu$, then $S_{\p(\mu)}(\mu) = S_{\p_1(\mu)}(\mu) + S_{\p_2(\mu)}(\mu)$. Clearly, $S_{\p(\mu)}(\mu) = 2 - 2(-1)^m$. The theorem follows from the following computation and the observation that $n$ and $m$ have the same parity: \begin{align*} \delta(n) &= \sum\limits_{\substack{\\\lambda\vdash n\\\lambda \text{ is odd}}} \Od(f^\lambda)\\ & = \sum\limits_{\substack{\mu\vdash m\\\mu \text{ is odd}}} S_{\p(\mu)}({\mu})\Od(f^{\mu})\\ &= (2 - 2(-1)^m) \sum\limits_{\substack{\mu\vdash m\\\mu \text{ is odd}}} \Od(f^{\mu})\\ &= (2-2(-1)^m )\delta(m). \end{align*} This gives us the result: \[ \delta(n) = \begin{cases} 0, & \text{if }n \text{ is even}\\ 4 \delta(m), &\text{if } n \text{ is odd}. \end{cases} \] \end{proof} \begin{proof}[Proof of \cref{maincor}] Repeatedly apply \cref{mainthm} and use \cref{lem:hookdims}. The $\nu(n)-1$ comes from $\delta(1) = 1$. This gives us the result in the sparse case and we find $n = 2$ separately: \[ \delta(n) = \begin{cases} 2\text{,} & \text{if }n = 2\\ 0, &\text{if } n > 2 \text{ is even}\\ 4^{\nu(n) - 1}, & \text{if } n\text{ is odd}. \end{cases} \] \end{proof}
\section{Odd Partitions of $n = 2^R + 2^{R-1}$}\label{sec:proof11} We wish to extend the results of the previous section beyond the sparse case. Although a general formula remains elusive, we can take confident steps towards it. We consider all numbers whose binary expansions contain a pair of adjacent 1s with all other bits zero. In notation, we consider $n$ with $\nu(n) = 2$ and $D(n) = 1$. We now prove \cref{11thm}.\\ We use similar methods as we did in the previous section. Recall that for $\Lambda'\subset \Lambda$ (the set of all partitions), we have \[ S_{\Lambda'}(\mu) := \frac{1}{\Od(f^\mu)}\sum\limits_{\lambda\in \Lambda'} \Od(f^\lambda).\] We also state a corollary of \cref{workhorse} which will be pertinent in this case. \begin{cor} Let $n = 2^R + 2^{R-1}$ for some $R \geq 2$. Let $\lambda\vdash n$ and $\mu\vdash 2^{R-1}$ be odd partitions such that $\lambda$ is a $2^R$-parent of $\mu$. Then, \[ \Od(f^{\lambda}) = (-1)^{s_2(h^\lambda_\mu) + \eta^\lambda_{\mu}}\Od(f^\mu). \] \end{cor}
The proof of the above corollary is trivial as the sum of the first two digits of $n$ is 2. Notice that we start from $R = 2$ as the case of $R = 1$ is slightly different and can be handled independently.\\ Throughout our discussion, we will take $\mu$ to be a hook partition such that $H(\mu) = \{2^{R-1}, b, b-1,\ldots, 1\}$, where $b = 0$ implies $H(\mu) = \{2^{R-1}\}$. Now, we first count the $2^R$-parents of Type I which, as before, we denote by $\p_1(\mu)$. \begin{lem}\label{lem:7} With the above notation, we have \[ S_{\p_1(\mu)}(\mu) = \begin{cases} 1, & \text{if }b \text{ is even}\\ 2, & \text{if }b \text{ is odd}.\\ \end{cases} \] \end{lem} \begin{proof} As in the proof of \cref{prop:phistuff}, define the bijection $\phi:H(\mu)\rightarrow \p_1(\mu)$ such that \[ \phi(h) = \Part((H(\mu)\cup\{h+2^R\})\backslash\{h\}). \] Firstly, consider $\lambda:= \phi(2^{R-1})$. In this case, $s_2(h^\lambda_\mu) = 2$. Further, using \cref{prop:inversions} regarding $\eta^\lambda_\mu$, we have \[ \eta^\lambda_\mu = N_\lambda(2^R + 2^{R-1}) - \mathbb{I}_{H(\lambda)}(2^R) + \mathbb{I}_{H(\lambda)}(2^{R+1}) + \mathbb{I}_{H(\lambda)}(0). \] As $2^{R-1}$ is the largest entry in $H(\mu)$, it is easy to check that $N_\lambda(2^R + 2^{R-1}) = 0$. Further, $H(\lambda) = \{2^R + 2^{R-1}, b, \ldots, 1\}$ and we can see that it does not contain 0, $2^{R}$ or $2^{R+1}$. Thus, $\eta^\lambda_\mu$ in this case is equal to 0, which gives us $(-1)^0 = 1$.\\ On the other hand, for $1\leq x \leq b$, define $\lambda_{[x]} := \phi(x)$. In this case, $s_2(h^{\lambda_{[x]}}_\mu) = 1$ and \[ \eta^{\lambda_{[x]}}_\mu = N_{\lambda_{[x]}}(2^R + x) - \mathbb{I}_{H(\lambda_{[x]})}(x + 2^{R-1}) + \mathbb{I}_{H(\lambda_{[x]})}(x + 2^R + 2^{R-1}) + \mathbb{I}_{H(\lambda_{[x]})}(x - 2^{R-1}). \]
We have $H(\lambda_{[x]}) = \{x + 2^R, 2^{R-1},\ldots \}$ and $N_{\lambda_{[x]}}(2^R + x) = b+1-x$. As $1 \leq x < 2^{R-1}$ and $|\lambda| = 2^{R} + 2^{R-1}$, $H(\lambda_{[x]})$ cannot contain, $ x - 2^{R-1}$ or $x + 2^{R} + 2^{R-1}$. From the explicit form of $H(\lambda_{[x]})$, it is easy to see the only element greater than $2^{R-1}$ is $x + 2^R$. Thus, $\eta^{\lambda_{[x]}}_\mu = b + 1 - x$. This gives us,
\begin{align*} S_{\p_1(\mu)}({\mu}) &= 1 + \sum\limits_{x=1}^{b} (-1)^{b + 1 - x + 1}\\ &= 1 + \sum\limits_{i=0}^{b-1} (-1)^i \end{align*} which with appropriate values of $b$ simplifies to give the intended result. \end{proof}
Notice that we are using the parity of $b$ which is opposite to the parity of $|H(\mu)|$. As one gives the other, we use $b$ for convenience.\\ Now, we move on to the results for Type II $2^R$-parents. \begin{lem}\label{lem:8} With the above notation, we have \[ S_{\p_2(\mu)}({\mu}) = \begin{cases} 3, & \text{if }b \text{ is even}\\ 2, & \text{if }b \text{ is odd}.\\ \end{cases} \] \end{lem} \begin{proof} As before, let $1\leq r \leq 2^R$ and $\lambda_{[r]}$ denote the $2^R$-parent corresponding to that $r$-shift. In notation, \[ \lambda_{[r]} = \Part\left((H(\mu)^{+r}\cup\{2^R\})\backslash \{0\}\right) \textnormal{ if and only if } r\in \D, \] where $\D$ is as in \cref{notn:defined}. For $r\in \D$, we have $\lambda_{[r]}\in\p_2(\mu)$.\\ As for all $\lambda\in\p_2(\mu)$, $h^\lambda_\mu = 2^R$, we have $s_2(h^\lambda_\mu) = 1$. Thus, \begin{align*} S_{\p_2(\mu)}({\mu}) &= \sum\limits_{\lambda\in \p_2(\mu)} (-1)^{\eta^\lambda_\mu + 1}\\ &= (-1)\cdot\sum\limits_{\lambda\in \p_2(\mu)} (-1)^{\eta^\lambda_\mu} \end{align*}
where \[ \eta^\lambda_\mu = N_{\lambda}(2^R) - \mathbb{I}_{H(\lambda)}(2^{R-1}) + \mathbb{I}_{H(\lambda)}(2^R +2^{R-1}). \]
We divide the values taken by $r$ into 6 (possibly empty, depending on $b$) intervals, which although overkill, would help illuminate the process better. It is recommended that the reader works out the assertions in the upcoming discussion using the basic definitions. As before, $H(\mu) = \{2^{R-1}, b, b-1,\ldots, 1\}$. Here, $b$ can take values from 0 to $2^{R-1}$ where $b=0$ implies $H(\mu) =\{2^{R-1}\}$. \begin{enumerate}
\item \underline{$1 \leq r\leq 2^{R-1} - b - 1$:}\\ In this case, $\eta^{\lambda_{[r]}}_\mu = N_{\lambda_{[r]}}(2^R) = |H(\mu)^{+r}|-1 = b + r$. \item\underline{$2^{R-1} - b \leq r \leq 2^{R-1}-1$:}\\ We have $\eta^{\lambda_{[r]}}_\mu = N_{\lambda_{[r]}}(2^R) - 1$ as $2^{R-1}\in H(\mu)^{+r}$. Thus, $\eta^{\lambda_{[r]}}_\mu = b + r - 1$. \item \underline{$r = 2^{R-1}:$}\\ As $2^{R-1}+2^{R-1} = 2^R\in H(\mu)^{+2^{R-1}}$, $2^{R-1}\not\in\D$. \item\underline{$2^{R-1} + 1 \leq r \leq 2^{R}-b-1$:}\\ For this, we have $\eta^{\lambda_{[r]}}_\mu = (b + r - 1) - 1 = b + r - 2 \equiv b + r \pmod 2$. \item\underline{$2^R - b \leq r \leq 2^{R}-1$:}\\ For all these values, $r\not\in\D$. \item\underline{$r = 2^R$:}\\ In this case, $\eta^{\lambda_{\left[2^R\right]}}_\mu= N_{\lambda_{\left[2^R\right]}}(2^R) = 2^{R} - 1$ as $\mathbb{I}_{H(\lambda_{\left[2^R\right]})}(2^{R-1}) = \mathbb{I}_{H(\lambda_{\left[2^R\right]})}(2^R +2^{R-1})=1$.
We can now combine the above information to give \[ S_{\p_2(\mu)}({\mu}) = (-1)\cdot\left(\sum\limits_{r = 1}^{2^{R-1}-b-1} (-1)^{b+r} + \sum\limits_{r = 2^{R-1}-b}^{2^{R-1}-1} (-1)^{b+r-1} + \sum\limits_{r=2^{R-1} +1}^{2^{R}-b-1} (-1)^{b+r} + (-1)^{2^R - 1}\right). \] With some sleight-of-hand, this can be simplified to give \[ S_{\p_2(\mu)}({\mu}) = 1 + 2(-1)^b\sum\limits_{r = 1}^{2^{R-1}-b-1} (-1)^{r+1} + \sum\limits_{r=0}^{b-1} (-1)^r. \] When $b$ is even, we get $S_{\p_2(\mu)}({\mu}) = 1 + 2\cdot 1 + 0 = 3$. When $b$ is odd, we get $S_{\p_2(\mu)}({\mu}) = 1 + 0 + 1 = 2$. \end{proof}
With all of these ingredients in our hands, we are ready to give a \begin{proof}[Proof of \cref{11thm}] Combining the results of \cref{lem:7} and \cref{lem:8}, we see that $S_{\p(\mu)}(\mu) = 4$ when $\mu\vdash 2^{R-1}$. Thus, for $n = 2^R + 2^{R-1}$, we can do the following computation: \begin{align*} \delta(n) &= \sum\limits_{\substack{\\\lambda\vdash n\\\lambda \text{ is odd}}} \Od(f^\lambda)\\ & = \sum\limits_{\substack{\mu\vdash 2^{R-1}\\\mu \text{ is odd}}} S_{\p(\mu)}({\mu})\Od(f^{\mu})\\ &= 4 \sum\limits_{\substack{\mu\vdash 2^{R-1}\\\mu \text{ is odd}}} \Od(f^{\mu})\\ &= 4\delta(2^{R-1}). \end{align*} By the results of \cref{lem:hookdims}, we have our theorem, except for $R = 1$, which can be done by hand. \end{proof} \section{Partitions with Dimension 2 modulo 4}\label{sec:2mod4} We now consider partitions whose dimensions are congruent to 2 modulo 4, i.e., $v_2(f^{\lambda}) = 1$ for all such partitions $\lambda$.\\ To tackle the problem of enumeration, we use the machinery of \textit{2-core towers} which requires us to introduce the notion of a \textit{2-quotient}.\\ Recall that $\widetilde{\Lambda}_t$ is the set of all $t$-cores. It is well-known (refer Proposition 3.7 of \cite{olsson} p. 20) that there exists a bijection between the sets, $\Lambda$ and $\Lambda^2 \times \widetilde{\Lambda}_2$, given by $\lambda\mapsto (\quo_2(\lambda), \core_2(\lambda))$. Here, $\quo_2(\lambda)$ (known as the \textit{2-quotient} of $\lambda$) is a pair $(\lambda^{(0)}, \lambda^{(1)})$ with the property, \[
|\lambda| = 2(|\lambda^{(0)}| + |\lambda^{(1)}|) + |\core_2(\lambda)|. \]
\begin{notation} We simplify the notation by writing $\lambda^{(ij)}$ for $(\lambda^{(i)})^{(j)}$ for any binary string $i$ and $j\in \{0,1\}$. Further, $\lambda^{(\varnothing)} = \lambda$ and $(\lambda^{(\varnothing)})^{(j)} = \lambda^{(j)}$. \end{notation}
We recall the construction of 2-core towers as presented in \cite{olsson}.\\ For $\lambda\in \Lambda$, construct an infinite rooted binary tree with nodes labelled by 2-cores as follows: \begin{itemize} \item Label the root node with $\core_2(\lambda) = \core_2(\lambda^{(\varnothing)})$. \item If the length of the unique path from the root node to a node $v$ is $i$, then we say that the \textit{node $v$ is in the \nth{i} row.} \item Every node in the \nth{i} row is adjacent to two nodes in the $(i+1)^{\text{st}}$ row. Define a recursive labelling as follows: if the label of some node, $v$, in the \nth{i} row, is $\core_2(\lambda^{(b)})$ for some binary string $b$, then the two nodes in the $(i+1)^{\text{st}}$ row adjacent to $v$ have labels $\core_2(\lambda^{(b0)})$ and $\core_2(\lambda^{(b1)})$ respectively. \end{itemize} This tree is known as the 2-core tower of $\lambda$. \begin{example} The partition $(6,5,4,2,1,1)$ has the 2-core tower:\\ \adjustbox{scale = {0.5}{0.65}, center}{ \begin{tikzcd}
&&&&&&& {(2,1)} \\
\\
&&& \varnothing &&&&&&&& \varnothing \\
\\
& {(1)} &&&& \varnothing &&&& \varnothing &&&& {(1)} \\
\varnothing && \varnothing && \varnothing && \varnothing && {(1)} && \varnothing && \varnothing && \varnothing
\arrow[from=5-2, to=6-1]
\arrow[from=5-2, to=6-3]
\arrow[from=5-6, to=6-5]
\arrow[from=5-6, to=6-7]
\arrow[from=5-10, to=6-9]
\arrow[from=5-10, to=6-11]
\arrow[from=5-14, to=6-13]
\arrow[from=3-4, to=5-2]
\arrow[from=3-4, to=5-6]
\arrow[from=5-14, to=6-15]
\arrow[from=1-8, to=3-4]
\arrow[from=1-8, to=3-12]
\arrow[from=3-12, to=5-14]
\arrow[from=3-12, to=5-10] \end{tikzcd} }. \end{example}
Every partition has a unique 2-core tower and every 2-core tower comes from a unique partition. This bijection is inherited from the core-quotient bijection above.\\ We state a well-known classification result for 2-cores which we encourage the reader to prove on their own. \begin{lem}\label{lem:2cores} A partition $\lambda$ is a 2-core if and only if $\lambda = (n, n-1, \ldots, 1)$ for some $n\geq 0$. \end{lem}
\begin{notation}
Let $\T_k(w)$ denote the number of solutions $(\mu_{[i]})_{i=1}^{2^k}$ to \[\sum\limits_{i=1}^{2^k}|\mu_{[i]}| = w,\] where each $\mu_{[i]}$ is a 2-core. \end{notation} \begin{lem}\label{lem:weight} With the above notation, we have, \[ \T_k(w) = \begin{cases} 1, & \text{if }w = 0\\ 2^k, &\text{if } w = 1\\ \binom{2^k}{2},&\text{if } w = 2\\ \binom{2^k}{3} + 2^k, & \text{if }w = 3. \end{cases} \] \end{lem} \begin{proof} When $w = 0$, the only solution is $\mu_{[i]}= \varnothing$ for all $i$. Thus, $\T_k(0) = 1$. \\ When $w = 1$, choose $\mu_{[i_0]} = (1)$ for some $1\leq i_0\leq 2^k$ and $\varnothing$ for the rest. As there are $2^k$ possible options for $i_0$, we get $\T_k(1) = 2^k$.\\ For $w = 2$, we must choose $\mu_{[i]} = \mu_{[j]} = (1)$, for some $i$ and $j$, and the rest $\varnothing$. Thus, $\T_{k}(2) = \binom{2^k}{2}$.\\ There does exist a 2-core of size 3, which is $(2,1)$. If $w=3$, we have two options, either we choose $i, j, k$ such that $\mu_{[i]} = \mu_{[j]} = \mu_{[k]} = (1)$ or $p$ such that $\mu_{[p]} = (2,1)$. For the former, we get $\binom{2^k}{3}$ ways and the latter can be done in $2^k$ ways, thus giving $\T_k(3) = \binom{2^k}{3} + 2^k$. \end{proof}
\begin{defn}[Weight in a 2-core tower]\label{defn:weight} The \textit{weight of the \nth{k} row of the 2-core tower of a partition $\lambda$ }is given by \[
w_k(\lambda) := \sum\limits_{b\in\{0,1\}^k} |\core_2(\lambda^{(b)})|, \]
where we define $w_0(\lambda) = |\core_2(\lambda)|$. \end{defn} We state some important properties of 2-core towers and how they relate to dimensions. For the subsequent discussion, we will let $n = \sum\limits_{i\geq 0} b_i2^i$, where $b_i\in\{0,1\}$. Further, recall that $\bin(n) = \{i\mid b_i = 1\}$. \begin{notation} Let $\bin'(n) = \{i>0\mid b_i = 1\}$. \end{notation} Note the strict inequality in the definition. Furthermore, we have the relation, $\bin'(n) = \bin(n)\backslash \{0\}$. \begin{prop}[Macdonald]\label{prop:mcdmain} Let $\lambda$ be a partition of $n$. We write $w_i:= w_i(\lambda)$. In this case, the following hold: \begin{enumerate} \item The 2-core tower of $\core_{2^k}(\lambda)$ is given by labelling the rows $0$ to $k-1$ as in the 2-core tower of $\lambda$ and by labelling every node in \nth{i} row, for $i\geq k$, with $\varnothing$, the empty partition. \item $\lambda$ is an odd partition if and only if $w_i = b_i$ for all $i\geq 0$. \item $v_2(f^{\lambda}) = 1$ if and only if for some $R \in \bin'(n)$, we have $w_{R -1} = b_{R-1} + 2$, $w_{R} = 0$ and $w_i = b_i$ otherwise. \end{enumerate} \end{prop} \begin{proof} The proofs of (1) and (2) are given in \cite{mcd}. In Section 4 of \cite{mcd}, we know that there exists a sequence of non-negative integers $(z_i)_{i\geq 0}$ with $z_0 = 0$ such that \[ w_i + z_i = b_i + 2z_{i+1}. \] From Section 3 of \cite{mcd}, we know that if $v_2(f^\lambda) = 1$, then we have $\sum\limits_{i\geq 0} w_i = \sum\limits_{i\geq 0}b_i + 1$. Combining these, we get that we must have $z_i = 1$ for exactly one $i > 0$ and zero for the rest. Putting $z_k = 1$ gives us the equations, \begin{align*} w_{k-1} &= b_{k-1} + 2\\ w_k + 1 &= b_k. \end{align*} Notice that the second equation above tells us that $z_k = 1$ is possible only when $k \in \bin(n)$. Thus, $w_k = 0$ and $w_{k-1} = b_{k-1} + 2$. The rest is clear as $z_i = z_{i+1} = 0$. \end{proof}
\begin{notation}\label{notation:wk} For $k>0$, define a sequence of non-negative integers, $\mathbf{w}_k := (w^k_i)_{i\geq 0}$ with the following properties: \begin{enumerate} \item $w^k_{k-1} = b_{k-1}+2$, \item $w^k_{k} = 0$, and \item $w^k_i = b_i$ for all other values of $i$. \end{enumerate} Let $\T(\mathbf{w}_k) := \prod\limits_{i\geq 0} \T_i(w^k_i)$. \end{notation} Note that the above product is the number of ways of constructing 2-core towers such that $w_i(\lambda) = w^k_i$, for some given $k$.
\begin{proof}[Proof of \cref{2mod4thm}] By the above discussion, we have that the number of partitions with dimensions congruent to 2 modulo 4 is given by \begin{align*} a_2(n) &= \sum\limits_{k\in \bin'(n)} \T(\mathbf{w}_k)\\ &= \sum\limits_{k\in \bin'(n)} \prod\limits_{i\geq 0} \T_i(w^k_i). \end{align*} Denote the binary expansion of $n$ as we did before. If we suppose $n = 2^R + m$ with $m<2^R$, then $b_R = 1$ and $b_i = 0$ for $i > R$. We can break the sum up as, \[ a_2(n) = \left(\sum\limits_{k\in \bin'(n)\backslash \{R\}} \prod\limits_{i\geq 0} \T_i(w^k_i) \right)+ \T(\mathbf{w}_R). \] For the term on the left of the plus sign, we have \begin{align*} \sum\limits_{k\in \bin'(n)\backslash \{R\}} \prod\limits_{i\geq 0} \T_i(w^k_i) &= \sum\limits_{k\in \bin'(n)\backslash \{R\}} \T_R(w^k_R)\prod\limits_{i \neq R} \T_i(w^k_i)\\ &= 2^R\sum\limits_{k\in \bin'(m)} \prod\limits_{i\neq R} \T_i(w^k_i)\\ &= \frac{2^R}{\T_R(0)}\sum\limits_{k\in \bin'(m)} \prod\limits_{i\geq 0} \T_i(w^k_i)\\ &= 2^R a_2(m). \end{align*} Here, the last equality follows from point (1) in \cref{prop:mcdmain}.\\ Now, we deal with the term on the right of the plus sign. We see that \begin{align*} \T(\mathbf{w}_R) &= \T_{R-1}(w^R_{R-1})\T_{R}(w^R_R)\prod\limits_{i\neq R, R-1} \T_i(w^R_i)\\ &= \T_{R-1}(w^R_{R-1})\T_{R}(w^R_R)\prod\limits_{i\neq R, R-1} \T_i(b_i)\\ &= \frac{\T_{R-1}(w^R_{R-1})\T_{R}(w^R_R)}{\T_{R-1}(b_{R-1})\T_{R}(b_R)} a(n). \end{align*}
The $a(n)$ appearing is a consequence of (2) in \cref{prop:mcdmain}. We have $b_R = 1$, $w_R = 0$ and $w_{R-1} = b_{R-1} + 2$. This simplifies the above expression to \[ \frac{\T_{R-1}(b_{R-1} + 2)}{2^{R}\T_{R-1}(b_{R-1})} a(n). \] By \cref{lem:weight} and $a(n) = 2^Ra(m)$, we can complete the proof to get \[a_2(n) = \begin{cases} 2^R\cdot a_2(m) + \binom{2^{R-1}}{2}\cdot a(m), & \text{ if }m < 2^{R-1}\\ 2^R\cdot a_2(m) + \left(\binom{2^{R-1}}{3} + 2^{R-1}\right) \cdot \displaystyle{\frac{a(m)}{2^{R-1}}}, & \text{ if } 2^{R-1} < m < 2^R. \end{cases}\]
\end{proof}
In the sparse case, this theorem takes a nice form as evident in \cref{2mod4cor}. Although we can prove the corollary using the recursive relations in \cref{11thm}, we choose to perform direct computations as this is more illuminating. \begin{proof}[Proof of \cref{2mod4cor}] Let $n$ be a sparse number, i.e., it has no consecutive 1s in its binary expansion. The summation as before is: \begin{align*} a_2(n) &= \sum\limits_{k\in \bin'(n)} \T(\mathbf{w}_k)\\ &= \sum\limits_{k\in \bin'(n)} \frac{\T_{k-1}(w^k_{k-1})\T_{k}(w^k_k)}{\T_{k-1}(b_{k-1})\T_{k}(b_k)} a(n). \end{align*} As $n$ is sparse, for $k\in \bin'(n)$, we have $b_{k-1} = 0$. Further, $w^k_{k} = 0$ and $w^k_{k-1} = b_{k-1} +2 = 2$.\\ This gives us the following summation, \[ a_2(n) = \sum\limits_{k\in \bin'(n)} \frac{\T_{k-1}(2)}{\T_{k}(1)} a(n). \] By using the results from \cref{lem:weight}, this simplifies to \begin{align*} a_2(n) &= a(n)\sum\limits_{k\in \bin'(n)} \frac{\binom{2^{k-1}}{2}}{2^k}\\ &= a(n)\sum\limits_{k\in \bin'(n)} \frac{(2^{k-1})(2^{k-1}-1)}{2\cdot 2^k}\\ &= \frac{a(n)}{8}\sum\limits_{k\in \bin'(n)} \left(2^k - 2\right). \end{align*} If $n$ is even, then this summation becomes $n - 2\nu(n)$. If $n$ is odd, the summation becomes $(n-1) - 2(\nu(n) - 1) = (n-1) - 2\nu(n-1)$. This gives us the final answer as, \[a_2(n) = \begin{cases} \displaystyle{\frac{a(n)}{8}(n - 2 \nu(n))},& \text{if }n \text{ is even}\\ a_2(n-1), &\text{if } n \text{ is odd}. \end{cases}\] \end{proof} \begin{remark} The theorem above immediately leads us to the value of $m_4(n)$. We have $m_4(n) = a_1(n) + a_2(n) + a_3(n) = a(n) + a_2(n)$ by definition. \end{remark}
\section{Irreducible Representations of Alternating Groups} We can show similar results about degrees of irreducible representations in the case of $A_n$, the alternating group on $n$ letters, which is defined to be the unique subgroup of $S_n$ of index 2. \begin{defn} Let $a_i^\circ(n)$ be the number of irreducible representations of $A_n$ (up to isomorphism) whose dimensions are congruent to $i$ modulo 4. Let $\delta^\circ(n) := a_1^\circ(n) - a_3^\circ(n)$. Let $a^\circ(n):= a_1^\circ(n) + a_3^\circ(n)$ be the number of odd dimensional irreducible representations of $A_n$. \end{defn} In \cite{geetha}, the value of $a^\circ(n)$ was computed in terms of $a(n)$ for which we present a proof sketch here. Recall that a partition, $\hat{\lambda}$, is called the \textit{conjugate} of $\lambda$ if there is a cell in position $(j,i)$ in $\sh(\hat{\lambda})$ if and only if there is a cell in position $(i,j)$ in $\sh(\lambda)$. If $\hat{\lambda} = \lambda$, then $\lambda$ is called a \textit{self-conjugate partition}. \begin{remark}\label{rem:towerflip} The 2-core tower of $\hat{\lambda}$ can be obtained by flipping the 2-core tower of $\lambda$ along the central axis of symmetry. That is, the node of the 2-core tower of $\hat{\lambda}$ indexed by $\core_2(\hat{\lambda}^{(b)})$ is indexed by $\core_2(\lambda^{(a)})$ where $a$ and $b$ are complementary binary strings. \end{remark} \begin{notation} Let $\hat{m}_2(n)$ denote the number of self-conjugate partitions with dimension congruent to 2 modulo 4. \end{notation} \begin{lem}[\cite{geetha}]\label{lemmaan} For $n\in \mathbb{N}$, we have \[ \hat{m}_2(n) = \begin{cases} 1, & \text{if } n = 3 \text{}\\ 2^{k-2}, & \text{if } n = 2^k, 2^k + 1, k>1 \text{}\\ 0, & \text{otherwise} \text{}. \end{cases} \] \end{lem} \begin{proof} For a given $n$, define $w^k_i$ as in \cref{notation:wk}.
By point 3 of \cref{prop:mcdmain}, we know that such sequences of weights of rows in the 2-core tower, $\mathbf{w}_k$, correspond to partitions with dimensions congruent to 2 modulo 4.\\ Firstly, consider the case when the root node is labelled by $\varnothing$. We know that $w^k_i \in \{0,1,2,3\}$ and for $w^k_i = 1, 3$, we cannot flip the 2-core tower about the central axis to preserve symmetry (\cref{rem:towerflip}). Thus, $w^k_i = 0$ or $2$ for all $i>0$ in this case and $w_0^k = 0$ as the root node is labelled by $\varnothing$.\\ Similarly, in the case when the root node is labelled by $(1)$, we have $w^k_0=1$ and $w^k_i = 0$ or $2$ for all $i>0$ and $k\in \bin(n)$. In both these cases, we cannot have two rows $j > i > 0$ such that both $i$ and $j$ in $\bin(n)$. If this was the case, then we will have $w^i_i=0$ but $w^i_j = 1$ which violates our condition on $w_i^k$ taking only the values 0 and 2. Thus, we must have $\nu(n) = 1$ or when $0\in\bin(n)$, $\nu(n) = 2$. Thus, self-conjugate partitions with dimension congruent to 2 modulo 4 exist only for $n = 2^k, 2^k+1$.\\ In these cases, we have $w^k_k = 0$, $w^k_{k-1} = 2$ and the $(k-1)^{\text{st}}$ row is labelled by two $(1)$s arranged symmetrically about the centre. Notationally, $\core_2(\lambda^{(x)}) = \core_2(\lambda^{(y)}) = (1)$ where $x$ and $y$ are binary complementary strings in $\{0,1\}^{k-1}$ and the other nodes are labelled by $\varnothing$. Clearly, there are $2^{k-2}$ many ways to achieve such an arrangement, which gives us our result. \end{proof}
The partition $(3,3,3)\vdash 9$ has dimension congruent to 2 modulo 4 and is self-conjugate. Its 2-core tower, shown below, satisfies the symmetric arrangement:\\ \adjustbox{scale = {0.5}{0.65}, center}{ \begin{tikzcd}
&&&&&&& {(1)} \\
\\
&&& \varnothing &&&&&&&& \varnothing \\
\\
& {(1)} &&&& \varnothing &&&& \varnothing &&&& {(1)} \\
\varnothing && \varnothing && \varnothing && \varnothing && \varnothing && \varnothing && \varnothing && \varnothing
\arrow[from=5-2, to=6-1]
\arrow[from=5-2, to=6-3]
\arrow[from=5-6, to=6-5]
\arrow[from=5-6, to=6-7]
\arrow[from=5-10, to=6-9]
\arrow[from=5-10, to=6-11]
\arrow[from=5-14, to=6-13]
\arrow[from=3-4, to=5-2]
\arrow[from=3-4, to=5-6]
\arrow[from=5-14, to=6-15]
\arrow[from=1-8, to=3-4]
\arrow[from=1-8, to=3-12]
\arrow[from=3-12, to=5-14]
\arrow[from=3-12, to=5-10] \end{tikzcd} }.
\begin{remark}\label{rem:cliff} By Clifford theory (refer Theorem 5.12.5 in \cite{amribook} and Chapter 6 in \cite{olsson}), we know that a self-conjugate partition, $\lambda\vdash n$, corresponds to two irreducible representations of $A_n$ with dimension $f^\lambda/2$. Furthermore, a conjugate pair of distinct partitions of $n$, $\mu$ and $\hat{\mu}$, corresponds to one irreducible representation of $A_n$ with dimension $f^\mu$. \end{remark} \begin{prop}[\cite{geetha}] For $n\in \mathbb{N}$, the number of odd dimensional irreducible representations of $A_n$ is given by \[ a^\circ(n) = \begin{cases} 1, & \text{if } n=1,2 \text{}\\ 3, & \text{if } n=3 \text{}\\ 2^k, & \text{if } n = 2^k, 2^k + 1, k>1\text{}\\ a(n)/2, & \text{for other values of } n > 4\text{}\\ \end{cases} \] where $a(n)$ is the number of odd partitions. \end{prop} \begin{proof} By \cref{rem:cliff}, we get that \[ a^\circ(n) = 2\hat{m}_2(n) + \frac{a(n)}{2}. \] Using \cref{lemmaan} and $a(2^k) = a(2^k + 1) = 2^k$, we have our result. \end{proof} We now wish to extend this result to find $a_1^\circ(n)$ and $a_3^\circ(n)$. We do so by calculating $\delta^\circ(n)$. Pick $\lambda$ such that $\lambda = \widehat{\lambda}$. If we want it to correspond to an irreducible representation of $A_n$ with odd dimension, then $f^\lambda$ must be congruent to 2 modulo 4 by \cref{rem:cliff}. As the dimension gets halved, we only need to find what $\Od(f^\lambda)$ is.\\ In order to do so, we require what $\lambda\vdash 2^k, 2^k +1$ with $f^\lambda$ congruent to 2 modulo 4 look like. For this, we cite two propositions from Geetha and Amruta's paper. \begin{prop}[\cite{geetha}]\label{prop:geethapowersof2} If $\lambda\vdash 2^k$ such that $f^\lambda \equiv 2$ mod 4, then \begin{enumerate} \item $\lambda$ is a hook partition of $2^k$ or, \item $\lambda$ is of the form $(2^{k-1}-i, 2^{k-1}-j, \underbrace{2, \ldots, 2}_{i \text{ times}}, \underbrace{1, \ldots, 1}_{j-i\text{ times}})$ where $0 \leq i \leq j \leq 2^{k-1}-2$. 
\end{enumerate} \end{prop} \begin{prop}[\cite{geetha}]\label{prop:geetha2kplusone} If $\lambda\vdash 2^k+1$ such that $f^\lambda \equiv 2$ mod 4 and $\lambda$ is self-conjugate, then \begin{enumerate} \item $\lambda$ has $h_{1,1} = 2^k + 1$ or \item $\lambda$ has $h_{1,1} = p_1$, $h_{2,2} = p_2$ and $h_{3,3} = 1$ for some odd $p_1 > p_2 > 1$. \end{enumerate} \end{prop} \begin{thm} For $n\geq 3$, we have \[ \delta^\circ(n) = \begin{cases} 3, & \text{if } n = 3 \text{}\\ 2^{k-1}, & \text{if } n = 2^k, k>1 \text{}\\ 2^{k-1}-2, & \text{if } n = 2^k+1, k>1 \text{}\\ \delta(n)/2, & \text{otherwise.} \text{} \end{cases}\] \end{thm} \begin{proof} For $n\neq 2^k, 2^{k}+1$, there are no self-conjugate partitions corresponding to irreducible representations of $A_n$ with dimension congruent to 2 modulo 4, thus $a_i^\circ(n) = a_i(n)/2$ for $i=1,3$ by \cref{rem:cliff} and $\delta^\circ(n) = \delta(n)/2$.\\ Notice that $\Od(a)^2 = 1$. Thus, for a self conjugate partition as we have $h_{i,j} = h_{j,i}$ for all $(i,j)\in\sh(\lambda)$, it follows that \[ \Od\left(\prod\limits_{(i,j)\in\sh(\lambda)} h_{i,j}\right) = \Od\left(\prod\limits_{\substack{i\\(i,i)\in\sh(\lambda)}} h_{i,i}\right). \] We saw above that for only $n = 2^k, 2^k + 1$ for $k >1$, we have $\hat{m}_2(n)\neq 0$. By using the formula in \cref{frthlf} and \[\Od(2^k!) = \Od((2^k+1)!) = -1,\text{(\cref{lem:odnfac})}\] we can show that for the case when $\lambda\vdash 2^k$, $k>1$,and $\lambda$ is self conjugate with dimension congruent to 2 mod 4, we have \[ \Od(f^\lambda) = -\Od\left(\prod\limits_{\substack{i\\(i,i)\in\sh(\lambda)}} h_{i,i}\right). \] To continue with our computations, we use \cref{prop:geethapowersof2}. 
Following the notation from \cite{geetha}, we deduce that for $\lambda\vdash 2^k$ with $f^\lambda\equiv 2 \mod 4$, we have $h_{1,1} = 2^{k-1} - i + j + 1$ and $h_{2,2} = 2^{k-1} + i - j - 1$.\\ Further imposing the condition that $\lambda = \hat{\lambda}$, we get $h_{1,1} = 2^k - (2\alpha + 1)$ and $h_{2,2} = 2\alpha+1$, for $0\leq \alpha \leq 2^{k-2}-1$. It is easy to see that $\Od(2^k - (2\alpha + 1)) = -\Od(2\alpha+1)$ and thus, \[ \Od(f^\lambda) = -\Od(h_{1,1})\Od(h_{2,2}) = -\Od(2\alpha+1)\cdot (-\Od(2\alpha+1)) = 1 \] for this case. Thus, by \cref{rem:cliff}, each irreducible representation of $S_n$ with degree congruent to 2 modulo 4 which is indexed by a self-conjugate representation restricts to two irreducible representations of $A_n$ with degrees which are always 1 modulo 4.\\ An irreducible representation of $A_n$ has degree congruent to 1 modulo 4 if it corresponds to a non-self-conjugate pair with dimensions congruent to 1 modulo 4 or it corresponds to a self-conjugate partition with dimension congruent to 2 modulo 4. This gives us that \[ a_1^\circ(n) = 2\hat{m}_2(n) + \frac{a_1(n)}{2}. \] On the other hand, $a_3^\circ(n)$ gets no contribution from $\hat{m}_2(n)$. Thus, we have \begin{enumerate} \item $a_1(2^k) = a_3(2^k) = 2^{k-1}$ where each \textit{pair} gives \textit{one} irreducible representation. \item There are $2^{k-2}$ possible values of $\alpha$, each of which corresponds to \textit{two} irreducible representations of $A_n$ with dimension congruent to 1 modulo 4 coming from representations of $S_n$ with dimension congruent to 2 modulo 4. \end{enumerate} We get that $a^\circ_1(2^k) = 2^{k-1} + 2^{k-2}$ and $a_3^\circ(2^k) = 2^{k-2}$. Thus, $\delta^\circ(2^k) = 2^{k-1}$.\\
Now, we consider the case when $\lambda\vdash 2^k+1$. We deal with the two cases in \cref{prop:geetha2kplusone}: \begin{enumerate} \item When $\lambda$ has $h_{1,1} = 2^k+1$, we get $\Od(f^\lambda) = -\Od(2^k+1) = -1$. \item In the other case, we have $p_1 + p_2 = 2^k$. As $p_1 \equiv -p_2\mod 4$, we have $\Od(f^\lambda) = -\Od(p_1)\Od(p_2)\Od(1)= 1$. Note that because of the conditions $p_1 > p_2$ and $p_1$ an odd integer, $p_1$ takes exactly $2^{k-2}-1$ values. \end{enumerate} We again use \cref{rem:cliff}. We add up the contributions from odd partitions of $2^k+1$ and partitions of $2^k+1$ with dimension congruent to 2 modulo 4 which we computed above. We have \[ a_1^\circ(2^k+1) = \frac{2^k-1 + 2}{2} + 2\cdot(2^{k-2}-1)\] and \[ a_3^\circ(2^k+1) = \frac{2^k-1 - 2}{2} + 2\cdot 1.\]
Thus, $\delta^\circ(2^k+1) = 2^{k-1}-2$. \end{proof} \section{Closing Remarks and Open Problems}\label{sec:problems} It is natural to ask why the case of $n = 2^R + m$ for $2^{R-1}<m<2^R$ is so difficult that it evades our methods. As we saw before, the values of $S_{\p(\mu)}(\mu)$ are constants, thus, independent of the core $\mu$ that we are building upon. In the harder case, we do not have this luxury. The values of $S_{\p(\mu)}(\mu)$ vary wildly and resist coalescing into nice summations. Along with this roadblock, we present some relevant problems that remain unsolved. \begin{enumerate} \item What is $\delta(n)$ in the case when $n = 2^R + m$ for $2^{R-1} < m < 2^{R}$? \item Is it possible to provide a reasonable bound for $S_{\p(\mu)}(\mu)$? \item Can we say something about the dimension of a partition by just looking at its 2-core tower? \item Is there a characterization of odd partitions modulo 4 in terms of $\beta$-sets? \end{enumerate} It is possible to answer (3) and (4) in the case of hook partitions as the analysis simplifies to looking at binomial coefficients.
\end{document}
\begin{document}
\markboth{Sl. Shtrakov and I. Damyanov} {On the complexity of finite valued functions}
\title{On the complexity of finite valued functions}
\author{Slavcho Shtrakov}
\address{Department of Computer Science,\\ South-West University, Blagoevgrad, Bulgaria\\
}
\author{Ivo Damyanov}
\address{Department of Computer Science,\\ South-West University, Blagoevgrad, Bulgaria\\
}
\begin{abstract} The essential variables in a finite function $f$ are defined as variables which occur in $f$ and affect the values of that function.
The number of essential variables is an important measure of complexity for discrete functions.
When replacing some variables in a function with constants the resulting functions are called subfunctions, and when replacing all essential variables in a function with constants we obtain an implementation of this function.
Such an implementation corresponds to a path in an ordered decision diagram (ODD) of the function which connects the root with a leaf of the diagram. The sets of essential variables in subfunctions of $f$ are called separable in $f$. In this paper we study several properties of separable sets of variables in functions which directly impact the number of implementations and subfunctions in these functions.
We define equivalence relations which classify the functions of $k$-valued logic into classes with same number of implementations, subfunctions or separable sets. These relations induce three transformation groups which are compared with the lattice of all subgroups of restricted affine group (RAG). This allows us to solve several important computational and combinatorial problems.
\end{abstract}
\keywords{Ordered decision diagram; implementation; subfunction; separable set.} \maketitle \section{Introduction}\label{sec1}
Understanding the complexity of $k$-valued functions is still one of the fundamental tasks in the theory of computation. At present, besides classical methods like substitution or degree arguments a bunch of combinatorial and algebraic techniques have been introduced to tackle this extremely difficult problem. There has been significant progress analysing the power of randomness and quantum bits or multiparty communication protocols that help to capture the complexity of switching functions. For tight estimations concerning the basic, most simple model switching circuits there still seems a long way to go (see \cite{comp_sem}).
In Section \ref{sec2} we introduce the basic notation and give definitions of separable sets, subfunctions, etc. The properties of distributive sets of variables with their s-systems are also discussed in Section \ref{sec2}. In Section \ref{sec3} we study the ordered decomposition trees (ODTs), ordered decision diagrams (ODDs), and implementations of discrete functions. We also discuss several problems with the complexity of representations of functions with respect to their ODDs, subfunctions and separable sets. In Section \ref{sec5} we classify discrete functions by transformation groups and equivalence relations concerning the number of implementations, subfunctions and separable sets in functions. In Section \ref{sec6} we use these results to classify all boolean (switching) functions with "small" number of its essential variables. Here we calculate the number of equivalence classes and cardinalities of equivalence classes of boolean functions depending on 3, 4 and 5 variables. \section{Separable and Distributive Sets of Variables}\label{sec2}
We start this section with basic definitions and notation. A discrete function is defined as a mapping: $f:A\to B$ where the domain $A={\times}_{i=1}^n A_i$ and range $B$ are non-empty finite or countable sets.
To derive the means and methods to represent, and calculate with finite valued functions, some algebraic structure is imposed on the domain $A$ and the range $B$. Throughout the present paper, both $A$ and $B$ will be finite rings of integers.
Let $X=\{x_1,x_2,\ldots \}$ be a countable set of variables and $X_n=\{x_1,x_2,\ldots,x_n\}$ be a finite subset of $X$. Let $k$, $k\geq 2$ be a natural number
and let us denote by $Z_k=\{0,1,\ldots,k-1\}$ the set (ring) of
remainders modulo $k$. The set $Z_k$ will be identified with the ring of residue classes $mod\ k$, i.e. $Z_k=Z/_{kZ}$, where $Z$ is the ring of all integers. An {\it $n$-ary $k$-valued function (operation) on $Z_k$ } is a mapping $f: Z_k^n\to Z_k$ for some natural number $n$, called {\it the arity} of $f$. $P_k^n$ denotes the set of all $n$-ary $k$-valued functions on $Z_k$. It is a well-known fact that there are $k^{k^n}$ functions in $P_k^n$. The set of all $k$-valued functions $P_k=\bigcup_{n=1}^\infty P_k^n$ is called {\it the algebra of $k$-valued logic}.
All results obtained in the present paper can be easily extended to arbitrary algebra of finite operations.
For a given variable $x$ and $\alpha\in Z_k$, $x^\alpha$ is defined as follows: $$
x^\alpha=\left\{\begin{array}{ccc}
1 \ &\ if \ &\ x=\alpha \\
0 & if & x\neq\alpha.
\end{array}
\right. $$
We use {\it sums of products (SP)} to represent the functions from $P_k^n$. This is the most natural representation and it is based on so called operation tables of the functions. Thus each function $f\in P_k^n$ can be uniquely represented of SP-form as follows \[ f=a_0.x_1^{0}\ldots x_n^{0}\oplus\ldots\oplus
a_{m}.x_1^{\alpha_1}\ldots
x_n^{\alpha_n}\oplus\ldots\oplus a_{k^n-1}.x_1^{k-1}\ldots
x_n^{k-1}\] with ${\mathbf{\alpha}}={({\alpha_1,\ldots,\alpha_n})}\in Z_k^n$, where $m=\sum_{i=0}^n\alpha_ik^i\leq k^n-1$. $"\oplus"$ and $"."$ denote the operations addition (sum) and multiplication (product) modulo $k$ in the ring $Z_k$. Then $(a_0,\ldots,a_{k^n-1})$ is the vector of output values of $f$ in its table representation.
Let $f\in P_k^n$ and $var(f)=\{x_1,\ldots,x_n\}$ be the set of all variables, which occur in $f$.
We say that the $i$-th variable $x_i\in var(f)$ is {\it essential} in $f$, or $f$ {\it essentially} {\it depends} on $x_i$, if there exist values $a_1,\ldots,a_n,b\in Z_k$, such that \[
f(a_1,\ldots,a_{i-1},a_{i},a_{i+1},\ldots,a_n)\neq f(a_1,\ldots,a_{i-1},b,a_{i+1},\ldots,a_n). \]
The set of all essential variables in the function $f$ is denoted by $Ess(f)$ and the number of essential variables in $f$ is denoted by
$ess(f)=|Ess(f)|$.
The variables from $var(f)$ which are not essential in
$f$ are called {\it inessential} or {\it fictive}.
The set of all output values of a function $f\in P_k^n$ is called {\it the range} of $f$, which is denoted as follows:
\[range(f)=\{c\in Z_k\ |\ \exists (a_1,\ldots,a_n)\in Z_k^n, \quad such\ that\quad f(a_1,\ldots,a_n)=c\}.\] \begin{definition}\label{d1.2} Let $x_i$ be an essential variable in $f$ and $c\in Z_k$ be a constant from $Z_k$. The function $g=f(x_i=c)$ obtained from $f\in P_{k}^{n}$ by replacing the variable $x_i$ with $c$ is called a {\it simple subfunction of $f$}.
When $g$ is a simple subfunction of $f$ we shall write $g\prec f$. The transitive closure of $\prec$ is denoted by $\preceq$. $Sub(f)=\{g\ | \ g\preceq f\}$ is the set of all subfunctions of $f$ and $sub(f)=|Sub(f)|$. \end{definition}
For each $m=0,1,\ldots, n$ we denote by $sub_m(f)$ the number of subfunctions in $f$ with $m$ essential variables, i.e. $sub_m(f)=|\{g\in Sub(f)\ |\ ess(g)=m\}|$.
Let $g\preceq f$, $\mathbf{c}=(c_1,\ldots,c_m)\in Z_k^m$ and $M=\{x_1,\ldots,x_m\}\subset X$ with \[g\prec g_1\prec\ldots\prec g_m=f,\quad g=g_1(x_1=c_1)\quad and \quad g_i=g_{i+1}(x_{i+1}=c_{i+1})\] for $i=1,\ldots,m-1$. Then we shall write $g=f(x_1=c_1,\ldots,x_m=c_m)$ or equivalently, $g\preceq_M^{\mathbf{c}} f$ and we shall say that the vector $\mathbf{c}$ {\it determines} the subfunction $g$ in $f$.
\begin{definition}\label{d1.3} Let $M$ be a non-empty set of essential variables in the function $f$. Then $M$ is called {\it a separable set} in $f$ if there exists a subfunction $g$, $g\preceq f$ such that $M=Ess(g)$.
$Sep(f)$ denotes the set of all separable sets in $f$ and $sep(f)=|Sep(f)|$. \end{definition}
The sets of essential variables in $f$ which are not separable are called {\em inseparable} or {\em non-separable}.
For each $m= 1,\ldots, n$ we denote by $sep_m(f)$ the number of separable sets in $f$ which consist of $m$ essential variables, i.e. $sep_m(f)=|\{M\in Sep(f)\ |\ |M|=m\}|$. The numbers $sub(f)$ and $sep(f)$ characterize the computational complexity of the function $f$ when calculating its values. Our next goal is to classify the functions from $P_k^n$ under these complexities. The initial and more fundamental results concerning essential variables and separable sets were obtained in the work of Y. Breitbart \cite{bre}, K. Chimev \cite{ch51}, O. Lupanov \cite{lup}, A. Salomaa \cite{sal}, and others. ~
\begin{remark}\label{r1.1} Note that if $g\preceq f$ and $x_i\notin Ess(f)$ then $x_i\notin Ess(g)$ and also if $M\notin Sep(f)$ then $M\notin Sep(g)$. \end{remark}
\begin{definition}\label{d2.1} Let $M$ and $J$ be two non-empty sets of essential variables in the function $f$ such that $M\cap J=\emptyset.$ The set $J$ is called {\it a distributive set of $M$ in $f$,
} if for every $|J|$-tuple of constants $\mathbf{c}$ from $Z_k$ it holds $M\not\subseteq Ess(g)$, where $g\preceq_J^{\mathbf{c}} f$
and $J$ is minimal with respect to the order $\subseteq$.
$Dis(M,f)$ denotes the set of all distributive sets of $M$ in $f.$ \end{definition}
It is clear that if $M\notin Sep(f)$ then $Dis(M,f)\neq\emptyset$. So, the distributive sets ``dominate'' the inseparable sets of variables in a function. We are interested in the relationship between the structure of the distributive sets of variables and the complexity of functions concerning $sep(f)$ and $sub(f)$, respectively, which is illustrated by the following example.
\begin{example}\label{ex2.1}
Let $k=2$, $f=x_1x_2\oplus x_1x_3$ and $g=x_1x_2\oplus x_1^0x_3.$ It is easy to verify that the all three pairs of variables $\{x_1,x_2\}$, $\{x_1,x_3\}$ and $\{x_2,x_3\}$ are separable in $f$, but $\{x_2,x_3\}$ is inseparable in $g$. {Figure} \ref{f1} presents graphically, separable pairs in $f$ and $g$. The set $\{x_1\}$ is distributive of $\{x_2,x_3\}$ in $g$. \end{example} \begin{figure}
\caption{Separable pairs.}
\label{f1}
\end{figure}
\begin{definition}\label{d2.2} Let $\mathcal F=\{P_1,\ldots,P_m\}$ be a family of non-empty sets. A set $\beta=\{x_1,\ldots,x_p\}$ is called {\it an $s$-system} of $\mathcal F$, if for all $P_i\in \mathcal F$, $1\leq i\leq m$ there exists $x_j\in\beta$ such that $x_j\in P_i$ and for all $x_s\in\beta$ there exists $P_l\in \mathcal F$ such that $\{x_s\}=P_l\cap\beta.$ $Sys(\mathcal F)$ denotes the set of all $s$-systems of the family $\mathcal F$. \end{definition}
Applying the results concerning $s$-systems of distributive sets is one of the basic tools for studying inseparable pairs and inseparable sets in functions. These results are deeply discussed in \cite{ch51,s27,s23}.
\begin{theorem}\label{t2.1}\ Let $M\subseteq Ess(f)$ be a non-empty inseparable set of essential variables in $f\in P_k^n$ and $\beta\in Sys(Dis(M,f))$. Then the following statements hold: \begin{enumerate}
\item[(i)] $M\cup \beta \in Sep(f)$;
\item[(ii)] $(\ \forall \alpha,\ \alpha\subseteq\beta,\ \alpha\neq\beta)\quad M\cup \alpha\notin Sep(f)$. \end{enumerate} \end{theorem} \begin{proof} $(i)$\
First, note that $M\notin Sep(f)$ implies $|M|\geq 2$. Without loss of generality assume that $\beta=\{x_1,\ldots,x_m\}\in Sys(Dis(M,f))$ and $M=\{x_{m+1},\ldots,x_{p}\}$ with $1\leq m<p\leq n$. Let us denote by $L$ the following set of variables $L=Ess(f)\setminus(M\cup\beta )=\{x_{p+1},\ldots,x_{n}\}.$
Since $\beta\in Sys(Dis(M,f))$ it follows that for each $Q\subseteq L$ we have $Q\notin Dis(M,f)$. Hence there is a vector $\mathbf{c}=(c_{p+1},\ldots,c_n)\in Z_k^{n-p}$ such that $M\subseteq Ess(g)$ where $g\preceq_L^{\mathbf{c}} f$.
Next, we shall prove that
$\beta\subset Ess(g).$
For suppose this were not the case and without loss of generality, assume $x_1\notin Ess(g)$, i.e. $g=g(x_1=d_1)$ for each $d_1\in Z_k.$ Let $J\in Dis(M,f)$ be a distributive set of $M$ such that $J\cap\beta=\{x_1\}.$ The existence of the set $J$ follows because $\beta$ is an $s$-system of $Dis(M,f)$ (see Definition \ref{d2.2}). Since $J\cap M=\emptyset$ and
$x_1\notin Ess(g)$ it follows that $J\cap Ess(g)=\emptyset.$ Now $M\subset Ess(g)$ implies that $J\notin Dis(M,f)$, which is a contradiction. Thus we have $x_1\in Ess(g)$ and $\beta\subset Ess(g)$. Then $Ess(f)\setminus L=M\cup\beta$ shows that $M\cup\beta=Ess(g)$ and hence $M\cup\beta\in Sep(f).$
$(ii)$\ Let $\alpha$, $\alpha\subseteq\beta,\ \alpha\neq\beta$ be a proper subset of $\beta$. Let $x_i\in \beta\setminus \alpha$. Then $\beta\in Sys(Dis(M,f))$ implies that there
is a distributive set $P\in Dis(M,f)$ of $M$ such that $P\cap\beta=\{x_i\}$. Hence $P\cap\alpha=\emptyset$ which shows that there is a non-empty distributive set $P_1$ for $M\cup\alpha$ with $P_1\subseteq P$. Hence
$M\cup\alpha\notin Sep(f)$.
\end{proof} \begin{corollary}\label{c2.1} Let $\emptyset\neq M\subset Ess(f)$ and $M\notin Sep(f)$. If $\beta\in Sys(Dis(M,f))$ and $x_i\in\beta$ then $M\setminus Ess(f(x_i=c))\neq\emptyset$ for all $c\in Z_k$. \end{corollary}
\begin{theorem}\label{t2.2}\cite{s23} For each finite family $\mathcal F$ of non-empty sets there exists at least one $s-$system of $\mathcal F$. \end{theorem}
\begin{theorem}\label{t2.3} Let $M$ be an inseparable set in $f$. A set $\beta\subset Ess(f)$ is an $s$-system of $Dis(M,f)$ if and only if $\beta\cap J\neq\emptyset$ for all $J\in Dis(M,f)$ and $\alpha\subseteq\beta,\ \alpha\neq\beta$ implies $\alpha\cap P=\emptyset$ for some $P\in Dis(M,f)$.
\end{theorem} \begin{proof} ``$\Leftarrow$'' Let $\beta\cap J\neq \emptyset$ for all $J\in Dis(M,f)$ and $\alpha\varsubsetneqq \beta$ implies $\alpha\cap P=\emptyset$ for some $P\in Dis(M,f)$. Since $\beta\cap J\neq \emptyset$ it follows that there is a set $\beta'$, $\beta'\subset \beta\subset Ess(f)$ and $\beta'\in Sys(Dis(M,f))$. If we suppose that $\beta'\neq\beta$ then there is $P\in Dis(M,f)$ with $\beta'\cap P=\emptyset$. Hence $M\cup\beta'\notin Sep(f)$ because of $P\in Dis(M\cup\beta',f)$ which contradicts Theorem \ref{t2.1}.
``$\Rightarrow$'' Let $\beta$ be an $s$-system of $Dis(M,f)$ and $\alpha\varsubsetneqq \beta$. Let $x\in \beta\setminus\alpha$ and $P\in Dis(M,f)$ be a distributive set of $M$ for which $\beta\cap P=\{x\}$. Hence $\alpha\cap P=\emptyset$ and we have $P\in Dis(M\cup \alpha,f)$ and $M\cup\alpha\notin Sep(f)$ which shows that $\alpha\notin Sys(Dis(M,f))$.
\end{proof}
\section{Ordered Decision Diagrams and Complexity of Functions}\label{sec3}
The distributive sets are also important when constructing efficient procedures for simplifying in analysis and synthesis of functional schemas.
In this section we discuss {\it ordered decision diagrams} (ODDs) for the functions obtained by restrictions on their {\it ordered decomposition trees} (ODTs).
{Figure} \ref{f2} shows an ordered decomposition tree for the function $g=x_1x_2\oplus x_1^0x_3\in P_2^3$ from Example \ref{ex2.1}, which essentially depends on all its three variables $x_1, x_2$ and $x_3$. The node at the top, labelled $g$, is the {\it function} node. The nodes drawn as filled circles labelled with variable names are the {\it internal (non-terminal)} nodes, and the rectangular nodes (leaves of the tree) are the {\it terminal} nodes. The terminal nodes are labelled by the numbers from $Z_k$. Implementation of $g$ for given values of $x_1, x_2$ and $x_3$ consists of selecting a path from the function node to a terminal node. The label of the terminal node is the sought value. At each non-terminal node the path follows the solid edge if the variable labelling the node evaluates to $1$, and the dashed edge if the variable evaluates to $0$. In the case of $k>2$ we can use colored edges with $k$ distinct colors.
The ordering in which the variables appear is the same along all paths of an ODT. {Figure} \ref{f2} shows the ODT for the function $g$ from Example \ref{ex2.1}, corresponding to the variable ordering $x_1,x_2,x_3$ (denoted briefly as $\langle 1; 2; 3\rangle$). It is known that for a given function $g$ and a given ordering of its essential variables there is a unique ODT.
We extend our study to ordered decision diagrams for the functions from $P_k^n$ which were studied by D. Miller and R. Drechsler \cite{mil1,mil2}.
\begin{figure}
\caption{Decomposition tree for $g=x_1x_2\oplus x_1^0x_3.$}
\label{f2}
\end{figure}
An {\it ordered decision diagram} of a function $f$ is obtained from the corresponding ODT by {\it reduction} of its nodes, applying the following two rules, starting from the ODT and continuing until neither rule can be applied:
{\bf Reduction rules} \begin{enumerate} \item[$\bullet$] If two nodes are terminal and have the same label, or are non-terminal and have the same children, they are merged. \item[$\bullet$] If a non-terminal node has identical children it is removed from the graph and its incoming edges are redirected to the child. \end{enumerate}
When $k=2$ an ODD is called {\it a binary decision diagram} (BDD). BDDs are extensively used in the theory of {\it switching circuits} to represent and manipulate Boolean functions and to measure the complexity of binary terms.
The size of the ODD is determined both by the function being represented and the chosen ordering of the variables. It is of crucial importance to care about variable ordering when applying ODDs in practice. The problem of finding the best variable ordering is NP-complete (see \cite{bra}).
{Figure} \ref{f3} shows the BDDs for the functions from Example \ref{ex2.1} obtained from their decomposition trees under the natural ordering of their variables - $\langle 1; 2; 3\rangle$. The construction of the ODT for $f$ under the natural ordering of the variables is left to the reader.
\begin{figure}
\caption{BDD for $f$ and $g$ under the natural ordering of variables.}
\label{f3}
\end{figure}
The BDD of $f$ is more complex than the BDD of $g$. This reflects the fact that $f$ has more separable pairs.
Thus we have $M=\{x_2,x_3\}\notin Sep(g)$, $\{x_1\}\in Dis(M,g)$ and $\{x_1\}\in Sys(Dis(M,g))$. Additionally, the diagram of $g$ starts with $x_1$ - a variable which belongs to an $s$-system of $Dis(M,g)$. In this simple case we have $Sys(Dis(M,g))= Dis(M,g)=\{x_1\}$.
Figure \ref{f3} shows that when constructing the ODD of a function, it is better to start with the variables from an $s$-system of the family of distributive sets of an inseparable set $M$ in this function. In \cite{ivo2} it is shown that the BDDs of functions have to be most simple when starting with variables from $Sys(Dis(M,f))$.
Consequently, the inseparable sets with their distributive sets are important in theoretical and applied computer science concerning the computational complexity of the functions.
Next, we define and explore complexity measures of the functions in $P_k^n$ which are directly connected with the computational complexity of functions. We might think that the complexity of a function $f$ depends on the complexity of its ODDs.
Let ${f\in P_k^n}$ and let $DD(f)$ be the set of all ODDs for $f$ constructed under different variable orderings in $f$.
\begin{definition}\label{d3.2} Each path starting from the function node and finishing into a terminal node is called an {\it implementation} of the function $f$ under the given variable ordering.
We denote the set of all implementations of $D_f$ by $Imp(D_f)$, and \[Imp(f)=\bigcup_{D_f\in DD(f)}Imp(D_f).\]
\end{definition}
Each implementation of the function ${f\in P_k^n}$, obtained from the diagram $D_f$ of $f$ by the non-terminal nodes $x_{i_1},\ldots,x_{i_r}$ and corresponding constants $c_{1},\ldots,c_{r},c\in Z_k$ with $f(x_{i_1}=c_{1},\ldots,x_{i_r}=c_{r})=c,\quad r\leq ess(f)$,
can be represented as a pair $\mathbf{(i,c)}$ of two words (strings) over $\mathbf{n}=\{1,\ldots,n\}$ and $Z_k$ where $\mathbf{i}=i_1i_2\ldots i_r\in \mathbf{n}^*$ and $\mathbf{c}=c_1c_2\ldots c_rc\in Z_k^*$.
There is an obvious way to define a measure of complexity of a given ordered decision diagram $D_f$, namely as the number ${imp}(D_f)$ of all paths in $D_f$ which start from the function node and finish in a terminal node of the diagram.
The {\it implementation complexity} of a function ${f\in P_k^n}$ is defined as the number of all implementations of $f$, i.e.
$imp(f)=|Imp(f)|.$
We shall also study two other measures of computational complexity of functions, namely $sub(f)$ and $sep(f)$.
\begin{example}\label{ex3.1} Let us consider again the functions $f$ and $g$ from Example \ref{ex2.1}, namely $f=x_1x_2\oplus x_1x_3\quad\mbox{and}\quad g=x_1x_2\oplus x_1^0x_3.$
Then $(123, 1011)$ is an implementation of $f$ obtained by the diagram $D_f$ presented in {Figure} \ref{f3}, following the path $\pi= (f; x_1: 1; x_2: 0; x_3: 1;terminal\ node: 1)$.
It is easy to see that there are six distinct BDDs for $f$ and five distinct BDDs for $g$. We shall calculate the implementations of $f$ and $g$ for the variable orderings $\langle 1; 2; 3\rangle$ (see Figure \ref{f3}) and $\langle 2; 1; 3\rangle$, only. Thus for $f$ we have:
\noindent
\begin{tabular}{|l|c|} \hline
ordering & implementations\\ \hline
$\langle 1; 2; 3\rangle$ & $(1,00);\ (123,1000);\ (123,1011);\ (123,1101);\ (123, 1110)$ \\ \hline $\langle 2; 1; 3\rangle$& $(21,000);\ (213,0100);\ (213,0111);\ (21,100); (213,1101);\ (213,1110)$ \\ \hline \end{tabular}
For the function $g$ we obtain:
\noindent
\begin{tabular}{|l|c|} \hline
ordering &implementations\\ \hline
$\langle 1; 2; 3\rangle$ & $(13,000);\ (13,011);\ (12,100);\ (12,111)$\\ \hline $\langle 2; 1; 3\rangle$& $(21,010);\ (213,0000);\ (213,0011);\ (213,1000); (213,1011);\ (21,111)$\\ \hline \end{tabular}
For the diagrams in
{Figure} \ref{f3} we have ${imp}(D_f)=5$ and ${imp}(D_g)=4$.
Since $f$ is a symmetric function with respect to $x_2$ and $x_3$ one can count that $imp(f)=33$. Note that the implementation $(1,00)$ occurs in two distinct diagrams of $f$, namely under the orderings $\langle 1; 2; 3\rangle$ and $\langle 1; 3; 2\rangle$. Hence, it has to be counted one time and we obtain that $imp(f)$ is equal to $33$ instead of $34$.
For the function $g$, the diagrams under the orderings $\langle 1; 2; 3\rangle$ and $\langle 1; 3; 2\rangle$ have the same implementations, i.e. the diagrams are identical (isomorphic). This fact is a consequence of inseparability of the set $\{x_2,x_3\}$. Hence $g$ has five (instead of six for $f$) distinct ordered decision diagrams. Then, one might calculate that $imp(g)=28$.
For the other two measures of complexity we obtain: $sub(f)=13$ because of $Sub(f)=\{0,1,x_1,x_2,x_3,x_2^0,x_3^0,x_2\oplus x_3,x_1x_2,$ $ x_1x_2^0,x_1x_3,x_1x_3^0, f\}$ and $sub(g)=11$ because of $Sub(g)=\{0,1,x_1,x_2,x_3,x_1^0,x_1 x_2,x_1^0x_3,x_1\oplus x_1^0x_3,x_1x_2\oplus x_1^0,g\}$. Furthermore, $sep(f)=7$ because of
$M\in Sep(f)$ for all $M$, $\emptyset\neq M\subseteq \{x_1,x_2,x_3\}$ and $sep(g)=6$ because of $Sep(g)=\{\{x_1\},\{x_2\},\{x_3\},\{x_1,x_2\},\{x_1,x_3\},\{x_1,x_2,x_3\}\}$. \end{example}
\begin{lemma}\label{l17} A variable $x_i$ is essential in $f\in P_k^n$ if and only if $x_i$ occurs as a label of a non-terminal node in any ODD of $f$. \end{lemma} \begin{proof} ``$\Rightarrow$'' Let us assume that $x_i$ does not occur as a label of any non-terminal node in an ordered decision diagram $D_f$ of $f$. Since all values of the function $f$ can be obtained by a traversal through all paths in $D_f$ from the function node to the leaf nodes, this will mean that $x_i$ will not affect the function value and hence $x_i$ is an inessential variable in $f$.
``$\Leftarrow$'' Let $x_i\notin Ess(f)$ be an inessential variable in $f$. It is obvious that for each subfunction $g$ of $f$ we have $x_i\notin Ess(g)$. Then we have $f(x_i=c)=f(x_i=d)$ for all $c,d\in Z_k$. Consequently, if there is a non-terminal node labelled by $x_i$ in an ODT of $f$ then its children have to be identical, which shows that this node has to be removed from the ODT, according to the reduction rules given above. \end{proof}
An essential variable $x_i$ in a function $f$ is called {\it a strongly essential variable } in $f$ if there is a constant $c\in Z_k$ such that $Ess(f(x_i=c))=Ess(f)\setminus\{x_i\}$.
\begin{fact}\label{fc1} If $ess(f)\geq 1$ then there is at least one strongly essential variable in $f$. \end{fact} This fact was proven by O. Lupanov \cite{lup} in case of Boolean functions and by A. Salomaa \cite{sal} for arbitrary functions.
Later, Y. Breitbart \cite{bre} and K. Chimev \cite{ch51} proved that if $ess(f)\geq 2$ then there exist at least two strongly essential variables in $f$.
We need Fact \ref{fc1} to prove the next important theorem.
\begin{theorem}\label{t3.2} A non-empty set $M$ of essential variables is separable in $f$ if and only
if there exists an implementation $\mathbf{(j,c)}$ of the form \[\mathbf{(j,c)}=(j_1j_2\ldots j_{r-m}j_{r-m+1}\ldots j_r, c_1c_2\ldots c_{r-m}c_{r-m+1}\ldots c_r c)\in Imp(f)\] where $M=\{x_{j_{r-m+1}},\ldots, x_{j_r}\}$ and $1\leq m\leq r\leq ess(f)$. \end{theorem} \begin{proof} ``$\Leftarrow$'' Let \[\mathbf{(j,c)}=(j_1\ldots j_{r-m}j_{r-m+1}\ldots j_r, c_1\ldots c_{r-m}c_{r-m+1}\ldots c_rc)\in Imp(f)\] be an implementation of $f$ and let $M=\{x_{j_{r-m+1}},\ldots, x_{j_r}\}$. Hence all the variables from $\{x_{j_{r-m+1}},\ldots,x_{j_r}\}$ are essential in the following subfunction of $f$ \[g=f(x_{j_1}=c_{1},\ldots,x_{j_{r-m}}=c_{{r-m}})\] which shows that $M\in Sep(f)$.
``$\Rightarrow$'' Without loss of generality let us assume that $M=\{x_1,\ldots,x_m\}$ is a non-empty separable set in $f$ and $n=ess(f)$. Then there are constants $d_{m+1},\ldots,d_n\in Z_k$ such that $M=Ess(h)$ where $h=f(x_{m+1}=d_{m+1},\ldots,x_{n}=d_{n})$. From Fact \ref{fc1} it follows that there is a variable $x_{i_1}\in M$ and a constant $d_{1}\in Z_k$ such that $Ess(h_1)=M\setminus \{x_{i_1}\}$ where $h_1=h(x_{i_1}=d_{1})$. Consequently, we might inductively obtain that there are variables $x_{i_r}\in M$ and constants $d_{r}\in Z_k$ for $r=2,\ldots,m$, such that $Ess(h_r)=M\setminus \{x_{i_1},\ldots,x_{i_r}\}$ where $h_r=h_{r-1}(x_{i_r}=d_{r})$. Hence, the string $(m+1)(m+2)\ldots n$ has a substring $j_1\ldots j_s$ such that $(j_1\ldots j_si_1\ldots i_m, d_{j_1}\ldots d_{j_s}d_{1}\ldots d_{m}d)$ is an implementation of $f$ with $M=\{x_{i_1},\ldots,x_{i_m}\}$ and $d=h_m$. \end{proof} \begin{corollary}\label{c3.2} For each variable $x_i\in Ess(f)$ there is an implementation $\mathbf{(j,c)}$ of $f$ whose last letter of $\mathbf{j}$ is $i$, i.e. $\mathbf{(j,c)}=(j_1\ldots j_{m-1}i, c_{j_1}\ldots c_{j_{m-1}}c_ic)\in Imp(f)$, $m\leq ess(f)$. \end{corollary} Note that there exists an ODD of a function whose non-terminal nodes are labelled by the variables from a given set, but this set might not be separable. For instance, the implementation $(231,0101)\in Imp(g)$ of the function $g$ from Example \ref{ex3.1} shows that the variables from the set $M=\{x_2,x_3\}$ occur as labels of the starting two non-terminal nodes in the BDD of $g$ under the ordering $\langle 2; 3; 1 \rangle$, but $M\notin Sep(g)$.
\begin{lemma}\label{l2.1} If $ess(f)=n$, $g\preceq f$ with $ess(g)= m<n$ then there exists a variable $x_t\in Ess(f)\setminus Ess(g)$ such that $Ess(g)\cup\{x_t\}\in Sep(f)$.
\end{lemma} \begin{proof} Let $M=Ess(g)$. Then $M\in Sep(f)$ and from Theorem \ref{t3.2} it follows that there is an implementation $\mathbf{(j,c)}$ of the form $\mathbf{(j,c)}=(j_1j_2\ldots j_{r-m}j_{r-m+1}\ldots j_r,$ $c_1c_2\ldots c_{r-m}c_{r-m+1}\ldots c_r c)\in Imp(f)$ where $M=\{x_{j_{r-m+1}},\ldots, x_{j_r}\}$ and $1\leq m\leq r\leq ess(f)$. Since $m<n$ it follows that $r-m>0$ and Lemma \ref{l17} shows that there is $x_{j_i}\in Ess(h)$ where \[h=f(x_{j_1}=c_1,\ldots,x_{j_{i-1}}=c_{i-1},x_{j_{i+1}}=c_{i+1},\ldots x_{j_{r-m}}=c_{r-m}).\] It is easy to see that $Ess(h)=M\cup\{x_{j_i}\}$. \end{proof}
Now, as an immediate consequence of the above lemma we obtain Theorem \ref{t2.4} which was inductively proven by K. Chimev. \begin{theorem}\label{t2.4} \cite{ch51} If $ess(f)=n$, $g\preceq f$ with $ess(g)= m\leq n$ then there exist $n-m$ subfunctions $g_1,\ldots,g_{n-m}$ such that \[g \prec g_1 \prec g_2\prec \ldots\prec g_{n-m}= f\] and $ess(g_i)=m+i$ for $i=1,\ldots,n-m$. \end{theorem}
The {\it depth}, (denoted by $Depth(D_f)$) of an ordered decision diagram $D_f$ for a function $f$ is defined as the number of the edges in a longest path from the function node in $D_f$ to a leaf of $D_f$.
Thus for the diagrams in Figure \ref{f3} we have $Depth(D_f)=4$ and $Depth(D_g)=3$.
Clearly, if $ess(f)=n$ then $Depth(D_f)\leq n+1$ for all ODDs of $f$.
\begin{theorem}\label{l3.2} If $ess(f)=n\geq 1$ then there is an ordered decision diagram $D_f$ of $f$ with $Depth(D_f)=n+1$. \end{theorem} \begin{proof} Let $Ess(f)=\{x_1,\ldots,x_n\}$, $n\geq 1$. Since $x_1$ is an essential variable it follows that $\{x_1\}\in Sep(f)$. Theorem \ref{t2.4} implies that there is an ordering $\langle i_1; i_2; \ldots; i_{n-1}\rangle$ of the remaining variables $x_{2}, \ldots, x_{{n}}$ such that for each $j$, $1\leq j\leq n-1$ we have $g_j\prec_J^{\mathbf{c}} f$ where $J=\{x_{i_1},\ldots,x_{i_j}\}$, $\mathbf{c}\in Z_k^{J}$ and $Ess(g_j)=\{x_1,x_{i_{j+1}},\ldots,x_{i_{n-1}}\}$. This shows that all the variables from $J$ have to be labels of non-terminal nodes in a path $\pi$ of the ordered decision diagram $D_f$ of $f$ under the variable ordering $\langle i_1; i_2; \ldots; i_{n-1}; 1\rangle$. Hence $\pi$ has to contain all essential variables in $f$ as labels at the non-terminal nodes of $\pi$. Hence $Depth(D_f)=n+1$. \end{proof}
\begin{theorem}\label{t3.3} Let $f\in P_k^n$ and $Ess(f)=\{x_1,\ldots,x_n\}$, $n\geq 1$. If $M\neq \emptyset$, $M\subset Ess(f)$ and $M\notin Sep(f)$ then there is a decision diagram $D_f$ of $f$ with $Depth(D_f)<n+1$. \end{theorem} \begin{proof} Without loss of generality, let us assume that $M=\{x_1,\ldots,x_m\}$, $m<n$. Since $M$ is inseparable in $f$, the family $Dis(M,f)$ of all distributive sets of $M$ is non-empty. According to Theorem \ref{t2.2} there is a non-empty $s$-system $\beta=\{x_{i_1},\ldots,x_{i_t}\}$ of $Dis(M,f)$. Since $f(x_{i_1}=c_1)\neq f(x_{i_1}=c_2)$ for some $c_1,c_2\in Z_k$ it follows that there exists an ODD $D_f$ for $f$ under a variable ordering with $x_{i_1}$ as the label of the first non-terminal node of $D_f$. According to Corollary \ref{c2.1} for all $c\in Z_k$ there is a variable $x_j\in M$ which is inessential in $f(x_{i_1}=c)$. Hence, each path of $D_f$ does not contain at least one variable from $M$ among its labels of non-terminal nodes. Hence $Depth(D_f)<n+1$. \end{proof}
\section{Equivalence Relations and Transformation Groups in $P_k^n$}\label{sec5}
Many of the problems in applications of the $k$-valued functions are compounded because of the large number of the functions, namely $k^{k^n}$. Techniques which involve enumeration of functions can only be used if $k$ and $n$ are trivially small. A common way for extending the scope of such enumerative methods is to classify the functions into equivalence classes under some natural equivalence relation.
In this section we define equivalence relations in $P_k^n$ which classify functions with respect to the number of their implementations, subfunctions and separable sets. We intend to determine several numerical invariants of the transformation groups generated by these relations. The second goal is to compare these groups with the so-called classical subgroups of the Restricted Affine Group (RAG) \cite{lech} which have a variety of applications such as coding theory, switching theory, multiple output combinational logic, sequential machines and other areas of theoretical and applied computer sciences.
Let us denote by $S_A$ the symmetric group of all permutations of a given non-empty set $A$. $S_m$ denotes the symmetric group $S_{\{1,\ldots,m\}}$ for a natural number $m$, $m\geq 1$.
Let us define the following three equivalence relations: $\simeq_{imp}$, $\simeq_{sub}$ and $\simeq_{sep}$. \begin{definition}\label{d5.1} Let $f,g\in P_k^n$ be two functions. \begin{enumerate} \item[(i)] If $ess(f)=ess(g)\leq 1$ then $f\simeq_{imp} g$; \item[(ii)] Let $ess(f)=n>1$. We say that $f$ is $imp$-equivalent to $g$ (written $f\simeq_{imp} g$) if there are $\pi\in S_n$ and $\sigma_i\in S_{Z_k}$ such that $f(x_i=j)\simeq_{imp} g(x_{\pi(i)}=\sigma_i(j))\quad\mbox{for all}\quad i=1,\ldots,n\quad\mbox{and}\quad j\in Z_k.$ \end{enumerate} \end{definition} Hence two functions are ${imp}$-equivalent if they produce the same number of implementations, i.e.
$imp(f)=imp(g)$ and there are $\pi\in S_n$, and $\sigma$, $\sigma_i\in S_{Z_k}$ such that $(i_1\ldots i_m,c_{1},\ldots,c_{m}c)\in Imp(f)\iff $ $ (\pi(i_1)\ldots \pi(i_m), \sigma_1(c_{1})\ldots\sigma_m(c_{m})\sigma(c))\in Imp(g).$
Table \ref{tb1} shows the classification of Boolean functions of two variables into four classes, called {\it imp-classes} under the equivalence relation $\simeq_{imp}$. The second column shows the number of implementations of the functions from the $imp$-classes given at the first column. The third column presents the number of functions per each $imp$-class.
\begin{table}[h] \caption{$Imp$-classes in $P_2^2$.} \label{tb1}
\centering
\begin{tabular}{l|l|l} \hline\hline
$[\ 0,\ 1\ ]$ & \ 1& 2\\ \hline $[\ x_1,\ x_2,\ x_1^0,\ x_2^0\ ]$&\ 2& 4\\ \hline $[\ x_1x_2,\ x_1x_2^0,\ x_1^0x_2,\ x_1^0x_2^0,\ x_1\oplus x_1x_2,$ &&\\ $ x_2^0\oplus x_1x_2,\ x_1^0\oplus x_1x_2,\ x_1^0\oplus x_1x_2^0\ ]$& \ 6 & 8\\ \hline $[\ x_1\oplus x_2,\ x_1\oplus x_2^0\ ]$&\ 8& 2\\ \hline\hline \end{tabular} \end{table}
\begin{definition}\label{d5.2} Let $f,g\in P_k^n$ be two functions. \begin{enumerate} \item[(i)] If $ess(f)=ess(g)=0$ then $f\simeq_{sub} g$; \item[(ii)] If $ess(f)=ess(g)=1$ then $f\simeq_{sub} g\iff range(f)=range(g)$; \item[(iii)] Let $ess(f)=n>1$. We say that $f$ is $sub$-equivalent to $g$ (written $f\simeq_{sub} g$) if $sub_m(f)=sub_m(g)$ for all $m=0,1,\ldots, n$. \end{enumerate} \end{definition} It is easy to see that the equivalence relation $\simeq_{sub}$ partitions the algebra of Boolean functions of two variables in the same equivalence classes (called {\it the sub-classes}) as the relation $\simeq_{imp}$ (see Table \ref{tb1}). \begin{definition}\label{d5.3} Let $f,g\in P_k^n$ be two functions. \begin{enumerate} \item[(i)] If $ess(f)=ess(g)\leq 1$ then $f\simeq_{sep} g$; \item[(ii)]Let $ess(f)=n>1$. We say that $f$ is $sep$-equivalent to $g$ (written $f\simeq_{sep} g$) if $sep_m(f)=sep_m(g)$ for all $m=1,\ldots, n$. \end{enumerate} \end{definition} The equivalence classes under $\simeq_{sep}$ are called {\it sep-classes}.
Since $P_k^n$ is a finite algebra of $k$-valued functions each equivalence relation $\simeq$ on $P_k^n$ makes a partition of the algebra in the set of disjoint equivalence classes $Cl(\simeq)=\{P_1^\simeq,\ldots,P_r^\simeq\}$. Then, in the set of all equivalence relations a partial order is defined as follows: $\simeq_1\ \leq\ \simeq_2$ if for each $P\in Cl(\simeq_1)$ there is a $Q\in Cl(\simeq_2)$ such that $P\subseteq Q$. Thus $\simeq_1\ \leq\ \simeq_2$ if and only if $f\simeq_1 g\ \Rightarrow f\simeq_2 g$, for $f,g\in P_k^n$.
\begin{theorem}\label{t5.1}
~~~ \begin{enumerate} \item[(i)] $ \simeq_{imp}\ \leq\ \simeq_{sep}$; \quad (iii) $\simeq_{imp}\ \not\leq\ \simeq_{sub}$; \item[(ii)] $ \simeq_{sub}\ \leq\ \simeq_{sep}$; \quad (iv) $ \simeq_{sub}\ \not\leq\ \simeq_{imp}$. \end{enumerate}
\end{theorem}
\begin{proof} (i)\ Let $f,g\in P_k^n$ be two ${imp}$-equivalent functions, i.e. $f\simeq_{imp} g$. We shall proceed by induction on the number $n=ess(f)$ of essential variables in $f$ and $g$.
Clearly, if $n\leq 1$ then $f\simeq_{sep} g$, which is our inductive basis. Let us assume that $f\simeq_{imp} g$ implies $f\simeq_{sep} g$ if $n< r$ for some natural number $r$, $r\geq 2$.
Let $f$ and $g$ be two functions with $f\simeq_{imp}g$ and $ess(f)=ess(g)=r$. Then there are $\pi\in S_r$ and $\sigma_i\in S_{Z_k}$ for $i=1,\ldots,r$ such that $f(x_i=j)\simeq_{imp} g(x_{\pi(i)}=\sigma_i(j))$. Let $M$, $\emptyset\neq M\in Sep(f)$ be a separable set of essential variables in $f$ with $|M|=m$, $1\leq m\leq r$. Theorem \ref{t3.2} implies that there is an implementation \[\mathbf{(j,c)}=(j_1\ldots j_{r-m} i_1\ldots i_m, c_{j_1}\ldots c_{j_{r-m}}c_{i_1}\ldots c_{i_m}c)\] of $f$ obtained under an ODD whose variable ordering finishes with the variables from $M$, i.e. $M=\{x_{i_1}\ldots,x_{i_m}\}$. Then $f(x_{j_1}=c_{j_1})\simeq_{imp} g(x_{\pi(j_1)}=\sigma_{j_1}(c_{j_1}))$ implies that \[({\pi({j_1})}\ldots {\pi({j_{r-m}})}{\pi({i_1})}\ldots {\pi({i_m})},\sigma_{j_1}(c_{j_1})\ldots \sigma_{j_{r-m}}(c_{j_{r-m}})\sigma_{i_1}(c_{i_1})\ldots \sigma_{i_m}(c_{i_m})\sigma(c))\]
is an implementation of $g$, for some $\sigma\in S_{Z_k}$. Again, from Theorem \ref{t3.2} it follows that $\pi(M)=\{x_{\pi(i_1)},\ldots,x_{\pi(i_m)}\}\in Sep(g).$ Since $\pi$ is a permutation of $S_r$ it follows that $sep_m(f)=sep_m(g)$ for $m=1,\ldots,r$ and hence $\simeq_{imp}\ \leq\ \simeq_{sep}.$
(ii) \
Definition \ref{d1.3} shows that $M\in Sep(f)$ if and only if there is a subfunction $g\in Sub(f)$ with $g\prec_Q^{\mathbf{c}}f$ where $Q=Ess(f)\setminus M$ and $\mathbf{c}\in Z_k^{n-|M|}$. Hence \[\forall f,g\in P_k^n,\quad Sub(f)=Sub(g)\implies Sep(f)=Sep(g),\] which implies that $sub_m(f)=sub_m(g)\implies sep_m(f)=sep_m(g)$ and $\simeq_{sub}\leq \simeq_{sep}$.
(iii)\ Let us consider the functions \[f=x_1^0x_2x_3\oplus x_1x_2^0x_3^0\ (mod\ 2)\quad\mbox{and}\quad g=x_2x_3\oplus x_1x_2^0x_3\oplus x_1x_2x_3^0\ (mod\ 2).\]
The set of the all simple subfunctions in $f$ is: $\{x_1x_2^0, x_1^0x_2, x_1x_3^0, x_1^0x_3, x_2x_3, x_2^0x_3^0\}$ and in $g$ is: $\{x_1x_2, x_1x_3, x_2x_3, x_2\oplus x_1x_2^0, x_3\oplus x_1x_3^0, x_2^0x_3^0\oplus 1\}$.
Hence $f$ and $g$ have six simple subfunctions, which depends essentially on two variables. Table \ref{tb1} shows that all these subfunctions belong to same $imp$-class and the number of their implementations is $6$. Thus we might calculate that $imp(f)=imp(g)=36$ and $f\simeq_{imp}g$.
The set of the all subfunctions with one essential variable in the function $f$ is: $\{ x_1, x_2, x_3, x_1^0, x_2^0, x_3^0\}$ and in $g$ is: $\{x_1, x_2, x_3\}$.
Then we have $sub_0(f)=sub_0(g)=2$, $sub_1(f)=6$, $sub_1(g)=3$ and $sub_2(f)=sub_2(g)=6$ and hence $f\not\simeq_{sub}g$. It is clear that $sub(f)=15$, $sub(g)=12$ and $\simeq_{imp}\ \not\leq\ \simeq_{sub}$.
(iv)\ Let us consider the functions \[f=x_1x_2^0x_3^0\oplus x_1\ (mod\ 2)\quad\mbox{and}\quad g= x_1x_2x_3\ (mod\ 2).\]
The simple subfunctions in $f$ and $g$ are:\\ \begin{tabular}{lll} $f(x_1=0)=0,$&$f(x_3=0)=x_1x_2^0\oplus x_1,$& $g(x_2=0)=0,$ \\ $f(x_1=1)=x_2^0x_3^0\oplus 1,$& $f(x_3=1)=x_1,$ & $g(x_2=1)=x_1x_3,$\\ $f(x_2=0)=x_1x_3^0\oplus x_1,$ & $g(x_1=0)=0,$&$g(x_3=0)=0,$\\
$f(x_2=1)=x_1,$ & $g(x_1=1)=x_2x_3,$&$g(x_3=1)=x_1x_2$.\\
\end{tabular}
Now, using Table \ref{tb1}, one can easily calculate that $imp(f)=23$ and $imp(g)=21$, and hence $ f \not\simeq_{imp} g$. On the other side we have $Sub(f)=\{0, 1, x_1, x_2, x_3, x_2^0x_3^0\oplus 1, x_1x_3^0\oplus x_1, x_1x_2^0\oplus x_1, f\}$ and $Sub(g)=\{0, 1, x_1, x_2, x_3, x_2x_3, x_1x_3, x_1x_2, g\}$ which show that $sub_m(f)=sub_m(g)\quad\mbox{for}\quad m=0,1,2,3$ and $f\simeq_{sub} g$. Hence $ \simeq_{sub}\ \not\leq\ \simeq_{imp}$. \end{proof}
A {\it transformation} $\psi:P_k^n\longrightarrow P_k^n$ can be viewed as an $n$-tuple of functions \[\psi=(g_1,\ldots,g_n),\quad g_i\in P_k^n,\quad i=1,\ldots,n\] acting on any function $f=f(x_1,\ldots,x_n)\in P_k^n$ as follows $\psi(f)=f(g_1,\ldots,g_n)$. Then the composition of two transformations $\psi$ and $\phi=(h_1,\ldots,h_n)$ is defined as follows \[\psi\phi=(h_1(g_1,\ldots,g_n),\ldots,h_n(g_1,\ldots,g_n)).\]
Thus the set of all transformations of $P_k^n$ is the {\it universal monoid $\Omega_k^n$} with unity - the identical transformation. When taking only invertible transformations we obtain the {\it universal group} $C_k^n$ isomorphic to the symmetric group $S_{Z_k^n}$. Throughout this paper we shall consider invertible transformation, only. The groups consisting of invertible transformations of $P_k^n$ are called {\it transformation groups}.
Let $\simeq$ be an equivalence relation in $P_k^n$. A mapping $\varphi:P_k^n\longrightarrow P_k^n$ is called {\it a transformation, preserving $\simeq$} if $f\simeq \varphi(f)$ for all $f\in P_k^n$. Taking only invertible transformations which preserve $\simeq$, we get the group $G$ of all transformations preserving $\simeq$, whose {\it orbits} (also called {\it $G$-types}) are the equivalence classes $P_1,\ldots,P_r$ under $\simeq$. The number of orbits of a group $G$ of transformations in finite algebras of functions is denoted by $t(G)$.
Next, we relate groups to combinatorial problems through the following obvious but important definition:
\begin{definition}\label{d5.4}~Let $G$ be a transformation group acting on the algebra of functions $P_k^n$ and suppose that $f,g\in P_k^n$. We say that $f$ is $G$-equivalent to $g$ (written $f\simeq_G g$) if there exists $\psi\in G$ so that $g=\psi(f)$. \end{definition} Clearly, the relation $\simeq_G $ is an equivalence relation. We summarize and extend the results for the ``classical'' transformation groups, following \cite{har2,lech,str3}, where these notions are used to study classification and enumeration in the algebra of boolean functions. Such groups are induced under the following notions of equivalence: complementation and/or permutation of the variables; any linear or affine function of the variables. Since we want to classify functions from $P_k^n$ into equivalence classes, three natural problems occur.
\begin{enumerate}
\item[$\bullet$] We ask for the number $t(G)$ of such equivalence classes. This problem will be partially discussed for the family of ``natural'' equivalence relations in the algebra of boolean functions. \item[$\bullet$] We ask for the cardinalities of the equivalence classes. This problem is important in applications as functioning the switching gates, circuits etc. For boolean functions of 3 and 4 variables we shall solve these two problems, also concerning $imp$-, $sub$- and $sep$-classes.
\item[$\bullet$] We want to give a method which will decide the class to which an arbitrary function belongs. In some particular cases this problem will be discussed below. We also develop a class of algorithms for counting the complexities $imp$, $sub$ and $sep$ for each boolean function which allow us to classify the algebras $P_2^n$ for $n=2,3,4$ with respect to these complexities as group invariants. \end{enumerate} These problems are very hard and for $n\geq 5$ they are practically unsolvable.
We also use the notation $\leq$ for the order relation ``subgroup''. More precisely, $H\leq G$ if there is a subgroup $G'$ of $G$ which is isomorphic to $H$.
Let us denote by $IM_k^n$, $SB_k^n$ and $SP_k^n$ the transformation groups induced by the equivalence relations $\simeq_{imp}$, $\simeq_{sub}$ and $\simeq_{sep}$, respectively.
Now, as a direct consequence of Theorem \ref{t5.1} we obtain the following proposition.
\begin{proposition}\label{c5.1}~
\begin{enumerate}\item[(i)] $IM_k^n\leq SP_k^n$; \quad (iii) $IM_k^n\not\leq SB_k^n$; \item[(ii)] $SB_k^n\leq SP_k^n$; \quad (iv) $SB_k^n\not\leq IM_k^n$. \end{enumerate} \end{proposition}
We deal with ``natural'' equivalence relations which involve variables in some functions. Such relations induce permutations on the domain $Z_k^n$ of the functions. These mappings form a transformation group whose number of equivalence classes can be determined.
The restricted affine group (RAG) is defined as a subgroup of the symmetric group on the direct sum of the vector space $Z_k^n$ of arguments of functions and the vector space $Z_k$ of their outputs. The group RAG permutes the direct sum $Z_k^n+Z_k$ under restrictions which preserve single-valuedness of all functions from $P_k^n$. The equivalence relation induced by RAG is called {\it prototype equivalence relation}.
In the model of RAG an affine transformation operates on the domain or space of inputs $\mathbf{x}=(x_1,\ldots,x_n)$ to produce the output $\mathbf{y}=\mathbf{xA}\oplus \mathbf{c}$, which might be used as an input in a function $g$. Its output $g(\mathbf{y})$ together with the function variables $x_1,\ldots,x_n$ are linearly combined by a range transformation which defines the image $f(\mathbf{x})$ as follows: \begin{equation}\label{eq2} f(\mathbf{x})=g(\mathbf{y})\oplus a_1x_1\oplus\ldots\oplus a_nx_n\oplus d=g(\mathbf{xA}\oplus \mathbf{c})\oplus \mathbf{a^tx}\oplus d \end{equation} where $d$ and $a_i$ for $i=1,\ldots,n$ are constants from $Z_k$.
Such a transformation belongs to RAG if $\mathbf{A}$ is a non-singular matrix. The name RAG was given to this group by R. Lechner in 1963 (see \cite{lech1}) and it was studied by Ninomiya (see \cite{nin}) who gave the name ``prototype equivalence'' to the relation it induces on the function space $P_k^n$.
We want to extract basic facts about some of the subgroups of RAG which are ``neighbourhoods'' or ``relatives'' of our transformation groups -- $IM_k^n$, $SB_k^n$ and $SP_k^n$.
First, we consider a group which is called $CA_k^n$ (complement arguments) and each transformation $\mathbf{j}\in CA_k^n$ is determined by an $n$-tuple from $Z_k^n$, i.e. $CA_k^n=\{(j_1,\ldots,j_n)\in Z_k^n\}.$ Intuitively, $CA_k^n$ will complement some of the variables of a function. If $\mathbf{j}=(j_1,\ldots,j_n)$ is in $CA_k^n$, define $\mathbf{j}(x_1,\ldots,x_n)=(x_1\oplus j_1,\ldots,x_n\oplus j_n).$ The group operation is sum mod $k$ and written $\oplus$. For example if $n=k=3$ and $\mathbf{j}=(2,1,0)$ then $\mathbf{j}(x_1,x_2,x_3)=(x_1\oplus 2,x_2\oplus 1,x_3)$ and $\mathbf{j}$ induces a permutation on $Z_3^3=\{0,1,2\}^3$. Then the following sequence of images: $\mathbf{j}: 000\rightarrow 210\rightarrow 120\rightarrow 000$ determines the cycle $(0,21,15)$ and if we agree to regard each triple from $Z_3^3$ as a ternary number, then the permutation induced by $\mathbf{j}$ can be written in cyclic notation as $(0,21,15)(1,22,16)(2,23,17)(3,24,9)(4,25,10)(5,26,11)$ $(6,18,12)(7,19,13)(8,20,14).$ In \cite{har2} M. Harrison showed that the boolean functions of two variables are grouped into seven classes under the group $CA_2^2$.
Another classification occurs when permuting arguments. If $\pi\in S_n$ then $\pi$ acts on variables by: $\pi(x_1,\ldots,x_n)=(x_{\pi(1)},\ldots,x_{\pi(n)}).$ Each permutation induces a map on the domain $Z_k^n$. For instance the permutation $\pi=(1,2)$ induces a permutation on $\{0,1,2\}^3$ when considering the algebra $P_3^3$. Then we have $\pi: 010\rightarrow 100 \rightarrow 010$ and in cyclic notation it can be written as \[(3,9)(4,10)(5,11)(6,18)(7,19)(8,20)(15,21)(16,22)(17,23).\] $S_k^n$ denotes the transformation group induced by permuting of variables. It is clear that $S_k^n$ is isomorphic to $S_n$.
If we allow both complementations and permutations of the variables, then a transformation group, called $G_k^n$, is induced. The group action on variables is represented by $((j_1,\ldots,j_n),\pi)(x_1,\ldots,x_n)=(x_{\pi(1)}\oplus j_1,\ldots, x_{\pi(n)}\oplus j_n)$ where $j_m\in Z_k$ for $1\leq m\leq n$ and $\pi\in S_n$. The group $G_2^n$ is especially important in switching theory and other areas of discrete mathematics, since it is the symmetry group of the $n$-cube. The classification of the boolean functions under $G_2^2$ into six classes is shown in \cite{har2}.
Let us allow a function to be equivalent to its complements as well as using equivalence under $G_k^n$. Then the transformation group which is induced by this equivalence relation is called the {\it genera} of $G_k^n$ and it is denoted by $GE_k^n$. Thus the equivalence relation $\simeq_{gen}$ which induces genera of $G_k^n$ is defined as follows $f\simeq_{gen}g\iff f\simeq_{G_k^n}g$ or $f=g\oplus j$ for some $j\in Z_k$. Then there exist only four equivalence classes in $P_2^2$, induced by $GE_2^2$.
These classes are the same as the classes induced by the group $IM_2^2$ in the algebra $P_2^2$ (see \cite{har2} and Table \ref{tb1}, given above).
The next important classification is generated by equivalence relations which allow adding linear or affine functions of variables. In order to preserve the group property we shall consider invertible linear transformations and assume that $k$ is a prime number, so that $LG_k^n$, the general linear group on an $n$-dimensional vector space over the field $Z_k$, is well-defined. The transformation groups $LG_2^n$ and $A_2^n$ of linear and affine transformations in the algebra of boolean functions are included in the lattice of the subgroups of RAG. We extend this view to the functions from $P_k^n$. The algebra of boolean functions in the simplest case of two variables is classified in eight classes under $LG_2^2$ and in five classes under $A_2^2$. Table \ref{tb1_1} presents both equivalence classes of boolean functions from $P_2^2$ under the transformation group $RAG$. \begin{table} \caption{Classes in $P_2^2$ under $RAG$.} \label{tb1_1}
\centering \begin{tabular}{l} \hline\hline
$[\ 0,\
1,\
x_1,\ x_2,\ x_1^0,\ x_2^0,\ x_1\oplus x_2,\ x_1\oplus x_2^0\ ]$\\ \hline $[\ x_1x_2,\ x_1x_2^0,\ x_1^0x_2,\ x_1^0x_2^0,\
x_1\oplus x_1x_2,\ x_2^0\oplus x_1x_2,\
x_1^0\oplus x_1x_2,\ x_1^0\oplus x_1x_2^0\ ]$\\ \hline\hline \end{tabular} \end{table}
The subgroups of RAG defined above are determined by equivalence relations as it is shown in Table \ref{tb2}, where $\mathbf{P}$ denotes a permutation matrix, $\mathbf{I}$ is the identity matrix, $\mathbf{b\mbox{ and }c}$ are vectors from $Z_k^n$ and $d\in Z_k$.
\begin{table} \caption{Subgroups of RAG}\label{tb2}
\begin{tabular}{||l|l|l||}\hline\hline Subgroup& Equivalence relations& Determination\\ \hline RAG & Prototype equivalence& $\mathbf{A}$-non-singular\\ $GE_k^n$ & genus & $\mathbf{A}=\mathbf{P}$, $\mathbf{a}=\mathbf{0}$\\
$CF_k^n$ & complement function & $\mathbf{A}=\mathbf{I}$, $\mathbf{a}=\mathbf{0}$, $\mathbf{c}=\mathbf{0}$\\
$A_k^n$ &affine transformation & $\mathbf{a}=\mathbf{0}$, $d=0$\\
$G_k^n$ & permute \& complement & \\
& variables (symmetry types) & $\mathbf{A}=\mathbf{P}$, $\mathbf{a}=\mathbf{0}$, $d=0$\\
$LF_k^n$ & add linear function & $\mathbf{A=I}$, $\mathbf{c=0}$, $d=0$\\ $CA_k^n$ & complement arguments & $\mathbf{A}=\mathbf{I}$, $\mathbf{a}=\mathbf{0}$, $d=0$\\
$LG_k^n$ & linear transformation & $\mathbf{c}=\mathbf{0}$, $\mathbf{a}=\mathbf{0}$, $d=0$\\
$S_k^n$ & permute variables & $\mathbf{A}=\mathbf{P}$, $\mathbf{c}=\mathbf{0}$, $\mathbf{a}=\mathbf{0}$, $d=0$\\ \hline\hline \end{tabular} \end{table} It is natural to ask which subgroups of RAG are subgroups of the groups $IM_k^n$ or $SB_k^n$. The answer to this question is our next goal. \begin{example}\label{ex5.1} Let $f=x_1x_2^0x_3\oplus x_1^0\quad\mbox{and}\quad g=x_1x_2^0x_3\oplus x_1x_2$ be two boolean functions. Then \[sub_1(f)=sub_1(g)=3,\ sub_2(f)=sub_2(g)=3\quad\mbox{and}\quad sub_3(f)=sub_3(g)=1.\] Hence $f\simeq_{sub} g$. In a similar way, it can be shown that $f\simeq_{imp} g$.
The details are left to the reader.
On the other hand, one can prove that there is no transformation $\varphi\in RAG$ such that $\varphi(x_1^0)=x_1x_2$ (see Table \ref{tb1_1}) and hence there is no affine transformation $\varphi\in RAG$ for which $g=\varphi(f)$.
Consequently, none of the groups $IM_k^n$, $SB_k^n$ and $SP_k^n$ can be a subgroup of $RAG$. \end{example}
Table \ref{tb2} allows us to establish the following fact. \begin{fact}\label{fc2} If $f$ and $g$ satisfy (\ref{eq2}) with $\mathbf{A\notin \{0,P,I\}}$ or $\mathbf{a\neq 0}$ then $f\not\simeq_{imp} g$, $f\not\simeq_{sub} g$ and $f\not\simeq_{sep} g$. \end{fact}
\begin{proposition}\label{p5.1}~~ \begin{enumerate} \item[(i)] $LG_k^n\not\leq SP_k^n$;\quad (ii) $LF_k^n\not\leq SP_k^n$; \item[(iii)] $IM_k^n\not\leq RAG$;\quad (iv) $SB_k^n\not\leq RAG$. \end{enumerate} \end{proposition} \begin{proof} Immediate from Fact \ref{fc2} and Example \ref{ex5.1}. \end{proof} Let $\sigma:Z_k\longrightarrow Z_k$ be a mapping and $\psi_\sigma:P_k^n\longrightarrow P_k^n$ be a transformation of $P_k^n$ determined by $\sigma$ as follows $\psi_\sigma(f)(\mathbf{a})=\sigma(f(\mathbf{a}))$ for all $\mathbf{a}=(a_1,\ldots,a_n)\in Z_k^n$.
\begin{theorem}\label{t5.2} $\psi_\sigma\in IM_k^n$ and $\psi_\sigma\in SB_k^n$ if and only if $\sigma$ is a permutation of $Z_k$, $k>2$. \end{theorem} \begin{proof} ``$\Leftarrow$'' Let $\sigma\in S_{Z_k}$ be a permutation of $Z_k$ and let $f$ be an arbitrary function with $ess(f)=n\geq 0$. We shall proceed by induction on $n$, the number of essential variables in $f$.
If $n=0$ then clearly $\psi_\sigma(f)$ is a constant and hence $f\simeq_{imp} \psi_\sigma(f)$ and $f\simeq_{sub} \psi_\sigma(f)$.
Assume that if $n<p$ then $f\simeq_{imp} \psi_\sigma(f)$ and $f\simeq_{sub} \psi_\sigma(f)$ for some natural number $p, p>0$. Hence $f(x_i=j)\simeq_{imp}\psi_\sigma(f(x_i=j))$ and $sub_m(f(x_i=j))=sub_m(\psi_\sigma(f(x_i=j)))$ for all $i\in\{1,\ldots,n\}$, $m\in\{1,\ldots,n-1\}$ and $j\in Z_k$.
Let $n=p$. Let $x_i\in\{x_1,\ldots,x_n\}=Ess(f)$ and $j\in Z_k$, and let us set $g=f(x_i=j)$. Then $\psi_\sigma(g)=\psi_\sigma(f(x_i=j))$ and $ess(g)=n-1<p$. Hence our inductive assumption implies $g\simeq_{imp}\psi_\sigma(g)$ and $g\simeq_{sub}\psi_\sigma(g)$. Consequently, we have \[f(x_i=j)\simeq_{imp}\psi_\sigma(f(x_i=j)) \quad\mbox{and}\quad sub_m(f(x_i=j))=sub_m(\psi_\sigma(f(x_i=j)))\] for all $x_i\in\{x_1,\ldots,x_n\}$ and $j\in Z_k$, which shows that $f\simeq_{imp}\psi_\sigma(f)$ and $f\simeq_{sub}\psi_\sigma(f)$.
"$\Rightarrow$" Let us assume that $\sigma$ is not a permutation of $Z_k$. Hence there exist two constants $a_1$ and $a_2$ from $Z_k$ such that $a_1\neq a_2$ and $\sigma(a_1)=\sigma(a_2)$. Let us fix the vector $\mathbf{b}=(b_1,\ldots,b_n)\in Z_k^n$. Then we define the following function from $P_k^n$: \[ f(x_1,\ldots,x_n)=\left\{\begin{array}{ccc}
a_1 \ &\ if \ &\ x_i=b_i\ for\ i=1,\dots,n \\
a_2 & & otherwise.
\end{array}
\right. \] Clearly, $Ess(f)=X_n$. On the other hand the range of $f$ is $range(f)=\{a_1,a_2\}$ and $\sigma(range(f))=\{\sigma(a_1)\}$, which implies that $\psi_\sigma(f)(c_1,\ldots,c_n)=\sigma(a_1)$ for all $(c_1,\ldots,c_n)\in Z_k^n$. Hence $\psi_\sigma(f)$ is the constant $\sigma(a_1)\in Z_k$ and $Ess(\psi_\sigma(f))=\emptyset$. Thus we have $f\not\simeq_{imp} \psi_\sigma(f)$ and $f\not\simeq_{sub} \psi_\sigma(f)$. \end{proof}
\begin{theorem}\label{t5.3} Let $\pi\in S_n$ and $\sigma_i\in S_{Z_k}$ for $i=1,\ldots,n$. Then $f(x_1,\ldots,x_n)\simeq_{imp} f(\sigma_1(x_{\pi(1)}),\ldots,\sigma_n(x_{\pi(n)}))$ and $f(x_1,\ldots,x_n)\simeq_{sub} f(\sigma_1(x_{\pi(1)}),\ldots,\sigma_n(x_{\pi(n)}))$. \end{theorem} \begin{proof} Let $f\in P_k^n$ be an arbitrary function and assume $Ess(f)=X_n$.
First, we shall prove that \[f(x_1,\ldots,x_n)\simeq_{imp} f(x_{\pi(1)},\ldots,x_{\pi(n)})\] {and} \[f(x_1,\ldots,x_n)\simeq_{sub} f(x_{\pi(1)},\ldots,x_{\pi(n)}).\] Let $g=f(x_{\pi(1)},\ldots,x_{\pi(n)})$. Clearly, if $n\leq 1$ then $f\simeq_{imp} g$ and $f\simeq_{sub} g$. Assume that if $n<p$ then $f\simeq_{imp} g$ and $f\simeq_{sub} g$ for some natural number $p$, $p\geq 1$.
Let us suppose $n=p$. Let $x_i\in Ess(f)$ be an arbitrary essential variable in $f$ and let $c\in Z_k$ be an arbitrary constant from $Z_k$. Then we have \[f(x_i=c)(x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_p)=\]
\[=g(x_{\pi^{-1}(i)}=c)(x_{\pi^{-1}(1)},\ldots,x_{\pi^{-1}({i-1})},x_{\pi^{-1}({i+1})},\ldots,x_{\pi^{-1}(p)}).\]
Our inductive assumption implies $f(x_i=c)\simeq_{imp}g(x_{\pi^{-1}(i)}=c)$
{and} $sub_m(f(x_i=c))=sub_m(g(x_{\pi^{-1}(i)}=c))$ for all $x_i\in X_n$, $m\in\{1,\ldots,p-1\}$ and $c\in Z_k$. Hence $f\simeq_{imp}g \ \mbox{and}\ f\simeq_{sub}g$.
Second, let us prove that \[f(x_1,\ldots,x_n)\simeq_{imp} f(\sigma_1(x_{1}),\ldots,\sigma_n(x_{n}))\]
{and} \[f(x_1,\ldots,x_n)\simeq_{sub} f(\sigma_1(x_{1}),\ldots,\sigma_n(x_{n})).\] Let $h=f(\sigma_1(x_{1}),\ldots,\sigma_n(x_{n}))$. Then we have \[f(a_1,\ldots,a_n)=h(\sigma_1^{-1}(a_1),\ldots,\sigma_n^{-1}(a_n)).\] Hence, if $(i_1\ldots i_r, a_{i_1}\ldots a_{i_r}c)\in Imp(f)$ then $(i_1\ldots i_r, \sigma_{i_1}^{-1}(a_{i_1})\ldots \sigma_{i_r}^{-1}(a_{i_r})c)\in Imp(h)$ for some $r$, $1\leq r\leq n$. Since $\sigma_i$ is a permutation of $Z_k$ for $i=1,\ldots,n$ it follows that $f\simeq_{imp}h$. By similar arguments it follows that $f\simeq_{sub}h$. \end{proof}
\begin{corollary}\label{c5.4} (i) $GE_k^n\leq IM_k^n$; \quad (ii) $GE_k^n\leq SB_k^n$; \quad (iii) $GE_k^n\leq SP_k^n$. \end{corollary}
\section{Classification of Boolean Functions}\label{sec6}
In this section we compare a collection of subgroups of RAG with the groups of transformations preserving the relations $\simeq_{imp}$, $\simeq_{sub}$ and $\simeq_{sep}$, and obtain estimates for the number of equivalence classes and for the cardinalities of these classes in the algebra of Boolean functions. Our results are based on Proposition \ref{p5.1}, Theorem \ref{t5.2} and Theorem \ref{t5.3}. Thus we have \begin{equation}\label{eq3} GE_2^n\leq IM_2^n,\quad GE_2^n\leq SB_2^n, \quad LG_2^n\not\leq SP_2^n \quad\mbox{and}\quad LF_2^n\not\leq SP_2^n. \end{equation}
These relationships determine the places of the groups $IM_2^n$, $SB_2^n$ and $SP_2^n$ with respect to the subgroups of RAG. Figure \ref{f4} shows the location of these groups together with the subgroups of RAG.
M. Harrison \cite{har2} and R. Lechner \cite{lech} counted the number of equivalence classes and the cardinalities of the classes under some transformation subgroups of RAG for Boolean functions of 3 and 4 variables.
The relations (\ref{eq3}) show that if we have the values of $t(GE_2^n)$ then we can count the numbers $t(IM_2^n)$, $t(SB_2^n)$ and $t(SP_2^n)$ because the equivalence classes under these transformation groups are union of equivalence classes under $GE_2^n$ and hence we have $t(IM_2^n)\leq t(GE_2^n)$ and $t(SB_2^n)\leq t(GE_2^n)$. Moreover, if we know the factor-set $P_2^n/_{\simeq_{gen}}$ of representative functions under $\simeq_{gen}$ then we can effectively calculate the sets $P_2^n/_{\simeq_{imp}}$, $P_2^n/_{\simeq_{sub}}$ and $P_2^n/_{\simeq_{sep}}$ because of $P_2^n/_{\simeq_{imp}}\subseteq P_2^n/_{\simeq_{gen}}$ and $P_2^n/_{\simeq_{sub}}\subseteq P_2^n/_{\simeq_{gen}}$.
The next theorem allows us to count the number $imp(f)$ of the implementations of any function $f$ by a recursive procedure. Such a procedure is realized and its execution is used when calculating the number of the implementations and classifying the functions under the equivalence $\simeq_{imp}$. \begin{theorem} \label{t35} Let $f\in P_2^n$ be a boolean function. The number of all implementations in $f$ is determined as follows: \[imp(f) = \left\{\begin{array}{ccc}
1 \ &\ if \ &\ ess(f)=0 \\
&&\\
2 & if\ & ess(f)=1\\
&&\\
\sum_{x\in Ess(f)}[imp(f(x=0)) + imp(f(x=1))] & if\ & ess(f)\geq 2.
\end{array}
\right. \] \end{theorem} \begin{proof} We shall proceed by induction on $n=ess(f)$ -- the number of essential variables in $f$. The statement is clear if $ess(f)=0$. If $f$ depends essentially on one variable $x_1$, then there is a unique BDD of $f$ with one non-terminal node which has two outgoing edges. These edges together with the labels of the corresponding terminal nodes form the set $Imp(f)$ of all implementations of $f$, i.e. $imp(f)=2$.
Let us assume that \[imp(f)= \sum_{i=1}^n[imp(f(x_i=0)) + imp(f(x_i=1))]\] if $n< s$ for some natural number $s$, $1\leq s$.
Next, let us consider a function $f$ with $ess(f)=s$. Without loss of generality, assume that $Ess(f)=\{x_1,\ldots,x_n\}$ with $n=s$. Since $x_i\in Ess(f)$ for $i=1,\ldots,n$ it follows that $f(x_i=0)\neq f(x_i=1)$ and there exist BDDs of $f$ whose label of the first non-terminal node is $x_i$. Let $D_f$ be such a BDD of $f$ and let $(ij_2\ldots j_m,c_1c_2\ldots c_mc)\in Imp(f)$ with $m\leq n$. Hence \[(j_2\ldots j_m,c_2\ldots c_mc)\in Imp(g)\] where $g=f(x_i=c_1)$. On the other hand it is clear that if $(j_2\ldots j_m,d_2\ldots d_md)\in Imp(g)$ then $(ij_2\ldots j_m,c_1d_2\ldots d_md)\in Imp(f)$. Consequently, there is a one-to-one mapping between the set of implementations of $f$ with first variable $x_i$ and first edge labelled by $c_1$, and $Imp(g)$, which completes the proof. \end{proof} We also develop recursive algorithms to count $sub_m(f)$ and $sep_m(f)$ for $f\in P_2^n$, presented below.
Table \ref{tb4} shows the number of equivalence classes under the equivalence relations induced by the transformation groups $G_2^n$, $IM_2^n$, $SB_2^n$ and $SP_2^n$ for $n=1,2,3,4$. M. Harrison found from applying Polya's counting theorem (see \cite{har2}) the numbers $t(G_2^5)$ and $t(G_2^6)$, which are upper bounds of $t(IM_2^n)$, $t(SB_2^n)$ and $t(SP_2^n)$ for $n=5,6$.
Figure \ref{f4} and Table \ref{tb3} show that for the algebra $P_2^3$ there are only 14 different generic equivalence classes, 13 imp-classes, 11 sub-classes and 5 sep-classes. Hence three mappings that convert each generic class into an imp-class, into a sub-class and into a sep-class are required. Each generic class is a different row of Table \ref{tb3}. For example, the generic class \textnumero 12 (as it is numbered in Table VIII, \cite{lech}) is presented by the 10-th row of Table \ref{tb3}. It consists of 8 functions obtained by complementing function $f$ and/or permuting and/or complementing input variables in all possible ways, where $f=x_1x_2^0x_3\oplus x_1x_2x_3^0\oplus x_2x_3$. This generic class \textnumero 12 is included in imp-class \textnumero 9, sub-class \textnumero 8 and sep-class \textnumero 5, which shows that $imp(f)=36$, $sub(f)=12$ and $sep(f)=7$. The average cardinalities of equivalence classes and complexities of functions are also shown in the last row of Table \ref{tb3}.
Table \ref{tb5} shows the $sep$-classes of boolean functions depending on at most five variables. Note that there are $2^{32}=4294967296$ functions in $P_2^5$. All calculations were performed on a computer with two Intel Xeon E5/2.3 GHz CPUs. The execution with total exhaustion took 244 hours.
\begin{figure}
\caption{Transformation groups in $P_2^n$ ($n=3/n=4$)}
\label{f4}
\end{figure}
\begin{table} \centering \caption{Number of classes under $symmetry$ $type$, $\simeq_{imp}$, $\simeq_{sub}$ and $\simeq_{sep}$}\label{tb4}
\begin{tabular}{rrrrr} $n$ & $t(G_2^n)$ & $t(IM_2^n)$ &$t(SB_2^n)$ &$t(SP_2^n)$ \\ \hline\hline 1&3&2&2&2\\ 2&6&4&4&3 \\ 3&22&13&11&5 \\ 4&402&104&74&11 \\ 5&1\ 228\ 158&1606&{$<$ 1228158}& 38 \\ 6&400\ 507\ 806\ 843\ 728&\multicolumn{3}{c}{$<$ 400\ 507\ 806\ 843\ 728} \\ \hline\hline \end{tabular} \end{table}
\begin{sidewaystable}[p]\centering
\caption{Classification of $P_2^3$ under $\simeq_{sep}$, $\simeq_{sub}$, $\simeq_{imp}$ and genus.} \label{tb3}
\begin{tabular}{||c|c|c||c|c|c||c|c|c||c|c||c||} \hline \hline sep- & $sep(f)$ & func. & sub- & $sub(f)$ & func. & imp- & $imp(f)$ & func.& Generic & func. & representative \\ class & & per & class & & per & class & & per &class \cite{lech}& per & function $f$ \\ \textnumero &&class&\textnumero &&class&\textnumero &&class&\textnumero &class&\\
\hline\hline 1 & 0 & 2 & 1 & 1 & 2 & 1 & 1 & 2&1&2 & $0$ \\ \hline 2 & 1 & 6 & 2 & 3 & 6 & 2 & 2 & 6&9&6& $x_1$ \\ \hline \multirow{2}{*}{3} & \multirow{2}{*}{3} & \multirow{2}{*}{30} & 3 & 5 & 24 & 3 & 6 & 24&3&24& $x_1x_2$ \\ \cline{4-12}
& & & 4 & 7 & 6 & 4 & 8 & 6&10&6& $x_1\oplus x_2$ \\ \hline 4 & 6 & 24 & 5 & 11 & 24 & 5 & 28 & 24&13&24& $x_1\oplus x_1x_3\oplus$ \\
& & & & & & & & &&& $ x_2x_3$ \\
\hline \multirow{9}{*}{5} & \multirow{9}{*}{7} & \multirow{9}{*}{194} & \multirow{2}{*}{6} & \multirow{2}{*}{9} & \multirow{2}{*}{64} & 6 & 21 & 16&2&16 & $x_1x_2x_3$ \\ \cline{7-12}
& & & & & & 7 & 23 & 48&6&48& $x_1x_2^0x_3^0\oplus x_1$ \\ \cline{4-12}
& & & 7 & 12 & 48 & 8 & 30 & 48&7&48 & $x_1x_2^0x_3^0\oplus $ \\ & & & & & & & & & & & $x_2x_3$ \\ \cline{4-12}
& & & 8 & 12 & 8 & \multirow{2}{*}{} & \multirow{2}{*}{} & \multirow{2}{*}{}&12&8& $x_1x_2^0x_3\oplus$ \\ & & & & & & \multirow{2}{*}{9} & \multirow{2}{*}{36} & \multirow{2}{*}{16}&& & $ x_1x_2x_3^0 \oplus x_2x_3$ \\ \cline{4-6} \cline{10-12}
& & & \multirow{3}{*}{9} & \multirow{3}{*}{15} & \multirow{3}{*}{26} & & & &5&8& $x_1^0x_2x_3\oplus x_1x_2^0x_3^0$ \\ \cline{7-12}
& & & & & & 10 & 42 & 16&8&16& $x_1x_2^0x_3\oplus $ \\
& & & & & & & & & && $ x_1x_2x_3^0\oplus x_1^0x_2x_3$ \\
\cline{7-12}
& & & & & & 11 & 48 & 2& 11&2& $x_1\oplus x_2\oplus x_3$ \\ \cline{4-12}
& & & 10 & 13 & 24 & 12 & 32 & 24&14&24 & $x_1\oplus x_2x_3$ \\ \cline{4-12}
& & & 11 & 13 & 24 & 13 & 33 & 24&4&24 & $x_1x_2^0x_3\oplus x_1x_2x_3^0$ \\ \hline aver.&6.2&51.2&&10.6&23.3& &26.0&19.7&&18.3&\\ \hline\hline \end{tabular} \end{sidewaystable}
\begin {table} \centering \caption{Classes in $P_2^5$ under $\simeq_{sep}$ }\label{tb5}
\noindent
\begin{tabular}{||c|c|c|c|c|c|c|c||} \hline\hline
sep- & \multirow{3}{*}{\rotatebox{00}{$sep_5(f)$}} & \multirow{3}{*}{\rotatebox{0}{$sep_4(f)$}} & \multirow{3}{*}{\rotatebox{0}{$sep_3(f)$}} & \multirow{3}{*}{\rotatebox{0}{$sep_2(f)$}} & \multirow{3}{*}{\rotatebox{0}{$sep_1(f)$}} & \multirow{3}{*}{\rotatebox{90}{$sep(f)$}} & functions \\ class & & & & & & & per \\ \textnumero & & & & & & & class \\ \hline
1 & 0 & 0 & 0 & 0 & 0 & 0 & 2\\ \hline 2 & 0 & 0 & 0 & 0 & 1 & 1 & 10\\ \hline 3 & 0 & 0 & 0 & 1 & 2 & 3 & 100\\ \hline 4 & 0 & 0 & 1 & 2 & 3 & 6 & 240\\ \hline 5 & 0 & 0 & 1 & 3 & 3 & 7 & 1940\\ \hline 6 & 0 & 1 & 2 & 5 & 4 & 12 & 1920\\ \hline 7 & 0 & 1 & 3 & 4 & 4 & 12 & 2400\\ \hline 8 & 0 & 1 & 3 & 5 & 4 & 13 & 8160\\ \hline 9 & 0 & 1 & 4 & 4 & 4 & 13 & 120\\ \hline 10 & 0 & 1 & 4 & 5 & 4 & 14 & 8400\\ \hline 11 & 0 & 1 & 4 & 6 & 4 & 15 & 301970\\ \hline 12 & 1 & 2 & 7 & 9 & 5 & 24 & 20480\\ \hline 13 & 1 & 3 & 5 & 7 & 5 & 21 & 3840\\ \hline 14 & 1 & 3 & 5 & 8 & 5 & 22 & 9600\\ \hline 15 & 1 & 3 & 6 & 6 & 5 & 21 & 1920\\ \hline 16 & 1 & 3 & 6 & 7 & 5 & 22 & 1920\\ \hline 17 & 1 & 3 & 6 & 8 & 5 & 23 & 38400\\ \hline 18 & 1 & 3 & 7 & 7 & 5 & 23 & 1920\\ \hline 19 & 1 & 3 & 7 & 8 & 5 & 24 & 38400\\ \hline 20 & 1 & 3 & 7 & 9 & 5 & 25 & 130560\\ \hline 21 & 1 & 4 & 6 & 6 & 5 & 22 & 3000\\ \hline 22 & 1 & 4 & 7 & 7 & 5 & 24 & 34720\\ \hline 23 & 1 & 4 & 7 & 8 & 5 & 25 & 177120\\ \hline 24 & 1 & 4 & 7 & 9 & 5 & 26 & 274560\\ \hline 25 & 1 & 4 & 8 & 7 & 5 & 25 & 7680\\ \hline 26 & 1 & 4 & 8 & 8 & 5 & 26 & 274560\\ \hline 27 & 1 & 4 & 8 & 9 & 5 & 27 & 1847280\\ \hline 28 & 1 & 5 & 7 & 9 & 5 & 27 & 81920\\ \hline 29 & 1 & 5 & 8 & 8 & 5 & 27 & 600\\ \hline 30 & 1 & 5 & 8 & 9 & 5 & 28 & 1013760\\ \hline 31 & 1 & 5 & 8 & 10 & 5 & 29 & 38400\\ \hline 32 & 1 & 5 & 9 & 7 & 5 & 27 & 1200\\ \hline 33 & 1 & 5 & 9 & 8 & 5 & 28 & 449040\\ \hline 34 & 1 & 5 & 9 & 9 & 5 & 29 & 4093200\\ \hline 35 & 1 & 5 & 9 & 10 & 5 & 30 & 5443200\\ \hline 36 & 1 & 5 & 10 & 8 & 5 & 29 & 13680\\ \hline 37 & 1 & 5 & 10 & 9 & 5 & 30 & 5826160\\ \hline 38 & 1 & 5 & 10 & 10 & 5 & 31 & 4274814914\\ \hline\hline \end{tabular} \end{table}
\end{document} |
\begin{document}
\title{Transitionless quantum drivings for the harmonic oscillator}
\author{J. G. Muga$^{1}$, X. Chen$^{1,2}$, S. Ib\'a\~nez$^{1}$, I. Lizuain$^{1}$, A. Ruschhaupt$^{3}$ }
\address{$^{1}$ Departamento de Qu\'{\i}mica-F\'{\i}sica, UPV-EHU, Apdo 644, 48080 Bilbao, Spain}
\address{$^{2}$ Department of Physics, Shanghai University, 200444 Shanghai, P. R. China}
\address{$^{3}$ Institut f\"ur Theoretische Physik, Leibniz Universit\"{a}t Hannover, Appelstra{\ss}e 2, 30167 Hannover, Germany}
\begin{abstract} Two methods to change a quantum harmonic oscillator frequency without transitions in a finite time are described and compared. The first method, a transitionless-tracking algorithm, makes use of a generalized harmonic oscillator and a non-local potential. The second method, based on engineering an invariant of motion, only modifies the harmonic frequency in time, keeping the potential local at all times. \end{abstract} \pacs{37.10.De, 42.50.-p, 37.10.Vz}
\section{Introduction}
Changing the external parameters of the Hamiltonian is a fundamental and standard operation to probe, control, or prepare a quantum system. In many cases it is desirable to go from an initial parameter configuration to a final one without inducing transitions, as in the expansions performed in fountain clocks \cite{Bize}. In fact most of the current experiments with cold atoms are based on a cooling stage and then an adiabatic drive of the system to some desired final trap or regime \cite{Polkov}. These ``transitionless'' \cite{Berry09}, or ``frictionless'' \cite{Ronnie} adiabatic processes may require exceedingly large times and become impractical, even impossible \cite{Polkov}, or quite simply a faster process is desirable, e.g. to increase the repetition rate of a cycle, or a signal-to-noise ratio. This motivates the generic objective of achieving the same final state as the slow adiabatic processes, possibly up to phase factors, but in a much shorter time. One may try to fulfill that goal in two different ways: (a) designing appropriate ``parameter trajectories'' of the Hamiltonian from the initial to the final times, or (b) applying entirely new interactions that modify the Hamiltonian beyond a simple parameter evolution of the original form, for example by adding different terms to it. In this paper we shall analyze and discuss, for the harmonic oscillator, two recently proposed methods whose relation had not been investigated. It turns out that they actually implement these two different routes. While most of the treatment is applicable to an ``abstract'' harmonic oscillator, we shall discuss physical implementations specific to ultracold atoms or ions. Indeed, harmonic traps and their manipulation are basic workhorses of this field.
For the harmonic oscillator the parameter we consider is the trap frequency, which should go from $\omega_0$ to $\omega_f$ in a time $t_f$, preserving the populations of the levels, $P_n(t_f)=P_n(0)$. ``$n$'' labels the instantaneous $n$-th eigenstate of the initial and final harmonic oscillator Hamiltonians,
\begin{eqnarray} H_0(0)|n(0)\rangle&=&\hbar\omega_0(n+1/2)|n(0)\rangle, \nonumber\\
H_0(t_f)|n({t_f})\rangle&=&\hbar\omega_f(n+1/2)|n({t_f})\rangle. \label{hot} \end{eqnarray}
One of the methods we shall discuss here relies on a general framework set by Kato in a proof of the adiabatic theorem \cite{Kato}, and has been formulated recently by Berry \cite{Berry09}. We shall term it ``transitionless-tracking'' approach, or TT for short; the other one \cite{harmo,becs} engineers the Lewis-Riesenfeld invariant \cite{LR69} by an inverse method \cite{Palao} to satisfy the desired boundary conditions; we shall call this method ``inverse-invariant'', or II for short.
In the basic version of TT the dynamics is set to follow at all
intermediate times the adiabatic path defined by an auxiliary Hamiltonian $H_0(t)$ (in our case a regular harmonic oscillator with frequency $\omega(t)$ and boundary conditions $\omega(0)=\omega_0$ and $\omega_f=\omega(t=t_f)$), and its instantaneous eigenvectors $|n(t)\rangle$, up to phase factors. Instead, in the II approach the auxiliary object is an engineered Lewis-Riesenfeld invariant $I(t)$ set to commute with $H_0(0)$ at $t=0$ and with $H_0(t_f)$ at $t_f$. In both cases intermediate states may be highly non-adiabatic with respect to the instantaneous eigenstates of the Hamiltonians actually applied, $H_{TT}(t)$ and $H_{II}(t)$.
We shall provide first the equations characterizing the two approaches and then comment on possible physical implementations.
\section{Transitionless tracking algorithm}
\subsection{General formalism}
For the general formalism we follow \cite{Berry09} closely. Assume a time-dependent Hamiltonian $H_0(t)$ with initial and final values (\ref{hot}), instantaneous eigenvectors
$|n(t)\rangle$ and eigenvalues $E_n(t)$,
\begin{equation} H_0(t)|n(t)\rangle = E_n(t)|n(t)\rangle. \end{equation}
A slow change would preserve the eigenvalue and eigenvector along the dynamical evolution times a phase factor,
\begin{equation}
|\psi_n(t)\rangle = \exp\left\{
-\frac{i}{\hbar}\int_0^{t} dt' E_n(t') -\int_0^t dt' \langle n(t')|\partial_{t'}n(t')\rangle\right\}|n(t)\rangle. \label{22} \end{equation}
We now seek a Hamiltonian $H(t)$ such that the adiabatic approximation
$|\psi_n(t)\rangle$ represents the {\it exact} dynamics,
\begin{equation} i\hbar\partial_t|\psi_n(t)\rangle=H(t)|\psi_n(t)\rangle. \end{equation}
$H(t)$ (which is $H_{TT}$ if distinction with the other method is needed) is related to the corresponding unitary operator by
\begin{eqnarray} i\hbar\partial_t U(t)&=&H(t)U(t), \\ \label{ht} H(t)&=&i\hbar(\partial_t U(t))U^\dagger(t). \end{eqnarray}
Choosing
\begin{equation} U(t)=\sum_n\exp\left\{-\frac{i}{\hbar}\int_0^t dt' E_n(t')- \int_0^t dt'\langle n(t')|\partial_{t'} n(t')\rangle\right\}
|n(t)\rangle\langle n(0)|, \end{equation}
we find from (\ref{ht}),
\begin{equation}
\hat{H}(t)=\sum_n|n\rangle E_n\langle n|
+i\hbar\sum_n(|\partial_t n\rangle\langle n|-\langle n|\partial_t n\rangle|n\rangle\langle n|) \equiv \hat{H}_0+\hat{H}_1, \end{equation}
where we have simplified the notation, $|n\rangle=|n(t)\rangle$. It is also possible to choose other phases in (\ref{22}) \cite{Berry09}. The simplest case is
$U(t)=\sum_n |n(t)\rangle\langle n(0)|$, without phase factors, which leads to
$H(t)=i\hbar\sum_n |\partial_t n\rangle\langle n|$. Note that with this choice
$H_0(t)$ has been formally suppressed in $H(t)$ but still plays a role through its eigenfunctions $|n(t)\rangle$.
\subsection{Application to the harmonic oscillator}
We now apply the above to the harmonic oscillator
\begin{equation} \hat{H}_0(t)=\hat{p}^2/2m+m\omega(t)^2 \hat{x}^2/2
=\hbar\omega(t)(\hat{a}^\dagger_t \hat{a}_t +1/2), \label{ho} \end{equation}
where $\hat{a}_t$ and $\hat{a}_t^\dagger$ are the (Schr\"odinger picture!) annihilation and creation operators at time $t$,
\begin{eqnarray} \hat{x}&=&\sqrt{\frac{\hbar}{2m\omega(t)}}(a_t^\dagger +a_t), \\ \hat{p}&=&i\sqrt{\frac{\hbar m\omega(t)}{2}}(a_t^\dagger -a_t), \\ \hat{a}_t&=&\sqrt{\frac{m\omega(t)}{2\hbar}} \left(\hat{x}+\frac{i}{m\omega(t)}\hat{p}\right), \\ \hat{a}^\dagger_t&=&\sqrt{\frac{m\omega(t)}{2\hbar}} \left(\hat{x}-\frac{i}{m\omega(t)}\hat{p}\right). \end{eqnarray}
This time dependence may be misleading and a bit unusual at first so we insist: since the frequency depends on time the ``instantaneous'' ladder operators $\hat{a}_t$, $\hat{a}^\dagger_t$ create or annihilate different ``instantaneous'' states, adapted to the corresponding frequency. Thus, ladder operators with different time labels do not commute in general, although some combinations, e.g. those equivalent to powers of $\hat{x}$ and/or $\hat{p}$, do commute, as we shall see later.
The instantaneous eigenstates $|n(t)\rangle$ can be written in coordinate representation as
\begin{equation}
\langle x|n(t)\rangle =\frac{1}{\sqrt{2^n n!}}\left(\frac{m \omega (t)}{\pi \hbar}\right)^{1/4}\exp{\left(-\frac{1}{2}\frac{m \omega (t)}{\hbar} x^2\right)}H_{n}\left(\sqrt{\frac{m \omega (t)}{\hbar}}x\right), \end{equation}
and their derivative with respect to $t$ is
\begin{eqnarray}
\langle x |\partial_t n(t) \rangle = \left(\frac{1}{4} - \frac{m \omega (t)}{2 \hbar} x^2 \right)\frac{\dot{\omega}}{\omega(t)} |n \rangle
+ \sqrt{\frac{m \omega (t)}{2 \hbar}}x \frac{\dot{\omega}}{\omega(t)} \sqrt{n} | n-1 \rangle. \end{eqnarray}
We find, using the recursion relation of Hermite polynomials and their orthogonality,
\begin{eqnarray}
\langle k|\partial_t n \rangle= \left\{ \begin{array}{ll} \frac{1}{4}\sqrt{n(n-1)}\frac{\dot{\omega}}{\omega(t)} &~~~ k=n-2 \\ \\ -\frac{1}{4}\sqrt{(n+1)(n+2)}\frac{\dot{\omega}}{\omega(t)} &~~~ k=n+2 \\ \\ 0 &~~~ (\mbox{otherwise}) \end{array} \right., \end{eqnarray}
so that $\hat{H}_1 (t)$ can be written as
\begin{eqnarray}
\hat{H}_1 (t)&=& i \hbar \sum_n |\partial_t n \rangle \langle n| \equiv i \hbar \frac{\dot{\omega}}{\omega(t)}\sum_n \Bigg[\left(\frac{1}{4} - \frac{m \omega (t)}{2 \hbar} \hat{x}^2 \right)|n \rangle \langle n| \nonumber \\
&+& \sqrt{\frac{m \omega (t)}{2 \hbar}}\hat{x} \sqrt{n} | n-1 \rangle \langle n|\Bigg]. \end{eqnarray}
Using $a_t=\sum_n \sqrt{n} |n-1(t) \rangle\langle n(t)|$, and the relations between $\hat{x}$, $\hat{p}$, $\hat{a}_t$ and $\hat{a}_t^\dagger$ written above,
\begin{eqnarray} \hat{H}_1 (t)&=& i \hbar \frac{\dot{\omega}}{\omega(t)}\sum_n \left[\frac{1}{4} - \frac{m \omega (t)}{2 \hbar} \hat{x}^2 + \sqrt{\frac{m \omega (t)}{2 \hbar}}\hat{x} \hat{a}_t \right] \nonumber \\
&=&\frac{i \hbar}{4}\frac{\dot{\omega}}{\omega(t)} - \frac{1}{2}\frac{\dot{\omega}}{\omega(t)} \hat{x} \hat{p}. \end{eqnarray}
Using $[\hat{x}, \hat{p}]= i \hbar$, we finally write the Hamiltonian $\hat{H}_1(t)$ in the following simple forms
\begin{equation} \hat{H}_1(t)=-\frac{\dot{\omega}}{4\omega}(\hat{x}\hat{p}+\hat{p}\hat{x})
=i\hbar\frac{\dot{\omega}}{4\omega}[{\hat{a}}^2-({\hat{a}}^\dagger)^2]. \label{h1a} \end{equation}
In the last expression the subscript $t$ in $\hat{a}$ and $\hat{a}^\dagger$ has been dropped because the squeezing combination ${\hat{a}}^2-(\hat{a}^\dagger)^2$ is actually independent of time, so one may evaluate it at any convenient time, e.g. at $t=0$. The connection with squeezing operators is worked out in the appendix.
$H_1$ is therefore a non-local operator, and does not have the form of a regular harmonic oscillator potential with an $x^2$ term. Nevertheless the final Hamiltonian $H=H_0+H_1$ is still quadratic in $\hat{x}$ and $\hat{p}$, so it may be considered a generalised harmonic oscillator \cite{gho}.
\subsection{Physical realization}
The nonlocality of $\hat{H}_1$, with a constant prefactor, can be realized in a laboratory by means of 2-photon Raman transitions for trapped ions \cite{Itano,Zeng}. Since we have to evaluate as well the possibility of making the prefactor in $\hat{H}_1$ time dependent we need to provide the derivation with some detail, first for a time-independent $\omega$.
\subsubsection{Raman two-photon transition in a trapped ion}
Let us consider a harmonically trapped two-level system in 1D driven by two different lasers (with coupling strengths $\Omega_j$ and frequencies $\omega_j$, $j=1,2$), see Fig. \ref{level_scheme_fig} and Refs. \cite{zeng95,zeng98}. The time dependent ``Raman'' Hamiltonian in the Schr\"odinger picture will be given by
\begin{eqnarray} \hat{H}_{R}(t)&=&\hat{H}_T+\hat{H}_A+\hat{H}_{int}, \end{eqnarray}
with ``trap'' ($T$), ``atomic'' ($A$), and interaction ($int$) terms
\begin{eqnarray} \hat{H}_T&=&\hbar\omega \hat{a}^\dag \hat{a},\\
\hat{H}_A&=&\hbar\omega_e|e\rangle\langle e|,\\
\hat{H}_{int}&=&\sum_{j=1}^2\hbar\Omega_j\cos\left(\omega_jt-k_jx+\phi_j\right) (|g\rangle\langle e|+|e\rangle\langle g|), \end{eqnarray}
where $\hbar\omega_e$ is the energy of the excited state $|e\rangle$ and ${\bf k}_j=k_j{\bf \hat x}$ the wavevector of each laser, both of which are assumed to point along the principal trap direction, the $x$-direction.
\subsubsection{Interaction picture\label{ip}}
Let us now write the above Hamiltonian in an interaction picture defined by the Hamiltonian $\hat{h}_0=\hat{H}_T+\hbar\tilde\omega_L|e\rangle\langle e|$, where $\tilde\omega_L=(\omega_1+\omega_2)/2$ has been introduced. The interaction Hamiltonian $\hat{H}_I=e^{i\hat{h}_0t/\hbar}(\hat{H}_R-\hat{h}_0)e^{-i\hat{h}_0t/\hbar}$ reads
\begin{eqnarray} \label{H_int_time_dep}
\hat{H}_I(t)&=&-\hbar\tilde\Delta|e\rangle\langle e| \nonumber\\ &+&\sum_{j=1}^2\frac{\hbar\Omega_j}{2} \left( e^{i\eta_j\left[\hat{a}(t)+\hat{a}^\dag(t)\right]}e^{-i(\omega_j-\tilde\omega_L) t}
e^{-i\phi_j}|e\rangle\langle g| + H.c.\right), \end{eqnarray}
where $\tilde\Delta=\tilde\omega_L-\omega_e$, and now $\hat{a}(t)=\hat{a} e^{-i\omega t}$, $\hat{a}^\dag(t)=\hat{a}^\dag e^{i\omega t}$ are the time dependent Heisenberg annihilation and creation operators respectively. Note also that fast oscillating off-resonant $e^{\pm i(\omega_j+\tilde\omega_L)t}$ terms have been neglected in the rotating wave approximation (RWA). The parameter $\eta_j=k_jx_0$ is known as the Lamb-Dicke (LD) parameter, where $x_0=\sqrt{\hbar/2m\omega}$ is the extension (square root of the variance) of the ion's ground state, i. e., $\hat{x}=x_0(\hat{a}+\hat{a}^\dag)$.
\begin{figure}
\caption{Schematic electronic and vibrational level structure for a two-photon transition in an ion trapped with frequency $\omega$. $\omega_1$ and $\omega_2$ are the laser frequencies, and $\omega_e$ the transition frequency between ground and excited states. See the text for further details.}
\label{level_scheme_fig}
\end{figure}
\subsubsection{Adiabatic elimination and effective Hamiltonian}
For a general wavefunction (in the corresponding interaction picture) such as
\begin{equation}
|\psi_I(t)\rangle=\sum_{n=0}^\infty \left[g_n(t)|g,n\rangle+e_n(t)|e,n\rangle\right] \end{equation}
the differential equations of motion for the probability amplitudes $g_n(t)$ and $e_n(t)$ are obtained from the Schr\"odinger equation $i\hbar\partial_t|\psi_I(t)\rangle=\hat{H}_I|\psi_I(t)\rangle$,
\begin{eqnarray} \label{system_gn} i\dot g_n(t)&=&\frac{1}{2}\sum_{j=1}^2\sum_{n'=0}^\infty
\Omega_je^{i(\theta_jt+\phi_j)}\langle n|e^{-i\eta_j\left[\hat{a}(t)+\hat{a}^\dag(t)\right]}|n'\rangle e_{n'}(t),\\
i\dot e_n(t)&=&-\tilde\Delta e_n(t)+\frac{1}{2}\sum_{j=1}^2\sum_{n'=0}^\infty
\Omega_je^{-i(\theta_jt+\phi_j)}\langle n|e^{i\eta_j\left[\hat{a}(t)+\hat{a}^\dag(t)\right]}|n'\rangle g_{n'}(t), \label{system_en} \end{eqnarray}
where $\theta_j=\omega_j-\tilde\omega_L$. For large detunings, i.e., for $|\tilde\Delta|\gg\Omega_j,\omega$, see Fig. \ref{level_scheme_fig}, and for an ion initially in the ground state one may assume that the excited state $|e\rangle$ is scarcely populated and it may be adiabatically eliminated. Then, setting $\dot e_n(t)=0$, $e_n(t)$ may be written as a function of the $g_{n'}(t)$ from Eq. (\ref{system_en}), and substituting this result into (\ref{system_gn}) there results a differential equation for the ground state probability amplitude,
\begin{equation}
i\dot g_n(t)=s g_n(t)+\frac{\tilde\Omega}{2}\sum_{n'=0}^\infty \mathcal{F}_{n,n'}(t)g_{n'}(t), \end{equation}
where
\begin{eqnarray} s&=&\frac{\Omega_1^2+\Omega_2^2}{4\tilde\Delta}, \\
\mathcal{F}_{n,n'}(t)&=&\langle n|e^{-i\tilde\eta\left[\hat{a}(t)+\hat{a}^\dag(t)\right]}|n'\rangle e^{i(\tilde\delta t+\tilde\phi)}
+\langle n|e^{i\tilde\eta\left[\hat{a}(t)+\hat{a}^\dag(t)\right]}|n'\rangle e^{-i(\tilde\delta t+\tilde\phi)}, \end{eqnarray}
and where the effective two-photon Raman parameters, denoted by tildes, are given by
\begin{eqnarray} \tilde\delta&=&\omega_1-\omega_2, \nonumber\\ \tilde\eta&=&\eta_1-\eta_2, \nonumber\\ \tilde\phi&=&\phi_1-\phi_2, \nonumber\\ \frac{\tilde\Omega}{2}&=&\frac{\Omega_1\Omega_2}{4\tilde\Delta}. \end{eqnarray}
The equation for the ground state probability amplitude corresponds to an effective Hamiltonian
\begin{eqnarray} \label{effective_ham_g}
\hat{H}_{eff}&=&\hbar s|g\rangle\langle g|+
\frac{\hbar\tilde\Omega}{2}\left(e^{i\tilde\eta[\hat{a}(t)+\hat{a}^\dag(t)]}e^{-i(\tilde\delta t+\tilde\phi)}+H.c\right)|g\rangle\langle g|. \end{eqnarray}
Note that the Stark-Shift produced by off resonant driving is included in $s$, which is a constant of motion and produces no effect on the Raman coupling between sidebands. We have thus adiabatically eliminated the excited state $|e\rangle$ ending with a Hamiltonian of the same form as (\ref{H_int_time_dep}) where the transitions between electronic levels are not present.
\subsubsection{Two-photon Jaynes-Cummings Hamiltonian in the Raman Scheme: Vibrational RWA}
Using the Baker-Campbell-Hausdorff (BCH) identity, the exponential in the effective Hamiltonian (\ref{effective_ham_g}) may be expanded in power series of $\tilde\eta$ \cite{Orszag,LME08},
\begin{eqnarray} \label{time_dep_ham_02} \hat{H}_{eff}=\frac{\hbar\tilde\Omega}{2} \left[e^{-\tilde\eta^2/2}\sum_{nn'} \frac{\left(i \tilde\eta\right)^{n+n'}}{n!n'!} \hat{a}^{\dag n}\! \hat{a}^{n'} e^{i (n-n') \omega t}e^{-i\tilde\delta t}e^{-i\tilde\phi}+H.c\right]. \end{eqnarray}
If the effective detuning is $\tilde\delta=\omega_1-\omega_2=2\omega$, the second blue sideband becomes resonant, and we may neglect rapidly oscillating terms in a second or vibrational RWA \cite{LME08}. The above Hamiltonian is then simplified to a two-photon Jaynes-Cummings-like Hamiltonian without electronic transitions. To leading order in $\tilde\eta$ it takes the form
\begin{equation} \hat{H}_{2B}=\tilde\eta^2\frac{\hbar\tilde\Omega}{4}\left(\hat{a}^{\dag 2} e^{i\tilde\phi}+\hat{a}^2 e^{-i\tilde\phi}\right) =i\hbar\frac{\tilde\eta^2\tilde\Omega}{4}\left(\hat{a}^2 -\hat{a}^{\dag 2}\right), \label{2b} \end{equation}
where for the last step a relative phase between the applied fields $\tilde\phi=\phi_1-\phi_2=-\pi/2$ has been assumed.
\subsubsection{Validity for time-dependent $\omega$}
Unfortunately the above formal manipulations and approximations cannot be carried out in general for a time dependent $\omega$. The interaction picture performed in \ref{ip}, in particular, assumes a constant $\hat{h}_0$. A time dependent one would require a more complex approach with time-ordering operators \cite{Glauber}. Similarly, the vibrational rotating wave approximation requires the stability of the frequency for times larger than a period to avoid off-resonant couplings. One may still obtain (\ref{2b}) for a sufficiently slowly varying $\omega$, the criterion being that the change of the time-dependent trapping frequency in one time period $T$ has to be much smaller than the frequency itself. We can write this condition as $\dot\omega(t) T\ll\omega(t)$ or
\begin{equation} \frac{\dot\omega(t)}{\omega(t)^2}\ll1, \end{equation}
which turns out to be the adiabaticity condition for the harmonic oscillator. Of course, if satisfied, the whole enterprise of applying the TT method would be useless. These arguments are far from constituting a proof that the TT method cannot be implemented for the harmonic oscillator. They simply leave this as an open question.
\section{Engineering the Lewis-Riesenfeld invariant}
In this section we describe a different method for transitionless dynamics of the harmonic oscillator \cite{harmo}. A harmonic oscillator such as $H_0(t)$ in Eq. (\ref{ho}) has the following time dependent invariant \cite{LR69}
\begin{equation} I(t)=\frac{1}{2}\left(\frac{\hat{x}^2}{b^2} m \omega_0^2+\frac{1}{m} \hat{\pi}^2\right), \end{equation}
where $\hat{\pi}=b(t)\hat{p}-m\dot{b}\hat{x}$ plays the role of a momentum conjugate to $\hat{x}/b$, the dots are derivatives with respect to time, and $\omega_0$ is in principle an arbitrary constant. The scaling, dimensionless function $b=b(t)$ satisfies the subsidiary condition
\begin{equation}\label{subsi} \ddot{b}+\omega(t)^2 {b}=\omega_0^2/b^3, \end{equation}
an Ermakov equation where real solutions must be chosen to make $I$ Hermitian. $\omega_0$ is frequently rescaled to unity by a scale transformation of $b$ \cite{LR69}. Another common and convenient choice, which we shall adopt here, is $\omega_0=\omega(0)$. The eigenstates of $I(t)$ become, with appropriate phase factors, solutions of the time-dependent Schr\"odinger equation,
\begin{eqnarray} \Psi_n (t,x) &=& \left(\frac{m\omega_0}{\pi\hbar}\right)^{1/4} \!\frac{1}{(2^n n! b)^{1/2}} \fexp{-i (n+1/2) \int_0^t dt'\, \frac{\omega_0}{b(t')^2}} \\ &\times&\fexp{i \frac{m}{2\hbar}\left(\frac{\dot{b}}{b(t)} + \frac{i\omega_0}{b^2}\right)x^2} H_n\left[\left(\frac{m\omega_0}{\hbar}\right)^{1/2}\frac{x}{b}\right], \label{emode} \end{eqnarray}
and form a complete basis to expand any time-dependent state, $\psi(x,t)=\sum_n c_n \Psi_n(x,t)$, with the amplitudes $c_n$ constant. A method to achieve frictionless, population preserving processes is to leave $\omega(t)$ undetermined first, and then set $b$ so that $I(0)=H_0(0)$ and $[I(t_f),H_0(t_f)]=0$. This guarantees that the eigenstates of $I$ and $H_0$ are common at the initial and final times. We can do this by setting
\begin{eqnarray} b(0)=1, \dot{b}(0)=0, \ddot{b}(0)=0 \nonumber\\ b(t_f)=\gamma=[\omega_0/\omega_f]^{1/2}, \dot{b}(t_f)=0, \ddot{b}(t_f)=0, \label{bes} \end{eqnarray}
and interpolating $b(t)$ with some real function that satisfies these boundary conditions. The simplest choice is a polynomial,
\begin{equation}\label{ans} b(t)=\sum_{j=0}^5 a_j t^j. \end{equation}
Once the $a_j$ are determined from (\ref{bes}), $\omega(t)$ is obtained from the Ermakov equation (\ref{subsi}), and one gets directly a transitionless Hamiltonian $H_{II}(t)=H_0(t)$ with a local, ordinary, harmonic potential, but note that $\omega(t)^2$ may become negative for some time interval, making the potential an expulsive parabola \cite{harmo,Salomon}.
The II method is thus clearly distinct from TT and implements a different Hamiltonian. Note also, by comparison of the coefficients, that the invariant operator $I$ corresponding to $H_{II}$ is different from $H_{TT}$, although they are both generalized harmonic oscillators.
\subsection{Physical realization}
The II method only requires the time variation of a parabolic potential. Effective harmonic optical traps for neutral atoms may be formed by magnetic and/or optical means and their frequencies are routinely varied in time as part of many cold atom experiments. In magnetic traps, for example the frequency is modulated harmonically to look for collective excitation modes of a condensate \cite{Cornell96}, and ramped down adiabatically to change its conditions (critical temperature, particle number, spatial extension) \cite{Ketadiab,Cornell96}, or as a preliminary step to superimpose an optical lattice \cite{Cas}. Some experiments involve both time-dependent magnetic and optical traps or antitraps \cite{ch}. Purely optical traps are also manipulated in time, e.g. for adiabatic cooling of single neutral atoms \cite{acoo}.
In particular laser beams detuned with respect to the atomic transition form effective potentials for the ground state depending on Rabi frequency $\Omega$ and detuning $\Delta$ as $\Omega^2/4\Delta$ by adiabatic elimination of the excited state, thus forming attractive or repulsive potentials. This effective interaction can be made time dependent by varying the laser intensity, the frequency, or both \cite{Bize}, since the optical frequencies are many orders of magnitude larger than Rabi frequencies or detunings, and the changes will be slowly varying in the scale of optical periods. The intensity of a dipole trap can be changed by three or four orders of magnitude in $100$ ns using acousto-optics or electro-optics modulators. To monitor the sign of the square frequencies, one can superimpose two dipole beams locked respectively on the blue and red side of the line. By controlling their relative intensity, one can shape the square frequencies and their signs at will.
\section{Discussion}
We have compared and distinguished two different methods: a ``transitionless-tracking'' (TT) algorithm, and an ``inverse-invariant'' (II) method, to achieve transitionless dynamics for a fast frequency change of a quantum harmonic oscillator. They imply different driving Hamiltonians. The one in the II method can be implemented for ultracold atoms or ions in the laboratory by varying the trap frequency in time along a certain trajectory, and a generalization to Bose Einstein condensates has been worked out \cite{becs}, but its extension to other potentials or systems may be difficult and remains an open question. By contrast, we have found some difficulties to realize the TT Hamiltonian for the harmonic oscillator, but the TT method has the advantage of being, at least formally, more generally applicable. The feasibility of the actual realization is quite another matter and has to be studied in each case. An example of application is provided in \cite{Berry09} for two-level systems.
\ack{We acknowledge funding by Projects No. GIU07/40, FIS2009-12773-C02-01, 60806041, 08QA14030, 2007CG52, S30105,
and Juan de la Cierva Program.}
\appendix \section{Relation to the squeezing operator}
The evolution operator takes a particularly simple form when using the simplified case $E_n(t)=0$, so that $\hat{H}(t)=\hat{H}_1(t)$. Taking into account that $[\hat{H}_1(t),\hat{H}_1(t')]=0$ we can write
\begin{equation} \hat{U}(t)=e^{-i\int_0^t \hat{H}_1(t)dt/\hbar}. \end{equation}
This may be evaluated explicitly with (\ref{h1a}) fixing the time of the creation and annihilation operators to 0,
\begin{equation} \hat{U}(t)=e^{\frac{1}{2}\ln\left(\sqrt{\frac{\omega(t)}{\omega(0)}}\right) \left[a_0^2-(a_0^\dagger)^2\right]}=\hat{S}[r(t)], \end{equation}
which is a squeezing operator with real argument $r(t)=\ln\left(\sqrt{\frac{\omega(t)}{\omega(0)}}\right)$. It is unitary with inverse $[\hat{S}(r)]^{-1}=\hat{S}(-r)$. Using the relations
\begin{eqnarray} \hat{a}_t^\dagger+\hat{a}_t&=&\sqrt{\frac{\omega(t)}{\omega(0)}}(\hat{a}_0^\dagger+\hat{a}_0) \nonumber \\ \hat{a}_t^\dagger-\hat{a}_t&=&\sqrt{\frac{\omega(0)}{\omega(t)}}(\hat{a}_0^\dagger-\hat{a}_0) \end{eqnarray}
and the formal properties of $\hat{S}$, see e.g. \cite{bar}, it is easy to prove that
\begin{eqnarray} \hat{S}(r)\hat{a}_0\hat{S}(-r)&=&\hat{a}_t, \nonumber \\ \hat{S}(r)\hat{a}^\dagger_0\hat{S}(-r)&=&\hat{a}^\dagger_t. \end{eqnarray}
In fact any combination of powers of $\hat{a}_0$ and $\hat{a}^\dagger_0$ is mapped to the same combination of powers of $\hat{a}_t$ and $\hat{a}^\dagger_t$ by this unitary transformation.
To show that $|0_t\rangle\equiv \hat{S}(r)|0_0\rangle$ is indeed the vacuum at time $t$, note that
\begin{equation}
\hat{a}_t|0_t\rangle=\hat{S}(r)\hat{S}(-r)\hat{a}_t\hat{S}(r)|0_0\rangle=\hat{S}(r)\hat{a}_0|0_0\rangle=0. \end{equation}
Similarly we note that, consistently,
\begin{eqnarray}
\hat{S}(r)|n(0)\rangle&=&\hat{S}(r)\frac{1}{\sqrt{n!}}(\hat{a}_0^\dagger)^n|0_0\rangle
=\frac{1}{\sqrt{n!}}\hat{S}(r)(\hat{a}_0^\dagger)^n\hat{S}(-r)\hat{S}(r)|0_0\rangle \nonumber \\
&=&\frac{1}{\sqrt{n!}}(\hat{a}_t^\dagger)^n|0_t\rangle=|n(t)\rangle. \end{eqnarray}
\section*{References}
\end{document}
\begin{document}
\author[a]{Galit Ashkenazi-Golan} \author[b]{J\'{a}nos Flesch} \author[c]{Arkadi Predtetchinski} \author[d]{Eilon Solan}
\affil[a]{London School of Economics and Political Science, Houghton Street London WC2A 2AE, UK} \affil[b]{Department of Quantitative Economics, Maastricht University, P.O.Box 616, 6200 MD, The Netherlands} \affil[c]{Department of Economics, Maastricht University, P.O.Box 616, 6200 MD, The Netherlands} \affil[d]{School of Mathematical Sciences, Tel-Aviv University, Tel-Aviv, Israel, 6997800}
\title{Regularity of the minmax value and equilibria in multiplayer Blackwell games\thanks{Ashkenazi-Golan acknowledges the support of the Israel Science Foundation, grants \#217/17 and \#722/18, and the NSFC-ISF Grant \#2510/17. Solan acknowledges the support of the Israel Science Foundation, grant \#217/17. This work has been partly supported by COST Action CA16228 European Network for Game Theory.}} \maketitle
\begin{abstract} \noindent
A real-valued function $\varphi$ that is defined over all Borel sets of a topological space is \emph{regular} if for every Borel set $W$, $\varphi(W)$ is the supremum of $\varphi(C)$, over all closed sets $C$ that are contained in $W$, and the infimum of $\varphi(O)$, over all open sets $O$ that contain $W$.
We study Blackwell games with finitely many players. We show that when each player has a countable set of actions and the objective of a certain player is represented by a Borel winning set, that player's minmax value is regular.
We then use the regularity of the minmax value to establish the existence of $\varepsilon$-equilibria in two distinct classes of Blackwell games. One is the class of $n$-player Blackwell games where each player has a finite action space and an analytic winning set, and the sum of the minmax values over the players exceeds $n-1$. The other class is that of Blackwell games with bounded upper semi-analytic payoff functions, history-independent finite action spaces, and history-independent minmax values.
For the latter class, we obtain a characterization of the set of equilibrium payoffs. \end{abstract}
\noindent\textbf{Keywords:} Blackwell games, determinacy, value, equilibrium, regularity.
\noindent\textbf{AMS classification code:} Primary: \textsc{91A44} (Games involving topology, set theory, or logic). Secondary: \textsc{91A20} (Multistage and repeated games).
\section{Introduction} Blackwell games (Blackwell \cite{Blackwell69}) are dynamic multiplayer simultaneous-move games where the action sets of the players may be history dependent, and the payoff function is an arbitrary Borel-measurable function of the play. When the payoff function of a player is given by the characteristic function of a given set $W$, we say that $W$ is the \emph{winning set} of the player. These games subsume several familiar classes of dynamic games: repeated games with the discounted payoff or the limiting average payoff (e.g., Sorin \cite{Sorin92}, Mailath and Samuelson \cite{Mailath06}), games with perfect information (e.g., Gale and Stewart \cite{Gale53}), and graph games arising in the computer science applications (e.g., Apt and Gr\"{a}del \cite{AptGradel12}, Bruy\`{e}re \cite{Bruyere17, Bruyere21}, Chatterjee and Henzinger \cite{Chatterjee12}).
While two-player zero-sum Blackwell games and Blackwell games with perfect information are quite well understood (see, e.g., Martin \cite{Martin75, Martin98}, Mertens \cite{Mertens86}, Kuipers, Flesch, Schoenmakers, and Vrieze \cite{Kuipers21}), general multiplayer nonzero-sum Blackwell games have so far received relatively little attention.
The goal of this paper is to introduce a new technique to the study of multiplayer Blackwell games: regularity of the minmax value, along with a number of related approximation results. In a nutshell, the technique amounts to the approximation of the minmax value of a winning Borel set using a closed subset. This approach allows us to establish existence of $\varepsilon$-equilibria in two distinct classes of Blackwell games.
\noindent\textsc{Regularity and approximation results:} A real-valued function $\varphi$ that is defined over all Borel sets of a certain space is \emph{inner regular} if for every Borel set $W$, $\varphi(W)$ is the supremum of $\varphi(C)$, over all closed sets $C$ that are contained in $W$. The function $\varphi$ is \emph{outer regular} if for every Borel set $W$ it is the infimum of $\varphi(O)$, over all open sets $O$ that contain $W$. The function $\varphi$ is \emph{regular} if it is both inner regular and outer regular. Borel probability measures on metric spaces are one example of a regular function (see, e.g., Kechris \cite[Theorems 17.10 and 17.11]{Kechris95}).
When restricted to two-player zero-sum Blackwell games with finite action sets and Borel-measurable winning set for Player~1, the value function is known (Martin \cite{Martin98}) to be regular. This result was extended to two-player zero-sum stochastic games by Maitra, Purves, and Sudderth \cite{Maitra92}.
We show that in multiplayer Blackwell games with countable action sets and Borel winning sets, the minmax value of all players is regular. We thus extend the regularity result of Martin \cite{Martin98} in terms of both the number of actions (countable versus finite) and the number of players (finite versus two).
A related approximation result concerns the case when a player's objective is represented by a bounded Borel-measurable payoff function. Denote by $v_i(f)$ player~$i$'s minmax value when her payoff function is $f$. We show that $v_i(f)$ is the supremum of $v_i(g)$ over all bounded limsup functions $g \leq f$, and the infimum of $v_i(g)$ over all bounded limsup function $g \geq f$. A \emph{limsup function} is a function that can be written as the limit superior of a sequence of rewards assigned to the nodes of the game tree. This too, is an extension of results by Maitra, Purves, and Sudderth \cite{Maitra92} and Martin \cite{Martin98} for two-player games to multiplayer games. If, moreover, the player's minmax value is the same in every subgame, one obtains an approximation from below by an upper semi-continuous function, and an approximation from above by a lower semi-continuous function.
\noindent\textsc{Existence of $\varepsilon$-equilibria:} The main contribution of the paper is the application of the regularity of the minmax value to the problem of existence of an $\varepsilon$-equilibrium in multiplayer Blackwell games. We establish the existence in two distinct classes of Blackwell games.
One is the class of $n$-player Blackwell games with bounded upper semi-analytic payoff functions, history-independent finite action spaces, and history-independent minmax values. The latter assumption means that every player's minmax value is the same in each subgame. Under these assumptions, for each $\varepsilon > 0$, there is an $\varepsilon$-equilibrium with a pure path of play.
A prominent sufficient condition for the minmax value to be history-independent is that the payoff be tail-measurable. Roughly speaking, tail-measurability amounts to the requirement that the payoff is unaffected by a change of the action profile in any finite number of stages. We thus obtain the existence of $\varepsilon$-equilibria in Blackwell games with history-independent finite action spaces and bounded, upper semi-analytic, and tail-measurable payoff functions.
The second class of games for which we derive an existence result is $n$-player Blackwell games where each player has a finite action space at each history, her objective is represented by an analytic winning set, and the sum of the minmax values over the players exceeds $n-1$. Under these conditions we show that there exists a play that belongs to each player's winning set; any such play induces a $0$-equilibrium. At the heart of the proof is an approximation of each player's minmax value by the minmax value of a closed subset of the player's winning set.
The key idea of the proof of the first result is to consider an auxiliary Blackwell game with winning sets, where the winning set of player $i$ is the set of player $i$'s $\varepsilon$-individually rational plays: the plays that yield player $i$ a payoff no smaller than her minmax value minus $\varepsilon$. We show that, in the thus-defined auxiliary Blackwell game, each player's minmax value equals 1, and apply the second result.
The question whether $\varepsilon$-equilibria exist in multiplayer Blackwell games is a largely uncharted territory. An important benchmark is the result of Mertens and Neyman (see Mertens \cite{Mertens86}): all games of perfect information with bounded Borel-measurable payoff functions admit an $\varepsilon$-equilibrium for every $\varepsilon > 0$. Zero-sum Blackwell games (where at least one of the two players has a finite set of actions) are known to be determined since the seminal work of Martin \cite{Martin98}. Shmaya \cite{Shmaya11} extends the latter result by showing the determinacy of zero-sum games with eventual perfect monitoring, and Arieli and Levy \cite{Arieli15} extend Shmaya's result to stochastic signals.
Only some special classes of multiplayer dynamic games have been shown to have an $\varepsilon$-equilibrium. These include stochastic games with discounted payoffs (see, e.g., the survey by Ja\'{s}kiewicz and Nowak \cite{Nowak16}), two-player stochastic games with the limiting average payoff (Vieille \cite{Vieille00I,Vieille00II}), and graph games with classical computer science objectives (e.g., Secchi and Sudderth \cite{Sechi}, Chatterjee \cite{Chatterjee04,Chatterjee05}, Bruy\`{e}re \cite{Bruyere21}, Ummels, Markey, Brenguier, and Bouyer \cite{Ummels15}).
A companion paper (\cite{AFPS}) establishes the existence of $\varepsilon$-equilibria in Blackwell games with countably many players, finite action sets, and bounded, Borel-measurable, and tail-measurable payoff functions. The present paper departs from \cite{AFPS} in two dimensions. Firstly, it invokes a new proof technique, the regularity of the minmax value. Secondly, it makes different assumptions on the primitives. The second of our two existence results (Theorem \ref{theorem:sumofprob}) has, in fact, no analogue in \cite{AFPS}. The first (Theorem \ref{theorem:minmax_indt}) applies to a larger class of payoff functions than does the main result in \cite{AFPS}: it only requires players' minmax values to be history-independent. While tail-measurability of the payoff functions is a sufficient condition for history-independence of the minmax values, it is by no means a necessary condition. Furthermore, Borel-measurability imposed in \cite{AFPS} is relaxed here to upper semi-analyticity. On the other hand, \cite{AFPS} has a countable rather than a finite set of players, something that the methods developed here do not allow for.
\noindent\textsc{Characterisation of equilibrium payoffs:} An equilibrium payoff is an accumulation point of the expected payoff vectors of $\varepsilon$-equilibria, as $\varepsilon$ tends to $0$. We establish a characterisation of equilibrium payoffs in games with bounded upper semi-analytic payoff functions, history-independent finite action spaces, and history-independent minmax values.
In repeated games with patient players the folk theorem asserts that under proper conditions, the set of limiting average equilibrium payoffs (or the limit set of equilibrium payoffs, as the discount factor goes to 0 or the horizon increases to infinity) is the set of all vectors that are individually rational and lie in the convex hull of the range of the stage payoff function (see, e.g., Aumann and Shapley \cite{Aumann94}, Sorin \cite{Sorin92}, or Mailath and Samuelson \cite{{Mailath06}}). Our result identifies the set of equilibrium payoffs of a Blackwell game as the set of all vectors that lie in the convex hull of the set of feasible and individually rational payoffs. The intuition for this discrepancy is that in standard repeated games, a low payoff in one stage can be compensated by a high payoff in another stage, therefore payoff vectors that are convex combinations of the stage payoff function can be equilibrium payoffs as long as this convex combination of payoffs is individually rational. In particular, these combinations can place some positive weight on payoff vectors that are not individually rational. In Blackwell games, however, the payoff is obtained only at the end of the game, hence only plays that generate individually rational payoffs can be taken into account when constructing equilibria.
Our characterization of the set of equilibrium payoffs is related to the rich literature on the folk theorem, and the study of the minmax value is instrumental to this characterization (see, e.g., the folk theorems in Fudenberg and Maskin \cite{Fudenberg86}, Mailath and Samuelson \cite{Mailath06}, or H\"{o}rner, Sugaya, Takahashi, and Vieille \cite{Horner11}). The minmax value of a player would often be used in the proofs of equilibrium existence to construct suitable punishments for a deviation from the supposed equilibrium play (as is done, for instance, in Aumann and Shapley \cite{Aumann94}, Rubinstein \cite{Rubinstein94}, Fudenberg and Maskin \cite{Fudenberg86}, and Solan \cite{Solan01}).
The paper is structured as follows. Section \ref{secn.games} describes the class of Blackwell games. Section \ref{secn.approx} is devoted to the regularity of the minmax value and related approximation theorems. Section \ref{secn.appl} applies these tools to the problem of existence of equilibrium. Section~\ref{secn.folk} is devoted to the characterisation of equilibrium payoffs. Section \ref{secn.tail} discusses the implications of the results for games with tail-measurable payoffs. Section \ref{secn.disc} contains a discussion, concluding remarks, and open questions.
\section{Blackwell games}\label{secn.games} \noindent\textbf{Blackwell games:} An $n$-\textit{player Blackwell game} is a tuple $\Gamma = (I, A, H, (f_{i})_{i \in I})$. The elements of $\Gamma$ are as follows.
The set of players is $I$, a finite set of cardinality $n$. For a player $i \in I$ we write $-i$ to denote the set of $i$'s opponents, $I \setminus \{i\}$.
The set $A$ is a countable set and $H \subseteq \cup_{t \in \mathbb{N}}A^{t}$ is the game tree (throughout the paper $\mathbb{N} = \{0,1,\ldots\}$). Elements of $H$ are called histories. The set $H$ is assumed to have the following properties: (a) $H$ contains the empty sequence, denoted $\oslash$; (b) a prefix of an element of $H$ is an element of $H$; that is, if for some $h \in \cup_{t \in \mathbb{N}}A^{t}$ and $a \in A$ the sequence $(h,a)$ is an element of $H$, so is $h$; (c) for each $h \in H$ there is an element $a \in A$ such that $(h, a) \in H$; we define $A(h) := \{a \in A: (h,a) \in H\}$; and (d) for each $h \in H$ and each $i \in I$ there exists a set $A_{i}(h)$ such that $A(h) = \prod_{i \in I}A_{i}(h)$. The set $A_{i}(h)$ is called player $i$'s set of actions at history $h$, and $A(h)$ the set of action profiles at $h$.
Conditions (a), (b), and (c) above say that $H$ is a pruned tree on $A$. Condition (c) implies that the game has infinite horizon. Let $H_{t} := H \cap A^{t}$ denote the set of histories in stage $t$.
An infinite sequence $(a_0,a_1,\ldots) \in A^{\mathbb{N}}$ such that $(a_0,\ldots,a_t) \in H$ for each $t \in \mathbb{N}$ will be called a \emph{play}. The set of plays is denoted by $[H]$. This is the set of infinite branches of $H$. For $h \in H$ let $O(h)$ denote the set of all plays of $\Gamma$ having $h$ as a prefix. We endow $[H]$ with the topology generated by the basis consisting of the sets $\{O(h):h \in H\}$. The space $[H]$ is Polish. For $t \in \mathbb{N}$ let $\mathcal{F}_{t}$ be the sigma-algebra on $[H]$ generated by the sets $\{O(h):h \in H_{t}\}$. The Borel sigma-algebra of $[H]$ is denoted by $\mathscr{B}$. It is the minimal sigma-algebra containing the topology. A subset $S$ of $[H]$ is \emph{analytic} if it is the image of a continuous function from the Baire space $\mathbb{N}^\mathbb{N}$ to $[H]$. Each Borel set is analytic.
Each analytic set is universally measurable. Recall that a set $S \subseteq [H]$ is said to be universally measurable if (Kechris \cite[Section 17.A]{Kechris95}), for every Borel probability measure $\mathbb{P}$ on $[H]$, there exist Borel sets $B,Z \in \mathscr{B}$ such that $S \bigtriangleup B \subseteq Z$ and $\mathbb{P}(Z) = 0$; here $S \bigtriangleup B = (S \setminus B) \cup (B \setminus S)$ is the symmetric difference of the sets $S$ and $B$.
The last element of the game is a vector $(f_i)_{i \in I}$, where $f_i : [H] \to \mathbb{R}$ is player $i$'s \emph{payoff function}. The most general class of payoff functions we allow for are bounded upper semi-analytic functions. A function $f_i : [H] \to \mathbb{R}$ is said to be \emph{upper semi-analytic} if, for each $r \in \mathbb{R}$, the set $\{p \in [H]: r \leq f_i(p)\}$ is analytic. In particular, the indicator function $1_{S}$ of a subset $S \subseteq [H]$ of plays is upper semi-analytic if and only if $S$ is an analytic set. Each Borel-measurable function is upper semi-analytic. Note that a bounded upper semi-analytic function is universally measurable, i.e., for each open set $U \subseteq \mathbb{R}$, the set $f_i^{-1}(U) \subseteq [H]$ is universally measurable (see, e.g., Chapter 7 in Bertsekas and Shreve \cite{Bertsekas96}).
The play of the game starts at the empty history $h_0 = \oslash$. Suppose that by a certain stage $t \in \mathbb{N}$ a history $h_t \in H_t$ has been reached. Then in stage $t$, the players simultaneously choose their respective actions; thus player $i \in I$ chooses an action $a_{i,t} \in A_{i}(h_{t})$. This results in the stage $t$ action profile $a_{t} = (a_{i,t})_{i \in I} \in A(h_{t})$. Once chosen, the actions are revealed to all players, and the history $h_{t+1} = (h_{t},a_{t})$ is reached. The result of the infinite sequence of choices is the play $p = (a_0,a_1,\ldots)$, an element of $[H]$. Each player $i \in I$ receives the corresponding payoff $f_i(p)$.
Given a Blackwell game $\Gamma$ and a history $h \in H$, the \textit{subgame} of $\Gamma$ starting at $h$ is the Blackwell game $\Gamma_h = (I, A, H_h, (f_{i,h})_{i \in I})$. The set $H_h$ of histories of $\Gamma_h$ consists of finite sequences $g \in \bigcup_{t \in \mathbb{N}} A^{t}$ such that $hg \in H$, where $hg$ is the concatenation of $h$ and $g$. The payoff function $f_{i,h} : [H_h] \to \mathbb{R}$ is the composition $f_i \circ s_h$, with $s_h : [H_h] \to [H]$ given by $p \mapsto hp$, where $hp$ is the concatenation of $h$ and $p$. Note that $\Gamma_\oslash$ is just the game $\Gamma$ itself.
The Blackwell game $\Gamma$ is said to have \textit{history-independent action sets} if $A_{i}(h) = A_{i}(\oslash)$ for each history $h \in H$ and each player $i \in I$; the common action set is simply denoted by $A_i$. If $\Gamma$ has history-independent action sets, then the set of its histories is $H = \cup_{t \in \mathbb{N}}A^{t}$, and the set of plays in $\Gamma$ is $[H] = A^{\mathbb{N}}$. A Blackwell game with history-independent action sets can be described as a tuple $(I,(A_{i},f_{i})_{i \in I})$.
\noindent\textbf{Strategies and expected payoffs:} A strategy for player $i\in I$ is a function $\sigma_i$ assigning to each history $h \in H$ a probability distribution $\sigma_{i}(h)$ on the set $A_{i}(h)$. The set of player $i$'s strategies is denoted by $\Sigma_i$. We also let $\Sigma_{-i} := \prod_{j \in -i} \Sigma_{j}$ and $\Sigma := \prod_{i \in I} \Sigma_{i}$. Each strategy profile $\sigma=(\sigma_i)_{i\in I}$ induces a unique probability measure on the Borel sets of $[H]$, denoted $\mathbb{P}_{\sigma}$. The corresponding expectation operator is denoted $\mathbb{E}_{\sigma}$. In particular, $\mathbb{E}_{\sigma}[f_{i}]$ denotes an expected payoff to player $i$ in the Blackwell game under the strategy profile $\sigma$. It is well defined under the maintained assumptions, namely boundedness and upper semi-analyticity of $f_{i}$.
Take a history $h \in H_t$ in stage $t$. A strategy profile $\sigma \in \Sigma$ in $\Gamma$ induces the strategy profile $\sigma_h$ in $\Gamma_h$ defined as $\sigma_h(g)=\sigma(hg)$ for each history $g \in H_h$. Let us define $\mathbb{E}_{\sigma}(f_i \mid h)$ as the expected payoff to player $i$ in the Blackwell game $\Gamma_h$ under the strategy profile $\sigma_h$: that is, $\mathbb{E}_{\sigma}(f_i \mid h) := \mathbb{E}_{\sigma_h}(f_{i,h})$. Note that $\mathbb{E}_{\sigma}(f_i \mid h)$, when viewed as an $\mathcal{F}_{t}$-measurable function on $[H]$, is a conditional expectation of $f_i$ with respect to the measure $\mathbb{P}_{\sigma}$ and the sigma-algebra $\mathcal{F}_{t}$; whence our choice of notation.
\noindent\textbf{Minmax value:} Consider a Blackwell game $\Gamma$, and suppose that player $i$'s payoff function $f_i$ is bounded and upper semi-analytic. Player $i$'s \textit{minmax value} is defined as \[v_i(f_i) := \inf_{\sigma_{-i} \in \Sigma_{-i}}\sup_{\sigma_{i} \in \Sigma_{i}} \mathbb{E}_{\sigma_{-i},\sigma_{i}}(f_i).\] Whenever $f_i = 1_{W_i}$ is an indicator of an analytic set $W_i \subseteq [H]$ we write $v_i(W_i)$ for $v_i(1_{W_i})$.
Player $i$'s minmax value is said to be \textit{history-independent} if her minmax value in the subgame $\Gamma_h$ equals that in the game $\Gamma$, for each history $h \in H$.
\section{Regularity and approximation theorems}\label{secn.approx} In this section we state the regularity property of the minmax: the minmax value of a Borel winning set can be approximated from below by the minmax value of a closed subset and from above by the minmax value of an open superset. We also describe two related approximation results: the minmax value of a bounded Borel-measurable payoff function can be approximated from below and from above by limsup functions. If, in addition, the minmax values are history-independent, then one can choose the approximation from below to be upper semicontinuous, and the approximation from above to be lower semicontinuous. The proofs of all results are detailed in the appendix.
\begin{theorem} {\rm (Regularity of the minmax value)}\label{thrm:reg} Consider a Blackwell game. Suppose that player $i$'s objective is given by a winning set $W_i \subseteq [H]$. Suppose that $W_i$ is Borel. Then \begin{align*} v_i(W_i) &= \sup\{v_i(C):C\subseteq W_i, C \text{ is closed}\}=\inf\{v_i(O):O\supseteq W_i, O \text{ is open}\}. \end{align*} \end{theorem}
One implication of Theorem~\ref{thrm:reg} concerns the complexity of strategies of player~$i$ that ensure that her probability of winning is close to her minmax value. Suppose, for example, that $v_i(W_i) = \frac{1}{2}$. Then for every strategy profile $\sigma_{-i}$ of the opponents of player~$i$ and every $\varepsilon > 0$, she has a response $\sigma_i$ such that $\mathbb{P}_{\sigma_{-i},\sigma_i}(W_i) \geq \frac{1}{2}-\varepsilon$. The strategy profile $\sigma_{-i}$ and the winning set $W_i$ may be complex, and accordingly the good response $\sigma_i$ may be complex as well. However, take now a closed subset $C \subseteq W_i$ such that $v_i(C) > v_i(W_i) - \varepsilon = \frac{1}{2}-\varepsilon$. The complement of $C$, denoted $C^c$, is open, hence it is the union of basic open sets; that is, it can be presented as a union $C^c = \bigcup_{h \in H'} O(h)$, for some subset $H' \subseteq H$ of histories. A strategy $\sigma'_i$ that satisfies $\mathbb{P}_{\sigma_{-i},\sigma_{i}'}(C) \geq \frac{1}{2}-\varepsilon$ must aim at avoiding $C^c$, that is, at avoiding histories in $H'$. In that sense, $\sigma'_i$ may have a simple structure.
\begin{exl}\label{exl.io}\rm Here we consider a Blackwell game where the same stage game is being played at every stage. The stage game specifies a stage winning set for each player. A player's objective in the Blackwell game is to win the stage game infinitely often.
Thus let $\Gamma = (I,(A_{i},1_{W_{i}})_{i \in I})$ be a Blackwell game with history-independent countable action sets, where player $i$'s winning set is \[W_i = \{(a_0,a_1,\ldots) \in A^\mathbb{N}:a_t \in U_i\text{ for infinitely many }t \in \mathbb{N}\};\] here $U_{i}$, called player $i$'s \textit{stage winning set}, is a given subset of $\prod_{i \in I}A_{i}$. If $a_t \in U_i$, we say that player $i$ \textit{wins stage $t$}. Thus, player~$i$'s objective is to win infinitely many stages of the Blackwell game. The set $W_{i}$ is a $G_\delta$-set, i.e., an intersection of countably many open subsets of $A^\mathbb{N}$.
Fix a player $i \in I$. Let \begin{equation}\label{eqn.stageminmax} d_{i} := \inf_{x_{-i} \in X_{-i}} \sup_{x_i \in X_i} \mathbb{P}_{x_{-i},x_i}(U_i) \end{equation} be player~$i$'s minmax value in the stage game. As follows from the arguments below, $v_{i}(W_{i})$ is either $0$ or $1$, and it is $1$ exactly when $d_{i} > 0$. In either case, there are intuitive approximations of player $i$'s winning set by a closed set from below and an open set from above.
First assume that $d_{i} > 0$. Take an $\varepsilon > 0$. Let us imagine that player $i$'s objective is not merely to win infinitely many stages in the course of the Blackwell game, but to make sure that she wins at least once in every block of stages $t_{n},\ldots,t_{n+1}-1$, where the sequence of stages $t_0 < t_1 < \cdots$ is chosen to satisfy \[(1 - \tfrac{1}{2}d_{i})^{t_{n+1} - t_{n}} < 2^{-n-1}\cdot\varepsilon\] for each $n \in \mathbb{N}$. This more demanding condition defines an approximating set. Formally, define \[C_{i} := \bigcap_{n \in \mathbb{N}}\bigcup_{t_{n} \leq k < t_{n+1}} \{(a_0,a_1,\ldots) \in A^{\mathbb{N}}: a_k \in U_i\}.\]
As the intersection of closed sets, $C_{i}$ is a closed subset of $W_{i}$. Moreover, $1 - \varepsilon \leq v_{i}(C_{i})$. To see this, fix any strategy $\sigma_{-i}$ for $i$'s opponents. At any history $h$, player $i$ has a mixed action $\sigma_{i}(h)$ that, when played against $\sigma_{-i}(h)$, guarantees a win at history $h$ with probability of at least $\tfrac{1}{2}d_{i}$. Thus, under the measure $\mathbb{P}_{\sigma_{-i},\sigma_{i}}$ the probability for player $i$ not to win at least once in a block of stages $t_{n},\ldots,t_{n+1}-1$ is at most $2^{-n-1}\cdot\varepsilon$, for any history of play up to stage $t_{n}$. And hence the probability that there is a block within which player $i$ does not win once is at most $\varepsilon$.
Suppose that $d_{i} = 0$. Take an $\varepsilon > 0$. Let us imagine that player $i$'s objective is merely to win the stage game at least once. This modest objective defines the approximating set: \[O_{i} = \bigcup_{t \in \mathbb{N}}\{(a_{0},a_{1},\ldots)\in A^\mathbb{N}: a_{t} \in U_i\}.\] As the union of open sets, $O_{i}$ is an open set containing $W_{i}$. Moreover, $v_{i}(O_{i}) \leq \varepsilon$. To see this, let $\sigma_{-i}$ be the strategy for $i$'s opponents such that, at any stage $t \in \mathbb{N}$ and any history $h \in A^{t}$ of stage $t$, the probability that the action profile $a_{t}$ is an element of $U_{i}$ is not greater than $2^{-t-1}\cdot\varepsilon$ regardless of the action of player $i$. Then, for any player $i$'s strategy $\sigma_{i}$, the probability that $i$ wins at least once is not greater than $\varepsilon$. $\Box$
\end{exl}
We turn to two related approximation results for Blackwell games with Borel payoff functions. A function $f : [H] \to \mathbb{R}$ is said to be a \textit{limsup function} if there exists a function $u : H \to \mathbb{R}$ such that for each play $(a_0,a_1,\ldots) \in [H]$, \[f(a_0,a_1,\ldots) = \limsup_{t \to \infty} u(a_0,\dots,a_t).\] The function $f : [H] \to \mathbb{R}$ is a \textit{liminf function} if $-f$ is a limsup function.
Limsup and liminf payoff functions are ubiquitous in the literature on infinite dynamic games. At least since the work of Gillette \cite{Gillette57}, the so-called limiting average payoff (that is, the limit superior or the limit inferior of the average of the stage payoffs) is a standard specification of the payoffs in a stochastic game (see for example Mertens and Neyman \cite{Mertens81}, or Levy and Solan \cite{Levy20}). Stochastic games with limsup payoff functions have been studied in Maitra and Sudderth \cite{Maitra93}.
Limsup functions have relatively ``low" set-theoretic complexity. Various characterizations of the limsup functions can be found in Hausdorff \cite{Hausdorff05}. In particular, $f$ is a limsup function if and only if, for each $r \in \mathbb{R}$, the set $\{p \in [H]: r \leq f(p)\}$ is a $G_{\delta}$-set.
We now state a result on the approximation of the minmax value for Blackwell games where a player's objective is represented by a bounded Borel-measurable payoff function.
\begin{theorem}\label{thrm:regfunc} Consider a Blackwell game. Suppose that player $i$'s payoff function $f_i : [H] \to \mathbb{R}$ is bounded and Borel-measurable. Then: \[\begin{aligned} v_i(f_i) &= \sup\{v_i(g): g\text{ is a bounded limsup function and }g \leq f_i\}\\ &= \inf\{v_i(g): g \text{ is a bounded limsup function and }f_i \leq g\}. \end{aligned}\] \end{theorem}
Theorems~\ref{thrm:reg} and~\ref{thrm:regfunc} have been proven by Martin \cite{Martin98} for the case $n=2$, see \cite[Theorem 5, and Remark (b)]{Martin98} and \cite[Remark (c)]{Martin98}. They have been extended to two-player stochastic games by Maitra and Sudderth \cite{Maitra98}. Theorems~\ref{thrm:reg} and~\ref{thrm:regfunc} extend the known results in two respects. First, they allow for more than two players, and second, they allow for countably many actions.
The proof of Theorem \ref{thrm:reg} combines and fine-tunes the arguments in Martin \cite{Martin98} and Maitra and Sudderth \cite{Maitra98}. The key element of the proof is a zero-sum perfect information game, denoted $G_i(f_i,c)$, where the aim of Player I is to ``prove" that the minmax value of $f_i$ is at least $c$. Roughly speaking, the game proceeds as follows. Player~I commences the game by proposing a fictitious continuation payoff, which one could think of as a payoff player $i$ hopes to attain, contingent on each possible stage $0$ action profile. The number $c$ serves as the initial threshold: player $i$'s minmax value of the proposed continuation payoffs is required to be at least $c$. Player II then chooses a stage $0$ action profile, and the corresponding continuation payoff serves as the new threshold. Player I then proposes a fictitious continuation payoff contingent on each possible stage $1$ action profile, and Player II chooses the stage $1$ action profile, etc. Player I wins if the sequence of continuation payoffs is ``justified" by the actual payoff on a play produced by Player II. Ultimately the proof rests on the determinacy of the game $G_i(f_i,c)$, which follows by Martin \cite{Martin75}.
The perfect information game $G_i(f_i,c)$ is a version of the games used in Martin \cite{Martin98}. The main difference is in the use of player $i$'s minmax value that constrains Player I's choice of fictitious continuation payoffs. The details of our proof are slightly closer to those in Maitra and Sudderth \cite{Maitra98}. Like them we invoke martingale convergence and the Fatou lemma.
Finally, we state an approximation result for a Blackwell game with history-independent minmax values. Recall that a function $g : A^\mathbb{N} \to \mathbb{R}$ is \emph{upper semicontinuous} if, for each $r \in \mathbb{R}$, the set $\{p \in [H]: r \leq g(p)\}$ is a closed set, and $g$ is \emph{lower semicontinuous} if $-g$ is upper semicontinuous. When $g = 1_B$ for some $B \subseteq A^\mathbb{N}$, $g$ is upper semicontinuous (resp.~lower semicontinuous) if and only if $B$ is closed (resp.~open).
\begin{theorem}\label{thrm:tailapprox} Consider a Blackwell game. Suppose that player $i$'s payoff function $f_i$ is bounded and Borel-measurable, and player $i$'s minmax values are history-independent. Then \[\begin{aligned} v_i(f_i) &= \sup\{v_i(g): \text{g is a bounded upper semicontinuous function and }g \leq f_i\}\\ &= \inf\{v_i(g): \text{g is a bounded lower semicontinuous function and }f_i \leq g\}. \end{aligned}\] \end{theorem}
As the proof reveals, both the upper semicontinuous and the lower semicontinuous functions can be chosen to be two-valued. Recall that (Hausdorff \cite{Hausdorff05}) an upper semicontinuous function and a lower semicontinuous function are both a limsup and a liminf function. Consequently, in comparison to Theorem \ref{thrm:regfunc}, an additional assumption of history-independence of the minmax values in Theorem \ref{thrm:tailapprox} leads to a stronger approximation result. The latter condition cannot be dropped; see Section~\ref{secn.disc} for an example of a game with a limsup payoff function such that the minmax value cannot be approximated from below by an upper semicontinuous function.
\section{Existence of equilibria}\label{secn.appl} In this section, we employ the results of the previous section to establish existence of $\varepsilon$-equilibria in two distinct classes of Blackwell games. Theorem~\ref{theorem:sumofprob} concerns $n$-player Blackwell games where each player has a finite action space at each history, her objective is represented by an analytic winning set, and the sum of the minmax values over the players exceeds $n-1$. Theorem~\ref{theorem:minmax_indt} concerns Blackwell games with bounded upper semi-analytic payoff functions, history-independent finite action spaces, and history-independent minmax values.
Consider a Blackwell game $\Gamma$ and let $\varepsilon \geq 0$. A strategy profile $\sigma \in \Sigma$ is \emph{an $\varepsilon$-equilibrium} of $\Gamma$ if for each player $i \in I$ and each strategy $\eta_i \in \Sigma_i$ of player $i$, \[\mathbb{E}_{\sigma_{-i},\eta_{i}}(f_i) \leq \mathbb{E}_{\sigma_{-i},\sigma_{i}}(f_i) + \varepsilon.\]
We state our first existence result.
\begin{theorem}\label{theorem:sumofprob} Consider an $n$-player Blackwell game $\Gamma = (I, A, H, (1_{W_{i}})_{i \in I})$. Suppose that for each player $i \in I$ player $i$'s action set $A_i(h)$ at each history $h \in H$ is finite, and that her winning set $W_i$ is analytic. If $v_1(W_1)+ \cdots +v_n(W_n) > n-1$, then the set $W_1 \cap \cdots \cap W_n$ is not empty. Consequently, $\Gamma$ has a 0-equilibrium. \end{theorem}
Note that any play $p \in W_1 \cap \cdots \cap W_n$ is in fact a 0-equilibrium, or more precisely, any strategy profile that requires all the players to follow $p$ is a 0-equilibrium, because it yields all players the maximal payoff 1.
The key step of the proof is the approximation of the minmax value of a player using a closed subset of her winning set. To prove Theorem~\ref{theorem:sumofprob} we need the following technical observation.
\begin{lemma}\label{lemma:intersect} Let $(X,\mathscr{B},P)$ be a probability space, and let $Q_1,\ldots,Q_n\in \mathscr{B}$ be $n$ events. Then \[P(Q_1\cap\cdots\cap Q_n)\geq P(Q_1)+\cdots+P(Q_n)-n+1.\] \end{lemma}
\begin{proof} For $n=1$ the statement is obvious, and for $n=2$ we have \begin{equation}\label{indineq} P(Q_1\cap Q_2)=P(Q_1)+P(Q_2)-P(Q_1\cup Q_2)\geq P(Q_1)+P(Q_2)-1. \end{equation} Assume that the statement holds for some $n-1$. Then for $n$ we have \begin{align*} P(Q_1\cap\cdots\cap Q_n)\,&=\,P((Q_1\cap\cdots\cap Q_{n-1})\cap Q_n)\\ &\geq\, P(Q_1\cap\cdots\cap Q_{n-1}) + P(Q_n)-1\\ &\geq\, \big(P(Q_1)+\cdots+P(Q_{n-1})-n+2\big)+ P(Q_n)-1\\ &=\,P(Q_1)+\cdots+P(Q_n)-n+1, \end{align*} where the first inequality follows from Eq.~\eqref{indineq} and the second by the induction hypothesis. \end{proof}
\noindent\textbf{Proof of Theorem \ref{theorem:sumofprob}:} We first establish the theorem in the special case of Borel winning sets, and then generalize it to analytic winning sets.
\noindent\textsc{Part I:} Suppose that for each $i \in I$ the set $W_{i} \subseteq [H]$ is Borel.
By Theorem \ref{thrm:reg} there are closed sets $C_1\subseteq W_1,\ldots,C_n\subseteq W_n$ such that $v_1(C_1)+\cdots+v_n(C_n) > n-1$. We show that the intersection $C_1 \cap\cdots\cap C_n$ is not empty.
Given $m \in \mathbb{N}$ consider the $n$-player Blackwell game $\Gamma^{m} = (I, A, H, (1_{C_{i}^{m}})_{i \in I})$, where player $i$'s winning set is defined by \[C_i^{m} := \bigcup\{O(h): h \in H_{m}\text{ such that }O(h) \cap C_{i} \neq \oslash\}.\]
The game $\Gamma^{m}$ essentially ends after $m$ stages: by stage $m$ each player $i$ knows whether the play is an element of her winning set $C_{i}^{m}$ or not. In $\Gamma^{m}$, player $i$ wins if after $m$ stages there is a continuation play that leads to $C_i$. Note that this continuation play might be different for different players.
The set $C_i^{m}$ is a clopen set. For each $m \in \mathbb{N}$ and $i \in I$ we have the inclusion $C_i^{m} \supseteq C_i^{m+1}$ (winning in $\Gamma^{m+1}$ is more difficult than winning in $\Gamma^{m}$). Moreover, $\bigcap_{m \in \mathbb{N}} C_i^{m} = C_i$. Indeed, the inclusion $C_i^{m} \supseteq C_i$ is evident from the definition. Conversely, take an element $q$ of the set $[H] \setminus C_i$. Since $[H] \setminus C_i$ is an open set, there exists a history $h \in H$ such that $q \in O(h)$ and $O(h) \subseteq [H] \setminus C_{i}$. But then $q \in [H] \setminus C_i^{m}$, where $m$ is the length of the history $h$.
Define $C^m := C_1^m \cap\cdots\cap C_n^m$. Thus $\{C^{m}\}_{m \in \mathbb{N}}$ is a nested sequence of closed sets converging to $C_1 \cap\cdots\cap C_n$. Note that, since by the assumption of the theorem $H$ is a finitely branching tree, the space $[H]$ is compact. Thus $C^{m}$ is a compact set. Consequently, to prove that $C_{1} \cap \cdots \cap C_{n}$ is not empty, we only need to argue that $C^{m}$ is not empty for each $m \in \mathbb{N}$.
The game $\Gamma^m$ being finite, it has a $0$-equilibrium (Nash \cite{Nash50}), say $\sigma^m$. By the definition of $0$-equilibrium, the equilibrium payoff is not less than the minmax value: \[\mathbb{P}_{\sigma_{-i}^m,\sigma_{i}^{m}} (C_i^m) \,=\, \sup_{\sigma_{i} \in \Sigma_{i}} \mathbb{P}_{\sigma_{-i}^m,\sigma_{i}} (C_i^m)\, \geq\, \inf_{\sigma_{-i} \in \Sigma_{-i}} \sup_{\sigma_{i} \in \Sigma_{i}} \mathbb{P}_{\sigma_{-i},\sigma_{i}} (C_i^m) = v_i(C_i^m).\] Moreover, since $C_i^{m} \supseteq C_{i}$, it holds that $v_i(C_i^m) \geq v_i(C_i)$. We conclude that \[\mathbb{P}_{\sigma^m}(C_1^m)+ \cdots +\mathbb{P}_{\sigma^m}(C_n^m) > n-1.\] Finally, we apply Lemma \ref{lemma:intersect} to conclude that $\mathbb{P}_{\sigma^m}(C^m) > 0$, hence $C^{m}$ is not empty.
\noindent\textsc{Part II:} Now let $\Gamma$ be any game as in the statement of the theorem. Suppose by way of contradiction that $W_1 \cap \cdots \cap W_n$ is empty. By Novikov's separation theorem (Kechris \cite[Theorem 28.5]{Kechris95}) there exist Borel sets $B_1, \ldots, B_n$ such that $W_{i} \subseteq B_{i}$ for each $i \in I$ and $B_1 \cap \cdots \cap B_n = \oslash$. But since $v_{i}(W_{i})\leq v_{i}(B_{i})$ for each $i \in I$, the game $\Gamma = (I, A, H, (1_{B_{i}})_{i \in I})$ satisfies the assumptions of the theorem, and Part I of the proof yields a contradiction. $\Box$
We state our second and main existence result.
\begin{theorem}\label{theorem:minmax_indt} Consider a Blackwell game $\Gamma = (I, A, H, (f_{i})_{i \in I})$. Suppose that for each player $i \in I$, player $i$'s action set $A_i(h)$ at each history $h \in H$ is finite, her payoff function $f_i$ is bounded and upper semi-analytic, and her minmax value is history-independent. Then for every $\varepsilon>0$ the game admits an $\varepsilon$-equilibrium. \end{theorem}
The key idea behind the proof is to consider an auxiliary Blackwell game with winning sets, the winning set of a player consisting of that player's $\varepsilon$-individually rational plays. We show that in the thus-defined auxiliary Blackwell game each player's minmax value equals 1, and apply Theorem \ref{theorem:sumofprob}.
Given $\varepsilon > 0$ we define the set of \textit{player $i$'s $\varepsilon$-individually rational plays}: \[Q_{i,\varepsilon}(f_i) := \{p\in [H] : f_i(p) \geq v_i(f_i) - \varepsilon\}.\] Also define the set \[U_{i,\varepsilon}(f_i) := \{p\in [H] : f_i(p) \geq v_i(f_i) + \varepsilon\}.\] Note that under the assumptions of Theorem \ref{theorem:minmax_indt} both sets are analytic.
\begin{proposition}\label{prop:v(Q)=1} Consider a Blackwell game $\Gamma = (I, A, H, (f_{i})_{i \in I})$ and a player $i \in I$. Suppose that player $i$'s payoff function $f_i$ is bounded and upper semi-analytic, and that her minmax values are history-independent. Let $\varepsilon > 0$. Then \begin{enumerate} \item $v_i(Q_{i, \varepsilon}(f_i)) = 1$. In fact, for each strategy profile $\sigma_{-i} \in \Sigma_{-i}$ of players $-i$ there is a strategy $\sigma_i \in \Sigma_{i}$ for player $i$ such that $\mathbb{P}_{\sigma_{-i},\sigma_{i}}(Q_{i, \varepsilon}(f_i)) = 1$. \item $v_i(U_{i, \varepsilon}(f_i)) = 0$. In fact, there exists a strategy profile $\sigma_{-i} \in \Sigma_{-i}$ of players $-i$ such that for each strategy $\sigma_i \in \Sigma_{i}$ for player $i$ it holds that $\mathbb{P}_{\sigma_{-i},\sigma_{i}}(U_{i, \varepsilon}(f_i)) = 0$. \end{enumerate}
\begin{proof} \noindent\textsc{Claim 1:} It suffices to prove the second statement. Take a strategy profile $\sigma_{-i}$ of players $-i$. It is known that player $i$ has a strategy $\sigma_i$ that is an $\varepsilon/2$-best response to $\sigma_{-i}$ in each subgame (see, for example, Mashiah-Yaakovi \cite[Proposition 11]{Ayala15}, or Flesch, Herings, Maes, and Predtetchinski \cite[Theorem 5.7]{JJJJ}), and therefore \[\mathbb{E}_{\sigma_{-i},\sigma_i}(f_i \mid h) \geq v_i(f_{i,h}) -\varepsilon/2\,=\,v_i(f_i)-\varepsilon/2,\] for each history $h\in H$. Since the payoff function $f_i$ is bounded, it follows that there is $d>0$ such that \[\mathbb{P}_{\sigma_{-i},\sigma_i}(Q_{i, \varepsilon}(f_i) \mid h) \geq d\] for each $h \in H$. Indeed, it is easy to verify that one can choose \[d\,=\,\frac{\varepsilon}{2(\sup_{p \in [H]}f_i(p)-v_i(f_i)+\varepsilon)}.\]
Since $Q_{i,\varepsilon}(f_i)$ is an analytic set, there is a Borel set $B$ such that $\mathbb{P}_{\sigma_{-i},\sigma_i}(Q_{i, \varepsilon}(f_i) \bigtriangleup B) = 0$, where $\bigtriangleup$ stands for the symmetric difference of two sets. It follows that $\mathbb{P}_{\sigma_{-i},\sigma_i}(Q_{i, \varepsilon}(f_i) \bigtriangleup B \mid h) = 0$, and consequently $\mathbb{P}_{\sigma_{-i},\sigma_i}(B \mid h) \geq d$ for each history $h \in H$ that is reached under $\mathbb{P}_{\sigma_{-i},\sigma_i}$ with positive probability. L\'{e}vy's zero-one law implies that $\mathbb{P}_{\sigma_{-i},\sigma_i}(B) = 1$, and hence $\mathbb{P}_{\sigma_{-i},\sigma_i}(Q_{i, \varepsilon}(f_i)) = 1$.
\noindent\textsc{Claim 2:} By an argument similar to that
in Mashiah-Yaakovi \cite[Proposition 11]{Ayala15} or Flesch, Herings, Maes, and Predtetchinski \cite[Theorem 5.7]{JJJJ}, one shows that there is a strategy profile $\sigma_{-i} \in \Sigma_{-i}$ such that \[\mathbb{E}_{\sigma_{-i},\sigma_i}(f_{i} \mid h) \leq v_{i}(f_{i,h}) + \varepsilon/2 = v_{i}(f_{i}) + \varepsilon/2,\] for each history $h \in H$ and each strategy $\sigma_{i} \in \Sigma_{i}$. Fix any $\sigma_{i} \in \Sigma_{i}$. The rest of the proof of the claim is similar to that of Claim 1. \end{proof} \end{proposition}
\noindent\textbf{The proof of Theorem \ref{theorem:minmax_indt}:} Fix an $\varepsilon > 0$. By Proposition \ref{prop:v(Q)=1}, $v_{i}(Q_{i,\varepsilon}(f_{i})) = 1$.
Let $\Gamma^\varepsilon = (I, A, H, (1_{Q_{i, \varepsilon}(f_i)})_{i \in I})$ be an auxiliary Blackwell game where player $i$'s winning set is $Q_{i, \varepsilon}(f_i)$, the set of player $i$'s $\varepsilon$-individually rational plays in $\Gamma$. Each player's minmax value in the game $\Gamma^\varepsilon$ equals $1$. Therefore, the auxiliary game $\Gamma^\varepsilon$ satisfies the hypothesis of Theorem \ref{theorem:sumofprob}. We conclude that the intersection $\bigcap_{i \in I}Q_{i, \varepsilon}(f_i)$ is not empty, and hence there is a play $p^* \in [H]$ such that $f_i(p^*) \geq v_i(f_i) - \varepsilon$, for every $i \in I$.
The following strategy profile is a $2\varepsilon$-equilibrium of $\Gamma$ (see also Aumann and Shapley \cite{Aumann94}): \begin{itemize} \item The players follow the play $p^*$, until the first stage in which one of the players deviates from this play. Denote by $i$ the minimal index of a player who deviates from $p^*$ at that stage. \item From the next stage and on, the players in $-i$ switch to a strategy profile that reduces player~$i$'s payoff to $v_i(f_i) + \varepsilon$. A strategy profile with this property does exist by the assumption of history-independence of the minmax values. \end{itemize} This completes the proof of the theorem. $\Box$
We illustrate the construction of the $\varepsilon$-equilibrium with the following example.
\begin{exl}\label{exl.eq}\rm We consider a 2-player Blackwell game with history-independent action sets where the same stage game is being played at each stage, and a player's objective is to maximize the long-term frequency of the stages she wins. Specifically, $\Gamma = (\{1,2\}, A_{1}, A_{2}, f_{1}, f_{2})$, where $A_1$ and $A_2$ are finite, and \[f_{i}(a_{0},a_{1},\ldots) = \limsup_{t \to \infty}\tfrac{1}{t} \cdot \#\{k < t : a_{k} \in U_{i}\},\] for each $(a_{0},a_{1},\ldots) \in A^{\mathbb{N}}$. Here $U_i$ is player $i$'s stage winning set. We assume that $U_1$ and $U_2$ are disjoint, and let $d_i$ denote player $i$'s minmax value in the stage game.
Note that $f_i$ is a tail function (see Section \ref{secn.tail}), and it is a limsup function (in the sense of the definition in Section \ref{secn.approx}). We have $d_{i} = v_{i}(f_{i})$, i.e., player~$i$'s minmax value in the stage game is also player~$i$'s minmax value in the Blackwell game.
Take any Nash equilibrium $x \in \prod_{i \in I}\Delta(A_{i})$ of the stage game. Playing $x$ at each stage is certainly a $0$-equilibrium of the Blackwell game $\Gamma$, but typically it is \textbf{not} of the type that appears in the proof of Theorem \ref{theorem:minmax_indt}. An important feature of the $\varepsilon$-equilibrium constructed in the proof is that the equilibrium play is pure; only off the equilibrium path might a player be requested to play a mixed action. In this particular example we can even choose the equilibrium play to be periodic. This can be done as follows.
First note that $d_{1} + d_{2} \leq 1$. This follows since $\mathbb{P}_{x}(U_{1}) + \mathbb{P}_{x}(U_{2}) \leq 1$ (because $U_1$ and $U_2$ are disjoint by supposition) and since $d_i \leq \mathbb{P}_{x}(U_{i})$ for $i=1,2$ (because the Nash equilibrium payoff is at least the minmax value). Let $\varepsilon > 0$. Choose natural numbers $m$, $m_{1}$, and $m_{2}$ such that $d_{i} - \varepsilon \leq \tfrac{m_{i}}{m} \leq d_{i}$, for $i=1,2$. Note that $m_{1} + m_{2} \leq m$. Pick a point $a_{1} \in U_{1}$ and a point $a_{2} \in U_{2}$, and let $p^*$ be the periodic play with period $m_1+m_2$ obtained by repeating $a_{1}$ for the first $m_{1}$ stages, and repeating $a_{2}$ for the next $m_{2}$ stages. We have \[d_{i} - \varepsilon \leq \tfrac{m_{i}}{m} \leq \tfrac{m_{i}}{m_{1} + m_{2}} = f_{i}(p^*),\] for $i \in \{1,2\}$. One can support $p^*$ as an $\varepsilon$-equilibrium play by a threat of punishment: in case of a deviation by player $1$, player $2$ will switch to playing the minmax action profile from the stage game for the rest of the game, thus reducing player~$1$'s payoff to $d_{1}$. A symmetric punishment is imposed on player 2 in case of a deviation.
Under the periodic play $p^*$, the sum of the players' payoffs is $1$. There are alternative plays where the payoff to \textit{both} players is 1, which can support 0-equilibria. For example, consider the non-periodic play $p$ that is played in blocks of increasing size: for each $k \in \mathbb{N}$, the length of block $k$ is $2^{2^k}$. In even (resp.~odd) blocks the players play the action profile $a_1$ (resp.~$a_2$). The reader can verify that since the ratio between the length of block $k$ and the total length of the first $k$ blocks goes to $\infty$, the payoff to both players at $p$ is 1. \end{exl}
\section{Regularity and the folk theorem}\label{secn.folk}
A payoff vector $w\in \mathbb{R}^{|I|}$, assigning a payoff to each player, is called \emph{an equilibrium payoff} of the Blackwell game $\Gamma$ if for every $\varepsilon>0$ there exists an $\varepsilon$-equilibrium $\sigma^{\varepsilon}$ of $\Gamma$ such that $\|w-\mathbb{E}_{\sigma^{\varepsilon}}(f)\|_{\infty}\leq \varepsilon$. In other words, an equilibrium payoff is an accumulation point of $\varepsilon$-equilibrium payoff vectors as $\varepsilon$ goes to $0$. We let $\mathcal{E}$ denote the set of equilibrium payoffs. Our goal here is to provide a description of $\mathcal{E}$.
In repeated games with stage payoffs, where the total payoff is some average (discounted average with low discounting, liminf of average, limsup of average, etc.) of the stage game payoffs, the folk theorem states that the set of equilibrium payoffs coincides with the set of all individually rational vectors that are in the convex hull of the feasible payoff vectors, see, e.g., Aumann and Shapley \cite{Aumann94}, Sorin \cite{Sorin92}, and Mailath and Samuelson \cite{Mailath06}. As we will see, when the payoff functions are general, the set of equilibrium payoffs is the convex hull of the set of feasible payoff vectors that are individually rational. The reason for the difference is that in repeated games with stage payoffs, getting a low payoff in one stage can be compensated by getting a high payoff in the following stage; when the payoff is obtained only at the end of the game, there is no opportunity to compensate low payoffs.
Define \begin{align*} Q^\varepsilon(f)&:=\bigcap_{i\in I} Q_{i, \varepsilon}(f_i),\\ W^\varepsilon(f)&:=\{f(p):p\in Q^\varepsilon(f)\}. \end{align*}
The set $Q^\varepsilon(f)$ is the set of $\varepsilon$-individually rational plays, and $W^\varepsilon(f)$ is the set of feasible and $\varepsilon$-individually rational payoffs vectors. Whenever convenient, we write simply $Q^\varepsilon$ and $W^\varepsilon$. For every set $X$ in a Euclidean space we denote its closure by $\textnormal{cl}(X)$ and its convex hull by $\textnormal{conv}(X)$.
\begin{theorem}\label{thrm:folk} Consider a Blackwell game $\Gamma = (I, A, H, (f_{i})_{i \in I})$. Suppose that for each player $i \in I$, player $i$'s action set $A_i(h)$ at each history $h \in H$ is finite, her payoff function $f_i$ is bounded and upper semi-analytic, and her minmax value is history-independent. Then \[\mathcal{E}=\bigcap_{\varepsilon>0}\textnormal{conv}(\textnormal{cl}(W^\varepsilon(f))).\] \end{theorem}
To prove Theorem~\ref{thrm:folk} we need the following result, which states that every $\varepsilon$-equilibrium assigns high probability to plays in $Q^{\varepsilon^{1/3}}(f)$.
\begin{lemma}\label{lemma:largeprob} Consider an $n$-player Blackwell game $\Gamma = (I, A, H, (f_{i})_{i \in I})$. Suppose that for each player $i \in I$, player $i$'s action set $A_i(h)$ at each history $h \in H$ is finite, her payoff function $f_i$ is bounded and upper semi-analytic, and her minmax value is history-independent. Let $\varepsilon > 0$ be sufficiently small, and let $\sigma^\varepsilon$ be an $\varepsilon$-equilibrium. Then \[\mathbb{P}_{\sigma^{\varepsilon}}(A^{\mathbb{N}} \setminus Q^{\varepsilon^{1/3}}(f)) < n\varepsilon^{1/3}.\] \end{lemma} \begin{proof}
Set $\eta := \varepsilon^{1/3}$. It suffices to show that for every $i \in I$, \begin{equation}\label{equ:231} \mathbb{P}_{\sigma^{\varepsilon}}(A^{\mathbb{N}} \setminus Q_{i, \eta} (f_i)) < \eta. \end{equation}
Fix a player $i \in I$ and suppose to the contrary that Eq.~\eqref{equ:231} does not hold. We derive a contradiction by showing that player~$i$ has a deviation from $\sigma^{\varepsilon}$ that yields her a gain higher than $\varepsilon$.
For $t \in \mathbb{N}$, denote by $X_t := \mathbb{P}_{\sigma^{\varepsilon}} (Q_{i,\eta}(f_i) |\mathcal{F}_t)$ the conditional probability of the event $Q_{i,\eta}(f_i)$ under the strategy profile $\sigma^\varepsilon$ given the sigma-algebra $\mathcal{F}_t$. By Doob's martingale convergence theorem, $(X_{t})_{t \in \mathbb{N}}$ converges to the indicator function of the event $Q_{i,\eta}(f_i)$, almost surely under $\mathbb{P}_{\sigma^{\varepsilon}}$. Since by supposition $\mathbb{P}_{\sigma^{\varepsilon}}(A^{\mathbb{N}} \setminus Q_{i, \eta} (f_i)) \geq \eta$, we know that $\mathbb{P}_{\sigma^\varepsilon}(X_{t} \to 0) \geq \eta$.
Let $K$ be a bound on the game's payoffs, and let $\rho := \varepsilon^2/K$. Let us call a history $h \in H_{t}$ a \textit{deviation history} if under $h$, stage $t$ is the first one such that $X_t < \rho$. On the event $\{X_{t} \to 0\}$, a deviation history arises at some point during play. Consequently, under $\mathbb{P}_{\sigma^{\varepsilon}}$, a deviation history arises with probability of at least $\eta$.
Consider the following strategy $\sigma_{i}'$ of player $i$: play according to $\sigma_i^\varepsilon$ until a deviation history, say $h$, occurs (and forever if a deviation history never occurs). At $h$, switch to playing a strategy which guarantees player $i$ a payoff of at least $v_i(f_i) - \varepsilon$ against $\sigma_{-i}^{\varepsilon}$ in $\Gamma_h$. Such a strategy exists by our supposition of history-independence of the minmax values. To conclude the argument, we compute the gain from the deviation to $\sigma_{i}'$.
For every deviation history $h \in H_{t}$, \begin{eqnarray}\label{equ:232} \mathbb{E}_{\sigma_{-i}^{\varepsilon},\sigma_{i}'}(f_{i}\mid h) &\geq& v_{i}(f_i) - \varepsilon,\\ \mathbb{E}_{\sigma_{-i}^{\varepsilon},\sigma_{i}^{\varepsilon}}(f_{i}\mid h) &\leq& \rho K + (1- \rho)(v_i(f_i) - \eta). \label{equ:233} \end{eqnarray} Eq.~\eqref{equ:232} holds by the choice of $\sigma_{i}'$. To derive Eq.~\eqref{equ:233}, suppose that, following the history $h$, player $i$ conforms to $\sigma_{i}^{\varepsilon}$. Then, conditional on $h$, with probability at most $\rho$ the play belongs to $Q_{i,\eta}(f_i)$, and player $i$'s payoff is at most $K$, and with probability at least $1-\rho$ the play does not belong to $Q_{i,\eta}(f_i)$, and player~$i$'s payoff is at most $v_i(f_i)-\eta$.
We can now compute the gain from the deviation to $\sigma_{i}'$: If a deviation history never arises, $\sigma_{i}'$ recommends the same actions as $\sigma_{i}^{\varepsilon}$, and therefore the gain is 0. A deviation history occurs with a probability of at least $\eta$, and thus \begin{align*} \mathbb{E}_{\sigma_{-i}^{\varepsilon},\sigma_{i}'}(f_{i}) - \mathbb{E}_{\sigma_{-i}^{\varepsilon},\sigma_{i}^{\varepsilon}}(f_{i}) &\geq \eta \bigl(v_i(f_i)-\varepsilon - \rho K - (1 - \rho)(v_i(f_i)-\eta)\bigr)\\ &= \eta(-\varepsilon - \varepsilon^2 + \rho v_i(f_i) + \eta - \rho \eta)\\ &= \varepsilon^{\frac{2}{3}}(1 - \varepsilon^{\frac{2}{3}} - \varepsilon^{\frac{5}{3}} + \tfrac{v_i(f_i)}{K} \varepsilon^{\frac{5}{3}} - \tfrac{1}{K} \varepsilon^{2}), \end{align*} which behaves like $\varepsilon^{\frac{2}{3}}$ when $\varepsilon$ is small, and therefore exceeds $\varepsilon$. \end{proof}
\noindent\textbf{Proof of Theorem~\ref{thrm:folk}}: Let $|I| = n$. Let $w \in \mathbb{R}^{n}$ be an equilibrium payoff. Assume by contradiction that there is an $\alpha > 0$ such that $w \not\in \textnormal{conv}(\textnormal{cl}(W^{\alpha}))$. For a vector $z \in \mathbb{R}^{n}$ write ${\rm dist}(z)$ to denote the distance from $z$ to the set $\textnormal{conv}(\textnormal{cl}(W^{\alpha}))$ under the $\|\cdot\|_{\infty}$ metric on $\mathbb{R}^{n}$. By assumption, $\delta := \tfrac{1}{4}{\rm dist}(w) > 0$. Denote $\varepsilon: = \min(\delta, \alpha^3, (\frac{\delta}{Kn})^3) > 0$, where $K$ is a bound on the game payoff.
Since $w$ is an equilibrium payoff, there exists an $\varepsilon$-equilibrium, say $\sigma^\varepsilon$, such that $\|w-\mathbb{E}_{\sigma^\varepsilon}(f)\|_{\infty} \leq \varepsilon \leq \delta$. We have the following chain of inequalities:
\[{\rm dist}(\mathbb{E}_{\sigma^\varepsilon}(f)) \leq \mathbb{E}_{\sigma^\varepsilon}({\rm dist}(f)) \leq 2K \cdot \mathbb{P}_{\sigma^\varepsilon}(A^{\mathbb{N}} \setminus Q^{\alpha}(f)) \leq 2K \cdot n \cdot \varepsilon^{\frac{1}{3}} \leq 2\delta,\] where the first inequality follows from the fact that ${\rm dist}:\mathbb{R}^{n} \to \mathbb{R}$ is a convex function, the second from the fact that $f(p) \in W^{\alpha}$ whenever $p \in Q^{\alpha}(f)$, the third follows since $Q^{\varepsilon^{1/3}}(f) \subseteq Q^{\alpha}(f)$ and by Lemma~\ref{lemma:largeprob}, and the last holds by the choice of $\varepsilon$. But then
\[{\rm dist}(w) \leq \|w-\mathbb{E}_{\sigma^\varepsilon}(f)\|_{\infty} + {\rm dist}(\mathbb{E}_{\sigma^\varepsilon}(f)) \leq 3\delta,\] contradicting the choice of $\delta$.
We turn to prove the other direction. Let $w\in \bigcap_{\varepsilon>0}\textnormal{conv}(\textnormal{cl}(W^\varepsilon))$. We need to show that $w$ is an equilibrium payoff. Fix an $\varepsilon>0$.
Carath\'eodory's Theorem (Carath\'eodory, \cite{Caratheodory07}) implies that $\textnormal{cl} (\textnormal{conv}(W^{\varepsilon})) = \textnormal{conv} (\textnormal{cl}(W^{\varepsilon}))$, hence $w$ is an element of $\textnormal{cl} (\textnormal{conv}(W^{\varepsilon}))$, and thus we can choose a vector $w_{\varepsilon}\in \textnormal{conv}(W^{\varepsilon})$ such that $\|w-w_{\varepsilon}\|_\infty \leq \varepsilon$. We argue that $w_{\varepsilon}$ is a vector of expected payoffs in some $3\varepsilon$-equilibrium.
The payoff $w_{\varepsilon}$ can be presented as a convex combination of $n + 1$ vector payoffs, say \linebreak $f(p^1), \ldots, f(p^{n+1})$, with each $p^{k}$ an element of $Q^{\varepsilon}(f)$. Using jointly controlled lotteries as done, e.g., in Forges \cite{Forges}, Lehrer \cite{Lehrer1996}, or Lehrer and Sorin \cite{LehrerSorin1997}, the players can generate the required randomization over the plays $p^1, \dots, p^{n+1}$ during the first stages of the game. Once a specific play $p^k$ has been chosen, the construction of the $3\varepsilon$-equilibrium is standard: the players play $p^k$, and if player $i$ deviates, her opponents revert to playing a strategy profile that gives player $i$ at most $v_i(f_i)+\varepsilon$. Such a strategy exists by the assumption of history-independence of the minmax values. $\Box$
\begin{exl}\label{exl.folk}\rm Consider the 2-player Blackwell game $\Gamma = (\{1,2\}, A_{1}, A_{2}, f_{1}, f_{2})$, where the action sets are $A_1=\left\{{\rm T},{\rm M},{\rm B}\right\}$ and $A_2=\left\{{\rm L},{\rm C},{\rm R}\right\}$, and for a play $p = (a_0,a_1,\ldots)$ the payoffs are \[(f_1(p),f_2(p)) = \begin{cases} (1,1)&\text{if }\displaystyle\liminf_{t\to \infty} \tfrac{1}{t} \cdot \#\{k < t: a_k = ({\rm T},{\rm L})\text{ or } a_k = ({\rm M},{\rm C})\} >\frac{1}{2},\\ (4,-1)&\text{if }\displaystyle\liminf_{t\to \infty} \tfrac{1}{t} \cdot \#\{k < t: a_k = ({\rm B},{\rm L})\} = 1,\\ (-1,4)&\text{if }\displaystyle\liminf_{t\to \infty} \tfrac{1}{t} \cdot \#\{k < t: a_k = ({\rm T},{\rm R})\} = 1,\\ (0,0)&\text{otherwise}. \end{cases}\] Thus the payoff is $(1,1)$ if the (liminf) frequency of the stages where either (T,L) or (M,C) is played is larger than $\tfrac{1}{2}$. It is $(4,-1)$ if (B,L) is played with frequency $1$, and $(-1,4)$ if (T,R) is played with frequency $1$. All other cases result in a payoff of $(0,0)$.
Observe that when player 1 plays B repeatedly, the maximal payoff that player 2 can achieve is 0, and this is player 2's minmax value. Similarly, player 1's minmax value is 0. For each $\varepsilon \in (0,1)$, the set $W^{\varepsilon}(f)$ consists of the two points $(0,0)$ and $(1,1)$. By Theorem~\ref{thrm:folk}, the set of equilibrium payoffs $\mathcal{E}$ is the line segment connecting $(0,0)$ and $(1,1)$, see Figure \ref{figure}.
Naturally, all equilibrium payoffs $w$ are (a) convex combinations of the feasible payoffs vectors $(0,0)$, $(1,1)$, $(4,-1)$, and $(-1,4)$, and (b) individually rational, i.e., they satisfy $w_{1} \geq 0$ and $w_{2} \geq 0$. The set of all payoff vectors satisfying (a) and (b) is represented in Figure~\ref{figure} by the shaded triangle. The point we wish to make here is that the properties (a) and (b) are not sufficient for a payoff vector to be an equilibrium payoff.
Take for concreteness the point $(3,0)$. This payoff vector is in the convex hull of the feasible payoff vectors and is individually rational. Yet, for $\varepsilon < \tfrac{2}{3}$, there is no $\varepsilon$-equilibrium with the payoff (close to) the vector $(3,0)$. We give a heuristic argument.
Suppose to the contrary that $\sigma$ is such an $\varepsilon$-equilibrium. The strategy profile $\sigma$ necessarily assigns a probability of at least $\tfrac{2}{3}$ to the set of plays that yield the payoff vector $(4,-1)$. But this implies that Player 2 has a deviation that would improve her payoff over the candidate $\varepsilon$-equilibrium by at least $\tfrac{2}{3}$. Player 2 needs to deviate to playing R forever (for example), at any history of the game where her conditional expected payoff under $\sigma$ is close enough to $-1$. Since playing R would yield at least $0$, by such a deviation, she would improve her conditional expected payoff by at least $1$. Levy's zero-one law guarantees that the histories where player 2 is called to deviate in this way arise with a probability close to $\tfrac{2}{3}$, so that the expected gain from the deviation is also close to $\tfrac{2}{3}$.
\begin{figure}
\caption{The set of equilibrium payoffs (the segment connecting $(0,0)$ and $(1,1)$) vs. the set of convex combinations of feasible payoffs that are individually rational (the dark triangle).}
\label{figure}
\end{figure} \end{exl}
The above discussion of Example~\ref{exl.folk} leads to a slightly more general conclusion: if the set of feasible payoffs is finite, then the set of equilibrium payoffs is the convex hull of the feasible payoffs that are individually rational (equal to or larger than the minmax value). For each player the minmax value is within the finite set of feasible payoffs, and placing any probability on a payoff that is not individually rational enables profitable deviations.
\section{Blackwell games with tail-measurable payoffs}\label{secn.tail}
An important class of games with history-independent minmax values are those where the payoff functions are tail-measurable. In this section we concentrate on games with tail-measurable payoffs.
Consider a Blackwell game with history-independent action sets, $\Gamma = (I,(A_{i},f_{i})_{i \in I})$. A set $Q \subseteq A^{\mathbb{N}}$ is said to be a \textit{tail set} if whenever a play $p = (a_{0},a_{1},\ldots)$ is an element of $Q$ and $q = (b_{0},b_{1},\ldots)$ is such that $a_t = b_t$ for all $t \in \mathbb{N}$ sufficiently large, then $q$ is also an element of $Q$. Let $\mathscr{T}$ denote the sigma-algebra of the tail subsets of $A^{\mathbb{N}}$. We note that the tail sigma-algebra $\mathscr{T}$ and the Borel sigma-algebra $\mathscr{B}$ are not nested. For constructions of tail sets that are not Borel, see Rosenthal \cite{Rosenthal75} and Blackwell and Diaconis \cite{Diaconis96}.
Examples of tail sets are: (1) the winning sets of Example \ref{exl.io}, (2) the set of plays in which a certain action profile $a\in A$ is played with limsup-frequency at most $\tfrac{1}{2}$, and (3) the set of plays in which a certain action profile $a^*\in A$ is played at most finitely many times at even stages (with no restriction at odd stages).
An important class of tail sets are the shift invariant sets. A set $Q \subseteq A^{\mathbb{N}}$ is a \textit{shift invariant set} if for each play $p = (a_0,a_1,\ldots)$, $p \in Q$ if and only if $(a_1,a_2,\ldots) \in Q$. Equivalently, shift invariant sets are the sets that are invariant under the backward shift operator on $A^{\mathbb{N}}$. Shift invariant sets are tail sets. The converse is not true: while the sets in examples (1) and (2) above are shift invariant, that of example (3) is not.
A function $f:A^{\mathbb{N}} \to \mathbb{R}$ is called tail-measurable if, for each $r\in\mathbb{R}$, the set $\{p\in A^{\mathbb{N}} : r \leq f(p)\}$ is an element of $\mathscr{T}$. Intuitively, a payoff function is tail measurable if an action taken in any particular stage of the game has no impact on the payoff. The payoff function in Example \ref{exl.eq} is tail-measurable.
\begin{remark}\rm The assumption that the set of actions of each player is history-independent is required so that the tail-measurability of the payoff functions has a bite. If the sets of actions were history-dependent, then by having a different set of actions at each history, any function could be turned into tail-measurable. \end{remark}
We now state one key implication of tail-measurability, namely the history-independence of minmax values.
\begin{proposition}\label{prop:minmaxtail} Let $\Gamma = (I,(A_{i},f_{i})_{i \in I})$ be a Blackwell game with history-independent action sets, and let $i \in I$ be a player. If player $i$'s payoff function is bounded, upper semi-analytic, and tail-measurable, then her minmax value is history-independent. \end{proposition} \begin{proof} It suffices to show that $v_{i}(f_{i,a}) = v_{i}(f_{i})$ for each $a \in A$, where, with a slight abuse of notation, we write $a$ for a history in stage $1$. Since $f_i$ is tail-measurable, all the functions $f_{i,a}$ for $a \in A$ are identical to each other. Hence, fixing any particular action profile $\bar{a} \in A$, letting $X_i := \Delta(A_i)$ and $X_{-i} := \prod_{j \in -i}X_{j}$, we have \begin{align*} v_{i}(f_{i}) &= \inf_{x_{-i} \in X_{-i}}\sup_{x_{i} \in X_{i}} \sum_{a \in A} \prod_{j \in I}x_{j}(a_{j}) \cdot \Big(\inf_{\sigma_{-i} \in \Sigma_{-i}}\sup_{\sigma_{i} \in \Sigma_{i}} \mathbb{E}_{\sigma_{-i},\sigma_{i}}(f_{i,a})\Big)\\ &= \inf_{x_{-i} \in X_{-i}}\sup_{x_{i} \in X_{i}} \sum_{a \in A} \prod_{j \in I}x_{j}(a_{j}) \cdot v_{i}(f_{i,\bar{a}}) = v_{i}(f_{i,\bar{a}}). \end{align*} \end{proof}
If the payoff functions of all the players in a game $\Gamma$ are tail-measurable, then, for each fixed stage $t \in \mathbb{N}$, all the subgames of $\Gamma$ starting at stage $t$ are identical. On the other hand, the subgames starting, say, at stage $1$, are not identical to the game itself (see example (3) of a tail-measurable payoff function above). Nonetheless, as Proposition \ref{prop:minmaxtail} implies, the players' minmax values \textit{are} the same in every subgame.
The condition of history-independence of the minmax values is more inclusive than that of tail-measurability of the payoffs; the examples that follow illustrate the point.
\begin{exl}\rm Consider a one-player Blackwell game where the player's payoff function is $1_{S}$, the indicator of a set $S \subseteq [H]$. If $S$ is dense in $[H]$, then the minmax value of the player is $1$ in each subgame. A dense set may or may not be a tail set. \end{exl}
\begin{exl}\rm We consider a Blackwell game similar to that of Example \ref{exl.io}, but where the stage game may depend on the history, as long as each player's stage minmax value is the same.
Specifically, let $\Gamma = (I, A, H, (1_{W_i})_{i \in I})$. Suppose that at each history $h \in H$, each player $i \in I$ has a stage winning set $U_{i}(h) \subseteq A(h)$, and her winning set in the Blackwell game $\Gamma$ is \[W_i = \{(a_0,a_1,\ldots) \in [H]:a_t \in U_i(a_0,\ldots,a_{t-1})\text{ for infinitely many }t \in \mathbb{N}\}.\]
Assume that the stage minmax value of player $i$ is the same at each history: there is a number $d_{i}$ such that \[d_{i} = \inf_{x_{-i} \in \Delta(A_{-i}(h))} \sup_{x_i \in \Delta(A_i(h))} \mathbb{P}_{x_{-i},x_i}(U_i(h))\] for every $h \in H$. Then player $i$'s minmax value in each subgame of $\Gamma$ is $0$ if $d_{i} = 0$, and is $1$ if $d_{i} > 0$. Thus player $i$'s minmax value is history-independent.
Note that the game $\Gamma$ need not have history-independent action sets. Even when the action sets \textit{are} history-independent, the winning sets need not necessarily be tail-measurable.
To illustrate the last claim, suppose that there are two players playing matching pennies at each stage. At stage 0, player 1 wants to match the choice of player 2 (and player 2 wants to mismatch the choice of player~1). Subsequently the roles of the two players swap as follows: the player to win stage $t$ wants to match her opponent's action at stage $t+1$, while the loser at stage $t$ wants to mismatch the action of her opponent at stage $t+1$. Formally, we let $\Gamma = (\{1,2\}, A_1, A_2, 1_{W_{1}}, 1_{W_{2}})$ be the 2-player Blackwell game with history-independent action sets, where $A_1 = A_2 = \{{\rm H},{\rm T}\}$, the winning sets $W_1$ and $W_2$ are as above, and the stage winning sets are defined recursively as follows: \[U_1(\oslash) = \{({\rm H},{\rm H}), ({\rm T},{\rm T})\}\quad\text{and}\quad U_2(\oslash) = \{({\rm H},{\rm T}), ({\rm T},{\rm H})\},\] and \[U_1(h,a) = \begin{cases} U_1(\oslash) &\text{if }a \in U_1(h),\\ U_2(\oslash) &\text{if }a \in U_2(h), \end{cases}\quad\text{and}\quad U_2(h,a) = \begin{cases} U_2(\oslash) &\text{if }a \in U_1(h),\\ U_1(\oslash) &\text{if }a \in U_2(h), \end{cases}\] for each $h \in H$ and $a \in A$. The sets $W_1$ and $W_2$ are not tail: out of the two plays \begin{align*} &(({\rm H},{\rm H}), ({\rm H},{\rm H}),({\rm H},{\rm H}),\ldots) \hbox{ and}\\ &(({\rm H},{\rm T}), ({\rm H},{\rm H}), ({\rm H},{\rm H}),\ldots), \end{align*} the first is an element of $W_1 \setminus W_2$, while the second is an element of $W_2 \setminus W_1$. \end{exl}
\begin{exl}\rm Consider a Blackwell game $\Gamma = (I, A, H, (f_{i})_{i \in I})$, where player $i$'s objective is (as in Example \ref{exl.eq}) to maximize the long-term frequency of the stages she wins: \[f_{i}(a_{0},a_{1},\ldots) = \limsup_{t \to \infty}\tfrac{1}{t}\cdot\#\{k < t : a_{k} \in U_{i}(a_0,\ldots,a_{k-1})\}.\] As in the previous example, $U_{i}(h) \subseteq A(h)$ is player $i$'s stage winning set at history $h \in H$. Assume, as above, that player $i$'s minmax value in each stage game is $d_i$. Then also her minmax value in each subgame of $\Gamma$ is $d_{i}$. \end{exl}
\begin{exl}\rm Start with a Blackwell game with tail-measurable payoff functions. Suppose that the minmax values of all the players in the game are $0$. Take any history $h$, and redefine the payoff functions so that any play having $h$ as a prefix has a payoff of $0$. In the resulting game, the minmax value of each player in each subgame remains $0$, but the payoff functions are no longer tail-measurable (unless the original payoff functions are constant). A similar modification can be performed with any subset of histories, not just one. \end{exl}
From the results above we now deduce a number of implications for Blackwell games with tail-measurable payoffs.
\begin{corollary}\label{cor.0-1law} Consider a Blackwell game $\Gamma = (I, (A_{i}, 1_{W_{i}})_{i \in I})$ with history-independent action sets. If player $i$'s winning set $W_i$ is an analytic tail set, then $v_i(W_i)$ is either $0$ or $1$. \end{corollary} \begin{proof} Suppose that $v_i(W_i) > 0$. Let $\varepsilon := v_i(W_i)/2$. In view of Proposition \ref{prop:minmaxtail}, player $i$'s minmax value in $\Gamma$ is history-independent. Applying Proposition \ref{prop:v(Q)=1}, we conclude that $v_i(Q_{i, \varepsilon}(1_{W_{i}})) \linebreak = 1$. But $Q_{i, \varepsilon}(1_{W_{i}}) = W_{i}$ by the choice of $\varepsilon$. \end{proof}
The following conclusion follows directly from Proposition \ref{prop:minmaxtail} and Theorem \ref{theorem:minmax_indt}.
\begin{corollary}\label{cor:eq} Suppose that the game $\Gamma = (I, (A_{i}, f_{i})_{i \in I})$ has history-independent action sets. Suppose, furthermore, that for each player $i \in I$, player $i$'s action set $A_i$ is finite and her payoff function $f_i$ is bounded, upper semi-analytic, and tail-measurable. Then for every $\varepsilon>0$ the game admits an $\varepsilon$-equilibrium. \end{corollary}
\section{Concluding remarks}\label{secn.disc} \noindent\textbf{Approximations by compact sets.} Any Borel probability measure on $[H]$ (recall that $[H]$ is Polish under the maintained assumptions), is not merely regular, but is tight: the probability of a Borel set $B \subseteq [H]$ can be approximated from below by the probability of a compact subset $K \subseteq B$ (Kechris \cite[Theorem 17.11]{Kechris95}). The minmax value is not tight in this sense. To see this, consider any 2-player Blackwell game where player 1's winning set $W_{1}$ is the entire set of plays $[H]$, so that $v_1(W_1) = 1$, and where $A_{2}(\o)$, player 2's action set at the beginning of the game, is $\mathbb{N}$. We argue that $v_1(K) = 0$ for every compact set $K \subseteq W_1$. Indeed, the projection of a compact set $K \subseteq W_1$ on $A_{2}(\o)$ is a compact, and hence a finite set. Therefore, player 2 can guarantee that the realized play is outside $K$ by choosing a sufficiently large action at stage $0$. Thus $v_1(K) = 0$, as claimed.
\noindent\textbf{Approximations by semicontinuous functions.} The conclusion of Theorem \ref{thrm:tailapprox} would no longer be true without the assumption of history-independence of the minmax values. Here we give an example of a game with a limsup payoff function where the minmax value cannot be approximated from below by an upper semicontinuous function.
Consider a zero-sum game $\Gamma$ where $A_{1} = A_{2} = \{0,1\}$, and player 1's payoff function is \[f(a_0,a_1,\ldots) = \begin{cases} \displaystyle\limsup_{t \to \infty}\tfrac{1}{t}\#\{k < t: a_{2,k} = 0\},&\text{if }\tau = \infty,\\ 2, &\text{if } \tau < \infty\text{ and }a_{2,\tau} = 1,\\ 0, &\text{if } \tau < \infty\text{ and }a_{2,\tau} = 0, \end{cases}\] where $\tau = \tau(a_0,a_1,\ldots) \in \mathbb{N} \cup \{\infty\}$ is the first stage where player 1 chooses action $1$. The game was analyzed in Sorin \cite{Sorin86}, who showed that $v_1(f) = 2/3$.
Let $g \leq f$ be a bounded upper semicontinuous function. We argue that $v_1(g) \leq1/2$. For $t \in \mathbb{N}$, let $S_t$ denote the set of plays $p$ such that $t \leq \tau(p)$. Note that $S_{t}$ is closed. We argue that \[\inf_{t \in \mathbb{N}} \sup\{g(p):p \in S_{t}\} \leq 1.\] Suppose this is not the case. Take an $\varepsilon > 0$ such that $1 + \varepsilon < \sup\{g(p):p \in S_{t}\}$ for each $t \in \mathbb{N}$. Let $U_{0} := \{1 + \varepsilon \leq g\}$, and for each $t \geq 1$ let $U_{t} := U_{0} \cap S_{t}$. The set $U_{t}$ is not empty for each $t \in \mathbb{N}$. Moreover, it is a closed, and hence a compact subset of $A^{\mathbb{N}}$. Thus $U_{0} \supseteq U_{1} \supseteq \cdots$ is a nested sequence of non-empty compact sets. Therefore, there is a play $p \in \bigcap_{t \in \mathbb{N}} U_{t}$. It holds that $\tau(p) = \infty$, and consequently $f(p) \leq 1 < g(p)$, a contradiction.
Take an $\varepsilon > 0$. Find a $t \in \mathbb{N}$ such that $\sup\{g(p):p \in S_{t}\} \leq 1 + \varepsilon$. Suppose that player 2 plays $0$ for the first $t$ stages, and thereafter plays $0$ with probability $1/2$ at each stage. This guarantees that the payoff under the function $g$ is at most $(1 + \varepsilon)/2$.
\noindent\textbf{On the assumption of finiteness of the action sets.} The hypothesis of Theorem \ref{theorem:sumofprob} requires that the action sets at each history be finite, and its conclusion is not true without this assumption. Indeed, consider the 2-player Blackwell game $\Gamma = (\{1,2\}, A_{1}, A_{2}, 1_{W_{1}}, 1_{W_{2}})$ with history-independent action sets $A_1 = A_2 = \mathbb{N}$. Player 1's winning set $W_1$ consists of all plays $(a_{1,t}, a_{2,t})_{t\in\mathbb{N}}$ such that $a_{1,t} > a_{2,t}$ holds for all sufficiently large $t \in \mathbb{N}$, and player 2's winning set $W_2$ consists of all plays $(a_{1,t},a_{2,t})_{t\in\mathbb{N}}$ such that $a_{1,t} < a_{2,t}$ holds for all sufficiently large $t \in \mathbb{N}$. Then $W_1$ and $W_2$ are Borel-measurable and tail-measurable, and $v_1(W_1) = v_2(W_2)=1$, but $W_1\cap W_2=\emptyset$. Hence, the game has no $\varepsilon$-equilibrium for any $\varepsilon < 1/2$. Indeed, an $\varepsilon$-equilibrium $\sigma$ would need to satisfy $\mathbb{P}_{\sigma}(W_i) \geq v_i(W_i) - \varepsilon > 1/2$ for both $i \in \{1,2\}$.
As discussed above, the assumption that the sets of actions are history-independent is intertwined with the assumption that the payoffs are tail-measurable.
\noindent\textbf{Continuity of the minmax.} Unlike Borel probability measures, the minmax value is in general not continuous in the following sense: there is an increasing sequence of Borel sets $C_0 \subseteq C_1\subseteq \ldots$ such that $\lim_{n\to \infty} v_i(C_n) < v_i(\bigcup_{n \in \mathbb{N}} C_n)$. In fact, one can construct an example of this kind where $C_{n}$ is both a $G_{\delta}$ and an $F_{\sigma}$ set, as follows. Consider a 2-player Blackwell game with history-independent action sets where $A_1$ is a singleton (player 1 is a dummy) while $A_2$ contains at least two distinct elements. Let $\{p_{0},p_{1},\ldots\}$ be a converging (with respect to any compatible metric on $A^{\mathbb{N}}$) sequence of plays, no two members of which are the same. Let $C_{n} := A^{\mathbb{N}}\setminus\{p_{n},p_{n+1},\ldots\}$. Then $v_1(C_n) = 0$ for each $n \in \mathbb{N}$ while $v_1(\bigcup_{n \in \mathbb{N}}C_n) = v_1(A^{\mathbb{N}}) = 1$.
\noindent\textbf{Maxmin value.} Consider a Blackwell game $\Gamma$, and suppose that player $i$'s payoff function $f_i$ is bounded and upper semi-analytic. Player $i$'s \textit{maxmin value} is defined as \[z_i(f_i) = \sup_{\sigma_i \in \Sigma_{i}}\inf_{\sigma_{-i} \in \Sigma_{-i}}\mathbb{E}_{\sigma_{-i},\sigma_i}(f_i).\]
The minmax value is not smaller than the maxmin value: $z_i(f_i) \leq v_i(f_i)$. If $I = \{1,2\}$, player 1's payoff function $f_1$ is bounded and Borel-measurable, and for every $h \in H$ either the set $A_1(h)$ of player $1$'s actions or the set $A_2(h)$ of player 2's actions at $h$ is finite, then in fact $z_1(f_1) = v_1(f_1)$, as follows from the determinacy of zero-sum Blackwell games (Martin \cite{Martin98}). Strict inequality might arise for at least two reasons.
The first is the failure of determinacy. The results of Section \ref{secn.approx} are established under the assumption that the action sets be countable, an assumption that is insufficient to guarantee determinacy of a two-player zero-sum Blackwell game even if player 1's winning set is clopen. Wald's game provides an illustration. Suppose that each of the two players chooses a natural number; player 1 wins provided that his choice is at least as large as player 2's. Formally, consider a Blackwell game with $I = \{1,2\}$, where the action sets at $\oslash$ are $A_1(\oslash) = A_2(\oslash) = \mathbb{N}$, and player 1's winning set $W_1$ consists of plays such that player 1's stage $0$ action is at least as large as player 2's stage 0 action: $a_{1,0} \geq a_{2,0}$. Then player 1's minmax value is $v_1(W_1) = 1$ while his maxmin value is $z_1(W_1) = 0$.
The second possibility for the maxmin and the minmax values to differ arises in games with three or more players. The reason is that the definitions of both the maxmin and the minmax values impose that the opponents of player $i$ choose their actions independently after each history. The point is illustrated by Maschler, Solan, and Zamir \cite[Example 5.41]{Maschler13}, which can be seen as a 3-player Blackwell game with binary action sets, where each player's payoff function only depends on the stage 0 action profile.
Analogues of Theorems~\ref{thrm:reg}, \ref{thrm:regfunc}, and \ref{thrm:tailapprox} could be established for the maxmin values using the same approach.
\noindent\textbf{Open problems.} Existence of an $\varepsilon$-equilibrium in dynamic games with general \linebreak (Borel-measurable) payoffs has been, and still is, one of the Holy Grails of game theory. A more modest approach, also pursued in this paper, is to establish existence in some special classes of games. Blackwell games, as they are defined here, do not include moves of nature. An interesting avenue for follow-up research is to extend the methods developed in this paper to the context of stochastic games with general Borel-measurable payoff functions.
Theorems \ref{thrm:reg} and \ref{thrm:regfunc} provide two distinct approximation results, and neither seems to be a consequence of the other. This raises the question of whether there is a natural single generalization that would encompass both these results as two special cases.
\section{Appendix: The proofs of Theorems~\ref{thrm:reg}, \ref{thrm:regfunc}, and \ref{thrm:tailapprox}}\label{subsecn.proof} The proofs of Theorems~\ref{thrm:reg} and~\ref{thrm:regfunc} are adaptations of the corresponding arguments in Maitra and Sudderth \cite{Maitra98} and in Martin \cite{Martin98} and are provided here for completeness. Theorem \ref{thrm:tailapprox} follows easily from Theorem~\ref{thrm:reg} and Proposition \ref{prop:v(Q)=1}.
Consider a Blackwell game $\Gamma = (I, A, H, (f_{i})_{i \in I})$, fix a player $i \in I$, and suppose that player $i$'s payoff function $f_{i}$ is bounded and Borel-measurable. Also assume w.l.o.g. that $0 \leq f_{i} \leq 1$. When considering Theorem~\ref{thrm:reg}, we will substitute $f_i = 1_{W_i}$.
Given $h \in H$, let $R(h)$ denote the set of one-shot payoff functions $r: A(h) \to [0,1]$. Let $X_i(h) := \Delta(A_i(h))$ denote player $i$'s set of mixed actions at history $h$, and let $X_{-i}(h) := \prod_{j \in -i}X_{j}(h)$. For $x \in \prod_{i \in I}X_{i}(h)$ we write $r(x)$ to denote $\mathbb{E}_{x}(r)$, the expectation of $r$ with respect to $x$. Player $i$'s minmax value of the function $r \in R(h)$ is \[d_i(r) := \inf_{x_{-i} \in X_{-i}(h)}\sup_{x_i \in X_i(h)} r(x_{-i},x_i).\]
We next introduce the main tool of the proof, an auxiliary two-player game of perfect information denoted by $G_i(f_i,c)$. This is a variation of the games $G_v$ and $G'_v$ in Martin \cite[p.~1575]{Martin98}.
Given $c \in (0,1]$ and a Borel measurable function $f_i \colon [H] \to [0,1]$, define the game $G_i(f_i,c)$ as follows: \begin{itemize}
\item Let $h_0 := \oslash$. Player~I chooses a one-shot payoff function $r_0:A(h_0)\to[0,1]$ such that $d_i(r_0)\geq c$.
\item Player~II chooses an action profile $a_0 \in A(h_0)$ such that $r_0(a_0) > 0$.
\item Let $h_1 := (a_0)$. Player~I chooses a one-shot payoff function $r_1:A(h_1)\to[0,1]$ such that $d_i(r_1)\geq r_0(a_0)$.
\item Player~II chooses an action profile $a_1 \in A(h_1)$ such that $r_1(a_1)>0$.
\item Let $h_2 := (a_0,a_1)$. Player~I chooses a one-shot payoff function $r_2:A(h_2)\to[0,1]$ such that $d_i(r_2)\geq r_1(a_1)$. And so on. \end{itemize} This results in a run\footnote{To distinguish histories and plays of $\Gamma$ from those of $G_i(f_i,c)$, we refer to the latter as \textit{positions} and \textit{runs}. To distinguish the players of $\Gamma$ from those of $G_i(f_i,c)$, we refer to the latter as Player I and Player II, using the initial capital letters.} $(r_0,a_0,r_1,a_1,\ldots)$. Player I wins the run if \[\limsup_{t \to \infty}r_{t}(a_{t}) \leq f_{i}(a_0,a_1,\ldots) \quad\text{and}\quad 0 < f_{i}(a_0,a_1,\ldots).\]
Let $T$ be the set of all legal positions in the game $G_i(f_i,c)$. This is a tree on the set $R \cup A$ where $R := \cup_{h \in H}R(h)$. Sequences of even (odd) length in $T$ are Player I's (Player II's) positions. The tree $T$ is pruned: an active player has a legal move at each legal position of the game. Indeed, consider Player I's legal position in the game $G_i(f_i,c)$ and let $h_t$ denote, as above, the sequence of action profiles produced, to date, by Player II. Then the function $r_t$ which is identically equal to $1$ on the set $A(h_t)$ is a legal move for Player I. Consider now Player II's legal position in $G_i(f_i,c)$, let $h_{t}$ denote the sequence of action profiles produced to date by Player II, and let $r_{t}$ be Player I's latest move. Then $d_i(r_t) > 0$. Therefore, there exists an action profile $a_{t} \in A(h_{t})$ such that $r_{t}(a_{t}) > 0$, and thus $a_{t}$ is Player II's legal move at the given position.
The set $[T]$ is the set of all runs of the game $G_i(f_i,c)$, a subset of $(R \cup A)^{\mathbb{N}}$.
A run is \emph{consistent} with a pure strategy $\sigma_{\rm I}$ of Player~I if it is generated by the pair $(\sigma_{\rm I},\sigma_{\rm II})$, for some pure strategy $\sigma_{\rm II}$ of Player~II. Runs that are consistent with pure strategies of Player~II are defined analogously.
Player I's pure strategy $\sigma_{{\rm I}}$ in $G_i(f_i,c)$ is said to be \emph{winning} if Player I wins all runs of the game that are consistent with $\sigma_{{\rm I}}$.
\begin{proposition}\label{prop:PlayerI} Let $c \in (0,1]$ and let $f_i : [H] \to [0,1]$ be a Borel-measurable function. If Player {\rm I}~has a winning strategy in the game $G_i(f_i,c)$, then there exists a closed set $C \subseteq [H]$ and a limsup function $g : [H] \to [0,1]$ such that $g \leq f_i$, $\{g > 0\} \subseteq C \subseteq \{f_i > 0\}$, and $c \leq v_{i}(g)$. In particular, $c \leq v_i(C)$; and if $f_i = 1_{W_i}$, then $C \subseteq W_i$. \end{proposition}
\begin{proof} Fix Player I's winning strategy $\sigma_{{\rm I}}$ in $G_i(f_i,c)$.
\noindent\textsc{Step 1:} Defining $C \subseteq [H]$ and $g : [H] \to [0,1]$.
Let $T_{\rm I} \subseteq T$ denote the set of positions in the game $G_i(f_i,c)$ of even length (i.e., Player I's positions) that are consistent with $\sigma_{\rm I}$, i.e., those positions that can be reached under a strategy profile $(\sigma_{{\rm I}},\sigma_{{\rm II}})$ for some pure strategy $\sigma_{{\rm II}}$ of Player II. Let $\pi_{\rm I} : T_{\rm I} \to H$ be the projection that maps a position of length $2t$ in $G_i(f_i,c)$ to a history of length $t$ in $\Gamma$: Formally, $\pi_{\rm I}(\oslash) := \oslash$, $\pi_{\rm I}(r_0,a_0) := (a_0)$, etc. Let $H_{\rm I} \subseteq H$ be the image of $T_{\rm I}$ under $\pi_{\rm I}$. Since in the tree $T_{\rm I}$ Player I's moves are uniquely determined by $\sigma_{\rm I}$, the map $\pi_{\rm I}$ is in fact a bijection between $T_{\rm I}$ and $H_{\rm I}$. We write $\phi : H_{\rm I} \to T_{\rm I}$ for the inverse of $\pi_{\rm I}$. The map $\phi$ induces a continuous bijection $[H_{\rm I}] \to [T_{\rm I}]$, which we also denote by $\phi$. We say that histories in $H_{\rm I}$ are \emph{$\sigma_{\rm I}$-acceptable}, and define $C$ to be the set $[H_{\rm I}]$.
For each $t \in \mathbb{N}$, define the function $\rho_t : H_{t} \to \mathbb{R}$ as follows: $\rho_0(\oslash) := c$. Let $t \in \mathbb{N}$ and consider a history $h_{t} \in H_{t}$. If $h_{t}$ is not $\sigma_{\rm I}$-acceptable, we define $\rho_{t+1}(h_{t},a_{t}) := 0$ for each $a_{t} \in A(h_{t})$. Suppose that $h_{t}$ is $\sigma_{\rm I}$-acceptable, and let $r_{t} := \sigma_{\rm I}(\phi(h_{t}))$. For each $a_{t} \in A(h_{t})$ define $\rho_{t+1}(h_{t},a_{t}) := r_{t}(a_{t})$. Note that if $h_{t}$ is $\sigma_{\rm I}$-acceptable while $(h_{t},a_{t})$ is not, we have $\rho_{t+1}(h_{t},a_{t}) = r_{t}(a_{t}) = 0$.
Also define $g : [H] \to [0,1]$ by letting \[g(a_{0},a_{1},\ldots) := \limsup_{t \to \infty}\rho_t(a_{0},\dots,a_{t-1}).\]
\noindent\textsc{Step 2:} Verifying that $g \leq f_i$ and $\{g > 0\} \subseteq C \subseteq \{f_i > 0\}$.
Since $\sigma_{\rm I}$ is Player I's winning strategy in $G_i(f_i,c)$, all runs in $[T_{\rm I}]$ are won by Player I, and hence $[H_{\rm I}] \subseteq \{f_i > 0\}$. For a play $p = (a_{0},a_{1},\ldots)$ in $[H_{\rm I}]$, if $\phi(p) = (r_{0},a_{0},r_{1},a_{1},\ldots)$, then $g(p)$ equals $\limsup_{t \to \infty}r_{t}(a_{t})$. Since the run $\phi(p)$ is won by Player I, we conclude that $g(p) \leq f_i(p)$. Thus $g \leq f_i$ on $[H]$.
\noindent\textsc{Step 3:} Verifying that $c \leq v_{i}(g)$. Since $g \leq 1_{C}$ it will then follow that $c \leq v_i(C)$.
Fix a strategy profile $\sigma_{-i} \in \Sigma_{-i}$ for the players in $-i$ in the game $\Gamma$. Take any $\epsilon > 0$. We define a strategy $\sigma_i$ for player $i$ in the game $\Gamma$ with the property that $\mathbb{E}_{\sigma_{-i},\sigma_{i}}(g) \geq c - 2\epsilon$.
\noindent\textsc{Step 3.1:} Defining player $i$'s strategy $\sigma_{i}$.
Let $r_0 := \sigma_{\rm I}(\oslash)$, Player I's first move in $G_i(f_i,c)$ according to her strategy $\sigma_{\rm I}$. Define $\sigma_{i}(\oslash)$ to be a mixed action on $A_i(\oslash)$ such that \[r_0(\sigma_{-i}(\oslash),\sigma_{i}(\oslash)) \geq c - \epsilon.\] Let $t \geq 1$ and consider a history $h_{t} = (a_{0},\ldots,a_{t-1}) \in H_{t}$ of $\Gamma$. If $h_{t}$ is not $\sigma_{\rm I}$-acceptable, then $\sigma_{i}(h_{t})$ is arbitrary. If $h_{t}$ is $\sigma_{\rm I}$-acceptable, let $\phi(h_{t}) := (r_{0}, a_{0}, \ldots, r_{t-1}, a_{t-1})$ and $r_{t} := \sigma_{\rm I}(\phi(h_{t}))$. Define $\sigma_{i}(h_{t})$ to be a mixed action on $A_i(h_{t})$ such that \[r_t(\sigma_{-i}(h_{t}),\sigma_{i}(h_{t})) \geq r_{t-1}(a_{t-1})-\epsilon \cdot 2^{-t}.\]
\noindent\textsc{Step 3.2:} Verifying that $\mathbb{E}_{\sigma_{-i},\sigma_{i}}(g) \geq c - 2\epsilon$.
For each $t \in \mathbb{N}$ let us define $\rho_t^{\epsilon} := \rho_t - \epsilon \cdot 2^{-t+1}$. One can think of the functions $\rho_0^{\epsilon}, \rho_1^{\epsilon}, \dots$ as a stochastic process on $[H]$ that is measurable with respect to the filtration $\{\mathcal{F}_{t}\}_{t \in \mathbb{N}}$. We now argue that this process is a submartingale with respect to the measure $\mathbb{P}_{\sigma_{-i},\sigma_{i}}$.
Letting $r_0 := \sigma_{\rm I}(\oslash)$ we have \[\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\rho_{1}^{\epsilon}) = \mathbb{E}_{\sigma_{-i},\sigma_{i}}(r_{0}(a_{0})) - \epsilon= r_{0}(\sigma_{-i}(\oslash),\sigma_{i}(\oslash)) - \epsilon \geq c-2\epsilon = \rho_{0}^{\epsilon}(\oslash).\] Consider a $\sigma_{\rm I}$-acceptable history $h_{t} = (a_{0},\dots,a_{t-1}) \in H_t$ of length $t \geq 1$. Let $(r_{0},a_{0},\ldots,r_{t-1},a_{t-1}) := \phi(h_{t})$ and $r_{t} := \sigma_{\rm I}(\phi(h_{t}))$. We have \begin{align*}
\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\rho_{t+1}^{\epsilon} | h_{t}) &= \mathbb{E}_{\sigma_{-i},\sigma_{i}}(r_{t}(a_{t}) | h_{t}) - \epsilon \cdot 2^{-t}\\ &= r_{t}(\sigma_{-i}(h_{t}),\sigma_{i}(h_{t})) - \epsilon \cdot 2^{-t}\\ &\geq r_{t-1}(a_{t-1}) - \epsilon \cdot 2^{-t} - \epsilon \cdot 2^{-t}\\ &= \rho_t^{\epsilon}(h_{t}). \end{align*} On the other hand, if $h_{t}$ is not $\sigma_{\rm I}$-acceptable, then
\[\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\rho_{t+1}^{\epsilon} | h_{t}) = -\epsilon \cdot 2^{-t} > -\epsilon \cdot 2^{-t+1} = \rho_t^{\epsilon}(h_{t}).\] This establishes the submartingale property for $\rho_0^{\epsilon}, \rho_1^{\epsilon}, \dots$.
The submartingale property implies that $\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\rho_t^{\epsilon}) \geq \rho_0^{\epsilon}(\oslash) = c - 2\epsilon$ for each $t \in \mathbb{N}$. Using Fatou's lemma, we thus obtain \[\mathbb{E}_{\sigma_{-i},\sigma_{i}}(g) =\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\limsup_{t \to \infty}\rho_t) \geq \mathbb{E}_{\sigma_{-i},\sigma_{i}}(\limsup_{t \to \infty}\rho_t^{\epsilon}) \geq \limsup_{t \to \infty}\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\rho_t^{\epsilon}) \geq c - 2\epsilon,\] as desired. \end{proof}
\begin{proposition}\label{prop:PlayerII} Let $c \in (0,1]$ and let $f_i : [H] \to [0,1]$ be a Borel-measurable function. If Player {\rm II}~has a winning strategy in the game $G_i(f_i,c)$, then for every $\epsilon > 0$ there exists an open set $O \subseteq [H]$ and a limsup function $g : [H] \to [0,1]$ such that $f_i \leq g$, $\{f_i = 1\} \subseteq O \subseteq \{g = 1\}$, and $v_{i}(g) \leq c + \epsilon$. In particular, $v_i(O) \leq c + \epsilon$; and if $f_i = 1_{W_i}$, then $W_i \subseteq O$. \end{proposition}
\begin{proof} Fix Player II's winning strategy $\sigma_{\rm II}$ in $G_i(f_i,c)$.
\noindent\textsc{Step 1:} Defining $O \subseteq [H]$ and $g : [H] \to [0,1]$.
We recursively define (a) the notion of a \textit{$\sigma_{\rm II}$-acceptable} history in the game $\Gamma$, (b) for each $\sigma_{\rm II}$-acceptable history $h$ in $\Gamma$, Player~I's position $\psi(h)$ in the game $G_i(f_i,c)$, and (c) for each $\sigma_{\rm II}$-acceptable history $h$ of $\Gamma$, a function $u_{h} : A(h) \to [0,1]$.
The empty history $\oslash$ of $\Gamma$ is $\sigma_{\rm II}$-acceptable. We define $\psi(\oslash) := \oslash$, the empty history in $G_i(f_i,c)$. Let $t \in \mathbb{N}$ and consider a history $h_{t} \in H_{t}$ of the game $\Gamma$. If $h_{t}$ is not $\sigma_{\rm II}$-acceptable, then neither is the history $(h_{t},a_{t})$ for each $a_{t} \in A(h_{t})$. Suppose that $h_{t}$ is $\sigma_{\rm II}$-acceptable and that Player I's position $\psi(h_{t})$ in $G_i(f_i,c)$ has been defined. Take $a_{t} \in A(h_{t})$. Let $R^{*}(h_{t},a_{t})$ denote the set of Player~I's legal moves at position $\psi(h_{t})$ to which $\sigma_{\rm II}$ responds with $a_{t}$: \[R^{*}(h_{t},a_{t}) := \{r_{t} \in R(h_t): (\psi(h_{t}),r_{t}) \in T\text{ and }\sigma_{\rm II}(\psi(h_{t}),r_{t}) = a_{t}\}.\] The history $(h_{t},a_{t})$ is defined to be $\sigma_{\rm II}$\emph{-acceptable} if $R^{*}(h_{t},a_{t})$ is not empty. In this case we define \[u_{h_{t}}(a_{t}) := \inf\{r_{t}(a_{t}): r_{t} \in R^{*}(h_{t},a_{t})\}.\] Choose $r_{t} \in R^{*}(h_{t},a_{t})$ with the property that \begin{equation}\label{eqn:proprt} u_{h_{t}}(a_{t}) \leq r_{t}(a_{t}) \leq u_{h_{t}}(a_{t}) + \epsilon \cdot 3^{-t-2}, \end{equation} and define $\psi(h_{t},a_{t}) := (\psi(h_{t}),r_{t},a_{t})$.
Finally, extend the definition of $u_{h}$ to all histories $h$ of $\Gamma$ by setting $u_{h}(a) := 1$ whenever $(h,a)$ is not $\sigma_{\rm II}$-acceptable.
Let $H_{{\rm II}}$ be the set of $\sigma_{\rm II}$-acceptable histories of $\Gamma$. We define the set $O$ to be the complement of $[H_{\rm II}]$, that is $O := [H] \setminus [H_{\rm II}]$. Since $[H_{\rm II}]$ is a closed subset of $[H]$ (e.g. Kechris \cite[Proposition 2.4]{Kechris95}), $O$ is an open subset of $[H]$. Let $T_{{\rm II}} \subseteq T$ be the image of $H_{\rm II}$ under $\psi$. The map $\psi : H_{{\rm II}} \to T_{{\rm II}}$ induces a continuous function $[H_{{\rm II}}] \to [T_{{\rm II}}]$, which we also denote by $\psi$. Note that all runs in $[T_{{\rm II}}]$ are consistent with Player II's winning strategy $\sigma_{\rm II}$.
For $t \in \mathbb{N}$ define a function $\upsilon_{t} : H_{t} \to \mathbb{R}$ by letting $\upsilon_0(\oslash) := c$; and for each $t \in \mathbb{N}$ and each history $(h_{t},a_{t}) \in H_{t+1}$, by letting $\upsilon_{t+1}(h_{t},a_{t}) := u_{h_{t}}(a_{t})$. Note that, for $t \in \mathbb{N}$ and $h_{t} \in H_{t}$, we have $\upsilon_{t}(h_{t}) = 1$ whenever $h_{t}$ is not $\sigma_{\rm II}$-acceptable.
Also define $g : [H] \to [0,1]$ by letting \[g(a_0,a_1,\ldots) := \limsup_{t \to \infty}\upsilon_{t}(a_0,\ldots,a_{t-1}).\]
\noindent\textsc{Step 2:} Verifying that $f_i \leq g \leq 1$ and $\{f_i = 1\} \subseteq O \subseteq \{g = 1\}$.
The function $g$ is equal to $1$ on the set $O$; thus $O \subseteq \{g = 1\}$. Consider a play $p = (a_0,a_1,\ldots) \in [H_{\rm II}]$, and let $\psi(p) := (r_{0}, a_{0}, r_{1}, a_{1}, \ldots)$. It follows by \eqref{eqn:proprt} that \[g(p) = \limsup_{t \to \infty}r_{t}(a_{t}).\] Since the run $\psi(p)$ is won by Player II, it must hold that either $f_i(p) < g(p)$ or $0 = f_i(p)$; in either case $f_i(p) < 1$ and $f_i(p) \leq g(p)$. We conclude that $[H_{{\rm II}}] \subseteq \{f_i < 1\}$, or equivalently that $\{f_i = 1\} \subseteq O$, and that $f_i \leq g$ on $[H]$.
\noindent\textsc{Step 3:} Verifying that $v_i(g) \leq c + \epsilon$. Since $1_{O} \leq g$, it then follows that $v_i(O) \leq c + \epsilon$.
\noindent\textsc{Step 3.1:} Defining a strategy profile for player~$i$'s opponents.
First we argue that \begin{equation}\label{eqn argue} d_i(u_{\oslash}) \leq c. \end{equation} Suppose to the contrary that $c \leq d_i(u_{\oslash}) - \lambda$ for some $\lambda > 0$. Define $r_0 \in R(\oslash)$ by letting $r_0(a) := \max\{u_{\oslash}(a) - \lambda,0\}$. Since $u_{\oslash} - \lambda \leq r_0$, it holds that $c \leq d_{i}(u_{\oslash}) - \lambda \leq d_{i}(r_0)$. Consequently, $r_0$ is a legal move of Player~I in the game $G_i(f_i,c)$ at position $\oslash$. Denote $a_0 := \sigma_{\rm II}(r_{0})$. As $a_0$ is Player~II's legal move in $G_i(f_i,c)$ at position $(r_0)$, it must be the case that $r_0(a_0) > 0$, and hence $r_0(a_0) = u_{\oslash}(a_{0}) - \lambda$. On the other hand, $r_{0} \in R^{*}(\oslash,a_{0})$, so the definition of $u_{\oslash}$ implies that $u_{\oslash}(a_{0}) \leq r_{0}(a_{0})$, a contradiction.
Take $t \geq 1$, let $h_{t} := (h_{t-1},a_{t-1}) \in H_{t}$ be a $\sigma_{\rm II}$-acceptable history, and let $r_{t-1}$ be such that $\psi(h_{t}) = (\psi(h_{t-1}),r_{t-1},a_{t-1})$. Then \begin{equation}\label{eqn:argue1} d_i(u_{h_{t}}) \leq r_{t-1}(a_{t-1}). \end{equation}
Indeed, suppose to the contrary that $r_{t-1}(a_{t-1}) \leq d_i(u_{h_{t}}) - \lambda$ for some $\lambda > 0$. Define $r_{t} \in R(h_{t})$ by letting $r_{t}(a) := \max\{u_{h_{t}}(a) - \lambda,0\}$. Since $u_{h_{t}} - \lambda \leq r_{t}$, it holds that $r_{t-1}(a_{t-1}) \leq d_{i}(u_{h_{t}}) - \lambda \leq d_{i}(r_{t})$. Consequently, $r_{t}$ is a legal move of Player~I at position $\psi(h_{t})$. Let $a_{t} := \sigma_{\rm II}(\psi(h_{t}), r_{t})$. As $a_{t}$ is Player~II's legal move at position $(\psi(h_{t}), r_{t})$, it must be the case that $r_{t}(a_{t}) > 0$, and hence $r_{t}(a_{t}) = u_{h_{t}}(a_{t}) - \lambda$. On the other hand, $r_{t} \in R^{*}(h_{t}, a_{t})$, so the definition of $u_{h_{t}}$ implies that $u_{h_{t}}(a_{t}) \leq r_{t}(a_{t})$, a contradiction.
We now define a strategy profile $\sigma_{-i}$ of player $i$'s opponents in $\Gamma$ as follows: For a history $h_{t} \in H_{t}$ of $\Gamma$ let $\sigma_{-i}(h_{t}) \in X_{-i}(h_{t})$ be such that \begin{equation}\label{eqn str opponent} u_{h_{t}}(\sigma_{-i}(h_{t}),x_{i}) \leq d_{i}(u_{h_{t}}) + \epsilon \cdot 3^{-t-1}\text{ for each }x_{i} \in \Delta(A_i(h_{t})). \end{equation}
\noindent\textsc{Step 3.2:} Verifying that $\mathbb{E}_{\sigma_{-i},\sigma_{i}}(g) \leq c + \epsilon$ for each strategy $\sigma_{i} \in \Sigma_{i}$ of player $i$ in $\Gamma$.
Fix a strategy $\sigma_{i} \in \Sigma_{i}$. For $t \in \mathbb{N}$ define a function $\upsilon_{t}^\epsilon := \upsilon_{t} + \epsilon \cdot 3^{-t}$. The sequence $\upsilon_0^\epsilon, \upsilon_1^\epsilon, \dots$ could be thought of as a process on $[H]$, measurable with respect to the filtration $\{\mathcal{F}_{t}\}_{t \in \mathbb{N}}$. We next show that the process is a supermartingale with respect to $\mathbb{P}_{\sigma_{-i},\sigma_{i}}$.
By Eqs. \eqref{eqn str opponent} and \eqref{eqn argue}, \begin{align*} \mathbb{E}_{\sigma_{-i},\sigma_{i}}(\upsilon_1^\epsilon) &= \mathbb{E}_{\sigma_{-i},\sigma_{i}}(u_{\oslash}(a_0)) + \epsilon\cdot 3^{-1}\\ &= u_{\oslash}(\sigma_{-i}(\oslash),\sigma_{i}(\oslash)) + \epsilon\cdot 3^{-1}\\ &\leq d_{i}(u_{\oslash}) + \epsilon\cdot 2 \cdot 3^{-1}\\ &\leq c + \epsilon = \upsilon_{0}^\epsilon(\oslash). \end{align*} Take $t \geq 1$, let $h_{t} = (h_{t-1},a_{t-1}) \in H_{t}$ be a $\sigma_{\rm II}$-acceptable history, and let $r_{t-1}$ be such that $\psi(h_{t}) = (\psi(h_{t-1}),r_{t-1},a_{t-1})$. We have by Eqs. \eqref{eqn str opponent}, \eqref{eqn:argue1}, and \eqref{eqn:proprt}: \begin{align*} \mathbb{E}_{\sigma_{-i},\sigma_{i}}(\upsilon_{t+1}^\epsilon \mid h_{t}) &= \mathbb{E}_{\sigma_{-i},\sigma_{i}}(u_{h_{t}}(a_t) \mid h_{t}) + \epsilon\cdot3^{-t-1}\\ &= u_{h_{t}}(\sigma_{-i}(h_{t}),\sigma_{i}(h_{t})) + \epsilon\cdot3^{-t-1}\\ &\leq d_{i}(u_{h_{t}}) + \epsilon \cdot 2 \cdot 3^{-t-1}\\ &\leq r_{t-1}(a_{t-1}) + \epsilon \cdot 2 \cdot 3^{-t-1}\\ &\leq u_{h_{t-1}}(a_{t-1}) + \epsilon \cdot 3 \cdot 3^{-t-1}\\ &= \upsilon_{t}^\epsilon(h_{t-1},a_{t-1}) = \upsilon_{t}^\epsilon(h_{t}). \end{align*} If, on the other hand, the history $h_{t}$ is not $\sigma_{\rm II}$-acceptable, then \[\mathbb{E}_{\sigma_{-i},\sigma_{i}}(\upsilon_{t+1}^\epsilon \mid h_{t}) = 1+\epsilon\cdot 3^{-t-1} \leq 1+\epsilon\cdot 3^{-t} = \upsilon_{t}^\epsilon(h_{t}).\]
Since the process $\upsilon_0^\epsilon,\upsilon_1^\epsilon,\dots$ is bounded below (by 0), by the Martingale Convergence Theorem, it converges pointwise $\mathbb{P}_{\sigma_{-i},\sigma_{i}}$-almost surely; whenever the process converges, its limit is $g$. Hence $\mathbb{E}_{\sigma_{-i},\sigma_{i}}(g) = \mathbb{E}_{\sigma_{-i},\sigma_{i}}(\lim_{t \to \infty}\upsilon_t^\epsilon) \leq \upsilon_0^\epsilon(\oslash) = c + \epsilon$, as desired. \end{proof}
We now invoke the result of Martin \cite{Martin75} on Borel determinacy of perfect information games. To do so, we endow $[T]$ with its relative topology as a subspace of the product space $(R \cup A)^{\mathbb{N}}$, where $R \cup A$ is given its discrete topology. One can then check that Player I's winning set in $G_i(f_i,c)$ is a Borel subset of $[T]$. It follows that for each $c \in (0,1]$ the game $G_i(f_i,c)$ is determined: either Player~I has a winning strategy in the game or Player~II does. We arrive at the following conclusion.
\begin{proposition}\label{prop:det} If $v_i(f_i) < c$, then Player II has a winning strategy in $G_i(f_i,c)$. If $c< v_i(f_i)$, then Player I has a winning strategy in $G_i(f_i,c)$. \end{proposition}
Theorems \ref{thrm:reg} and \ref{thrm:regfunc} follow from Propositions \ref{prop:PlayerI}, \ref{prop:PlayerII}, and \ref{prop:det}.
\noindent\textbf{Proof of Theorem \ref{thrm:tailapprox}:} Take an $\varepsilon > 0$. Without loss of generality, suppose that $f_i$ takes values in $[0,1]$.
By Proposition \ref{prop:v(Q)=1} we know that $v_{i}(Q_{i,\varepsilon}(f_i)) = 1$. To obtain an approximation from below, use Theorem \ref{thrm:reg} to choose a closed set $C \subseteq Q_{i, \varepsilon}(f_i)$ such that $1 - \varepsilon \leq v_{i}(C)$, and define the function $g := (v_i(f_i) - \varepsilon) \cdot 1_{C}$. Then $g \leq f_i$ and $v_i(f_i) - 2\varepsilon \leq (v_i(f_i) - \varepsilon)\cdot(1-\varepsilon) \leq v_{i}(g)$. Since $C$ is closed, $g$ is upper semicontinuous.
By Proposition \ref{prop:v(Q)=1} we know that $v_{i}(U_{i,\varepsilon}(f_i)) = 0$. To obtain an approximation from above, use Theorem \ref{thrm:reg} to choose an open set $O \supseteq U_{i,\varepsilon}(f_i)$ such that $v_{i}(O) \leq \varepsilon$, and define the function $g := v_i(f_i) + \varepsilon + (1 - v_i(f_i) - \varepsilon) \cdot 1_{O}$. Then $f_i \leq g \leq 1$ and $v_{i}(g) \leq v_i(f_i) + 2\varepsilon$. Since $O$ is open, $g$ is lower semicontinuous. $\Box$
\end{document} |
\begin{document}
\begin{abstract}
We prove that if $\sigma \in S_m$ is a pattern of $w \in S_n$, then we can express the Schubert polynomial $\mathfrak{S}_w$ as a monomial times $\mathfrak{S}_\sigma$ (in reindexed variables) plus a polynomial with nonnegative coefficients. This implies that the set of permutations whose Schubert polynomials have all their coefficients equal to either 0 or 1 is closed under pattern containment. Using Magyar's orthodontia, we characterize this class by a list of twelve avoided patterns. We also give other equivalent conditions on $\mathfrak{S}_w$ being zero-one. In this case, the Schubert polynomial $\mathfrak{S}_w$ is equal to the integer point transform of a generalized permutahedron. \end{abstract}
\title{Zero-one Schubert polynomials}
\section{Introduction}
Schubert polynomials, introduced by Lascoux and Sch\"utzenberger in \cite{LS}, represent cohomology classes of Schubert cycles in the flag variety. Knutson and Miller also showed them to be multidegrees of matrix Schubert varieties \cite{multidegree}. There are a number of combinatorial formulas for the Schubert polynomials \cite{laddermoves, BJS, FK1993, nilcoxeter, thomas, lenart, manivel, prismtableaux},
yet only recently has the structure of their supports been investigated:
the support of a Schubert polynomial $\mathfrak{S}_w$ is the set of all integer points of a certain generalized permutahedron $\mathcal{P}(w)$ \cite{FMS, MTY}.
The question motivating this paper is to characterize when $\mathfrak{S}_w$ equals the integer point transform of $\mathcal{P}(w)$,
in other words, when all the coefficients of $\mathfrak{S}_w$ are equal to $0$ or $1$.
One of our main results is a pattern-avoidance characterization of the permutations corresponding to these polynomials:
\begin{theorem} \label{thm:01}
The Schubert polynomial $\mathfrak{S}_w$ is zero-one if and only if $w$ avoids the patterns $12543$, $13254$, $13524$, $13542$, $21543$, $125364$, $125634$, $215364$, $215634$, $315264$, $315624$, and $315642$.
\end{theorem}
In Theorem~\ref{thm:011} we provide further equivalent conditions on the Schubert polynomial $\mathfrak{S}_w$ being zero-one. One implication of Theorem~\ref{thm:01} follows from our other main result, which relates the Schubert polynomials $\mathfrak{S}_\sigma$ and $\mathfrak{S}_w$ when $\sigma$ is a pattern of $w$:
\begin{theorem}
\label{thm:pattern}
Fix $w \in S_n$ and let $\sigma \in S_{n-1}$ be the pattern with Rothe diagram $D(\sigma)$ obtained by removing row $k$ and column $w_k$ from $D(w)$. Then
\begin{align*}
\mathfrak{S}_{w}(x_1, \ldots, x_n)=M(x_1, \ldots, x_n) \mathfrak{S}_{\sigma}(x_{1}, \ldots, \widehat{x_k}, \ldots, x_{n})+F(x_1, \ldots, x_n),
\end{align*}
where $F\in \mathbb{Z}_{\geq 0}[x_1, \ldots, x_n]$ and
\[M(x_1, \ldots, x_n) = \left(\prod_{(k,i)\in D(w)}{x_k}\right)\left(\prod_{(i,w_k)\in D(w)}{x_i} \right).\]
\end{theorem}
Theorem~\ref{thm:pattern} is a special case of Theorem~\ref{thm:pattern2}, which applies to the dual character of the flagged Weyl module of any diagram.
\subsection*{Outline of this paper}
Section~\ref{sec:magyar} gives an expression of Magyar for Schubert polynomials in terms of orthodontic sequences $(\bm{i}, \bm{m})$. In Section~\ref{sec:suff}, we give a condition ``multiplicity-free'' on the orthodontic sequence $(\bm{i}, \bm{m})$ of $w$ which implies that $\mathfrak{S}_w$ is zero-one. In Section~\ref{sec:mult-patt} we show that multiplicity-freeness can equivalently be phrased in terms of pattern avoidance. We then prove in Section~\ref{sec:mult-patt} that multiplicity-freeness is also a necessary condition for $\mathfrak{S}_w$ to be zero-one. In the latter proof we assume Theorem~\ref{thm:pattern}, whose generalization (Theorem~\ref{thm:pattern2}) and proof is the subject of Section~\ref{sec:trans}.
\section{Magyar's orthodontia for Schubert polynomials}
\label{sec:magyar}
In this section we explain the results we use to show one direction of Theorem~\ref{thm:01}. We include the classical definition of Schubert polynomials here for reference.
The \emph{Schubert polynomial} of the longest permutation $w_0=n \hspace{.1cm} n\!-\!1 \hspace{.1cm} \cdots \hspace{.1cm} 2 \hspace{.1cm} 1 \in S_n$ is
\[\mathfrak{S}_{w_0}\coloneqq x_1^{n-1}x_2^{n-2}\cdots x_{n-1}.\]
For $w\in S_n$, $w\neq w_0$, there exists $i\in [n-1]$ such that $w_i<w_{i+1}$.
For any such~$i$, the Schubert polynomial $\mathfrak{S}_{w}$ is defined as
\[\mathfrak{S}_{w}(x_1, \ldots, x_n)\coloneqq \partial_i \mathfrak{S}_{ws_i}(x_1, \ldots, x_n),\]
where $s_i$ is the transposition swapping $i$ and $i+1$, and $\partial_i$ is the $i$th divided difference operator
\[\partial_i (f)\coloneqq\frac{f(x_1,\ldots,x_n)-f(x_1,\ldots,x_{i-1},x_{i+1},x_i,x_{i+2},\ldots,x_n)}{x_i-x_{i+1}}.\]
Since the operators $\partial_i$ satisfy the braid relations, the Schubert polynomials $\mathfrak{S}_{w}$ are well-defined.
We will not be using the above definition of Schubert polynomials in this work. Instead, we will make use of several results due to Magyar in \cite{magyar}. We start by summarizing Proposition 15 and Proposition 16 of \cite{magyar} and supplying the necessary background, closely following the exposition of \cite{magyar}.
By a \emph{diagram}, we mean a sequence $D=(C_1,C_2,\ldots,C_n)$ of finite subsets of $[n]$, called the \emph{columns} of $D$. We interchangeably think of $D$ as a collection of boxes $(i,j)$ in a grid, viewing an element $i\in C_j$ as a box in row $i$ and column $j$ of the grid. When we draw diagrams, we read the indices as in a matrix: $i$ increases top-to-bottom and $j$ increases left-to-right.
Two diagrams $D$ and $D'$ are called \emph{column-equivalent} if one is obtained from the other by reordering nonempty columns and adding or removing any number of empty columns. For a column $C\subseteq [n]$, let the \emph{multiplicity} $\mathrm{mult}_D(C)$ be the number of columns of $D$ which are equal to $C$. The sum of diagrams, denoted $D\oplus D'$, is constructed by concatenating the lists of columns; graphically this means placing $D'$ to the right of $D$.
The \emph{Rothe diagram} $D(w)$ of a permutation $w\in S_n$ is the diagram
\[ D(w)=\{(i,j)\in [n]\times [n] \mid i<(w^{-1})_j\mbox{ and } j<w_i \}. \]
Note that Rothe diagrams have the \emph{northwest property}: If $(r,c'),(r',c)\in D(w)$ with $r<r'$ and $c<c'$, then $(r,c)\in D(w)$.
\begin{example}
If $w=31542$, then
\begin{center}
\begin{tikzpicture}[scale=.55]
\draw (0,0)--(5,0)--(5,5)--(0,5)--(0,0);
\filldraw[draw=black,fill=lightgray] (0,4)--(1,4)--(1,5)--(0,5)--(0,4);
\filldraw[draw=black,fill=lightgray] (1,4)--(2,4)--(2,5)--(1,5)--(1,4);
\filldraw[draw=black,fill=lightgray] (1,2)--(2,2)--(2,3)--(1,3)--(1,2);
\filldraw[draw=black,fill=lightgray] (1,1)--(2,1)--(2,2)--(1,2)--(1,1);
\filldraw[draw=black,fill=lightgray] (3,2)--(4,2)--(4,3)--(3,3)--(3,2);
\node at (-1.5,2.5) {$D(w)=$};
\node at (8.9,2.5) {$=(\{1\},\{1,3,4\},\emptyset,\{3\},\emptyset).$};
\end{tikzpicture}
\end{center}
\end{example}
We next recall Magyar's orthodontia. Let $D$ be the Rothe diagram of a permutation $w\in S_n$ with columns $C_1,C_2,\ldots,C_n$. We describe an algorithm for constructing a reduced word $\bm{i}=(i_1,\ldots,i_l)$ and a multiplicity list $\bm{m}=(k_1,\ldots,k_n;\,m_1,\ldots,m_l)$ such that the diagram $D_{\bm{i},\bm{m}}$ defined by
\[D_{\bm{i},\bm{m}} = \bigoplus_{j=1}^n {k_j\cdot [j]} \quad \oplus \quad \bigoplus_{j=1}^l m_j\cdot (s_{i_1}s_{i_2}\cdots s_{i_j}[i_j]), \]
is column-equivalent to~$D$. In the above, $m\cdot C$ denotes $C\oplus \cdots\oplus C$ with $m$ copies of~$C$; in particular $0\cdot C$ should be interpreted as a diagram with no columns, not the empty column.
The algorithm to produce $\bm{i}$ and $\bm{m}$ from $D$ is as follows. To begin the first step, for each $j\in [n]$ let $k_j=\mathrm{mult}_D([j])$, the number of columns of $D$ of the form $[j]$. Replace all such columns by empty columns for each $j$ to get a new diagram $D_-$.
Given a column $C\subseteq [n]$, a \emph{missing tooth} of $C$ is a positive integer $i$ such that $i\notin C$, but $i+1\in C$. The only columns without missing teeth are the empty column and the intervals $[i]$. Hence the first nonempty column of $D_-$ (if there is any) contains a smallest missing tooth $i_1$. Switch rows $i_1$ and $i_1+1$ of $D_-$ to get a new diagram $D'$.
In the second step, repeat the above with $D'$ in place of $D$. That is, let $m_1=\mathrm{mult}_{D'}([i_1])$ and replace all columns of the form $[i_1]$ in $D'$ by empty columns to get a new diagram $D_-'$. Find the smallest missing tooth $i_2$ of the first nonempty column of $D_-'$, and switch rows $i_2$ and $i_2+1$ of $D_-'$ to get a new diagram $D''$.
Continue in this fashion until no nonempty columns remain. It is easily seen that the sequences $\bm{i}=(i_1,\ldots,i_l)$ and $\bm{m}=(k_1,\ldots,k_n;\,m_1,\ldots,m_l)$ just constructed have the desired properties.
\begin{definition}
\label{def:imsequence}
The pair $(\bm{i},\bm{m})$ constructed from the preceding algorithm is called the \emph{orthodontic sequence} of $w$.
\end{definition}
\begin{example}
If $w=31542$, then the orthodontic sequence algorithm produces the diagrams
\begin{center}
\includegraphics[scale=.85]{orthodontia.pdf}
\end{center}
The sequence of missing teeth gives $\bm{i}=(2,3,1)$ and $\bm{m}=(1,0,0,0,0;\,0,1,1)$, so
\begin{center}
\begin{tikzpicture}[scale=.5]
\draw (9.5,0)--(12.5,0)--(12.5,5)--(9.5,5)--(9.5,0);
\filldraw[draw=black,fill=lightgray] (9.5,4)--(10.5,4)--(10.5,5)--(9.5,5)--(9.5,4);
\filldraw[draw=black,fill=lightgray] (10.5,4)--(11.5,4)--(11.5,5)--(10.5,5)--(10.5,4);
\filldraw[draw=black,fill=lightgray] (10.5,2)--(11.5,2)--(11.5,3)--(10.5,3)--(10.5,2);
\filldraw[draw=black,fill=lightgray] (10.5,1)--(11.5,1)--(11.5,2)--(10.5,2)--(10.5,1);
\filldraw[draw=black,fill=lightgray] (11.5,2)--(12.5,2)--(12.5,3)--(11.5,3)--(11.5,2);
\node at (8,2.5) {$D_{\bm{i},\bm{m}}=$};
\node at (12.7,2.5) {.};
\end{tikzpicture}
\end{center}
\end{example}
\begin{theorem}[{\cite[Proposition~15]{magyar}}]
\label{thm:magyaroperatortheorem}
Let $w\in S_n$ have orthodontic sequence $(\bm{i},\bm{m})$. If $\pi_j=\partial_j x_j$ denotes the $j$th Demazure operator and $\omega_j=x_1x_2\cdots x_j$, then
\[\mathfrak{S}_w = \omega_1^{k_1}\cdots\omega_n^{k_n}\pi_{i_1}(\omega_{i_1}^{m_1} \pi_{i_2}(\omega_{i_2}^{m_2}\cdots \pi_{i_l}(\omega_{i_l}^{m_l})\cdots )). \]
\end{theorem}
\begin{example}
For $w=31542$, it is easily checked that
\[\mathfrak{S}_w=x_1\pi_2\pi_3(x_1x_2x_3\pi_1(x_1)). \]
\end{example}
Theorem~\ref{thm:magyaroperatortheorem} can also be realized on the level of tableaux, analogous to Young tableaux in the case of Schur polynomials. A \emph{filling} (with entries in $\{1,\ldots,n\}$) of a diagram $D$ is a map $T$ assigning to each box in $D$ an integer in $[n]$. A filling $T$ is called \emph{column-strict} if $T$ is strictly increasing down each column of $D$. The \emph{weight} of a filling $T$ is the vector $wt(T)$ whose $i$th component $wt(T)_i$ is the number of times $i$ occurs in $T$.
Given a permutation $w\in S_n$ with orthodontic sequence $(\bm{i},\bm{m})$, we will define a set $\mathcal{T}_w$ of fillings of the diagram $D_{\bm{i},\bm{m}}$ which satisfy
\[\mathfrak{S}_w=\sum_{T\in\mathcal{T}_w}x_1^{wt(T)_1}x_2^{wt(T)_2}\cdots x_n^{wt(T)_n}. \]
We start by recalling the \emph{root operators}, first defined in \cite{rootoperators}. These are operators $f_i$ which either take a filling $T$ of a diagram $D$ to another filling of $D$, or are undefined on $T$. To define root operators, we first encode a filling $T$ in terms of its reading word. The \emph{reading word} of a filling $T$ of a diagram $D=D_{\bm{i},\bm{m}}$ is the sequence of the entries of $T$ read in order, down each column going left-to-right along columns; that is the sequence
\[T(1,1),T(2,1),\ldots,T(n,1),T(1,2),T(2,2),\ldots,T(n,2),\ldots, T(n,n) \]
ignoring any boxes $(i,j)\notin D$.
If it is defined, the operator $f_i$ changes an entry of $i$ in $T$ to an entry of $i+1$ according to the following rule. First, ignore all the entries in $T$ except those which equal $i$ or $i+1$. Now ``match parentheses'':
if, in the list of entries not yet ignored, an $i$ is followed by an $i+1$, then henceforth ignore that pair of entries as well; look again for an $i$ followed (up to ignored entries) by an $i+1$, and henceforth ignore this pair; continue doing this until no such pairs remain unignored. The remaining entries of $T$ will be a subword of the form $i+1,i+1,\ldots,i+1,i,i,\ldots,i$. If $i$ does not appear in this word, then $f_i(T)$ is undefined. Otherwise, $f_i$ changes the leftmost $i$ to an $i+1$. Reading the resulting word back into $D$ produces a new filling. We can iteratively apply $f_i$ to a filling $T$.
\begin{example}
If $T=3\,1\,2\,2\,2\,1\,3\,1\,2\,4\,3\,2\,4\,1\,3\,1$, applying $f_1$ iteratively to $T$ yields:
\begin{center}
\begin{tabular}{rccccccccccccccccc}
$\phantom{f_1(}T\phantom{)}$=&3&1&2&2&2&1&3&1&2&4&3&2&4&1&3&1&\\
$\phantom=$&$\cdot$&1&2&2&2&1&$\cdot$&1&2&$\cdot$&$\cdot$&2&$\cdot$&1&$\cdot$&1&\\
$\phantom=$&$\cdot$&$\cdot$&$\cdot$&2&2&1&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&2&$\cdot$&1&$\cdot$&1&\\
$\phantom=$&$\cdot$&$\cdot$&$\cdot$&2&2&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&1&$\cdot$&1&\\
$f_1(T)=$&3&1&2&2&2&1&3&1&2&4&3&2&4&\textbf{2}&3&1&\\
$f_1^2(T)=$&3&1&2&2&2&1&3&1&2&4&3&2&4&2&3&\textbf{2}&\\
$f_1^3(T)\mathrel{\phantom=}$&&&&&&&&&&&&&&&&&\hspace{-45ex}\mbox{is undefined}
\end{tabular}
\end{center}
\end{example}
Define the set-valued \emph{quantized Demazure operator} $\widetilde{\pi}_i$ by $\widetilde{\pi}_i(T)=\{T,f_i(T),f_i^2(T),\ldots\}$. For a set $\mathcal{T}$ of tableaux, let
\[\widetilde{\pi}_{i}(\mathcal{T})=\bigcup_{T\in\mathcal{T}}\widetilde{\pi}_i(T).\]
Next, consider the column $[j]$ and its minimal column-strict filling $\widetilde{\omega}_j$ (each row $i\in[j]$ maps to $i$). For a filling $T$ of a diagram $D$ with columns $(C_1,C_2,\ldots,C_n)$, define in the obvious way the composite filling of $[j]\oplus D$, corresponding to concatenating the reading words of $[j]$ and $D$. Define $[j]^r\oplus D$ analogously by adding $r$ columns $[j]$ to $D$, each with filling $\widetilde{\omega}_j$.
\begin{definition}
\label{def:magyartableauxset}
Let $w\in S_n$ be a permutation with orthodontic sequence $(\bm{i},\bm{m})$. Define the set $\mathcal{T}_w$ of tableaux by
\[\mathcal{T}_w=\widetilde{\omega}_1^{k_1}\oplus\cdots\oplus\widetilde{\omega}_n^{k_n}\oplus\widetilde{\pi}_{i_1}(\widetilde{\omega}_{i_1}^{m_1}\oplus \widetilde{\pi}_{i_2}(\widetilde{\omega}_{i_2}^{m_2}\oplus\cdots \oplus\widetilde{\pi}_{i_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots )). \]
\end{definition}
\begin{theorem}[{\cite[Proposition~16]{magyar}}]
\label{thm:magyartableauxtheorem}
Let $w\in S_n$ be a permutation with orthodontic sequence $(\bm{i},\bm{m})$. Then,
\[\mathfrak{S}_w=\sum_{T\in\mathcal{T}_w}x_1^{wt(T)_1}x_2^{wt(T)_2}\cdots x_n^{wt(T)_n}. \]
\end{theorem}
\begin{example}
Consider again $w=31542$, so the orthodontic sequence of $w$ is $\bm{i}=(2,3,1)$ and $\bm{m}=(1,0,0,0,0; 0,1,1)$.
The set $\mathcal{T}_w$ is built up as follows:
\begin{align*}
\{\}&\xrightarrow{\widetilde{\omega}_1}\{1\}\xrightarrow{\widetilde{\pi}_1}\{1,2\}\xrightarrow{\widetilde{\omega}_3}\{1231,1232\}\xrightarrow{\widetilde{\pi}_3}\{1231,1241,1232,1242\}\\
&\xrightarrow{\widetilde{\pi}_2}\{1231,1241,1341,1232,1233,1242,1342,1343\}\\
&\xrightarrow{\widetilde{\omega}_1}\{11231,11241,11341,11232,11233,11242,11342,11343\}
\end{align*}
which agrees with
\[\mathfrak{S}_{w}=x_1^3x_2x_3+x_1^3x_2x_4+x_1^3x_3x_4+x_1^2x_2^2x_3+x_1^2x_2x_3^2+x_1^2x_2^2x_4+x_1^2x_2x_3x_4+x_1^2x_3^2x_4.\]
\end{example}
We now describe a way to view each step of the construction of $\mathcal{T}_w$ as producing a set of fillings of a diagram.
\begin{definition}
\label{def:partialfilling}
Let $w$ be a permutation with orthodontic sequence $(\bm{i},\bm{m})$, $\bm{i}=(i_1,\ldots,i_l)$. For each $r\in [l]$, define
\[\mathcal{T}_w(r)=\widetilde{\omega}_{i_r}^{m_r}\oplus\widetilde{\pi}_{i_{r+1}}(\widetilde{\omega}_{i_{r+1}}^{m_{r+1}}\oplus\cdots\oplus \widetilde{\pi}_{i_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots). \]
Set $\mathcal{T}_w(0)=\mathcal{T}_w$.
\end{definition}
\begin{definition}
\label{def:partialdiagram}
Let $w$ be a permutation with orthodontic sequence $(\bm{i},\bm{m})$, $\bm{i}=(i_1,\ldots,i_l)$. For any $r \in [l]$, let $O(w,r)$ be the diagram obtained from $D(w)$ in the construction of $(\bm{i},\bm{m})$ at the time when the row swaps of the missing teeth $i_1,\ldots,i_{r}$ have all been executed on $D(w)$, but after executing the row swap of the missing tooth $i_r$, columns without missing teeth have not yet been removed ($m_r$ has not yet been recorded). Set $O(w,0)=D(w)$. For each $r$, give $O(w,r)$ the same column indexing as $D(w)$, so any columns replaced by empty columns in the execution of the missing teeth $i_1,\ldots,i_{r-1}$ retain their original index in $D(w)$.
\end{definition}
The motivation behind Definition~\ref{def:partialfilling} and Definition~\ref{def:partialdiagram} is that the elements of $\mathcal{T}_w(r)$ can be viewed as column-strict fillings of $O(w,r)$ for each $r$. To do this, the choice of filling order for $O(w,r)$ is crucial. Let $w\in S_n$ and consider $D=D(w)$ and $D_{\bm{i},\bm{m}}$. Suppose $D$ has $z$ nonempty columns. There is a unique permutation $\tau$ of $[n]$ taking the column indices of $D$ to the column indices of $D_{\bm{i},\bm{m}}\oplus \emptyset^{n-z}$ with the following properties:
\begin{itemize}
\item Column $c$ of $D$ is the same as column $\tau(c)$ of $D_{\bm{i},\bm{m}}$.
\item If column $c$ and column $c'$ of $D$ are equal with $c<c'$, then $\tau(c)<\tau(c')$.
\end{itemize}
Recall that the columns of $O(w,r)$ have the same column labels as $D$. To read an element $T\in \mathcal{T}_w(r)$ into $O(w,r)$, read $T$ left-to-right and fill in top-to-bottom columns $\tau^{-1}(n),\,\tau^{-1}(n-1),\ldots,\tau^{-1}(1)$ (ignoring any column indices referring to empty columns).
\begin{lemma}
\label{lem:fillings}
Let $w\in S_n$ have orthodontic sequence $(\bm{i},\bm{m})$, $\bm{i}=(i_1,\ldots,i_l)$. In the filling order specified above, the elements of $\mathcal{T}_w(r)$ are column-strict fillings of $O(w,r)$ for each $0\leq r\leq l$.
\end{lemma}
\begin{example}
Take again $w=31542$ with orthodontic sequence $\bm{i}=(2,3,1)$ and $\bm{m}=(1,0,0,0,0;0,1,1)$.
Recall that
\begin{center}
\begin{tikzpicture}[scale=.5]
\draw (0,0)--(5,0)--(5,5)--(0,5)--(0,0);
\filldraw[draw=black,fill=lightgray] (0,4)--(1,4)--(1,5)--(0,5)--(0,4);
\filldraw[draw=black,fill=lightgray] (1,4)--(2,4)--(2,5)--(1,5)--(1,4);
\filldraw[draw=black,fill=lightgray] (1,2)--(2,2)--(2,3)--(1,3)--(1,2);
\filldraw[draw=black,fill=lightgray] (1,1)--(2,1)--(2,2)--(1,2)--(1,1);
\filldraw[draw=black,fill=lightgray] (3,2)--(4,2)--(4,3)--(3,3)--(3,2);
\node at (-1.6,2.5) {$D(w)=$};
\node at (6,2.5) {and};
\draw (9.5,0)--(12.5,0)--(12.5,5)--(9.5,5)--(9.5,0);
\filldraw[draw=black,fill=lightgray] (9.5,4)--(10.5,4)--(10.5,5)--(9.5,5)--(9.5,4);
\filldraw[draw=black,fill=lightgray] (10.5,4)--(11.5,4)--(11.5,5)--(10.5,5)--(10.5,4);
\filldraw[draw=black,fill=lightgray] (10.5,2)--(11.5,2)--(11.5,3)--(10.5,3)--(10.5,2);
\filldraw[draw=black,fill=lightgray] (10.5,1)--(11.5,1)--(11.5,2)--(10.5,2)--(10.5,1);
\filldraw[draw=black,fill=lightgray] (11.5,2)--(12.5,2)--(12.5,3)--(11.5,3)--(11.5,2);
\node at (8,2.5) {$D_{\bm{i},\bm{m}}=$};
\node at (12.7,2.5) {,};
\end{tikzpicture}
\end{center}
so $\tau=12435=\tau^{-1}$. Consider the elements $1\in\mathcal{T}_w(3)$, $1232\in\mathcal{T}_w(2)$, $1242\in\mathcal{T}_w(1)$, and $11342\in\mathcal{T}_w(0)$. The column filling order of each $O(w,r)$ is given by reading $\tau^{-1}$ in one-line notation right to left: in the indexing of $D(w)$, fill down column 4, then down column 2, then down column 1. The elements of each set $\mathcal{T}_w(r)$ are column-strict fillings in the corresponding diagrams $O(w,r)$:
\begin{center}
\includegraphics[scale=1]{tableauxinterpretation.pdf}
\end{center}
\end{example}
\begin{lemma}
Let $w$ be a permutation with orthodontic sequence $(\bm{i},\bm{m})$, $\bm{i}=(i_1,\ldots,i_l)$. For each $0\leq r \leq l$, $O(w,r)$ has the northwest property.
\end{lemma}
\begin{definition}
A filling $T$ of a diagram $D$ is called \emph{row-flagged} if $T(p,q)\leq p$ for each box $(p,q)\in D$.
\end{definition}
\begin{lemma}
\label{lem:rowflagged}
For each $0\leq r\leq l$, the elements of $\mathcal{T}_w(r)$ are row-flagged fillings of $O(w,r)$.
\end{lemma}
\begin{proof}
Clearly, the singleton $\mathcal{T}_w(l)$ is a row-flagged filling of $O(w,l)$. Assume that for some $l\geq s>0$, the result holds with $r=s$. We show that the result also holds with $r=s-1$. Let $T\in\mathcal{T}_w(s)$. We must show that for each $u$, if $f_{i_s}^u(T)$ is defined, then $\widetilde{\omega}_{i_{s-1}}^{m_{s-1}}\oplus f_{i_s}^u(T)$ is a row-flagged filling of $O(w,s-1)$. By the orthodontia construction, $O(w,s)$ is obtained from $O(w,s-1)$ by removing the $m_{s-1}$ columns with no missing tooth, and then switching rows $i_s+1$ and $i_s$.
Since $T$ is a row-flagged filling of $O(w,s)$, each box in $O(w,s)$ containing an entry of $T$ equal to $i_s$ lies in a row with index at least $i_s$. Any box in $O(w,s)$ containing an entry of $T$ equal to $i_s$ and lying in row $i_s$ of $O(w,s)$ will have row index $i_s+1$ in $O(w,s-1)$. Any box in $O(w,s)$ containing an entry in $T$ equal to $i_s$ and lying in a row $d>i_s$ of $O(w,s)$ will still have row index $d$ in $O(w,s-1)$. Then if $f_{i_s}^u(T)$ is defined, $\widetilde{\omega}_{i_{s-1}}^{m_{s-1}}\oplus f_{i_s}^u(T)$ will be a row-flagged filling of $O(w,s-1)$.
\end{proof}
\section{Zero-one Schubert polynomials}
\label{sec:suff}
This section is devoted to giving a sufficient condition on the orthodontic sequence $(\bm{i},\bm{m})$ of $w$ for the Schubert polynomial $\mathfrak{S}_w$ to be zero-one. We give such a condition in Theorem~\ref{thm:multfree}. We will see in Theorem~\ref{thm:011} that this condition turns out to also be a necessary condition for $\mathfrak{S}_w$ to be zero-one.
We start with a less ambitious result:
\begin{proposition}
\label{prop:norepeats}
Let $w\in S_n$ and $(\bm{i},\bm{m})$ be the orthodontic sequence of $w$. If $\bm{i}=(i_1,\ldots,i_l)$ has distinct entries, then $\mathfrak{S}_w$ is zero-one.
\end{proposition}
\begin{proof}
Let $T,T'\in\mathcal{T}_w$ with $wt(T)=wt(T')$. By Definition~\ref{def:magyartableauxset}, we can find $p_1,\ldots,p_l$ so that
\[T=\widetilde{\omega}_1^{k_1}\oplus\cdots\oplus \widetilde{\omega}_n^{k_n}\oplus f_{i_1}^{p_1}(\widetilde{\omega}_{i_1}^{m_1}\oplus\cdots\oplus f_{i_l}^{p_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots). \]
Then if $e_1,\ldots,e_n$ denote the standard basis vectors of $\mathbb{R}^n$,
\[wt(T) = \sum_{j=1}^{n}wt(\widetilde{\omega}_{j}^{k_j}) + \sum_{j=1}^{l}wt(\widetilde{\omega}_{i_j}^{m_j})+\sum_{j=1}^{l}p_j(e_{i_j+1}-e_{i_j}). \]
Similarly, we can find $q_1,\ldots,q_l$ so that
\[T'=\widetilde{\omega}_1^{k_1}\oplus\cdots\oplus \widetilde{\omega}_n^{k_n}\oplus f_{i_1}^{q_1}(\widetilde{\omega}_{i_1}^{m_1}\oplus\cdots\oplus f_{i_l}^{q_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots), \]
which implies
\[wt(T') = \sum_{j=1}^{n}wt(\widetilde{\omega}_{j}^{k_j}) + \sum_{j=1}^{l}wt(\widetilde{\omega}_{i_j}^{m_j})+\sum_{j=1}^{l}q_j(e_{i_j+1}-e_{i_j}). \]
As $wt(T)=wt(T')$,
\begin{align}
\label{eqn:vectorexpansion}
0=wt(T)-wt(T')=(p_1-q_1)(e_{i_1+1}-e_{i_1})+\cdots+(p_l-q_l)(e_{i_l+1}-e_{i_l})\tag{$*$}.
\end{align}
Since $\bm{i}$ has distinct entries, the vectors $\{e_{i_j+1}-e_{i_j}\}_{j=1}^{l}$ are linearly independent, so $p_j=q_j$ for all $j$. Thus $T=T'$. This shows that all elements of $\mathcal{T}_w$ have distinct weights, so $\mathfrak{S}_w$ is zero-one.
\end{proof}
We now strengthen Proposition~\ref{prop:norepeats} to allow $\bm{i}$ to not have distinct entries. To do this, we will need a technical definition related to the orthodontic sequence. Recall the construction of the orthodontic sequence $(\bm{i},\bm{m})$ of a permutation $w\in S_n$ (Definition~\ref{def:imsequence}) and the intermediate diagrams $O(w,r)$ (Definition~\ref{def:partialdiagram}).
Let $\bm{i}=(i_1,\ldots,i_l)$, and define $O(w,r)_-$ to be the diagram $O(w,r)$ with all columns of the form $[i_{r}]$ replaced by empty columns.
\begin{definition}
Define the \emph{orthodontic impact function} $\mathcal{I}_w:[l]\to 2^{[n]}$ by
\[\mathcal{I}_w(j)=\{c\in [n] \mid (i_j+1,c)\in O(w,j-1)_-\}. \]
\end{definition}
\noindent That is, $\mathcal{I}_w(j)$ is the set of indices of columns of $O(w,j-1)_-$ that are changed when rows $i_j$ and $i_j+1$ are swapped to form $O(w,j)$.
\begin{definition}
Let $w\in S_n$ have orthodontic sequence $(\bm{i},\bm{m})$, $\bm{i}=(i_1,\ldots,i_l)$. We say $w$ is \emph{multiplicity-free} if for any $r,s\in [l]$ with $r\neq s$ and $i_r=i_s$, we have $\mathcal{I}_w(r)=\mathcal{I}_w(s)=\{c\}$ for some $c\in [n]$.
\end{definition}
\begin{example}
If $w=457812693$, then
\begin{center}
\begin{tikzpicture}[scale=.5]
\draw (0,0)--(9,0)--(9,9)--(0,9)--(0,0);
\filldraw[draw=black,fill=lightgray] (0,8)--(1,8)--(1,9)--(0,9)--(0,8);
\filldraw[draw=black,fill=lightgray] (1,8)--(2,8)--(2,9)--(1,9)--(1,8);
\filldraw[draw=black,fill=lightgray] (2,8)--(3,8)--(3,9)--(2,9)--(2,8);
\filldraw[draw=black,fill=lightgray] (0,7)--(1,7)--(1,8)--(0,8)--(0,7);
\filldraw[draw=black,fill=lightgray] (1,7)--(2,7)--(2,8)--(1,8)--(1,7);
\filldraw[draw=black,fill=lightgray] (2,7)--(3,7)--(3,8)--(2,8)--(2,7);
\filldraw[draw=black,fill=lightgray] (0,6)--(1,6)--(1,7)--(0,7)--(0,6);
\filldraw[draw=black,fill=lightgray] (1,6)--(2,6)--(2,7)--(1,7)--(1,6);
\filldraw[draw=black,fill=lightgray] (2,6)--(3,6)--(3,7)--(2,7)--(2,6);
\filldraw[draw=black,fill=lightgray] (0,5)--(1,5)--(1,6)--(0,6)--(0,5);
\filldraw[draw=black,fill=lightgray] (1,5)--(2,5)--(2,6)--(1,6)--(1,5);
\filldraw[draw=black,fill=lightgray] (2,5)--(3,5)--(3,6)--(2,6)--(2,5);
\filldraw[draw=black,fill=lightgray] (5,6)--(6,6)--(6,7)--(5,7)--(5,6);
\filldraw[draw=black,fill=lightgray] (5,5)--(6,5)--(6,6)--(5,6)--(5,5);
\filldraw[draw=black,fill=lightgray] (2,2)--(3,2)--(3,3)--(2,3)--(2,2);
\filldraw[draw=black,fill=lightgray] (2,1)--(3,1)--(3,2)--(2,2)--(2,1);
\node at (-1.5,4.5) {$D(w)=$};
\node at (13.5,4.5) {and $\bm{i}=(6,5,7,6,2,1,3,2).$};
\end{tikzpicture}
\end{center}
The only entries of $\bm{i}$ occurring multiple times are $i_1=i_4=6$ and $i_5=i_8=2$. Their respective impacts are $\mathcal{I}_w(1)=\mathcal{I}_w(4)=\{3\}$ and $\mathcal{I}_w(5)=\mathcal{I}_w(8)=\{6\}$, so $w$ is multiplicity-free.
\end{example}
The proof of the generalization of Proposition~\ref{prop:norepeats} will require the following technical lemma. Before proceeding, recall Lemma~\ref{lem:fillings} and Lemma~\ref{lem:rowflagged}: for every $0\leq j\leq l$, elements of $\mathcal{T}_w(j)$ can be viewed as row-flagged, column-strict fillings of $O(w,j)$ (via the column filling order of $O(w,j)$ specified prior to Lemma~\ref{lem:fillings}). Applying $\widetilde{\omega}_{i_{j-1}}^{m_{j-1}}\oplus f_{i_j}$ to an element of $\mathcal{T}_w(j)$ gives an element of $\mathcal{T}_w(j-1)$, a filling of $O(w,j-1)$. Thus, when we speak below of the application of $f_{i_j}$ to an element $T\in\mathcal{T}_w(j)$ ``changing an $i_j$ to an $i_j+1$ in column $c$'', we specifically mean that when we view $T$ as a filling of $O(w,j)$ and $\widetilde{\omega}_{i_{j-1}}^{m_{j-1}}\oplus f_{i_j}(T)$ as a filling of $O(w,j-1)$, $T$ and $\widetilde{\omega}_{i_{j-1}}^{m_{j-1}}\oplus f_{i_j}(T)$ differ (in the stated way) in their entries in column $c$.
\begin{lemma}
\label{lem:rootoperatorproperty}
Let $w$ be a multiplicity-free permutation with orthodontic sequence $(\bm{i},\bm{m})$, $\bm{i}=(i_1,\ldots,i_l)$. Suppose $i_r=i_s$ with $r<s$ and $\mathcal{I}_w(r)=\mathcal{I}_w(s)=\{c\}$.
Then for each $j$ with $r\leq j \leq s$, $\mathcal{I}_w(j)=\{c\}$ and the application of $f_{i_j}$ to an element of
$\mathcal{T}_w(j)$
is either undefined or changes an $i_j$ to an $i_{j}+1$ in column $c$.
\end{lemma}
\begin{proof}
We handle first the case that $j=r$. In the diagram $O(w,r-1)$, column $c$ is the leftmost column containing a missing tooth, and $i_r$ is the smallest missing tooth in column $c$. Reading column $c$ of $O(w,r-1)$ top-to-bottom, one sees a (possibly empty) sequence of boxes in $O(w,r-1)$, followed by a sequence of boxes not in $O(w,r-1)$. The sequence of boxes not in $O(w,r-1)$ has length at least two since $i_r$ occurs at least twice in $\bm{i}$, and terminates with the box $(i_r+1,c)\in O(w,r-1)$. Note that since $(i_r-1,c),(i_r,c)\notin O(w,r-1)$, the northwest property of $O(w,r-1)$ implies that there can be no box $(i_r-1,c')$ or $(i_r,c')$ in $O(w,r-1)$ with $c'>c$.
Note also that since $\mathcal{I}_w(r)=\{c\}$, we have $(i_r+1,c')\notin O(w,r-1)$ for each $c'>c$.
Lastly, observe that for any $c'>c$ and $d>i_r+1$, there can be no box $(d,c')\in O(w,r-1)$. Otherwise there would be some $t\in [l]$ with $i_t=i_r$ and $t\neq r$ such that $c'\in \mathcal{I}_w(t)$, violating that $w$ is multiplicity-free.
As a consequence of the previous observations, the largest row index that a column $c'>c$ of $O(w,r-1)$ can contain a box in is $i_r-2$. In particular, Lemma~\ref{lem:rowflagged} implies that the application of $f_{i_r}$ to an element of $\mathcal{T}_w(r)$ either is undefined or changes an $i_r$ to an $i_r+1$ in column $c$. This concludes the case that $j=r$.
When $j=s$, an entirely analogous argument works. The only significant difference in the observations is that when column $c$ of $O(w,s-1)$ is read top-to-bottom, the (possibly empty) initial sequence of boxes in $O(w,s-1)$ is followed by a sequence of boxes not in $O(w,s-1)$ with length at least 1, ending with the box $(i_s+1,c)$. Consequently, the largest row index that a column $c'>c$ of $O(w,s-1)$ can contain a box in is $i_s-1$. In particular, Lemma~\ref{lem:rowflagged} implies that the application of $f_{i_s}$ to an element of $\mathcal{T}_w(s)$ either is undefined or changes an $i_s$ to an $i_s+1$ in column $c$. This concludes the case that $j=s$.
Now, let $r<j<s$. Since $\mathcal{I}_w(r)=\mathcal{I}_w(s)=\{c\}$, we have $c\in \mathcal{I}_w(j)$.
If $i_j$ occurs multiple times in $\bm{i}$, then multiplicity-freeness of $w$ implies $\mathcal{I}_w(j)=\{c\}$. In this case, we can find $j'\neq j$ with $i_j=i_{j'}$ and apply the previous argument (with $r$ and $s$ replaced by $j$ and $j'$) to conclude that the application of $f_{i_j}$ to an element of $\mathcal{T}_w(j)$ is either undefined or changes an $i_j$ to an $i_j+1$ in column $c$.
Thus, we assume $i_j$ occurs only once in $\bm{i}$. Recall that it was shown above that $O(w,r-1)$ has no boxes $(d,c')$ with $d>i_r$ and $c'>c$. Read top-to-bottom, let column $c$ of $O(w,r-1)$ have a (possibly empty) initial sequence of boxes ending with a missing box in row $u$, so clearly $u\leq i_r-1$. Since the first missing tooth in column $c$ of $O(w,r-1)$ is in row $i_r$, none of the boxes $(u,c),(u+1,c),\ldots,(i_r,c)$ are in $O(w,r-1)$, but $(i_r+1,c)\in O(w,r-1)$. Then, the northwest property implies that there is no box in $O(w,r-1)$ in any column $c'>c$ in any of rows $u,u+1,\ldots,i_r$. In particular, the largest row index such that a column $c'>c$ of $O(w,r-1)$ can contain a box in is $u-1$.
As $r<j<s$ and $\mathcal{I}_w(r)=\mathcal{I}_w(s)=\{c\}$, we have that $c\in\mathcal{I}_w(j)$.
Also since $r<j<s$, the leftmost nonempty column in $O(w,j-1)$ is column $c$, and $i_j\geq u$. Then in $O(w,j-1)$, the maximum row index a box in a column $c'>c$ can have is $u-1$. In particular, $\mathcal{I}_w(j)=\{c\}$, and Lemma~\ref{lem:rowflagged} implies that the application of $f_{i_j}$ to an element of $\mathcal{T}_w(j)$ is either undefined or changes an $i_j$ to an $i_j+1$ in column $c$.
\end{proof}
\begin{theorem}
\label{thm:multfree}
If $w$ is multiplicity-free, then $\mathfrak{S}_w$ is zero-one.
\end{theorem}
\begin{proof}
Assume $wt(T)=wt(T')$ for some $T,T'\in \mathcal{T}_w$. If we can show that $T=T'$, then we can conclude that all elements of $\mathcal{T}_w$ have distinct weights, so $\mathfrak{S}_w$ is zero-one. To begin, write
\[T=\widetilde{\omega}_1^{k_1}\oplus\cdots\oplus \widetilde{\omega}_n^{k_n}\oplus f_{i_1}^{p_1}(\widetilde{\omega}_{i_1}^{m_1}\oplus\cdots\oplus f_{i_l}^{p_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots) \]
and
\[T'=\widetilde{\omega}_1^{k_1}\oplus\cdots\oplus \widetilde{\omega}_n^{k_n}\oplus f_{i_1}^{q_1}(\widetilde{\omega}_{i_1}^{m_1}\oplus\cdots\oplus f_{i_l}^{q_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots), \]
for some $p_1,\ldots,p_l,q_1,\ldots,q_l$. The basic idea of the proof is to show that as $T$ and $T'$ are constructed step-by-step from $\widetilde{\omega}_{i_l}^{m_l}$, the resulting intermediate tableaux are intermittently equal. At termination of the construction, this will imply that $T=T'$.
By the expansion (\ref{eqn:vectorexpansion}) of $wt(T)-wt(T')$ used in the proof of Proposition~\ref{prop:norepeats}, we observe that $p_u=q_u$ for all $u$ such that $i_u$ occurs only once in $\bm{i}$. Let $s$ be the largest index such that $p_s\neq q_s$. Suppose $\mathcal{I}_w(s)=\{c\}$.
Let $r_1$ be the smallest index such that $i_{r_1}$ occurs multiple times in $\bm{i}$ and $\mathcal{I}_w(r_1)=\{c\}$.
We know $r_1<s$, because (\ref{eqn:vectorexpansion}) implies that $p_{s'}\neq q_{s'}$ for another $s'<s$ with $i_{s'}=i_s$, and by multiplicity-freeness $\mathcal{I}_w(s')=\{c\}$.
We wish to find an interval $[r,s]\subseteq [r_1,s]$ such that $r<s$ and the following two conditions hold:
\begin{itemize}
\item[(i)] \label{itm:i} If $v\geq r$ and $i_v$ occurs multiple times in $\bm{i}$, then any other $v'$ with $i_v=i_{v'}$ will satisfy $v'\geq r$.
\item[(ii)] \label{itm:ii} For every $j$ with $r<j<s$ and $i_j$ occurring only once in $\bm{i}$, there are $t$ and $u$ with $r\leq t<j<u\leq s$ such that $i_t=i_u$.
\end{itemize}
We first show that (i) holds for $[r_1,s]$. Note that if $i_v$ occurs multiple times in $\bm{i}$ and $r_1\leq v\leq s$, then it must be that $\mathcal{I}_w(v)=\{c\}$
by the fact that the orthodontia construction records all missing teeth needed to eliminate one column before moving on to the next column.
If $i_{v'}=i_{v}$, then $\mathcal{I}_w(v')=\{c\}$ also, by multiplicity-freeness of $w$. The choice of $r_1$ implies $r_1\leq v'$.
If $i_v$ occurs multiple times in $\bm{i}$ with $s<v$ and $\mathcal{I}_w(v)=\{c\}$, then the choice of $r_1$ again implies that $r_1\leq v'$ for any $v'$ with $i_{v'}=i_v$.
If $i_v$ occurs multiple times in $\bm{i}$ with $s<v$ and $\mathcal{I}_w(v)\neq \{c\}$, then the orthodontia construction implies that
any $v'$ with $i_v=i_{v'}$ must satisfy $s<v'$.
In particular, $r_1<v'$ as needed. Thus, (i) holds for $[r_1,s]$. If $[r_1,s]$ also satisfies (ii), then we are done.
Otherwise, assume $[r_1,s]$ does not satisfy (ii). Then there is some $j$ with $r_1<j<s$ such that $i_j$ occurs only once in $\bm{i}$ and
there are no $t$ and $u$ with $r_1\leq t<j<u\leq s$ and $i_t=i_u$. Consequently for every pair $i_u=i_t$ with $r_1\leq t<u\leq s$, it must be that either $t<u<j$ or $j<t<u$. Let $r_2$ be the smallest index such that $j<r_2$ and $i_{r_2}$ occurs multiple times in $\bm{i}$. By the choice of $j$, it is clear that the interval $[r_2,s]$ still satisfies (i). If $[r_2,s]$ also satisfies (ii), then we are done.
Otherwise, $[r_2,s]$ satisfies (i) but not (ii), and we can argue exactly as in the case of $[r_1,s]$ to find an $r_3$ such that $r_2<r_3<s$ and $[r_3,s]$ satisfies (i).
Continue working in this fashion. We show that this process terminates with an interval $[r,s]$ satisfying $r<s$, (i), and (ii).
As mentioned above, there exists $s'<s$ such that $i_{s'}=i_s$. Let $s'$ be the maximal index less than $s$ with this property.
Since all of the intervals $[r_*,s]$ will satisfy (i), it follows that $r_1<r_2<\cdots\leq s'$. At worst, the process will terminate after finitely many steps with the interval $[s',s]$. The interval $[s',s]$ will then satisfy (i) since the process reached it, and will trivially satisfy (ii) since $i_s=i_{s'}$.
Hence, we can assume that we have found an interval $[r,s]$ satisfying $r<s$, (i), and (ii). Consider the tableaux
\begin{align*}
&T_r=\widetilde{\omega}_{i_{r-1}}^{m_{r-1}}\oplus f_{i_r}^{p_r}(\widetilde{\omega}_{i_r}^{m_r}\oplus\cdots\oplus f_{i_l}^{p_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots),\quad &T_s=\widetilde{\omega}_{i_{s}}^{m_{s}}\oplus f_{i_{s+1}}^{p_{s+1}}(\widetilde{\omega}_{i_{s+1}}^{m_{s+1}}\oplus\cdots\oplus f_{i_l}^{p_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots),\\
&T'_r=\widetilde{\omega}_{i_{r-1}}^{m_{r-1}}\oplus f_{i_r}^{q_r}(\widetilde{\omega}_{i_r}^{m_r}\oplus\cdots\oplus f_{i_l}^{q_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots),\quad &T'_s=\widetilde{\omega}_{i_{s}}^{m_{s}}\oplus f_{i_{s+1}}^{q_{s+1}}(\widetilde{\omega}_{i_{s+1}}^{m_{s+1}}\oplus\cdots\oplus f_{i_l}^{q_l}(\widetilde{\omega}_{i_l}^{m_l})\cdots).
\end{align*}
By definition, $T_r,T_r'\in\mathcal{T}_w(r-1)$, so we can view $T_r$ and $T_r'$ as fillings of $O(w,r-1)$. Similarly, $T_s,T_s'\in\mathcal{T}_w(s)$, so we can view $T_s$ and $T_s'$ as fillings of $O(w,s)$. Since we chose $s$ to be the largest index such that $p_s\neq q_s$, it follows that $T_s=T'_s$. By property (i) of $[r,s]$, $i_u\neq i_v$ for any $u<r\leq v$. Hence, it must be that $wt(T_r)=wt(T'_r)$. Finally, property (ii) of $[r,s]$ allows us to apply Lemma~\ref{lem:rootoperatorproperty} and conclude that for any $a_r,a_{r+1},\ldots,a_{s}\geq 0$,
when $\widetilde{\omega}_{i_{r-1}}^{m_{r-1}}\oplus f_{i_r}^{a_r}(\widetilde{\omega}_{i_r}^{m_r}\oplus\cdots\oplus \widetilde{\omega}_{i_{s-1}}^{m_{s-1}}\oplus f_{i_s}^{a_s}(\mbox{---})\cdots)$
is applied to an element of $\mathcal{T}_w(s)$, only the entries in column $c$ are affected by the root operators $f_{i_r}^{a_r},\ldots,f_{i_s}^{a_s}$. Since
\[
T_r= \widetilde{\omega}_{i_{r-1}}^{m_{r-1}}\oplus f_{i_r}^{p_r}(\widetilde{\omega}_{i_r}^{m_r}\oplus\cdots\oplus \widetilde{\omega}_{i_{s-1}}^{m_{s-1}}\oplus f_{i_s}^{p_s}(T_s)\cdots)
\quad\mbox{and}\quad
T_r'= \widetilde{\omega}_{i_{r-1}}^{m_{r-1}}\oplus f_{i_r}^{q_r}(\widetilde{\omega}_{i_r}^{m_r}\oplus\cdots\oplus \widetilde{\omega}_{i_{s-1}}^{m_{s-1}}\oplus f_{i_s}^{q_s}(T_s')\cdots),
\]
$T_r$ and $T_r'$ must coincide outside of column $c$. Since we already deduced that $wt(T_r)=wt(T'_r)$, it follows that column $c$ of $T_r$ and $T_r'$ have the same weight. By column-strictness of $T_r$ and $T_r'$, column $c$ of $T_r$ and $T_r'$ must coincide, so $T_r=T_r'$.
To complete the proof, let $\hat{s}$ be the largest index $\hat{s}<r$ such that $p_{\hat{s}}\neq q_{\hat{s}}$. If no such index exists, then $T=T'$. Otherwise, set $\hat{r}_1$ to be the smallest index such that $i_{\hat{r}_1}$ occurs multiple times in $\bm{i}$ and $\mathcal{I}_w(\hat{r}_1)=\mathcal{I}_w(\hat{s})$.
We have $\hat{r}_1<\hat{s}$ because some other $\hat{s}'$ distinct from~$\hat{s}$ such that $p_{\hat{s}'}\neq q_{\hat{s}'}$ and $i_{\hat{s}'}=i_{\hat{s}}$ must exist as before,
and $\hat{s}'$ is also less than~$r$ by property (i) of $[r,s]$.
Use the previous algorithm to find an interval $[\hat{r},\hat{s}]\subseteq [\hat{r}_1,\hat{s}]$ satisfying $\hat{r}<\hat{s}$, (i), and (ii). Construct $T_{\hat{r}},T_{\hat{r}}',T_{\hat{s}},T'_{\hat{s}}$, and argue exactly as in the case of $[r,s]$ that $T_{\hat{r}}=T_{\hat{r}}'$.
Continuing in this manner for a finite number of steps will show that $T=T'$.
\end{proof}
As we will show in Theorem~\ref{thm:011},
it is not only sufficient but also necessary that $w$ be multiplicity-free for the Schubert polynomial $\mathfrak{S}_w$ to be zero-one.
\section{Pattern avoidance conditions for multiplicity-freeness}
\label{sec:mult-patt}
This section is devoted to showing that $w$ being multiplicity-free is equivalent to a certain pattern avoidance condition. We then prove our full characterization of zero-one Schubert polynomials.
We start with several definitions.
\begin{definition}
\label{def:confA}
We say a Rothe diagram $D=D(w)$ contains an instance of configuration $\mathrm{A}$ if there are $r_1,c_1,r_2,c_2,r_3$ such that $1\leq r_3<r_1<r_2$, $1<c_1<c_2$, $(r_1,c_1),(r_2,c_2)\in D$, $(r_1,c_2)\notin D$, and $w_{r_3}<c_1$.
\end{definition}
\begin{definition}
\label{def:confB}
We say a Rothe diagram $D=D(w)$ contains an instance of configuration $\mathrm{B}$ if there are $r_1,c_1,r_2,c_2,r_3,r_4$ such that $1\leq r_4\neq r_3<r_1<r_2$, $2<c_1<c_2$, $(r_1,c_1),(r_1,c_2),(r_2,c_2)\in D$, $w_{r_3}<c_1$, and $w_{r_4}<c_2$.
\end{definition}
\begin{definition}
\label{def:confBprime}
We say a Rothe diagram $D=D(w)$ contains an instance of configuration $\mathrm{B}'$ if there are $r_1,c_1,r_2,c_2,r_3,r_4$ such that $1\leq r_4<r_3<r_1<r_2$, $2<c_1<c_2$, $(r_1,c_1),(r_1,c_2),(r_2,c_1)\in D$, $w_{r_3}<c_1$, and $w_{r_4}<c_1$.
\end{definition}
Given a Rothe diagram $D(w)$, we will call a tuple $(r_1,c_1,r_2,c_2,r_3)$ meeting the conditions of Definition~\ref{def:confA} an instance of configuration $\mathrm{A}$ in $D(w)$. Similarly, we will call a tuple $(r_1,c_1,r_2,c_2,r_3,r_4)$ meeting the conditions of Definition~\ref{def:confB} (resp. \ref{def:confBprime}) an instance of configuration $\mathrm{B}$ (resp. $\mathrm{B}'$) in $D(w)$.
\begin{figure}
\caption{Examples of instances of the configurations $\mathrm{A}$, $\mathrm{B}$, and $\mathrm{B}'$ in Rothe diagrams. Both the hooks removed from the $n\times n$ grid to form each Rothe diagram and the remaining boxes are shown.}
\end{figure}
\begin{figure}
\caption{The Rothe diagrams of the twelve multiplicitous patterns.}
\label{fig:badpatterns}
\end{figure}
\begin{theorem}\label{thm:mult-pattern}
If $w\in S_n$ is a permutation such that $D(w)$ does not contain any instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$,
then $w$ is multiplicity-free.
\end{theorem}
Theorem~\ref{thm:011} will also imply the converse of this theorem.
\begin{proof}
We prove the contrapositive.
Assume $w$ is not multiplicity-free and let $(\bm{i},\bm{m})$ be the orthodontic sequence of $w$. Then, we can find entries $i_{p_1}=i_{p_2}$ of $\bm{i}$ with $p_1<p_2$ such that either $\mathcal{I}_w(p_1)\neq \mathcal{I}_w(p_2)$, or $\mathcal{I}_w(p_1)=\mathcal{I}_w(p_2)$ with $|\mathcal{I}_w(p_1)|>1$. We show that $D(w)$ must contain at least one instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$.\\
\noindent\emph{Case 1:} Assume that $\mathcal{I}_w(p_1)\nsubseteq\mathcal{I}_w(p_2)$ and $\mathcal{I}_w(p_2)\nsubseteq\mathcal{I}_w(p_1)$.
Take $c_1\in \mathcal{I}_w(p_1)\backslash \mathcal{I}_w(p_2)$ and $c_2\in \mathcal{I}_w(p_2)\backslash \mathcal{I}_w(p_1)$.
We show that columns $c_1$ and $c_2$ of $D(w)$ contain an instance of configuration $\mathrm{A}$.
In step $p_1$ of the orthodontia on $D(w)$, a box in column $c_1$ is moved (by the missing tooth $i_{p_1}$) to row $i_{p_1}$. Let this box originally be in row $r_1$ of $D(w)$. Analogously, let the box in column $c_2$ moved to row $i_{p_2}$ in step $p_2$ of the orthodontia (by the missing tooth $i_{p_2}$) originally be in row $r_2$ of $D(w)$.
Observe that $r_1<r_2$.
If $c_2<c_1$, then the northwest property would imply that $(r_1,c_2)\in D(w)$, contradicting that $c_2\notin \mathcal{I}_w(p_1)$. Thus $c_1<c_2$. Since $c_2\notin \mathcal{I}_w(p_1)$, $(r_1,c_2)\notin D(w)$.
Lastly, since the box $(r_1,c_1)$ is moved by the orthodontia, there is some box $(r_3,c_1)\notin D(w)$ with $r_3<r_1$. Consequently, $w_{r_3}<c_1$. Thus, $(r_1,c_1,r_2,c_2,r_3)$ is an instance of configuration $\mathrm{A}$.\\
\noindent\emph{Case 2:}
Assume $\mathcal{I}_w(p_2)$ is a proper subset of $\mathcal{I}_w(p_1)$.
Let $c_1=\max(\mathcal{I}_w(p_2))$
and $c_2=\min(\mathcal{I}_w(p_1)\backslash\mathcal{I}_w(p_2))$.
Let the box in column $c_1$ moved to row $i_{p_1}=i_{p_2}$ in step $p_1$ (resp. $p_2$) of the orthodontia originally be in row $r_1$ (resp. $r_2$) of $D(w)$. Observe that $r_1<r_2$.
Assume first that $c_1<c_2$. Since $c_1\in\mathcal{I}_w(p_1)\cap \mathcal{I}_w(p_2)$, the boxes $(r_1,c_1)$ and $(r_2,c_2)$ both move weakly above row $i_{p_1}$ in the orthodontia. Then,
we can find indices $r_3,r_4$ with $r_4<r_3<r_1$ such that $(r_3,c_1),(r_4,c_1)\notin D(w)$. Hence, $w_{r_3}<c_1$ and $w_{r_4}<c_1$, so $(r_1,c_1,r_2,c_2,r_3,r_4)$ is an instance of configuration $\mathrm{B}'$.
Otherwise $c_1>c_2$. Since the box $(r_1,c_2)$ is moved by the orthodontia, we can find $r_3<r_1$ with $(r_3,c_2)\notin D(w)$. Then $w_{r_3}<c_2$. As we are assuming $c_2<c_1$, $(r_3,c_1)\notin D(w)$ also. Since the boxes $(r_1,c_1)$ and $(r_2,c_1)$ in $D(w)$ are moved weakly above row $i_{p_1}$ by the orthodontia, we can find some $r_4<r_1$ with $r_4\neq r_3$ such that $(r_4,c_1)\notin D(w)$. Then, $w_{r_4}<c_1$, so $(r_1,c_2,r_2,c_1,r_3,r_4)$ is an instance of configuration $\mathrm{B}$.\\
\noindent\emph{Case 3:}
Assume $\mathcal{I}_w(p_1)$ is a proper subset of $\mathcal{I}_w(p_2)$.
This case is handled similarly to Case 2.
Let $c_1=\max(\mathcal{I}_w(p_1))$ and $c_2=\min(\mathcal{I}_w(p_2)\backslash\mathcal{I}_w(p_1))$.
Let the box in column $c_1$ moved to row $i_{p_1}=i_{p_2}$ in step $p_1$ (resp. $p_2$) of the orthodontia originally be in row $r_1$ (resp. $r_2$) of $D(w)$. Observe that $r_1<r_2$.
Assume $c_1<c_2$. Since the boxes $(r_1,c_1)$ and $(r_2,c_1)$ of $D(w)$ are moved weakly above row $i_{p_1}$ by the orthodontia, we can find indices $r_3,r_4$ with $r_4<r_3<r_1$ such that $(r_3,c_1),(r_4,c_1)\notin D(w)$. Then, $w_{r_3}<c_1$ and $w_{r_4}<c_1$.
Since $c_2\notin \mathcal{I}_w(p_1)$,
$(r_1,c_2)\notin D(w)$. Then, $(r_1,c_1,r_2,c_2,r_3)$ is an instance of configuration $\mathrm{A}$.
Otherwise $c_1>c_2$.
As $c_2\notin \mathcal{I}_w(p_1)$,
$(r_1,c_2)\notin D(w)$. Since $(r_2,c_2),(r_1,c_1)\in D(w)$, this is a contradiction of the northwest property of $D(w)$.\\
\noindent\emph{Case 4:}
Assume $\mathcal{I}_w(p_1)=\mathcal{I}_w(p_2)$ is not a singleton.
Let $c_1,c_2\in \mathcal{I}_w(p_1)$ with $c_1<c_2$.
Let the box in column $c_1$ moved to row $i_{p_1}=i_{p_2}$ in step $p_1$ (resp. $p_2$) of the orthodontia originally be in row $r_1$ (resp. $r_2$) of $D(w)$. Observe that $r_1<r_2$. Since the boxes $(r_1,c_1)$ and $(r_2,c_1)$ in $D(w)$ are moved weakly above row $i_{p_1}$ by the orthodontia, we can find indices $r_3,r_4$ with $r_4<r_3<r_1$ such that $(r_3,c_1),(r_4,c_1)\notin D(w)$. Then, $w_{r_3}<c_1$ and $w_{r_4}<c_1$. Thus, $(r_1,c_1,r_2,c_2,r_3,r_4)$ is an instance of configuration $\mathrm{B}'$.
\end{proof}
We now relate multiplicity-freeness to pattern avoidance of permutations. We begin by clarifying our pattern avoidance terminology.
A \emph{pattern} $\sigma$ of \emph{length}~$n$ is a permutation in~$S_n$.
The length $n$ is a crucial part of the data of a pattern;
we make no identifications between patterns of different lengths, unlike what is usual when handling permutations in the Schubert calculus.
A permutation $w$ \emph{contains} $\sigma$ if $w$ has $n$ entries $w_{j_1},\ldots, w_{j_n}$ with $j_1<j_2<\cdots<j_n$ that are in the same relative order as $\sigma_1,\sigma_2,\ldots,\sigma_n$. In this case, the indices $j_1<j_2<\cdots<j_n$ are called a \emph{realization} of $\sigma$ in $w$. We say that $w$ \emph{avoids} the pattern $\sigma$ if $w$ does not contain $\sigma$.
To illustrate the dependence of these definitions on $n$, note that $w=154623$ contains the pattern 132, but not the pattern 132456.
The following easy lemma gives a diagrammatic interpretation of pattern avoidance.
\begin{lemma}
\label{lem:patterndiagram}
Let $w\in S_n$ be a permutation and $\sigma$ a pattern of length~$m$ contained in $w$. Choose a realization
$j_1<j_2<\cdots<j_{m}$ of $\sigma$ in $w$.
Then $D(\sigma)$ is obtained from $D(w)$ by deleting the rows $[n]\backslash\{j_1,\ldots,j_{m} \}$ and the columns $[n]\backslash\{w_{j_1},\ldots,w_{j_{m}} \}$,
and reindexing the remaining rows and columns by~$[m]$, preserving their order.
\end{lemma}
\begin{definition}
The \emph{multiplicitous patterns} are those in the set
\[\mathrm{MPatt}=\{12543, 13254, 13524, 13542, 21543, 125364, 125634, 215364, 215634, 315264, 315624, 315642\}.\]
\end{definition}
\begin{theorem}
\label{thm:badpatterns}
Let $w\in S_n$. Then $D(w)$ does not contain any instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$ if and only if $w$ avoids all of the multiplicitous patterns.
\end{theorem}
\begin{proof}
It is easy to check (see Figure~\ref{fig:badpatterns}) that each of the twelve multiplicitous patterns contains an instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$. Lemma~\ref{lem:patterndiagram} implies that if $w$ contains $\sigma\in\mathrm{MPatt}$, then deleting some rows and columns from $D(w)$ yields $D(\sigma)$. Since $D(\sigma)$ contains at least one instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$, so does $D(w)$.
Conversely, assume $D(w)$ contains at least one instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$. We must show that $w$ contains some multiplicitous pattern.
Let $\tau^1,\tau^2,\ldots,\tau^n$ be the $n$ patterns of length~$n-1$ contained in $w$;
say $\tau^j$ is realized in $w$ by forgetting $w_j$.
Without loss of generality, we may assume none of $D(\tau^1),\ldots,D(\tau^n)$ contain an instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$: if $D(\tau^j)$ does contain an instance of one of these configurations, replace $w$ by $\tau^j$ and iterate.
For each $j$, $D(\tau^j)$ is obtained from $D(w)$ by deleting row $j$ and column $w_j$.
Since $D(\tau^j)$ does not contain any instance of any of our three configurations,
each cross $\{(j,q) \mid (j,q)\in D(w) \}\cup \{(p,w_j) \mid (p,w_j)\in D(w)\}$ intersects each instance of every configuration contained in $D(w)$. However, an instance of configuration $\mathrm{A}$ involves only three rows and two columns, and an instance of $\mathrm{B}$ or $\mathrm{B}'$ involves only four rows and two columns. Thus, it must be that $w\in S_n$ for some $n\leq 6$.
It can be checked by exhaustion that the only permutations in $S_n$ with $n\leq 6$ that are minimal (with respect to pattern avoidance) among those whose Rothe diagrams contain an instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$ are the twelve multiplicitous patterns.
\end{proof}
We are now ready to state our full characterization of zero-one Schubert polynomials,
and most of the elements of the proof are at hand.
\begin{theorem} \label{thm:011}
The following are equivalent:
\begin{itemize}
\item[(i)] The Schubert polynomial $\mathfrak{S}_w$ is zero-one.
\item[(ii)] The permutation $w$ is multiplicity-free.
\item[(iii)] The Rothe diagram $D(w)$ does not contain any instance of configuration $\mathrm{A}$, $\mathrm{B}$, or $\mathrm{B}'$.
\item[(iv)] The permutation $w$ avoids the multiplicitous patterns, namely $12543$, $13254$, $13524$, $13542$, $21543$, $125364$, $125634$, $215364$, $215634$, $315264$, $315624$, and $315642$.
\end{itemize}
\end{theorem}
\begin{proof}
Theorem~\ref{thm:multfree} shows $(ii)\Rightarrow(i)$. Theorem~\ref{thm:mult-pattern} shows $(iii)\Rightarrow(ii)$. Theorem~\ref{thm:badpatterns} shows $(iii)\Leftrightarrow(iv)$. The implication $(i)\Rightarrow(iv)$ will follow immediately from Corollary~\ref{cor:pattern}, since the Schubert polynomials associated to the permutations $12543$, $13254$, $13524$, $13542$, $21543$, $125364$, $125634$, $215364$, $215634$, $315264$, $315624$, and $315642$ each have a coefficient equal to 2. We prove Corollary~\ref{cor:pattern} in the next section.
\end{proof}
\section{A coefficient-wise inequality for dual characters of flagged Weyl modules of diagrams}
\label{sec:trans}
The aim of this section is to prove a generalization of Theorem~\ref{thm:pattern}, namely, Theorem~\ref{thm:pattern2}.
We now explain the necessary background and terminology for Theorem~\ref{thm:pattern2} and its proof.
Let $G=\mathrm{GL}(n,\mathbb{C})$ be the group of $n\times n$ invertible matrices over $\mathbb{C}$ and $B$ be the subgroup of $G$ consisting of the $n\times n$ upper-triangular matrices. The flagged Weyl module is a representation $\mathcal{M}_D$ of $B$ associated to a diagram $D$. The dual character of $\mathcal{M}_D$ has been shown in certain cases to be a Schubert polynomial \cite{KP} or a key polynomial \cite{flaggedLRrule}. We will use the construction of $\mathcal{M}_D$ in terms of determinants given in \cite{magyar}.
Denote by $Y$ the $n\times n$ matrix with indeterminates $y_{ij}$ in the upper-triangular positions $i\leq j$ and zeros elsewhere. Let $\mathbb{C}[Y]$ be the polynomial ring in the indeterminates $\{y_{ij}\}_{i\leq j}$. Note that $B$ acts on $\mathbb{C}[Y]$ on the right via left translation: if $f(Y)\in \mathbb{C}[Y]$, then a matrix $b\in B$ acts on $f$ by $f(Y)\cdot b=f(b^{-1}Y)$. For any $R,S\subseteq [n]$, let $Y_S^R$ be the submatrix of $Y$ obtained by restricting to rows $R$ and columns $S$.
For $R,S\subseteq [n]$, we say $R\leq S$ if $\#R=\#S$ and the $k$\/th least element of $R$ does not exceed the $k$\/th least element of $S$ for each $k$. For any diagrams $C=(C_1,\ldots, C_n)$ and $D=(D_1,\ldots, D_n)$, we say $C\leq D$ if $C_j\leq D_j$ for all $j\in[n]$.
\begin{definition}
For a diagram $D=(D_1,\ldots, D_n)$, the \emph{flagged Weyl module} $\mathcal{M}_D$ is defined by
\[\mathcal{M}_D=\mathrm{Span}_\mathbb{C}\left\{\prod_{j=1}^{n}\det\left(Y_{D_j}^{C_j}\right)\ \middle|\ C\leq D \right\}. \]
$\mathcal{M}_D$ is a $B$-module with the action inherited from the action of $B$ on $\mathbb{C}[Y]$.
\end{definition}
Note that since $Y$ is upper-triangular, the condition $C\leq D$ is technically unnecessary since $\det\left(Y_{D_j}^{C_j}\right)=0$ unless $C_j\leq D_j$. Conversely, if $C_j\leq D_j$, then $\det\left(Y_{D_j}^{C_j}\right)\neq 0$.
For any $B$-module $N$, the \emph{character} of $N$ is defined by $\mathrm{char}(N)(x_1,\ldots,x_n)=\mathrm{tr}\left(X:N\to N\right)$ where $X$ is the diagonal matrix $\mathrm{diag}(x_1,x_2,\ldots,x_n)$ with diagonal entries $x_1,\ldots,x_n$, and $X$ is viewed as a linear map from $N$ to $N$ via the $B$-action. Define the \emph{dual character} of $N$ to be the character of the dual module $N^*$:
\begin{align*}
\mathrm{char}^*(N)(x_1,\ldots,x_n)&=\mathrm{tr}\left(X:N^*\to N^*\right) \\
&=\mathrm{char}(N)(x_1^{-1},\ldots,x_n^{-1}).
\end{align*}
A special case of dual characters of flagged Weyl modules of diagrams are Schubert polynomials:
\begin{theorem}[\cite{KP}]
\label{thm:kp}
Let $w$ be a permutation, $D(w)$ be the Rothe diagram of $w$, and $\mathcal{M}_{D(w)}$ be the associated flagged Weyl module. Then,
\[\mathfrak{S}_w = \mathrm{char}^*\mathcal{M}_{D(w)}. \]
\end{theorem}
Another special family of dual characters of flagged Weyl modules of diagrams, for so-called skyline diagrams of compositions, are key polynomials \cite{keypolynomials}.
\begin{definition}
For a diagram $D\subseteq [n]\times [n]$, let $\chi_D=\chi_D(x_1,\ldots,x_n)$ be the dual character
\[\chi_D=\mathrm{char}^*\mathcal{M}_D. \]
\end{definition}
We now work towards proving Theorem~\ref{thm:pattern2}.
We start by reviewing some material from \cite{FMS} for the reader's convenience.
We then derive several lemmas that simplify the proof of Theorem~\ref{thm:pattern2}.
\begin{theorem}[cf. {\cite[Theorem 7]{FMS}}]
For any diagram $D\subseteq [n]\times [n]$, the monomials appearing in $\chi_D$ are exactly
\[\left\{\prod_{j=1}^{n}\prod_{i\in C_j}x_i\ \middle|\ C\leq D \right\}.\]
\end{theorem}
\begin{proof}
(Following that of \cite[Theorem 7]{FMS}) Denote by $X$ the diagonal matrix $\mathrm{diag}(x_1,x_2,\ldots,x_n)$. First, note that $y_{ij}$ is an eigenvector of $X$ with eigenvalue $x_i^{-1}$. Take a diagram $C=(C_1,\ldots,C_n)$ with $C\leq D$. Then, the element $\prod_{j=1}^{n}\det\left(Y_{D_j}^{C_j}\right)$ is an eigenvector of $X$ with eigenvalue $\prod_{j=1}^{n}\prod_{i\in C_j}x_i^{-1}$. Since $\mathcal{M}_D$ is spanned by elements $\prod_{j=1}^{n}\det\left(Y_{D_j}^{C_j}\right)$ and each is an eigenvector of $X$, the monomials appearing in the dual character $\chi_D$ are exactly
$\left\{\prod_{j=1}^{n}\prod_{i\in C_j}x_i\ \middle|\ C\leq D \right\}$.
\end{proof}
\begin{corollary}
\label{cor:fms}
Let $D\subseteq [n]\times [n]$ be a diagram. Fix any diagram $C^{(1)}\leq D$ and set \[\bm{m}=\prod_{j=1}^{n}\prod_{i\in C^{(1)}_j}x_i.\]
Let $C^{(1)}, \ldots, C^{(r)}$ be all the diagrams $C$ such that $C\leq D$ and $\prod_{j=1}^{n}\prod_{i\in C_j}x_i=\bm{m}$. Then, the coefficient of $\bm{m}$ in $\chi_D$ is equal to
\[\dim \left(\mathrm{Span}_\mathbb{C}\left\{\prod_{j=1}^{n}\det\left(Y_{D_j}^{C^{(i)}_j}\right) \ \middle|\ i\in [r] \right\}\right).\]
\end{corollary}
\begin{proof}
The coefficient of $\bm{m}$ in $\chi_D$ equals the dimension of the eigenspace of $\bm{m}^{-1}$ in $\mathcal{M}_D$ ($\bm{m}^{-1}$ occurs here instead of $\bm{m}$ since $\chi_D$ is the dual character of $\mathcal{M}_D$). This eigenspace equals
\[\mathrm{Span}_{\mathbb{C}}\left\{\prod_{j=1}^{n}\det\left(Y_{D_j}^{C^{(i)}_j}\right) \ \middle|\ i\in [r] \right\},\] so the result follows.
\end{proof}
The understanding of the coefficients of the monomials of $\chi_D$ given in Corollary~\ref{cor:fms} is key to our proof of Theorem~\ref{thm:pattern2}. We set up some notation now.
Given diagrams $C,D\subseteq [n]\times[n]$ and $k,l\in [n]$, let ${\widehat{C}}$ and ${\widehat{D}}$ denote the diagrams obtained from $C$ and $D$ by removing any boxes in row $k$ or column $l$.
Fix a diagram $D$. For each diagram ${\widehat{C}}$, let
\[{{\widehat{C}}}_{\mathrm{aug}}={\widehat{C}} \cup \{(k,i) \mid (k,i)\in D\} \cup \{(i,l) \mid (i,l) \in D \}\subseteq [n]\times [n].\]
The following lemma is immediate and its proof is left to the reader.
\begin{lemma}
\label{lem:c}
Let $C, D \subseteq [n]\times[n]$ be diagrams and $k,l\in [n]$. If ${\widehat{C}}\leq {\widehat{D}}$, then ${{\widehat{C}}}_{\mathrm{aug}}\leq D$.
In particular, every diagram $C'\leq {\widehat{D}}$ with no boxes in row $k$
can be obtained from some diagram $C\leq D$ by removing any boxes in row $k$ or column $l$ from $C$.
\end{lemma}
The following result is our key lemma.
For a polynomial $f\in\mathbb{Z}[x_1,\ldots,x_n]$ and a monomial $\bm{m}$, let $[\bm{m}]f$ denote the coefficient of $\bm{m}$ in $f$.
\begin{lemma}
\label{lem:keylemma}
Fix a diagram $D$ and $k,l\in [n]$. Let $\{\widehat{C}^{(i)}\}_{i\in [m]}$ be a set of diagrams with $\widehat{C}^{(i)}\leq \widehat{D}$ for each $i$, and denote $\widehat{C}^{(i)}_{\mathrm{aug}}$ by $C^{(i)}$ for $i\in[m]$. If the polynomials $\displaystyle\left\{\prod_{j\in [n]}\det\left(Y_{D_j}^{{C^{(i)}_j}}\right)\right\}_{i \in [m]}$ are linearly dependent, then so are the polynomials $\displaystyle\left\{\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{{\widehat{D}}_j}^{\widehat{C}^{(i)}_j}\right)\right\}_{i \in [m]}$.
\end{lemma}
\begin{proof}
We are given that
\begin{align}\label{eq:proof-1} \sum_{i\in [m]}{c_{i}}\prod_{j\in [n]}\det\left(Y_{D_j}^{C^{(i)}_j}\right)=0\end{align} for some constants $(c_{i})_{i \in [m]}\in \mathbb{C}^m$ not all zero. Since ${C^{(i)}}=\widehat{C}^{(i)}_{\mathrm{aug}}$ for $\widehat{C}^{(i)}\leq {\widehat{D}}$ we have that ${C_l^{(i)}}=D_l$ for every $i \in [m]$. Thus, \eqref{eq:proof-1} can be rewritten as
\begin{align*}\det\left(Y_{D_l}^{{D_l}}\right)\left(\sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)\right)=0.\end{align*}
However, since $\det\left(Y_{D_l}^{{D_l}}\right)\neq 0$, we conclude that
\begin{align}\label{eq:proof-2} \sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)=0.\end{align}
First consider the case that the only boxes of $D$ in row $k$ or column $l$ are those in $D_l$. If this is the case then
\begin{align*}
\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{{\widehat{D}}_j}^{{\widehat{C}^{(i)}}_j}\right)=\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)
\end{align*}
for each $i \in [m]$.
Therefore,
\begin{align}\label{eq:proof-3} \sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{{\widehat{D}}_j}^{{\widehat{C}^{(i)}}_j}\right)=\sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right).\end{align}
Combining \eqref{eq:proof-2} and \eqref{eq:proof-3} we obtain that the polynomials $\left\{\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{{\widehat{D}}_j}^{\widehat{C}^{(i)}_j}\right)\right\}_{i \in [m]}$ are linearly dependent, as desired.
Now, suppose that there are boxes of $D$ in row $k$ that are not in $D_l$. Let $j_1<\cdots<j_p$ be all indices $j \neq l$ such that $D_j={\widehat{D}}_j \cup \{k\}$.
Then also $C^{(i)}_{j_q}=\widehat{C}^{(i)}_{j_q} \cup \{k\}$ for each $i \in [m]$ and $q\in [p]$.
View the left-hand side of \eqref{eq:proof-2} as a polynomial in $y_{kk}$. Then, \eqref{eq:proof-2} implies that the coefficient of $y_{kk}^p$ is $0$:
\begin{align}\label{eq:proof-4} [y_{kk}^p]\sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)=0.\end{align}
On the other hand, the determinants $\det\left(Y_{D_j}^{C^{(i)}_j}\right)$ involve $y_{kk}$ exactly when $j=j_q$ for some $q\in[p]$. In this case, applying Laplace expansion along the row of $Y_{D_{j_q}}^{C^{(i)}_{j_q}}$ containing $y_{kk}$ implies
\begin{align*}
[y_{kk}]\det\left(Y_{D_{j_q}}^{C^{(i)}_{j_q}}\right) = \xi_{i,q}\det\left(Y_{\widehat{D}_{j_q}}^{\widehat{C}^{(i)}_{j_q}}\right)
\end{align*}
with $\xi_{i,q}\in\{1,-1 \}$. Thus,
\begin{align*}
[y_{kk}^p]\sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)
&=\sum_{i\in [m]}{c_{i}}\left([y_{kk}^p]\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)\right)\\
&=\sum_{i\in [m]}{c_{i}}\left(\prod_{j\in [n]\backslash \{l,j_1,\ldots,j_p\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)\right)\left([y_{kk}^p]\prod_{q\in [p]}\det\left(Y_{D_{j_q}}^{C^{(i)}_{j_q}}\right)\right)\\
&=\sum_{i\in [m]}{c_{i}}\left(\prod_{j\in [n]\backslash \{l,j_1,\ldots,j_p\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)\right)\left(\prod_{q\in [p]}[y_{kk}]\det\left(Y_{D_{j_q}}^{C^{(i)}_{j_q}}\right)\right)\\
&=\sum_{i\in [m]}{c_{i}}\left(\prod_{j\in [n]\backslash \{l,j_1,\ldots,j_p\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)\right)\left(\prod_{q\in [p]}\xi_{i,q}\det\left(Y_{\widehat{D}_{j_q}}^{\widehat{C}^{(i)}_{j_q}}\right)\right).
\end{align*}
Since $\widehat{C}_j^{(i)}=C_j^{(i)}$ and $\widehat{D}_j=D_j$ whenever $j\neq l,j_1,\ldots,j_p$, we obtain
\begin{align*}
[y_{kk}^p]\sum_{i\in [m]}{c_{i}}\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{D_j}^{C^{(i)}_j}\right)
&=\sum_{i\in [m]}{c_{i}}\left(\prod_{j\in [n]\backslash \{l,j_1,\ldots,j_p\}}\det\left(Y_{\widehat{D}_j}^{\widehat{C}^{(i)}_j}\right)\right)\left(\prod_{q\in [p]}\xi_{i,q}\det\left(Y_{\widehat{D}_{j_q}}^{\widehat{C}^{(i)}_{j_q}}\right)\right)\\
&=\sum_{i\in [m]}{c_{i}}\left(\prod_{q\in [p]}\xi_{i,q}\right)
\left(\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{\widehat{D}_j}^{\widehat{C}^{(i)}_j}\right)\right).
\end{align*}
Setting $c_i'=c_i\prod_{q\in [p]}\xi_{i,q}$ and applying (\ref{eq:proof-4}) yields the dependence relation
\begin{align*}
\sum_{i\in [m]}c_{i}'
\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{\widehat{D}_j}^{\widehat{C}^{(i)}_j}\right)=0.
\end{align*}
This implies the polynomials $\displaystyle\left\{\prod_{j\in [n]\backslash \{l\}}\det\left(Y_{{\widehat{D}}_j}^{\widehat{C}^{(i)}_j}\right)\right\}_{i \in [m]}$ are linearly dependent, concluding the proof.
\end{proof}
We now state and prove Theorem~\ref{thm:pattern} and its generalization Theorem~\ref{thm:pattern2}.
\begin{theorem}
\label{thm:pattern2}
Fix a diagram $D\subseteq [n]\times [n]$ and let ${\widehat{D}}$ be the diagram obtained from $D$ by removing any boxes in row $k$ or column $l$. Then
\begin{align*}
\chi_D(x_1, \ldots, x_n)=M(x_1, \ldots, x_n) \chi_{{\widehat{D}}}(x_{1}, \ldots,x_{k-1},0,x_{k+1},\ldots, x_{n})+F(x_1, \ldots, x_n),
\end{align*}
where $F(x_1,\ldots,x_n) \in \mathbb{Z}_{\geq 0}[x_1, \ldots, x_n]$ and
\[M(x_1, \ldots, x_n) = \left(\prod_{(k,i)\in D}{x_k}\right)\left(\prod_{(i,l)\in D}{x_i} \right).\]
\end{theorem}
\begin{proof}
Let $M=M(x_1,\ldots,x_n)$.
We must show that $[M\bm{m}]\chi_D\geq [\bm{m}]\chi_{{\widehat{D}}}$ for each monomial $\bm{m}$ of $\chi_{{\widehat{D}}}$ not divisible by $x_k$.
Let $C^{(1)}, \ldots, C^{(r)}$ be all the diagrams $C$ such that $C\leq D$ and $\prod_{j=1}^{n}\prod_{i\in C_j}x_i=M\bm{m}$. By Corollary~\ref{cor:fms},
\[[M\bm{m}]\chi_D=\dim\left(\mathrm{Span}_{\mathbb{C}} \left\{\prod_{j=1}^{n}\det\left(Y_{D_j}^{C^{(i)}_j}\right) \ \middle|\ i\in [r] \right\}\right).\]
Let $1,2,\ldots,q$ be the indices of the distinct diagrams among $\widehat{C}^{(1)}, \ldots, \widehat{C}^{(r)}$ such that $\widehat{C}^{(j)}\leq \widehat{D}$ for $j\in [q]$.
By Lemma~\ref{lem:c}, $\widehat{C}^{(1)}, \ldots, \widehat{C}^{(q)}$ are all the diagrams $C$ such that $C\leq \widehat{D}$ and $\prod_{j=1}^{n}\prod_{i\in C_j}x_i=\bm{m}$,
as no diagram with this dual eigenvalue can have a box in row~$k$.
So Corollary~\ref{cor:fms} implies that
\[[\bm{m}]\chi_{\widehat{D}}=\dim\left(\mathrm{Span}_{\mathbb{C}} \left\{\prod_{j=1}^{n}\det\left(Y_{\widehat{D}_j}^{\widehat{C}^{(i)}_j}\right) \ \middle|\ i\in [q] \right\}\right).\]
Finally, Lemma~\ref{lem:keylemma} implies that
\[
\dim\left(\mathrm{Span}_{\mathbb{C}} \left\{\prod_{j=1}^{n}\det\left(Y_{D_j}^{C^{(i)}_j}\right) \ \middle|\ i\in [r] \right\}\right)\geq
\dim\left(\mathrm{Span}_{\mathbb{C}} \left\{\prod_{j=1}^{n}\det\left(Y_{\widehat{D}_j}^{\widehat{C}^{(i)}_j}\right) \ \middle|\ i\in [q] \right\}\right),
\]
so $[M\bm{m}]\chi_D\geq [\bm{m}]\chi_{{\widehat{D}}}$ for each monomial $\bm{m}$ of $\chi_{{\widehat{D}}}$ not divisible by $x_k$; that is
\[\chi_D(x_1, \ldots, x_n)-M\chi_{{\widehat{D}}}(x_{1}, \ldots,x_{k-1},0,x_{k+1},\ldots, x_{n})\in \mathbb{Z}_{\geq 0}[x_1,\ldots,x_n].\]
\end{proof}
\begin{namedtheorem}[\ref{thm:pattern}]
Fix $w \in S_n$ and let $\sigma \in S_{n-1}$ be the pattern with Rothe diagram $D(\sigma)$ obtained by removing row $k$ and column $w_k$ from $D(w)$. Then
\begin{align*}
\mathfrak{S}_{w}(x_1, \ldots, x_n)=M(x_1, \ldots, x_n) \mathfrak{S}_{\sigma}(x_{1}, \ldots, \widehat{x_k}, \ldots, x_{n})+F(x_1, \ldots, x_n),
\end{align*}
where $F\in \mathbb{Z}_{\geq 0}[x_1, \ldots, x_n]$ and
\[M(x_1, \ldots, x_n) = \left(\prod_{(k,i)\in D(w)}{x_k}\right)\left(\prod_{(i,w_k)\in D(w)}{x_i} \right).\]
\end{namedtheorem}
\begin{proof}
Specialize Theorem~\ref{thm:pattern2} to the case that $D$ is a Rothe diagram $D(w)$ and $l=w_k$. The dropping of $x_k$ is due to reindexing, since the entirety of row $k$ and column $w_k$ of $D(w)$ is removed to obtain $D(\sigma)$, not just the boxes in row $k$ and column $w_k$.
\end{proof}
\begin{corollary}
\label{cor:pattern}
Fix $w \in S_n$ and let $\sigma\in S_m$ be any pattern contained in $w$. If $k$ is a coefficient of a monomial in $\mathfrak{S}_\sigma$, then $\mathfrak{S}_w$ contains a monomial with coefficient at least $k$.
\end{corollary}
\begin{proof}
Immediate consequence of repeated applications of Theorem~\ref{thm:pattern}.
\end{proof}
\section*{Acknowledgments} We are grateful to Sara Billey and Allen Knutson for many discussions about Schubert polynomials. We thank the Institute for Advanced Study for providing a hospitable environment for our collaboration. Many thanks to Arthur Tanjaya for his careful reading.
\end{document} |
\begin{document}
\allowdisplaybreaks
\newcommand{2110.07042}{2110.07042}
\renewcommand{113}{113}
\FirstPageHeading
\ShortArticleName{Orthogonal Polynomial Stochastic Duality Functions for Multi-Species SEP$(2j)$}
\ArticleName{Orthogonal Polynomial Stochastic Duality Functions\\ for Multi-Species SEP$\boldsymbol{(2j)}$ and Multi-Species IRW}
\Author{Zhengye ZHOU}
\AuthorNameForHeading{Z.~Zhou}
\Address{Department of Mathematics, Texas A$\&$M University, College Station, TX 77840, USA} \Email{\href{mailto:[email protected]}{[email protected]}}
\ArticleDates{Received October 16, 2021, in final form December 24, 2021; Published online December 26, 2021}
\Abstract{We obtain orthogonal polynomial self-duality functions for multi-species version of the symmetric exclusion process (SEP$(2j)$) and the independent random walker process (IRW) on a finite undirected graph. In each process, we have $n>1$ species of particles. In addition, we allow up to $2j$ particles to occupy each site in the multi-species SEP$(2j)$. The duality functions for the multi-species SEP$(2j)$ and the multi-species IRW come from unitary intertwiners between different $*$-representations of the special linear Lie algebra $\mathfrak{sl}_{n+1}$ and the Heisenberg Lie algebra $\mathfrak{h}_n$, respectively. The analysis leads to multivariate Krawtchouk polynomials as orthogonal duality functions for the multi-species SEP$(2j)$ and homogeneous products of Charlier polynomials as orthogonal duality functions for the multi-species IRW.}
\Keywords{orthogonal duality; multi-species SEP$(2j)$; multi-species IRW}
\Classification{60K35}
\section{Introduction} In recent years, stochastic duality has been used as a powerful tool in the study of stochastic processes (see, e.g., \cite{Borodin_2014,corwin2016asepqj,kuan2019stochastic,zhou2020hydrodynamic}). More recently, orthogonal stochastic dualities were derived for some classical interacting particle systems. For instance, the independent random walker process (IRW) is self-dual with respect to Charlier polynomials \cite{Carinci_2019,Groenevelt_2018}, and the symmetric exclusion process (SEP$(2j)$) is self-dual with respect to Krawtchouk polynomials \cite{Carinci_2019,Groenevelt_2018}. Orthogonal polynomial duality functions turn out to be useful in applications as they form a convenient orthogonal basis in a suitable space of the systems' observables. There are many applications of orthogonal duality functions (see, e.g., \cite{Ayala_2018,ayala2020higher,chen2021higher}). In a series of previous works, several ways to find orthogonal dualities were introduced. In \cite{franceschini2017stochastic}, the two-term recurrence relations of orthogonal polynomials were used, a method via generating functions was used in \cite{2018}, while Lie algebra representations and unitary intertwiners were used in~\cite{Groenevelt_2018}. In addition, two more approaches were described in \cite{Carinci_2019}. The first approach is based on unitary symmetries, while the other is based on scalar products of classical duality functions. In this paper, we make use of the method introduced in~\cite{Groenevelt_2018}.
We first study a model of interacting particle systems with symmetric jump rates. The multi-species symmetric exclusion process is a generalization of the SEP$(2j)$ to multi-species systems, where we have up to $2j\in\mathbb{N}$ particles allowed for each site and we have $n>1$ species of particles in the system. It is worth mentioning that the multi-species SEP$(2j)$ we consider is closely related to other multi-species (multi-color) exclusion processes studied over the past decades. For example, when $j=\frac{1}{2}$, it degenerates to a special case of the multi-color exclusion processes studied in \cite{Caputo2008ONTS,Dermoune2008SpectralGF}. This model also arises naturally as a special case of the multi-species ASEP$(q,j)$ studied in \cite{Kuan_2017} when $q=1$. Given the fact that the single-species SEP$(2j)$ is self-dual with respect to Krawtchouk polynomials, it is expected that similar results could be found for the multi-species SEP$(2j)$. We prove that the multi-species SEP$(2j)$ is self-dual with multivariate Krawtchouk polynomials as duality functions.
Another process we study is the multi-species independent random walker, which can be thought of as $n>1$ independent copies of IRW evolving simultaneously. Although it is straightforward to obtain the duality functions using the independence property, it is still interesting to show how the duality functions arise from representations of the nilpotent Heisenberg Lie algebra $\mathfrak{h}_n$.
The organization of this paper is as follows. In Section~\ref{section2} we give an overview of the method that we use to construct orthogonal duality functions. In Section~\ref{section3} we obtain the orthogonal duality functions for the multi-species SEP$(2j)$ and in Section~\ref{section4} for the multi-species IRW.
\section{Background}\label{section2} In this section we describe the method to obtain the orthogonal dualities which was introduced in \cite{Groenevelt_2018}. We start by recalling the definition of stochastic duality.
\begin{Definition} Two Markov processes $\mathfrak{s}_t$ and $\mathfrak{s}_t'$ on state spaces $\mathfrak{S}$ and $\mathfrak{S}'$ are dual with respect to duality function $D(\cdot,\cdot)$ on $\mathfrak{S}\times\mathfrak{S}'$ if \begin{gather*}
E_{\mathfrak{s}}[D(\mathfrak{s}_t,\mathfrak{s}')]=E'_{\mathfrak{s}'}[D(\mathfrak{s},\mathfrak{s}_t')] \qquad \text{for all} \ \mathfrak{s} \in \mathfrak{S},\ \mathfrak{s}'\in \mathfrak{S}', \ \text{and} \ t>0, \end{gather*} where $E_{\mathfrak{s}}$ denotes expectation with respect to the law of $\mathfrak{s}_t$ with $\mathfrak{s}_0=\mathfrak{s}$ and similarly for $E'_{\mathfrak{s}'}$. If~$\mathfrak{s}_t'$ is a copy of $\mathfrak{s}_t$, we say that the process $\mathfrak{s}_t$ is self-dual.
In most relevant examples, duality could also be stated at the level of Markov generators. We say that generator $L_1$ is dual to $L_2$ with respect to duality function $D(\cdot,\cdot)$ if for all~$\mathfrak{s}$ and~$\mathfrak{s}'$, \begin{gather*}
[L_1D(\cdot,\mathfrak{s}')](\mathfrak{s})=[L_2D(\mathfrak{s},\cdot)](\mathfrak{s}'). \end{gather*} If $L_1=L_2$, we have self-duality. \end{Definition}
Let $\mathfrak{g}=(\mathfrak{g},[\cdot,\cdot])$ be a complex Lie algebra with a $*$-structure, i.e., there exists an involution $*\colon X\xrightarrow[]{}X^*$ such that for any $X,Y\in \mathfrak{g}$, $a,b\in \mathbb{C}$, \begin{gather*}
(aX+bY)^*=\overline{a}X^*+\overline{b}Y^*,\qquad [X,Y]^*=[Y^*,X^*]. \end{gather*} Let $U(\mathfrak{g})$ be the universal enveloping algebra of~$\mathfrak{g}$.
Given a Hilbert space $(H,\langle \cdot,\cdot\rangle)$ and a representation $\rho$ of $\mathfrak{g}$ on $H$, we call $\rho$ a $*$-representation if for any $f,g\in H$ and any $X\in\mathfrak{g} $, \begin{gather*} \langle \rho(X)f,g\rangle =\langle f,\rho(X^*)g\rangle . \end{gather*} Suppose we have state spaces $\Omega_1$ and $\Omega_2$ of configurations on $L$ sites given by $\Omega_1=E_1\times\dots\times E_L$ and $\Omega_2=F_1\times\dots\times F_L$. Let $\mu=\mu_1\otimes\dots\otimes \mu_L$ and $\nu=\nu_1\otimes\dots\otimes \nu_L$ be product measures on~$\Omega_1$ and~$\Omega_2$.
For $1\le x\le L$, let $\rho_x$ and $\sigma_x$ be unitarily equivalent $*$-representations of a Lie algebra $\mathfrak{g}$ on $ L^2(E_x,\mu_x)$ and $ L^2(F_x,\nu_x)$, respectively. Then $\rho=\rho_1\otimes\dots\otimes \rho_L$ and $\sigma=\sigma_1\otimes\dots\otimes\sigma_L$ are $*$-representations of $\mathfrak{g}$. We assume that the corresponding unitary intertwiner $\Lambda_x\colon L^2(E_x,\mu_x)\xrightarrow{}L^2(F_x,\nu_x)$ has the following form: \begin{gather*}
(\Lambda_x f)(z_2)=\int_{E_x} f(z_1)K_x(z_1,z_2)\,{\rm d}\mu_x(z_1),\qquad \text{for $\nu_x$-almost all $z_2\in F_x$}, \end{gather*} for some kernel $K_x\in L^2(E_x\times F_x,\mu_x\otimes \nu_x)$ satisfying the relation \begin{gather*}
[\rho_x(X^*)K_x(\cdot,z_2)](z_1)=[\sigma_x(X)K_x(z_1,\cdot)](z_2), \qquad (z_1,z_2)\in E_x\times F_x,\quad X\in\mathfrak{g}. \end{gather*}
With all the above structures, the following theorem provides a way to construct duality functions. \begin{Theorem}[\cite{Groenevelt_2018}]\label{th:2.1} Suppose $L_1$ and $L_2$ are self-adjoint operators on $L^2(\Omega_1,\mu)$ and $L^2(\Omega_2,\nu)$, respectively, given by \begin{gather*}
L_1=\rho(Y),\qquad L_2=\sigma(Y), \end{gather*} for some self-adjoint $Y\in U(\mathfrak{g})^{\otimes L}$. Then $L_1$ and $L_2$ are in duality, with duality function \begin{gather*}
D(z_1,z_2)=\prod_{x=1}^L K_x(z_{1x},z_{2x}), \qquad z_1=(z_{11},\dots,z_{1L})\in \Omega_1,\qquad z_2=(z_{21},\dots,z_{2L})\in \Omega_2. \end{gather*} \end{Theorem}
\section[Multi-species SEP(2j) and Lie algebra sl\_\{n+1\}]{Multi-species SEP$\boldsymbol{(2j)}$ and Lie algebra $\boldsymbol{\mathfrak{sl}_{n+1}}$}\label{section3}
In this section, we study the multi-species version of the SEP$(2j)$ with $2j\in\mathbb{N}$ on a finite undirected graph $G=(V,E)$, where $V=\{1,\dots,L\}$ with $L\in\mathbb{N}$ and $L>2$ is the set of sites (vertices) and $E$ is the set of edges. In what follows, we write site $x\in G$ instead of mentioning~$V$ for ease of notation.
The state space $\mathcal{S}(n,2j,G)$ of particle configurations consists of variables $\xi=\big(\xi_i^x\colon 0\le i\le n$, $x\in G\big)$, where $\xi_i^x$ denotes the number of particles of species $i$ at site $x$, and \begin{gather*}
\xi^x=\big(\xi^x_0,\dots,\xi^x_n\big)\in \Omega_{2j}:=\left\{\xi=(\xi_0,\dots,\xi_n)\,\Bigg|\,\sum_{i=0}^n \xi_i=2j,\, \xi_i\ge 0\right\} \end{gather*} for any site $x\in G $. One can think of $\xi^x_0$ as the number of holes at site~$x$.
\begin{Definition}\label{def:3.1} The generator of the multi-species SEP$(2j)$ on a finite undirected graph $G=(V,E)$ is given by \begin{gather} \begin{split}
&\mathcal{L}f(\xi)=\sum_{\text{edge}\{x,y\}\in E} \mathcal{L}_{x,y} f(\xi), \\ & \mathcal{L}_{x,y}f(\xi)=\sum_{0\le k<l\le n}\xi_l^{x}\xi_k^{y}\big[f\big(\xi^{x,y}_{l,k}\big)-f(\xi)\big]+\xi_l^{y}\xi^{x}_k\big[f\big(\xi^{y,x}_{l,k}\big)-f(\xi)\big], \end{split}\label{eq:6} \end{gather} where $\xi_{l,k}^{x,y}$ denotes the particle configuration obtained by switching a particle of the $l^{\rm th}$ species at site $x$ with a particle of the $k^{\rm th}$ species at site $y$ if $\xi_{l,k}^{x,y}\in\mathcal{S}(n,2j,G)$. \end{Definition}
Note that when $n=1$, this process reduces to the single-species SEP$(2j)$ defined in \cite{Giardin__2009}.
Suppose $p=(p_0,\dots,p_n)$ is a probability distribution, the multinomial distribution on $\Omega_{2j}$ is defined as \begin{gather*}
w_p(\xi)=\binom{2j}{\xi}\prod_{i=0}^np_i^{\xi_i}, \end{gather*} where $ \binom{2j}{\xi}$ denotes the multinomial coefficient $\frac{(2j)!}{\prod_{i=0}^n\xi_i!}$. Following a simple detailed balance computation, we can show that, for any fixed probability distribution $p$, the product measure with marginals $w_p(\xi)$ is a reversible measure of the multi-species SEP$(2j)$, i.e., $\otimes_G w_p$ is a reversible measure of the multi-species SEP$(2j)$ when $p$ is the same for all sites.
\subsection[Multivariate Krawtchouk polynomials and Lie algebra sl\_\{n+1\}]{Multivariate Krawtchouk polynomials and Lie algebra $\boldsymbol{\mathfrak{sl}_{n+1}}$}\label{section3.1}
First, we introduce the $n$-variable Krawtchouk polynomials defined by Griffiths \cite{https://doi.org/10.1111/j.1467-842X.1971.tb01239.x}. We shall adopt the notation of Iliev~\cite{iliev_2012} in the following.
\begin{Definition}\label{def:1} Let $\mathcal{K}_n$ be the set of 4-tuples $\big(\nu,P,\hat{P},U\big)$ such that
$P$, $\hat{P}$, $U$ are $(n+1)\times(n+1)$ complex matrices satisfying the following conditions: \begin{enumerate}\itemsep=0pt \item[(1)] $P=\operatorname{diag}(p_0,\dots,p_n)$, $\hat{P}=\operatorname{diag}(\hat{p}_0,\dots,\hat{p}_n)$ and $p_0=\hat{p}_0=\frac{1}{\nu}\neq 0$, \item[(2)] $U=(u_{kl})_{0\le k,l\le n}$ with $u_{k0}=u_{0k}=1$ for all $0\le k \le n$, \item[(3)] $\nu PU\hat{P}U^{\rm T}=I_{n+1}$. \end{enumerate} \end{Definition}
It follows from the above definition that $p=(p_0,\dots,p_n)$ and $\hat{p}=(\hat{p}_0,\dots,\hat{p}_n)$ satisfy that $\sum_{k=0}^n p_k=\sum_{k=0}^n \hat{p}_k=1$ and $p_k,\hat{p}_k\neq 0$ for any $k$.
For all points $\kappa\in \mathcal{K}_n$, Griffiths constructed multivariate Krawtchouk polynomials using a~generating function as follows.
\begin{Definition}[\cite{https://doi.org/10.1111/j.1467-842X.1971.tb01239.x}]
For $\xi,\eta\in \Omega_{2j}$ and $\kappa\in \mathcal{K}_n$, the multivariate Krawtchouk polynomial $K(\xi,\eta,\kappa,j)$ is defined by
\begin{gather*}
\sum_{\xi\in\Omega_{2j}}\binom{2j}{\xi}K(\xi,\eta,\kappa,j)z_1^{\xi_1}\cdots z_n^{\xi_n}=\prod_{k=0}^n\bigg(1+\sum_{l=1}^nu_{kl}z_l\bigg)^{\eta_k}.
\end{gather*}
\end{Definition}
Although it is not obvious from the generating function alone, $K(\xi,\eta,\kappa,j)$ depends on~$P$ and~$\hat{P}$, because the matrix $U$ in $\kappa$ satisfies condition~(3) in Definition~\ref{def:1}.
In what follows, we fix a 4-tuple $\kappa\in \mathcal{K}_n$ as in Definition~\ref{def:1}, we also write $K(\xi,\eta,\kappa,j) $ as~$K(\xi,\eta)$ for simplicity.
In \cite{iliev_2012}, Iliev interpreted multivariate Krawtchouk polynomials with representations of the Lie algebra~$\mathfrak{sl}_{n+1}$. We recall some of the essential results. Let's start by introducing some basic notations. Let $z_0,\dots,z_n$ be mutually commuting variables, we set $z=(z_0,\dots,z_n)$. For each $\xi=(\xi_0,\dots,\xi_n)\in\Omega_{2j}$, we denote
$z^\xi=z_0^{\xi_0}z_1^{\xi_1}\cdots z_n^{\xi_n}$ and $\xi!=\xi_0!\cdots\xi_n!$. Also define $V_{2j}=\operatorname{span}\big\{z^\xi|\xi\in\Omega_{2j}\big\}\subset \mathbb{C}[z]$, which is the space consisting of all homogeneous complex polynomials of total degree $2j$.
Let $I_{n+1}$ denote the $(n+1)\times(n+1)$ identity matrix. For $0\le k,l\le n$, let $e_{k,l}$ denote the $(n+1)\times(n+1)$ matrix with $(k,l)^{\rm th}$ entry $1$ and other entries~$0$. The special linear Lie algebra of order $n+1$ denoted by $\mathfrak{sl}_{n+1}$ consists of $(n+1)\times (n+1) $ matrices with trace zero and has the Lie bracket $[X,Y]=XY-YX$. It has basis $\{e_{kl}\}_{0\le k\neq l\le n}$ and $\{h_l\}_{0< l\le n}$, where $h_l=e_{ll}-\frac{1}{n+1}I_{n+1}$.
The $*$-structure of $\mathfrak{sl}_{n+1}$ is given by \begin{gather}\label{eq:1}
e_{kl}^*=e_{lk}, \quad k\neq l , \qquad h_l^*=h_l. \end{gather}
Let ${\mathfrak {gl}}(V_{2j})$ denote the space of endomorphisms of ${V_{2j}}$, we consider the representation $\rho\colon \mathfrak{sl}_{n+1}\allowbreak \xrightarrow{} \mathfrak{gl}(V_{2j})$ defined by \begin{gather}\label{eq:3.2}
\rho e_{kl} =z_k\partial z_l,\quad k\neq l,\qquad \rho h_l=z_l\partial z_l-\frac{2j}{n+1}. \end{gather}
Next, define an antiautomorphism $\mathfrak{a}$ on $\mathfrak{sl}_{n+1}$ by $\mathfrak{a}(X)=\hat{P}X^{\rm T}\hat{P}^{-1}$. It follows easily that \begin{gather}\label{eq:3.1}
\mathfrak{a}(e_{kl})=\frac{\hat{p}_l}{\hat{p}_k}e_{lk},\quad k\neq l, \qquad
\mathfrak{a}(h_{l})=h_l. \end{gather}
We define a symmetric bilinear form $\langle\,,\,\rangle_{\kappa}$ on $V_{2j}$ by \begin{gather*} \big\langle z^\xi,z^\eta\big\rangle _{\kappa}=\delta_{\xi,\eta}\frac{\xi!}{\hat{p}^\xi}\theta^{2j}. \end{gather*}
Then it is easy to check that for any $X\in \mathfrak{sl}_{n+1}$ and $v_1,v_2\in V_{2j}$ \begin{gather}\label{eq:2} \langle \rho Xv_1,v_2\rangle _{\kappa}=\langle v_1,\rho \mathfrak{a}(X)v_2\rangle_{\kappa}. \end{gather}
Let $R$ be the matrix \begin{gather}\label{eq:4}
R=\hat{\theta}\hat{P}U^{\rm T}, \end{gather} where $\hat{\theta}\in \mathbb{C}$ such that $\det(R)=1$. Next, we define $\hat{z}=(\hat{z}_0,\dots,\hat{z}_n)$ by $\hat{z}=zR$. \begin{Lemma}\label{lemma:3.1} Define operator $\operatorname{Ad}_R$ on $\mathfrak{sl}_{n+1}$ by $\operatorname{Ad}_R(X)=R^{-1}XR$, where $R=(r_{kl})_{0\le k,l\le n}$ is defined in equation~\eqref{eq:4}. Then $\operatorname{Ad}_R$ is a Lie algebra automorphism of $\mathfrak{sl}_{n+1}$. \end{Lemma} \begin{proof}
It can be checked directly. \end{proof}
Last, we list some properties of the multivariate Krawtchouk polynomial whose proof could be found in~\cite{iliev_2012}. \begin{Proposition}[{\cite[Corollary 5.2]{iliev_2012}}]\label{prop:3.1} For $\xi,\eta\in \Omega_{2j}$, the multivariate polynomial~$K$ has the following bilinear form, \begin{gather*}
K(\xi,\eta)=\frac{p_0^{2j}}{(2j)!}\big\langle z^\xi,\hat{z}^\eta\big\rangle_{\kappa}. \end{gather*} \end{Proposition}
\begin{Proposition}[{\cite[Corollary 5.3]{iliev_2012}}]\label{prop:3.2} For $\xi,\eta,\zeta\in \Omega_{2j}$ we have the following relations: \begin{gather*}
\sum_{\xi\in\Omega_{2j}}K(\xi,\eta)K(\xi,\zeta)w_{\hat{p}}(\xi)=\frac{p_0^{2j}\delta_{\eta,\zeta}}{w_p(\eta)}, \\
\sum_{\xi\in\Omega_{2j}}K(\eta,\xi)K(\zeta,\xi)w_p(\xi)=\frac{p_0^{2j}\delta_{\eta,\zeta}}{w_{\hat{p}}(\eta)}. \end{gather*} \end{Proposition} \begin{Remark} If $U\in\kappa$ is a real matrix, then it follows from the generating function that the multivariate Krawtchouk polynomial is real valued. In this case, Proposition~\ref{prop:3.2} is the orthogonality relation for the multivariate Krawtchouk polynomial in $l^2(w_p)$ and $l^2(w_{\hat{p}})$. \end{Remark}
\subsection[Self-duality of the multi-species SEP(2j)]{Self-duality of the multi-species SEP$\boldsymbol{(2j)}$}\label{section3.2}
In this subsection, we show that the multi-species SEP($2j$) is self dual with respect to duality functions given by homogeneous products of multivariate Krawtchouk polynomials. Suppose $p$ and $\hat{p}$ in the 4-tuple $\kappa\in \mathcal{K}_n$ as in Definition~\ref{def:1} are both probability measures.
Let $l^2(w_p)$ be a Hilbert space with inner product $(f,g)_p=\sum_{\xi\in\Omega_{2j}}f(\xi)\overline{g(\xi)}w_p(\xi)$. Now we define a $*$-representation $\rho_p$ of $\mathfrak{sl}_{n+1}$ on $l^2(w_p)$ by \begin{alignat*}{3}
&\rho_p(e_{kl})f(\xi)=\sqrt{\frac{p_k}{p_l}}\xi_lf\big(\xi_{l,k}^{-1,+1}\big) \qquad &&\text{for} \ 0\le k\neq l\le n,& \\
&\rho_p(h_l)f(\xi)=\left(\xi_l-\frac{2j}{n+1}\right)f(\xi)\qquad&& \text{for} \ 0< l\le n,& \end{alignat*} where $\xi_{l,k}^{+1,-1}$ represents the variable with $\xi_l$ increased by 1 and $\xi_k$ decreased by 1 (and $\xi_{l,k}^{-1,+1}$ is defined analogously, with $\xi_l$ decreased and $\xi_k$ increased by~1). Recalling the $*$-structure defined in equation~\eqref{eq:1}, it is straightforward to check that $(\rho_p(X)f,g)_p=(f,\rho_p(X^*)g)_p$ for all $X\in \mathfrak{sl}_{n+1} $.
Next, we introduce another non-trivial $*$-representation $\sigma_p$ of $\mathfrak{sl}_{n+1}$ on $l^2(w_{p})$ that is unitarily equivalent to $\rho_{\hat{p}}$. \begin{Definition}\label{def:2} For each $\rho_{\hat{p}}$, define a corresponding representation by $\sigma_p=\rho_{\hat{p}}\circ \operatorname{Ad}_R $, where~$\operatorname{Ad}_R$ is the automorphism defined in Lemma~\ref{lemma:3.1}. \end{Definition}
\begin{Proposition}\label{prop:3.4} If the matrix $U$ in the $4$-tuple $\kappa\in \mathcal{K}_n$ is real, then the representation $\sigma_p$ defined in Definition~{\rm \ref{def:2}} is a $*$-representation of~$\mathfrak{sl}_{n+1}$ on~$l^2(w_{p})$. \end{Proposition}
\begin{proof} By the definitions of the matrices $U$ and $R$, when $U$ is a real matrix, both $R$ and $R^{-1}$ are real matrices. For ease of notation, we write $Q=R^{-1}=(q_{i,m})_{0\le i,m\le n}$. Then, computing $\sigma_p$ explicitly, we have \begin{gather}\label{eq:5.3}
\sigma_p(e_{im})f(\eta)=\sqrt{\frac{\hat{p}_i}{\hat{p}_m}}\sum_{k,l=0}^n q_{ki}r_{ml}\eta_lf\big(\eta_{k,l}^{+1,-1}\big). \end{gather}
Next we verify that for any $X\in \mathfrak{sl}_{n+1} $, $\left(\sigma_p(X) f(\eta),g(\eta)\right)_{p}= ( f(\eta),\sigma_p(X^*)g(\eta) )_{p}$. First, we plug equation~\eqref{eq:5.3} in the inner products, when $i\neq m$, \begin{gather}
\big(\sigma_p(e_{im}) f(\eta),g(\eta)\big)_{p}=\sqrt{\frac{\hat{p}_i}{\hat{p}_m}}\sum_{k,l=0}^n q_{ki}r_{ml}\big(\eta_lf\big(\eta_{k,l}^{+1,-1}\big),g(\eta)\big)_{p},\nonumber\\ \label{eq:5.1} \big( f(\eta),\sigma_p(e_{mi})g(\eta)\big)_{p}=\sqrt{\frac{\hat{p}_m}{\hat{p}_i}}\sum_{k,l=0}^n q_{km}r_{il}\big(f(\eta),\eta_lg\big(\eta_{k,l}^{+1,-1}\big)\big)_{p}. \end{gather} By switching $k$ and $l$ in equation~\eqref{eq:5.1}, we have that \begin{gather*}
\big( f(\eta),\sigma_p(e_{mi})g(\eta)\big)_{p}=\sqrt{\frac{\hat{p}_m}{\hat{p}_i}}\sum_{k,l=0}^n q_{lm}r_{ik}\big(\eta_kf(\eta),g\big(\eta_{l,k}^{+1,-1}\big)\big)_{p}. \end{gather*} Now define $\tilde{\eta}=\eta_{l,k}^{+1,-1}$, we get \begin{gather*} \big( f(\eta),\sigma_p(e_{mi})g(\eta)\big)_{p}=\sqrt{\frac{\hat{p}_m}{\hat{p}_i}}\sum_{k,l=0}^n q_{lm}r_{ik}\frac{p_k}{p_l}\big(\tilde{\eta}_lf\big(\tilde{\eta}_{l,k}^{-1,+1}\big),g(\tilde{\eta})\big)_{p}. \end{gather*}
Computing the entries of matrices $R$ and $Q$ explicitly, we have $r_{ki}=\hat{\theta}\hat{p}_{k}u_{ik}$ and $q_{ki}=\frac{1}{p_0\hat{\theta}}p_ku_{ki}$. Plugging in $r$ and $q$, we have that $\frac{\hat{p}_i}{\hat{p}_m}q_{ki}r_{ml}=\frac{p_k}{p_l}q_{lm}r_{ik}$, which gives that \[ \big(\sigma_p(e_{im}) f(\eta),g(\eta)\big)_{p}=\big( f(\eta),\sigma_p(e_{mi})g(\eta)\big)_{p}. \]
The proof for $h_l$ is similar. \end{proof} \begin{Remark} Proposition~\ref{prop:3.4} is nontrivial since the automorphism $\operatorname{Ad}_R$ does not preserve the $*$-structure of $\mathfrak{sl}_{n+1}$, i.e., there exists $X\in \mathfrak{sl}_{n+1}$ such that $\operatorname{Ad}_R(X^*)\neq \operatorname{Ad}_R(X)^*$. For example, \[ \operatorname{Ad}_R(e_{12}^*)=\operatorname{Ad}_R(e_{21})=\sum_{k,l}q_{k2}r_{1l}e_{kl}, \] while \[ \operatorname{Ad}_R(e_{12})^*=\bigg(\sum_{k,l}q_{k1}r_{2l}e_{kl}\bigg)^*=\sum_{k,l}q_{k1}r_{2l}e_{lk}=\sum_{k,l}q_{l1}r_{2k}e_{kl}, \] which are not equal for general~$\kappa$. \end{Remark}
\begin{Proposition} When the matrix $U$ in the $4$-tuple $\kappa$ is real, $\rho_{\hat{p}}$ and $\sigma_p$ satisfy the following property: $[\rho_{\hat{p}}(X^*)K(\cdot,\eta)](\xi)=[\sigma_p(X)K(\xi,\cdot)](\eta)$ for any $X\in\mathfrak{sl}_{n+1}$ and $(\xi,\eta)\in \Omega_{2j}\times \Omega_{2j}$. \end{Proposition}
\begin{proof} Recall from Proposition~\ref{prop:3.1}, the multivariate Krawtchouk polynomials can be written in bilinear form. Also recall that the antiautomorphism $\mathfrak{a}$ defined in \eqref{eq:3.1} has property~\eqref{eq:2}. Notice that for function~$z^\xi$, we can write~$\rho_{p}$ in terms of the representation $\rho$ defined in~\eqref{eq:3.2}, \begin{gather*}
\rho_{\hat{p}}(e_{im})z^{\xi}=\sqrt{\frac{\hat{p}_i}{\hat{p}_m}}\rho(e_{im})z^{\xi}. \end{gather*} Thus, for $i\neq m$, \begin{align*}
[\rho_{\hat{p}}(e_{im})K(\cdot,\eta)](\xi)& =\frac{p_0^{2j}}{(2j)!}\sqrt{\frac{\hat{p}_i}{\hat{p}_m}}\big\langle \rho(e_{im})z^{\xi},\hat{z}^\eta\big\rangle_{\kappa} =\frac{p_0^{2j}}{(2j)!}\sqrt{\frac{\hat{p}_i}{\hat{p}_m}}\big\langle z^{\xi},\rho\mathfrak{a}(e_{im})\hat{z}^\eta\big\rangle _{\kappa}\\ & =\frac{p_0^{2j}}{(2j)!}\sqrt{\frac{\hat{p}_m}{\hat{p}_i}}\big\langle z^{\xi},\rho(e_{mi})\hat{z}^\eta\big\rangle _{\kappa}=[\sigma_p(e_{mi})K(\xi,\cdot)](\eta), \end{align*} where the last equality follows from the fact that $e_{im}=\operatorname{Ad}_R(\hat{e}_{im})$ and $\rho(\hat{e}_{im})\hat{z}^\eta=\hat{z}_i\partial_{\hat{z}_m}\hat{z}^\eta$~\cite{iliev_2012}, thus \[ \sqrt{\frac{\hat{p}_m}{\hat{p}_i}}\rho(e_{mi})\hat{z}^\eta=\sqrt{\frac{\hat{p}_m}{\hat{p}_i}}\rho\circ \operatorname{Ad}_R(\hat{e}_{mi})\hat{z}^\eta=\sigma_p(e_{mi})\hat{z}^\eta.\]
The proof for $h_l$ follows from the same argument. \end{proof}
\begin{Proposition}
If the matrix $U$ in the $4$-tuple $\kappa$ is real, define the operator $\Lambda\colon l^2(w_{\hat{p}})\xrightarrow[]{} l^2(w_p)$ by \begin{gather*}
(\Lambda f)(\eta)=p_0^{-j}\sum_{\xi\in \Omega_{2j}}w_{\hat{p}}(\xi)f(\xi)K(\xi,\eta). \end{gather*}
Then $\Lambda$ is a unitary operator and intertwines $\rho_{\hat{p}}$ with $\sigma_p$. The kernel $K(\xi,\eta)$ satisfies \begin{gather}\label{eq:3}
[\rho_{\hat{p}}(X^*)K(\cdot,\eta)](\xi)=[\sigma_p(X)K(\xi,\cdot)](\eta). \end{gather}
\end{Proposition}
\begin{proof}It follows directly from equation~\eqref{eq:3} that $\Lambda[\rho_{\hat{p}}(X)f]=\sigma_p(X)\Lambda(f)$ for all $X\in\mathfrak{sl}_{n+1}$, thus $\Lambda$ intertwines $\rho_{\hat{p}}$ with $\sigma_p$. Recall that if a process has reversible measure, then it's self dual with respect to the cheap duality function, which comes from the reversible measure. For the multi-species SEP$(2j)$, the cheap duality function is given by $\delta_{\zeta}(\xi)=\frac{\delta_{\zeta,\xi}}{w_{\hat{p}}(\xi)}$, which has squared norm $\frac{1}{w_{\hat{p}}(\zeta)}$ in $l^2(w_{\hat{p}})$.
On the other hand, $\Lambda(\delta_\zeta)(\eta)=p_0^{-j}K(\zeta,\eta)$ has squared norm $\frac{1}{w_{\hat{p}}(\zeta)}$ in $l^2(w_p)$. Thus $\Lambda$ maps an orthogonal basis to another orthogonal basis preserving the norm, hence $\Lambda$ is unitary. \end{proof}
Last we show the generator defined in~\eqref{eq:6} is the image of some self-adjoint element in $\mathcal{U}(\mathfrak{sl}_{n+1})^{\otimes L}$ under the $*$-representations $\rho_{\hat{p}}^{\otimes L}$ and $\sigma_p^{\otimes L}$. We generalize the construction of the Markov generator in terms of the co-product of a Casimir element (see, e.g., \cite{Giardin__2009,Groenevelt_2018}) to the multi-species cases.
We start by constructing a Casimir element of $\mathcal{U}(\mathfrak{sl}_{n+1})$. Under the non-degenerate bilinear form $B(X,Y)=\operatorname{tr}(XY)$, the dual basis of $\mathfrak{sl}_{n+1}$ is given by \begin{gather*}
e_{lk}^\star=e_{kl}, \quad k\neq l, \qquad h_l^\star=e_{ll}-e_{00}=h_l+\sum_{k=1}^nh_k. \end{gather*} The Casimir element $\Omega$ of $\mathcal{U}(\mathfrak{sl}_{n+1})$ is given by \begin{gather*}
\Omega=\sum_{0\le k<l\le n}(e_{kl}e_{lk}+e_{lk}e_{kl})+\sum_{0< l\le n} h_lh_l^\star . \end{gather*} It is easy to verify that $\Omega$ is self-adjoint, i.e., with the $*$-structure given in~\eqref{eq:1}, $\Omega^*=\Omega$. Next, define the coproduct for the basis $\{e_{kl}\}_{0\le k\neq l\le n}$ and $\{h_l\}_{0< l\le n}$ as $\Delta(X)=1\otimes X+X\otimes 1$, and define an element \begin{gather*}
Y=\Delta(\Omega)-\Omega\otimes 1-1\otimes \Omega. \end{gather*}
\begin{Lemma} $\mathcal{L}_{x,y}$ is the image of a self-adjoint element in $\mathcal{U}(\mathfrak{sl}_{n+1})^{\otimes 2}$ under the representation $\rho_{\hat{p}}\otimes\rho_{\hat{p}}$ and $\sigma_p\otimes\sigma_p$. Specifically, there exists a constant $c\in \mathbb{R}$ such that \begin{align}\label{eq:5}
\mathcal{L}_{x,y}& =\frac{1}{2}\rho_{\hat{p}}\otimes\rho_{\hat{p}} (Y_{x,y} )-c \\ \label{eq:5.2} & = \frac{1}{2}\sigma_p\otimes\sigma_p (Y_{x,y} )-c. \end{align} \end{Lemma}
\begin{proof}To prove \eqref{eq:5}, we make use of the following identity: \begin{gather*}
\sum_{0\le k<l\le n}\big(\xi_k^x\xi_l^y+\xi_l^x\xi_k^y\big)=\bigg(\sum_{0\le l\le n}\xi_l^x\bigg)\bigg(\sum_{0\le l\le n}\xi_l^y\bigg)-\sum_{0\le l \le n} \xi^x_l\xi^y_l=(2j)^2-\sum_{0\le l \le n} \xi^x_l\xi^y_l. \end{gather*} Expanding $\Omega$ in $Y$, we have \begin{gather*}
Y=2\sum_{0\le k< l\le n} ( e_{lk}\otimes e_{kl}+e_{kl}\otimes e_{lk} )+\sum_{1\le l\le n} (h_l\otimes h_l^{\star}+h_l^{\star}\otimes h_l ). \end{gather*} Now we can compute the right-hand side of~\eqref{eq:5} using the above identities and the representation $\rho_{\hat{p}}$ to see that it agrees with $\mathcal{L}_{x,y}$. Note that $\mathcal{L}_{x,y}$ does not depend on~$\hat{p}$ since all terms with~$\hat{p}$ get cancelled.
To prove \eqref{eq:5.2}, it suffices to show $\operatorname{Ad}_R\otimes \operatorname{Ad}_R(Y)=Y$. First, we can check that $\operatorname{Ad}_R(\Omega)=\Omega$ by direct calculation. Using the fact that $\operatorname{Ad}_R\otimes \operatorname{Ad}_R \circ \Delta=\Delta\circ \operatorname{Ad}_R$, we have $\operatorname{Ad}_R\otimes \operatorname{Ad}_R(Y)\allowbreak =Y$, thus \begin{gather*} \sigma_p \otimes\sigma_p(Y_{x,y})= (\rho_{\hat{p}}\otimes\rho_{\hat{p}} )\circ (\operatorname{Ad}_R\otimes \operatorname{Ad}_R ) (Y_{x,y}) =\rho_{\hat{p}}\otimes\rho_{\hat{p}} (Y_{x,y}).\tag*{\qed} \end{gather*}
\renewcommand{\qed}{} \end{proof}
Applying Theorem~\ref{th:2.1} yields the self duality for the multi-species SEP$(2j)$. \begin{Theorem}
The multi-species ${\rm SEP}(2j)$ defined in Definition~{\rm \ref{def:3.1}} is self dual with respect to duality functions \begin{gather*}
\prod_{x\in G} K\big(\xi^x,\eta^x,\kappa,j\big), \end{gather*}
for any $\kappa\in \mathcal{K}_n$ such that $U$ in $\kappa$ is real and~$p$, $\hat{p}$ in $\kappa$ are probability measures. \end{Theorem}
\section[Multi-species IRW and Heisenberg Lie algebra h\_n]{Multi-species IRW and Heisenberg Lie algebra $\boldsymbol{\mathfrak{h}_n}$}\label{section4}
In this section, we find a family of self-duality functions of the multi-species independent random walk (multi-species IRW) using the Heisenberg Lie algebra $\mathfrak{h}_n$.
The $n$-species independent random walk is a generalization of the usual IRW to~$n$ species on a finite undirected graph $G=(V,E)$, where $V=\{1,\dots,L\}$ with $L\in\mathbb{N}$ and $L>2$ is the set of sites (vertices) and $E$ is the set of edges. It is a Markov process where $n$ species of particles move independently between $L$ sites. The jump rate for a particle of species $i$ from a site is proportional to the number of species $i$ particles at that site.
The state space $\mathcal{S}(n,G)$ of particle configurations consists of variables $\xi=\big(\xi_i^x\colon 1\le i\le n$, $x\in G\big)$, where $\xi_i^x\in\mathbb{N}_0$ (non-negative integers) denotes the number of species~$i$ particles at site~$x$. \begin{Definition}\label{def:4.1} The generator of $n$-species IRW on $G=(V,E)$ is given by \begin{gather*}
\mathcal{L}f(\xi)=\sum_{\text{edge}\{x,y\}\in E} \mathcal{L}_{x,y}f(\xi),\\
\mathcal{L}_{x,y}f(\xi)= \sum_{i=1}^n\big [\xi_i^x\big(f\big(\xi_i^{x,y}\big)-f(\xi)\big)+\xi_i^y\big(f\big(\xi_i^{y,x}\big)-f(\xi)\big)\big], \end{gather*} where $\xi_i^{x,y}$ denotes the particle configuration obtained by moving a particle of species~$i$ from site~$x$ to site~$y$ if $\xi_i^{x,y}\in\mathcal{S}(n,G)$. \end{Definition}
Define measure $ \mu_\lambda(\xi)=\prod_{i=1}^n \frac{\lambda^{\xi_i}}{\xi_i!}{\rm e}^{-\lambda}$ with $ \lambda>0$. Following a simple detailed balance computation, we can show that the product measure $\otimes_G\mu_\lambda$ is a reversible measure of the $n$-species IRW when $\lambda$ is the same for all sites.
Next, we mention here that the space $l^2(\mu_\lambda)$ is equipped with inner product \begin{gather*}
(f,g)_\lambda=\sum_{\xi\in\mathbb{N}_0^{ n}}\mu_\lambda(\xi)f(\xi)\overline{g(\xi)}. \end{gather*}
\subsection[The Charlier polynomials and Heisenberg Lie algebra h\_n]{The Charlier polynomials and Heisenberg Lie algebra $\boldsymbol{\mathfrak{h}_n}$}\label{section4.1}
\begin{Definition} The Heisenberg Lie algebra $\mathfrak{h}_n$ is the $2n+1$ dimensional complex Lie algebra with generators $\{P_1,\dots,P_n,Q_1,\dots,Q_n,Z\}$ and commutation relations: for $1\le i,l \le n$, \begin{gather*}
[P_i,P_l]=[Q_i,Q_l]=[P_i,Z]=[Q_i,Z]=0,\qquad
[P_i,Q_l]=\delta_{i,l}Z. \end{gather*} \end{Definition} The Heisenberg Lie algebra $\mathfrak{h}_n$ is nilpotent but not semisimple. It has a $*$-structure given by \begin{gather*}
P_i^*=Q_i,\qquad Q_i^*=P_i,\qquad Z^*=Z. \end{gather*}
The Charlier polynomials are given by \begin{gather*}
C_m(z,\lambda)={}_2F_0\left(\left.\begin{matrix}
-m,\ -z \\
-
\end{matrix}\,\right| -\frac{1}{\lambda}\right). \end{gather*} Here we list some properties of Charlier polynomials that will be used later on. First, Charlier polynomials are orthogonal:
\begin{gather*}
\sum_{z\in \mathbb{N}_0}C_m(z,\lambda)C_{\tilde{m}}(z,\lambda)\frac{\lambda^{z}}{z!}{\rm e}^{-\lambda}=\delta_{m,\tilde{m}}\lambda^{-\tilde{m}}\tilde{m}!.
\end{gather*}
They have the following raising and lowering property,
\begin{gather*}
mC_{m-1}(z,\lambda)=\lambda C_m(z,\lambda)-\lambda C_m(z+1,\lambda),
\\
\lambda C_{m+1}(z,\lambda)=\lambda C_m(z,\lambda)-z C_m(z-1,\lambda).
\end{gather*}
To construct a unitary operator later, we define the function $C(\xi,\eta,\lambda)$ for $\xi,\eta\in \mathbb{N}_0^{ n}$ by \begin{gather}\label{eq: 4.9}
C(\xi,\eta,\lambda)=\prod_{i=1}^n {\rm e}^{\lambda} C_{\xi_i}(\eta_i,\lambda). \end{gather}
\subsection{Self duality of the multi-species IRW}\label{section4.2} Now we define the $*$-representation $\rho_\lambda$ of $\mathfrak{h}_n$ on $l^2(\mu_\lambda)$ by \begin{gather*}
[\rho_\lambda(Q_i)f](\xi)=\xi_if\big(\xi_i^{-1}\big),\\
[\rho_\lambda(P_i)f](\xi)=\lambda f\big(\xi_i^{+1}\big),\\
[\rho_\lambda(Z)f](\xi)=\lambda f(\xi), \end{gather*} where $\xi_i^{+1}$ \big($\xi_i^{-1}$\big) means that $\xi_i$ is increased (decreased) by~$1$.
Next, we define the map $\theta$ by \begin{gather*}
\theta(P_i)=Z-P_i,\qquad \theta(Q_i)=Z-Q_i,\qquad \theta(Z)=Z, \end{gather*} then $\theta$ extends to a Lie algebra isomorphism of $\mathfrak{h}_n$, preserving the $*$-structure.
\begin{Proposition}\label{prop:4.1}
For any $X\in \mathfrak{h}_n$, we have
\begin{gather*}
\rho_\lambda(X^*)C(\cdot,\eta,\lambda)(\xi)=\rho_\lambda(\theta(X))C(\xi,\cdot,\lambda)(\eta).
\end{gather*} \end{Proposition}
\begin{proof} Using the definition of $\rho_\lambda$ and the raising and lowering properties, it is easy to verify that \begin{gather*}
\rho_\lambda(Q_i)C(\cdot,\eta,\lambda)(\xi)=\xi_i C\big(\xi_i^{-1},\eta,\lambda\big)\\
\hphantom{\rho_\lambda(Q_i)C(\cdot,\eta,\lambda)(\xi)}{} =\lambda C(\xi,\eta,\lambda)-\lambda C\big(\xi,\eta_i^{+1},\lambda\big)
=\rho_\lambda(\theta(P_i))C(\xi,\cdot,\lambda)(\eta),\\
\rho_\lambda(P_i)C(\cdot,\eta,\lambda)(\xi)=\lambda C\big(\xi_i^{+1},\eta,\lambda\big)\\
\hphantom{\rho_\lambda(P_i)C(\cdot,\eta,\lambda)(\xi)}{}
=\lambda C(\xi,\eta,\lambda)-\eta_i C\big(\xi,\eta_i^{-1},\lambda\big)
=\rho_\lambda(\theta(Q_i))C(\xi,\cdot,\lambda)(\eta) .\tag*{\qed} \end{gather*} \renewcommand{\qed}{} \end{proof}
\begin{Proposition}
Define the operator $\Lambda\colon l^2(\mu_\lambda)\xrightarrow[]{} l^2(\mu_\lambda)$ by \begin{gather*}
(\Lambda f)(\eta)=\sum_{\xi\in \mathbb{N}_0^{ n}}\mu_\lambda(\xi)f(\xi)C(\xi,\eta,\lambda), \end{gather*} then $\Lambda$ is a unitary operator and intertwines $\rho_\lambda$ with $\rho_\lambda\circ\theta$. \end{Proposition} \begin{proof} It follows directly from Proposition~\ref{prop:4.1} that $\Lambda[\rho_\lambda(X)f]=\rho_\lambda\circ\theta(X)\Lambda(f)$ for all $X\in\mathfrak{h}_n$, thus $\Lambda$ intertwines $\rho_{\lambda}$ with $\rho_\lambda\circ\theta$. The cheap duality functions for the $n$-species IRW given by $\delta_{\zeta}(\xi)=\frac{\delta_{\zeta,\xi}}{\mu_\lambda(\xi)}$ form an orthogonal basis for $l^2(\mu_\lambda)$ with squared norm $\frac{1}{\mu_\lambda(\zeta)}$, while $\Lambda(\delta_\zeta)(\eta)=C(\zeta,\eta,\lambda)$ also has squared norm $\frac{1}{\mu_\lambda(\zeta)}$ in $l^2(\mu_\lambda)$. By the fact that all $C(\zeta,\eta,\lambda)$ form an orthogonal basis for $l^2(\mu_\lambda)$, $\Lambda$ is unitary. \end{proof}
Finally, to show self duality, we define $Y\in \mathcal{U}(\mathfrak{h}_n)^{\otimes 2}$ by \begin{gather*}
Y=\sum_{i=1}^n (1\otimes Q_i-Q_i\otimes 1)(P_i\otimes 1-1\otimes P_i). \end{gather*}
\begin{Lemma} The generator for the multi-species IRW can be written as the following: \begin{align}\label{eq:7}
\mathcal{L}_{x,y}& =\lambda^{-1} \rho_\lambda\otimes\rho_\lambda (Y_{x,y}) \\ \label{eq:8}
& =\lambda^{-1} (\rho_\lambda\circ\theta)\otimes(\rho_\lambda \circ\theta)(Y_{x,y}). \end{align} \end{Lemma} \begin{proof}
Equation~\eqref{eq:7} is obtained by plugging in the definitions, and to prove \eqref{eq:8}, we show that $(\rho_\lambda\circ\theta)\otimes(\rho_\lambda \circ\theta)(Y)=\rho_\lambda\otimes\rho_\lambda (Y)$, which follows from \cite[Lemma~3.5]{Groenevelt_2018}. \end{proof}
Again, applying Theorem~\ref{th:2.1}, we obtain the self duality for the multi-species IRW. \begin{Theorem} The multi-species IRW defined in Definition~{\rm \ref{def:4.1}} is self dual with respect to duality functions \begin{gather}\label{eq: 4.20}
\prod_{x\in G} C\big(\xi^x,\eta^x,\lambda\big),\qquad \lambda>0. \end{gather} \end{Theorem} \begin{Remark} These duality functions could be obtained by the independence of the evolution of each species of particles and the fact that the duality functions given by \eqref{eq: 4.9} and~\eqref{eq: 4.20} suitably factorize over species. \end{Remark}
\subsection*{Acknowledgments} The author is very grateful to Jeffrey Kuan and anonymous referees for helpful discussions and insightful comments.
\pdfbookmark[1]{References}{ref}
\LastPageEnding
\end{document} |
\begin{document}
\sf
\title[Calabi-Yau objects in triangulated categories]{Calabi-Yau objects in triangulated categories} \author[C. Cibils, P. Zhang] {Claude Cibils$^a$ and Pu Zhang$^{b, *}$} \thanks{The second named author is supported by the CNRS of France, the NSF of China and of Shanghai City (Grant No. 10301033 and ZR0614049).} \thanks{$^*$ The corresponding author} \keywords{Serre functor, Calabi-Yau object, Auslander-Reiten triangle, stable module category, self-injective Nakayama algebra} \maketitle
\begin{center} $^A$D\'epartement de Math\'ematiques, \ \ Universit\'e de Montpellier 2\\ F-34095, Montpellier Cedex 5, France\ \ \ Claude.Cibils$\symbol{64}$math.univ-montp2.fr\\ $^B$Department of Mathematics, \ \ Shanghai Jiao Tong University\\ Shanghai 200240, P. R. China\ \ \ \ pzhang$\symbol{64}$sjtu.edu.cn \end{center} \begin{abstract} We introduce the Calabi-Yau (CY) objects in a Hom-finite Krull-Schmidt triangulated $k$-category, and notice that the structure of the minimal, consequently all the CY objects, can be described. The relation between indecomposable CY objects and Auslander-Reiten triangles is provided. Finally we classify all the CY modules of self-injective Nakayama algebras, determining this way the self-injective Nakayama algebras admitting indecomposable CY modules. In particular, this result recovers the algebras whose stable categories are Calabi-Yau, which have been obtained in [BS]. \end{abstract}
\vskip10pt
\section {\bf Introduction}
Calabi-Yau (CY) categories have been introduced by Kontsevich [Ko]. They provide a new insight and a wide framework for topics as in mathematical physics ([Co]), non-commutative geometry ([B], [Gin1], [Gin2]), and representation theory of Artin algebras ([BS], [ES], [IR], [Ke], [KR1], [KR2]).
Triangulated categories with Serre dualities ([BK], [RV]) and CY categories have important global naturality. On the other hand, even in non CY categories, inspired by [Ko], one can introduce CY objects. It turns out that they arise naturally in non CY categories and enjoy ``local naturality'' and interesting properties (Prop. 4.4, Theorems 3.2, 4.2, 5.5 and 6.1).
\vskip10pt
The first aim of this paper is to study the properties of such objects in a Hom-finite Krull-Schmidt triangulated $k$-category with Serre functor $F$. We give the relation between indecomposable CY objects and the Auslander-Reiten triangles ($\S 3$), and describe all the $d$-th CY objects via the minimal ones, which are exactly the direct sum of all the objects in finite $\langle [-d]\circ F\rangle$-orbits of $\operatorname{Ind}(\mathcal A)$ ($\S 4$). We classify all the $d$-th CY modules of self-injective Nakayama algebras for any integer $d$ ($\S 5$). Finally, we determine all the self-injective Nakayama algebras which admit indecomposable CY modules. In particular, this recovers the algebras whose stable categories are Calabi-Yau ($\S 6$), included in the work of Bialkowski and Skowro\'nski [BS]. Note that the CY modules are invariant under stable equivalences between self-injective algebras, with a very few exceptions (Prop.3.1). Consequently our results on self-injective Nakayama algebras extend to the one on the wreath-like algebras ([GR]), which contains the Brauer tree algebras ([J]).
This also raises an immediate question. Let $\mathcal A$ be a Hom-finite Krull-Schmidt triangulated $k$-category with a Serre functor. If all objects are $d$-th CY with the same $d$, is $\mathcal A$ necessarily a Calabi-Yau category?
\vskip10pt
\section {\bf Backgrounds and Preliminaries}
\subsection{} Let $k$ be a field and $\mathcal{A}$ a Hom-finite $k$-category. Recall from Bondal and Kapranov [BK] that a $k$-linear functor $F \colon \mathcal{A} \to \mathcal{A}$ \ is {\em a right Serre functor} if there exist $k$-isomorphisms $$\eta_{A, B}: \ \ \operatorname{Hom}_{\mathcal{A}}(A, B) \longrightarrow D \operatorname{Hom}_{\mathcal{A}}(B, FA), \ \ \forall \ \ A, \ B\in \mathcal{A},$$ which are natural both in $A$ and $B$, where $D = \operatorname{Hom}_{k}(-, k)$. Such an $F$ is unique up to a natural isomorphism, and fully-faithful; if it is an equivalence, then a quasi-inverse $F^{-1}$ is a left Serre functor; in this case we call $F$ a Serre functor. Note that $\mathcal{A}$ has a Serre functor if and only if it has both right and left Serre functor. See Reiten and Van den Bergh [RV].
\vskip10pt
For triangulated categories we refer to [Har], [V], and [N]. Let $\mathcal{A}$ be a Hom-finite triangulated $k$-category. Following Happel [Hap1], {\em an Auslander-Reiten triangle} $X\stackrel{f} {\longrightarrow} Y \stackrel{g} {\longrightarrow} Z \stackrel{h} {\longrightarrow} X[1]$ of $\mathcal{A}$ is a distinguished triangle satisfying:
(AR1) \ $X$ and $Z$ are indecomposable;
(AR2) \ $h\ne 0$;
(AR3) \ If $t: Z'\longrightarrow Z$ is not a retraction, then there exists $t': Z'\longrightarrow Y$ such that $t = gt'$.
\vskip10pt
Note that (AR3) is equivalent to
(AR4) \ If $Z'$ is indecomposable and $t: Z'\longrightarrow Z$ is a non-isomorphism, then $ht = 0$.
Under (AR1) and (AR2), (AR3) is equivalent to
(AR3') \ If $s: X\longrightarrow X'$ is not a section, then there exists $s': Y\longrightarrow X'$ such that $s = s'f$.
Also, (AR3') is equivalent to
(AR4') \ If $X'$ is indecomposable and $s: X\longrightarrow X'$ is a non-isomorphism, then $s\circ h[-1] = 0$.
\vskip10pt
In an Auslander-Reiten triangle $X {\longrightarrow} Y {\longrightarrow} Z {\longrightarrow} X[1]$, the object $X$ is uniquely determined by $Z$. Write $X = \tau_\mathcal A Z$. In general $\tau_\mathcal A$ is {\em not} a functor. By definition $\mathcal A$ has right Auslander-Reiten triangles if there exists an Auslander-Reiten triangle $X {\longrightarrow} Y {\longrightarrow} Z {\longrightarrow} X[1]$ for any indecomposable $Z$; and $\mathcal A$ has Auslander-Reiten triangles if $\mathcal{A}$ has right and left Auslander-Reiten triangles. We refer to [Hap1], [XZ] and [A] for the Auslander-Reiten quiver of a triangulated category.
\vskip10pt
A Hom-finite $k$-category is {\em Krull-Schmidt} if the endomorphism algebra of any indecomposable is local. In this case any object is uniquely decomposed into a direct sum of indecomposables, up to isomorphisms and up to the order of indecomposable direct summands (Ringel [R], p.52).
Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category. Theorem I.2.4 in [RV] says that $\mathcal{A}$ has a right Serre functor $F$ if and only if $\mathcal{A}$ has right Auslander-Reiten triangles. In this case, $F$ coincides with $[1]\circ \tau_\mathcal A$ on objects, up to isomorphisms.
\vskip10pt
\subsection{}Let $\mathcal{A}$ be a Hom-finite triangulated $k$-category with Serre functor $F$. Denote by $[1]$ the shift functor of $\mathcal A$. Following Kontsevich [Ko], $\mathcal{A}$ is {\em a Calabi-Yau category} if there is a natural isomorphism $F \cong [d]$ of functors for some $d\in\Bbb Z$.
Denote by $o([1])$ the order of $[1]$. If $o([1]) = \infty$ then the integer $d$ above is unique, and is called {\em the CY dimension} of $\mathcal{A}$; if $o([1])$ is finite then we call the minimal non-negative integer $d$ such that $F\cong [d]$ {\em the CY dimension} of $\mathcal{A}$. Denote by $\operatorname{CYdim}(\mathcal A)$ the CY dimension of $\mathcal A$.
\vskip10pt
For example, if $A$ is a symmetric algebra and $\mathcal P$ is the category of projective modules, then the homotopy category $K^b(\mathcal P)$ is of CY dimension $0$. Moreover, if $\mathcal A$ is of CY dimension $d$, then $\operatorname {Ext}_\mathcal A^i(X, Y)\cong D\circ \operatorname {Ext}_\mathcal A^{d-i}(Y, X), \ X, Y\in\mathcal A, \ i\in\Bbb Z$, where $\operatorname {Ext}_\mathcal A^i(X, Y): =\operatorname {Hom}_\mathcal A(X, Y[i])$. Thus, if $A$ is a CY algebra ([B], [Gin2]), i.e. the bounded derived category $D^b(A\mbox{-mod})$ is Calabi-Yau of CY dimension $d$, then $\operatorname {gl.dim}(A\mbox{-mod})=d$ (see [B]).
\vskip10pt
\subsection{} Let $\mathcal{A}$ and $\mathcal{B}$ be triangulated categories. {\em A triangle functor} from $\mathcal{A}$ to $\mathcal{B}$ is a pair $(F, \eta^F)$, where $F\colon \mathcal{A} \to \mathcal{B}$ is an additive functor and $\eta^F: \ F\circ [1] \longrightarrow [1]\circ F$ is a natural isomorphism, such that if $X \stackrel{f} {\longrightarrow} Y \stackrel{g} {\longrightarrow} Z \stackrel{h} {\longrightarrow} X[1]$ is a distinguished triangle of $\mathcal{A}$ then $FX \stackrel{Ff} {\longrightarrow} FY \stackrel{Fg} {\longrightarrow} FZ \stackrel{\eta^F_X \circ Fh} {\longrightarrow} (FX)[1]$ is a distinguished triangle of $\mathcal{B}$. Triangle functors $(F, \ \eta^F)$ and $(G, \ \eta^G)$ are {\em natural isomorphic} if there is a natural isomorphism $\xi: \ F\longrightarrow G$ such that the following diagram commutes for any $A\in\mathcal{A}$ \[\xymatrix{ F(A[1]) \ar[rr]^{\eta^F_A} \ar[d]_{\xi_{A[1]}} && F(A)[1]
\ar[d]^-{\xi_A[1]}\\ G(A[1]) \ar[rr]^{\eta^G_A} && G(A)[1].}\]
As Keller pointed out, the pair $([n], \ (-1)^n{\rm Id}_{[n+1]}): \ \mathcal{A}\longrightarrow\mathcal{A}$ is a triangle functor for $n\in\Bbb Z$. However, $([n], \ {\rm Id}_{[n+1]})$ may be {\em not}.
We need the following important result. A nice proof given by Van den Bergh is in the Appendix of [B].
\begin {lem} \ \ (Bondal-Kapranov {\em [BK]}; Van den Bergh {\em [B]}) \ \ Let $F$ be a Serre functor of a Hom-finite triangulated $k$-category $\mathcal{A}$. Then there exists a natural isomorphism $\eta^F: \ F\circ [1] \longrightarrow [1]\circ F$ such that $(F, \ \eta^F): \ \mathcal{A}\longrightarrow\mathcal{A}$ is a triangle functor. \end{lem}
From [Ke, 8.1] and [B, A.5.1] one has the following
\begin{prop} (Keller; Van den Bergh)\ Let $\mathcal{A}$ be a Hom-finite triangulated $k$-category with Serre functor $F$. Then $\mathcal{A}$ is a Calabi-Yau category if and only if there exists a natural isomorphism $\eta^F: F\circ [1] \longrightarrow [1]\circ F,$ such that $(F, \ \eta^F)$ is a triangle functor and $(F, \eta^F) \longrightarrow ([d], (-1)^d\operatorname {Id}_{[d+1]})$\ is a natural isomorphism of triangle functors, for some integer $d$. \end{prop}
\noindent {\bf Proof.} For convenience we justify the ``only if'' part. By assumption we have a natural isomorphism $\xi: F \cong [d]$. Define $\eta^F_A \colon F(A[1]) \longrightarrow (FA)[1]$ for $A\in\mathcal{A}$ by \ $\eta^F_A:=(-1)^d(\xi_A)^{-1}[1]\circ \xi_{A[1]}.$\ Then $\xi_A[1] \circ \eta_A^F = (-1)^d \xi_{A[1]}$. The naturality of $\eta^F: \ F\circ [1] \longrightarrow [1]\circ F$ follows from the one of $\xi$. It remains to show that $(F, \eta^F)\colon \mathcal{A}\to \mathcal{A}$ is a triangle functor. Let $X \stackrel{f} {\longrightarrow} Y \stackrel{g} {\longrightarrow} Z \stackrel{h} {\longrightarrow} X[1]$ be a distinguished triangle. Since \ $X[d]\stackrel{f[d]} {\longrightarrow}Y[d]\stackrel{g[d]} {\longrightarrow} Z[d] \stackrel{(-1)^d h[d]} {\longrightarrow}X[d+1]$\ \ is a distinguished triangle, it suffices to prove that the following diagram is commutative \[\xymatrix{ F(X)\ar[r]^-{F(f)} \ar[d]_{\xi_X}^-{\wr} & F(Y) \ar[r]^-{F(g)}\ar[d]_{\xi_Y}^-{\wr}& F(Z)\ar[rr]^-{\eta_X^F \circ F(h)}\ar[d]_{\xi_Z}^-{\wr}&& (F(X))[1]\ar[d]\ar[d]_{\xi_X[1]}^-{\wr}\\ X[d]\ar[r]^-{f[d]} & Y[d]\ar[r]^-{g[d]}& Z[d]\ar[rr]^-{(-1)^d h[d]}&& X[d+1].}\] By the naturality of $\xi$ the first and the second square are commutative. We also have $$\xi_X[1]\circ \eta_X^F \circ F(h) = (-1)^d \xi_{X[1]} \circ F(h) = (-1)^d h[d]\circ \xi_Z. \ \ \ \blacksquare $$
\vskip10pt
\subsection{} Let $A$ be a self-injective $k$-algebra, $A$-mod the category of finite-dimensional left $A$-modules, and $A\underline {\mbox{-mod}}$ the stable category of $A$-mod modulo projective modules. Then the Nakayama functor $\mathcal N: = D( A)\otimes_A-$, Heller's syzygy functor $\Omega$, and the Auslander-Reiten translate $\tau \cong \Omega^2\circ \mathcal N\cong\mathcal N\circ \Omega^2$ ([ARS], p.126), are endo-equivalences of $A\underline {\mbox{-mod}}$ ([ARS], Chap. IV). Note that $A\underline {\mbox{-mod}}$ is a Hom-finite Krull-Schmidt triangulated $k$-category with $[1] = \Omega^{-1}$ ([Hap1], p.16). By the bi-naturality of the Auslander-Reiten isomorphisms ([AR]) $$ \underline {\operatorname {Hom}}(X, Y) \cong D\circ\operatorname {Ext}_A^1(Y, {\tau} X) \cong D\circ\underline {\operatorname {Hom}}(Y, [1]\circ {\tau} X),$$ where $\underline {\operatorname {Hom}}(X, Y): = \operatorname {Hom} _{A\underline {\mbox{-mod}}}(X, Y)$, one gets the Serre functor $F:=[1]\circ {\tau} \cong \Omega\circ \mathcal N$ of $A\underline {\mbox{-mod}}$. It follows that $A\underline {\mbox{-mod}}$ is Calabi-Yau if and only if $\mathcal N \cong \Omega^{-(d+1)}$ for some $d$ ([Ke, 8.3]). In this case denote by $\operatorname{CYdim}(A)$ the CY dimension of $A\underline {\mbox{-mod}}$. Note that $\Omega, \ F, \ \mathcal N, \ \tau$ are pairwise commutative as functors of $A\underline {\mbox{-mod}}$. This follows from Lemma 2.1. \vskip10pt
\subsection{} Let $A$ be a finite-dimensional $k$-algebra. Recall that $A$ is a Nakayama algebra if any indecomposable is uniserial, i.e. it has a unique composition series ([ARS], p.197). In this case $A$ is representation-finite. If $k$ is algebraically closed then any connected self-injective Nakayama algebra is Morita equivalent to $\Lambda(n, t),$ $n\ge 1, \ t\ge 2$ ([GR], p.243), which is defined below.
Let $\Bbb Z_n$ be the cyclic quiver with vertices indexed by the cyclic group $\Bbb Z/n\Bbb Z$ of order $n$, and with arrows $a_i: \ i \longrightarrow i+1, \ \forall \ i\in \Bbb Z/n\Bbb Z$. Let $k\Bbb Z_n$ be the path algebra of the quiver $\Bbb Z_n$, $J$ the ideal generated by all arrows, and $\Lambda = \Lambda(n, t): =k\Bbb Z_n/J^t$ with $t\ge 2$. Denote by $\gamma^l_i$ the path starting at vertex
$i$ and of length $l$, and $e_i: = \gamma^0_i$. We write the conjunction of paths from right to left. Then $\{\gamma^l_i \ | \ 0\le i\le n-1, \ 0\le l\le t-1\}$ is a basis of $\Lambda$; while
$\{P(i): = \Lambda e_i\ | \ 0\le i\le n-1\}$ is the set of pairwise non-isomorphic indecomposable projective modules, and $\{I(i): =
D(e_{i}\Lambda)\ | \ 0\le i\le n-1\}$ is the set of pairwise non-isomorphic indecomposable injective modules, with $P(i) \cong I(i+t-1)$. Note that $\Lambda$ is a Frobenius algebra, and $\Lambda$ is symmetric if and only if $n\mid (t-1)$. Write $S(i): = P(i)/\operatorname{rad}P(i)$, and $S^l_i: =
\Lambda\gamma_{i+l-t}^{t-l}$. Then $S_i^l$ is the indecomposable with top $S(i)$ and the Loewy length $l$, and $\{ S^l_i \ | \ 0\le i\le n-1, \ 1\le l\le t \}$ is the set of pairwise non-isomorphic indecomposable modules, with $S^{t}_i = P(i)$ and $\operatorname{soc}(S^l_i) = S(i+l-1)$. For the Auslander-Reiten quiver of $\Lambda$ see [GR], Section 2, and [ARS], p.197. In particular, the stable Auslander-Reiten quiver of $\Lambda$ is $\Bbb Z A_{t-1}/\langle\tau^n\rangle.$
\vskip10pt
\section{\bf Indecomposable Calabi-Yau objects}
The purpose of this section is to introduce the Calabi-Yau objects and to give the relation between indecomposable Calabi-Yau objects and Auslander-Reiten triangles.
\vskip10pt
\subsection{} Let $\mathcal{A}$ be a Hom-finite triangulated $k$-category. A non-zero object $X$ is called \emph {a Calabi-Yau object} if there exists a natural isomorphism \begin{align}\operatorname{Hom}_{\mathcal A}(X, -)\cong D\circ \operatorname{Hom}_{\mathcal A}(-, X[d])\end {align} for some integer $d$.
By Yoneda Lemma, such a $d$ is unique up to a multiple of the relative order $o([1]_X)$ of $[1]$ with respect to $X$. Recall that $o([1]_X)$ is the minimal positive integer such that $X[o([1]_X)]\cong X$, otherwise $o([1]_X) = \infty$. If $o([1]_X)=\infty$ then $d$ in $(3.1)$ is unique and is called {\em the CY dimension} of $X$. If $o([1]_X)$ is finite then the minimal non-negative integer $d$ in $(3.1)$ is called {\em the CY dimension} of $X$. We denote by $\operatorname{CYdim}(X)$ the CY dimension. Thus, if $o([1]) < \infty$ then $o([1]_X)\mid o([1])$ and $0\le \operatorname{CYdim}(X) < o([1]_X).$
Let $A$ be a finite-dimensional self-injective algebra. An $A$-module $M$ without projective direct summands is called {\em a Calabi-Yau module} of CY dimension $d$, if it is a Calabi-Yau object of $A\underline{\mbox{-mod}}$ with $\operatorname{CYdim}(M) = d$.
\vskip10pt
Note that $\operatorname{CYdim}(X)$ is usually not easy to determine. In case $(3.1)$ holds for some $d$, we say that $X$ is a {\em $d$-th CY object}. Of course, if $o([1]_X) < \infty$ then $o([1]_X)\mid (d - \operatorname{CYdim}(X))\ge 0.$
\vskip10pt
If $\mathcal A$ has right Serre functor $F$, then by Yoneda Lemma a non-zero object $X$ is a $d$-th CY object if and only if $F(X)\cong X[d]$, or equivalently, $F(X)[-d]\cong X$. Thus, a non-zero $A$-module $M$ without projective direct summands is a $d$-th CY module if and only if $\mathcal N(M)\cong \Omega^{-(d+1)}(M)$ in $A\underline{\mbox{-mod}}$ (in fact, this isomorphism can be taken in $A$-mod).
\vskip10pt
\subsection{} We have the following basic property.
\vskip10pt
\begin {prop} \ $(i)$ \ The Calabi-Yau property for a category or an object, is invariant under triangle-equivalences.
\vskip10pt
$(ii)$ \ The Calabi-Yau property for a module is {\em ``usually"} invariant under stable equivalences between self-injective algebras. Precisely, let $A$ and $B$ be self-injective algebras, $G: A\underline {\mbox{-mod}}\longrightarrow B\underline {\mbox{-mod}}$ a stable equivalence, and $X$ a CY $A$-module of dimension $d$. If $A\ncong \Lambda(n, 2),$ or if $A$ and $B$ are symmetric algebras, then $G(X)$ is a CY $B$-module of dimension $d$. \end{prop}
\noindent{\bf Proof.} \ $(i)$ \ Let $\mathcal A$ be a Calabi-Yau category with $F_\mathcal A\cong [d]$, where $F_\mathcal A$ is the Serre functor. Clearly $F_\mathcal B: = G \circ F_\mathcal A\circ G^{-1}$ is a Serre functor of $\mathcal B$ (if $\mathcal B$ has already one, then it is naturally isomorphic to $F_\mathcal B$). By the natural isomorphism $(\xi^G)^d: G\circ [d] \longrightarrow [d]\circ G$, which is the composition $G\circ [d] \longrightarrow [1]\circ G\circ [d-1] \longrightarrow \cdots \longrightarrow [d]\circ G$ (A.2 in [B]), we see that $\mathcal B$ is a Calabi-Yau category with $F_\mathcal B\cong [d].$ If $X$ is a Calabi-Yau object with a natural isomorphism $\eta$ as in $(3.1)$, then we have a natural isomorphism \ $\operatorname{Hom}_\mathcal B(-, (\xi^G)^d_X)\circ G\circ \eta\circ G^{-1}: \ \operatorname{Hom}_{\mathcal B}(G(X), -)\cong D\circ \operatorname{Hom}_{\mathcal B}(-, (GX)[d]),$ \ which implies that $G(X)$ is a Calabi-Yau object of $\mathcal B$.
$(ii)$ \ Recall that an equivalence $G: A\underline {\mbox{-mod}}\longrightarrow B\underline {\mbox{-mod}}$ of categories is called a stable equivalence. Note that in general $G$ is not induced by an exact functor (cf. [ARS], p.339), hence $G$ may not be a triangle-equivalence (cf. [Hap1], Lemma 2.7, p.22. Note that the converse of Lemma 2.7 is also true). One may assume that $A$ is connected. If $A\ncong \Lambda(n, 2),$ or if $A$ and $B$ are symmetric algebras, then by Corollary 1.7 and Prop. 1.12 in [ARS], p.344, we know that $G$ commutes with $\tau$ and $\Omega$ on modules, hence we have the isomorphisms \begin{align*}\Omega_B^{-1}\circ \tau_B (G(X)) \cong G((\Omega_A^{-1}\circ \tau_A)(X)) \cong G(\Omega_A^{-d}(X))\cong \Omega_B^{-d}(G(X)),\end{align*} which implies that $G(X)$ is a Calabi-Yau $B$-module of CY dimension $d$.
$\blacksquare$
\vskip10pt
It seems that the Calabi-Yau property for the stable category is also invariant under a stable equivalence $G$ between self-injective algebras. However, this needs natural isomorphisms between $G$ and $\tau$, and between $G$ and $\Omega$, which are not clear to us.
\vskip10pt
\subsection{} The main result of this section is as follows.
\vskip10pt
\begin{thm} Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category, and $X$ an indecomposable object of $\mathcal A$. Then $X$ is a $d$-th CY object if and only if there exists an Auslander-Reiten triangle of the form \begin{align}X[d-1]\stackrel f \longrightarrow Y \stackrel g \longrightarrow X\stackrel h\longrightarrow X[d].\end{align}
Moreover, $Y$ is also a $d$-th CY object. \end{thm}
\vskip10pt \subsection{}The proof of the first part of Theorem 3.2 follows an argument of Reiten and Van den Bergh in [RV]. For the convenience we include a complete proof.
\vskip10pt
\begin{lem} \ Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category, and $X$ a non-zero object of $\mathcal A$. Then $X$ is a $d$-th CY object if and only if for any indecomposable $Z$ there exists a non-degenerate bilinear form \begin{align}(-,-)_{Z}: \ \operatorname{Hom}_{\mathcal A}(X, Z)\times \operatorname{Hom}_{\mathcal A}(Z, X[d]) \longrightarrow k\end{align} such that for any \ $u\in \operatorname{Hom}_{\mathcal A}(X, Z), \ \ v\in \operatorname{Hom}_{\mathcal A}(Z, W)$, \ and \ $w\in \operatorname{Hom}_{\mathcal A}(W, X[d]),$ there holds \begin{align}(u, wv)_Z=(vu, w)_W.\end{align}
\end{lem} \noindent{\bf Proof.} \ If $X$ is a $d$-th CY object, then we have $\eta_Z: \ \operatorname{Hom}_{\mathcal A}(X, Z)\cong D\circ \operatorname{Hom}_{\mathcal A}(Z, X[d]), \ \forall \ Z\in\mathcal A$, which are natural in $Z$. Each isomorphism $\eta_Z$ induces a non-degenerate bilinear form $(-,-)_{Z}$ in $(3.3)$ by $(u, z)_Z: = \eta_Z(u)(z),$ and $(3.4)$ follows from the naturality of $\eta_Z$ in $Z$. Conversely, if we have $(3.3)$ and $(3.4)$ for any indecomposable $Z$, then we have isomorphism $\eta_Z: \ \operatorname{Hom}_{\mathcal A}(X, Z)\cong D\circ \operatorname{Hom}_{\mathcal A}(Z, X[d])$ given by $\eta_Z(u)(z): = (u, z)_Z, \ \forall \ z\in \operatorname{Hom}_{\mathcal A}(Z, X[d])$. By $(3.4)$ $\eta_Z$ are natural in $Z$. Since $\mathcal A$ is Krull-Schmidt, it follows that we have isomorphisms $\eta_Z$ for any $Z$ which are natural in $Z$. This means that $X$ is a $d$-th CY object.
$\blacksquare$
\vskip10pt
The following Lemma in [RV] will be used.
\vskip10pt
\begin{lem} \ \ ({\em [RV]}, Sublemma I.2.3)\ Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category, $\tau_{\mathcal A}(X) \longrightarrow Y
\longrightarrow X\stackrel h\longrightarrow \tau_\mathcal A(X)[1]$ an Auslander-Reiten triangle of $\mathcal A$, and $Z$ an indecomposable in $\mathcal A$. Then
$(i)$ \ For any non-zero $z\in \operatorname{Hom}_{\mathcal A}(Z, \tau_{\mathcal A}(X)[1])$ there exists $u\in \operatorname{Hom}_{\mathcal A}(X, Z)$ such that $zu = h$.
$(ii)$ For any non-zero $u\in \operatorname{Hom}_{\mathcal A}(X, Z)$ there exists $z\in \operatorname{Hom}_{\mathcal A}(Z,
\tau_{\mathcal A}(X)[1])$ such that $zu = h$. \end{lem}
\vskip10pt
In a Hom-finite Krull-Schmidt triangulated $k$-category without Serre functor (e.g., by [Hap2] and [RV] if $\operatorname{gl.dim}(A) = \infty$ then $D^b(A\mbox{-mod})$ has no Serre functor), one may use {\em the generalized Serre functor} introduced by Chen [Ch].
\vskip10pt
\begin{lem} (Chen [Ch]) Let $\mathcal A$ be a Hom-finite Krull-Schmidt triangulated $k$-category. Consider the full subcategories of $\mathcal A$
given by $$\mathcal{\mathcal A}_r:=\{X \in \mathcal{A}\; |\; D\circ\operatorname{Hom}_{\mathcal A}(X, -) \mbox{ is representable} \}$$ and $$\mathcal{\mathcal A}_l:=\{X \in
\mathcal{A}\;|\; D\circ \operatorname{Hom}_\mathcal A(-, X) \mbox{ is representable}\}.$$ Then both $\mathcal{A}_r$ and $\mathcal{A}_l$ are thick triangulated subcategories of $\mathcal A$. Moreover, one has
$(i)$ \ There is a unique $k$-functor $S: \mathcal{A}_r \longrightarrow \mathcal{A}_l$ which is an equivalence, such that there are natural isomorphisms \begin{align}\operatorname{Hom}_\mathcal A(X, -) \simeq D\circ\operatorname{Hom}_\mathcal A(-, S(X)), \ \ \forall \ X\in \mathcal {A}_r,\end{align} which are natural in $X$. $S$ is called the generalized Serre functor, with range $\mathcal A_r$ and domain $\mathcal A_l$.
$(ii)$ \ There exists a natural isomorphism $\eta^S: S\circ [1]\longrightarrow [1]\circ S$ such that the pair $(S, \eta^S): \mathcal{A}_r \longrightarrow \mathcal{A}_l$ is a triangle-equivalence. \end{lem}
In this terminology, a non-zero object $X$ is a $d$-th CY object if and only if $X\in\mathcal A_r$ and $S(X)\cong X[d]$, by $(3.5)$ and Yoneda Lemma.
\vskip10pt
\subsection{} {\bf Proof of Theorem 3.2.}\quad Let $X$ be an indecomposable $d$-th CY object. By Lemma 3.3 we have a non-degenerate bilinear form $(-,-)_X: \ \operatorname{Hom}_{\mathcal A}(X, X)\times \operatorname{Hom}_{\mathcal A}(X, X[d]) \longrightarrow k.$ It follows that there exists $0\ne h\in \operatorname{Hom}_{\mathcal A}(X, X[d])$ such that $(\operatorname{radHom}_{\mathcal A}(X, X), h)_X=0.$ Embed $h$ into a distinguished triangle as in $(3.2)$. We claim that it is an Auslander-Reiten triangle. For this it remains to prove (AR4) in 2.1. Let $X'$ be indecomposable and $t: X'\longrightarrow X$ a non-isomorphism. Then by $(3.4)$ for any $u\in \operatorname{Hom}_{\mathcal A}(X, X')$ we have $(u, ht)_{X'} = (tu, h)_X = 0.$ Since $(-,-)_{X'}$ is non-degenerate, it follows that $ht=0$.
Conversely, let $(3.2)$ be an Auslander-Reiten triangle. In order to prove that $X$ is a $d$-th CY object, by Lemma 3.3 it suffices to prove that for any indecomposable $Z$ there exists a non-degenerate bilinear form $(-,-)_{Z}$ as in $(3.3)$ satisfying $(3.4)$. For this, choose an arbitrary linear function $\operatorname {tr}\in D\circ \operatorname{Hom}_{\mathcal A}(X, X[d])$ such that $\operatorname {tr}(h)\ne 0$, and define $(u, z)_Z = \operatorname{tr}(zu).$ Then $(3.4)$ is automatically satisfied. It remains to prove that $(-, -)_Z$ is non-degenerate. In fact, for any $0\ne z\in \operatorname{Hom}_{\mathcal A}(Z, X[d])$, by Lemma 3.4 there exists $u\in \operatorname{Hom}_{\mathcal A}(X, Z)$ such that $zu = h$. So $(u, z)_Z = \operatorname{tr}(zu) = \operatorname{tr}(h)\ne 0$. Similarly, for any $0\ne u\in \operatorname{Hom}_{\mathcal A}(X, Z)$ we have $z\in \operatorname{Hom}_{\mathcal A}(Z, X[d])$ such that $(u, z)_Z \ne 0$. This proves the non-degenerateness of $(-, -)_Z$.
\vskip10pt
Now we prove that $Y$ in $(3.2)$ is also a $d$-th CY object. We make use of the generalized Serre functor in [Ch] (For the reader prefer Serre functor, one can assume the existence, and use Lemma 2.1). Since $X, \ X[d-1]\in\mathcal A_r$, it follows from Lemma 3.5 that $Y\in\mathcal A_r$. Applying the generalized Serre functor $(S, \eta^S)$ to $(3.2)$ we get the distinguished triangle (by Lemma 3.5$(ii)$) $$S(X[d-1])\stackrel {S(f)}\longrightarrow S(Y)\stackrel {S(g)}\longrightarrow S(X)\stackrel {\eta^S_{X[d-1]}\circ S(h)}\longrightarrow S(X[d-1])[1].\eqno(*)$$ Also, we have the Auslander-Reiten triangle $$X[2d-1]\stackrel {f[d]} {\longrightarrow}Y[d] \stackrel {g[d]} {\longrightarrow}X[d]\stackrel {(-1)^d h[d]}{\longrightarrow} X[2d].$$ Since $X$ is a $d$-th CY object it follows that we have an isomorphism $w: S(X)\longrightarrow X[d]$. Note that $S(h)\ne 0$ means that $S(g)$ is not a retraction ([Hap1], p.7). Thus, by (AR3) there exists $v: S(Y)\longrightarrow Y[d]$ such that $w\circ S(g) = g[d]\circ v$. By the definition of a triangulated category we get $u: S(X[d-1])\longrightarrow X[2d-1]$ such that the following diagram is commutative \[\xymatrix{ S(X[d-1])\ar[r]^-{S(f)} \ar[d]_{u} & S(Y) \ar[r]^-{S(g)}\ar[d]_{v}& S(X)\ar[rr]^-{\eta_{X[d-1]}^S \circ S(h)}\ar[d]_{w}&& S(X[d-1])[1]\ar[d]\ar[d]_{u[1]}\\ X[2d-1]\ar[r]^-{f[d]} & Y[d]\ar[r]^-{g[d]}& X[d]\ar[rr]^-{(-1)^d h[d]}&& X[2d].}\eqno(**)\] We claim that $u: S(X[d-1])\longrightarrow X[d]$ is an isomorphism, hence by the property of a triangulated category we know that $v: S(Y)\longrightarrow Y[d]$ is also an isomorphism, i.e. $Y$ is a $d$-th CY object.
Otherwise $u: S(X[d-1])\longrightarrow X[2d-1]$ is not an isomorphism. Note that $S$ is {\em only} defined on $\mathcal A_r$; hence we do not know if $(*)$ is an Auslander-Reiten triangle.
Since $X[d-1]$ is a $d$-th CY object, it follows that we have an isomorphism $\alpha: X[2d-1]\longrightarrow S(X[d-1])$, hence $\alpha\circ u\in \operatorname{Hom}_\mathcal A(S(X[d-1]), S(X[d-1]))$. So we have $u'\in \operatorname{Hom}_\mathcal A(X[d-1], X[d-1])$ such that $ \alpha\circ u = S(u')$, and $u'$ is also a non-isomorphism. Since $X[d-1]\stackrel f \longrightarrow Y \stackrel g \longrightarrow X\stackrel h\longrightarrow X[d]$ is an Auslander-Reiten triangle it follows from (AR4') that $u'\circ h[-1] = 0$, or equivalently, $S(u')\circ S(h[-1]) = 0$ (note that $h[-1]\in \mathcal A_r$). Thus we have $$u[1] \circ S(h[-1])[1]\circ \eta^S_{X[-1]} = 0$$ where $\eta^S_{X[-1]}: S(X)\longrightarrow S(X[-1])[1]$ is an isomorphism. By the naturality of $\eta^S$ we have the commutative diagram \[\xymatrix{ S(X) \ar[rr]^{\eta_{X[-1]}^S} \ar[d]_{S(h)}&& S(X[-1])[1] \ar[d]^-{S(h[-1])[1]}\\ S(X[d]) \ar[rr]^{\eta_{X[d-1]}^S} && S(X[d-1])[1].}\] It follows that we have $u[1]\circ \eta^S_{X[d-1]}\circ S(h) = 0,$ and hence by the commutative diagram $(**)$ we get a contradiction \ $(-1)^dh[d]\circ w = u[1]\circ \eta^S_{X[d-1]}\circ S(h) = 0.$ This completes the proof.
$\blacksquare$
\vskip10pt
\subsection{} {\bf Remark 3.6.} Let $\mathcal A$ be a Hom-finite Krull-Schmidt triangulated $k$-category. If every indecomposable $X$ in $\mathcal A$ is a $d_X$-th CY object, then $\mathcal A$ has a Serre functor $F$ with $F(X)\cong X[d_X]$.
In fact, by Theorem 3.2 $\mathcal A$ has right and left Auslander-Reiten triangles, and then by Theorem I.2.4 in [RV] $\mathcal A$ has Serre functor $F$. By Prop.I.2.3 in [RV] and $(3.2)$ we have $F(X)\cong \tau_\mathcal A(X)[1] = X[d_X-1][1] = X[d_X]$ for any indecomposable $X$.
\vskip10pt
However, even if all indecomposables are $d$-th CY objects with the same $d$, we do not know whether $\mathcal A$ is a Calabi-Yau category, although $F$ and $[d]$ coincide on objects. The examples we know have a positive answer to this question.
$\blacksquare$
\vskip10pt
\section{\bf Minimal Calabi-Yau objects}
The purpose of this section is to describe all the Calabi-Yau objects of a Hom-finite Krull-Schmidt triangulated $k$-category with a Serre functor.
\subsection{} Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category. A $d$-th CY object $X$ is said to be {\em minimal} if any {\em proper} direct summand of $X$ is {\em not} a $d$-th CY object.
\vskip10pt
\begin{lem} \ Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category with right Serre functor $F$. Then a non-zero object $X$ is a minimal $d$-th CY object if and only if the following are satisfied:
\vskip10pt
1. The indecomposable direct summands of $X$ can be ordered as $X = X_1\oplus \cdots \oplus X_r$ such that \begin{align} F(X_1) \cong X_2[d], \ F(X_2) \cong X_3[d], \ \cdots, \ F(X_{r-1}) \cong X_r[d], \ F(X_r)\cong X_1[d].\end{align}
We call the cyclic order arising from this property {\em a canonical order} of $X$ (with respect to $F$ and $[d]$).
2. \ $X$ is multiplicity-free, i.e. its indecomposable direct summands are pairwise non-isomorphic. \end{lem}
\noindent {\bf Proof.}\quad In the following we often use that a non-zero object $X$ is a $d$-th CY object if and only if $F(X)\cong X[d]$.
Let $X= X_1\oplus \cdots \oplus X_r$ be a minimal $d$-th CY object, with each $X_i$ indecomposable and $r\ge 2$. Then $F(X_1) \oplus\cdots\oplus F(X_r)\cong X_1[d]\oplus\cdots\oplus X_r[d]$. Since $\mathcal{A}$ is Krull-Schmidt, it follows that there exists a permutation $\sigma$ of $1, \cdots, r$, such that $F(X_i)\cong X_{\sigma(i)}[d]$ for each $i$. Write $\sigma$ as a product of disjoint cyclic permutations. Since $X$ is minimal, it follows that $\sigma$ has to be a cyclic permutation of length $r$. By reordering the indecomposable direct summands of $X$, one may assume that $\sigma = (12\cdots r)$. Thus, $X$ satisfies the condition 1.
Now, we consider a canonical order $X= X_1\oplus \cdots \oplus X_r$. If $X_i\cong X_j$ for some $1\le i < j\le r$, then $F(X_{j-1})\cong X_j[d]\cong X_i[d]$, and it follows that $X_i\oplus \cdots\oplus X_{j-1}$ is already a $d$-th CY object, which contradicts the minimality of $X$. This proves that $X$ is multiplicity-free.
\vskip10pt Conversely, assume that a multiplicity-free object $X= X_1\oplus \cdots \oplus X_r$ is in a canonical order. By $(4.1)$ we have $F(X)\cong X[d]$. So $X$ is a $d$-th CY object. It remains to show the minimality. If not, then there exists a proper direct summand $X_{i_1}\oplus \cdots\oplus X_{i_t}$ of $X$ which is a minimal $d$-th CY object, so $1\le t< r$. By what we have proved above we may assume that this is a canonical order. Then $$F(X_{i_1})\cong X_{i_2}[d],\ \cdots, \ F(X_{i_{t-1}})\cong X_{i_t}[d], \ F(X_{i_t})\cong X_{i_1}[d].$$ Since $X= X_1\oplus \cdots \oplus X_r$ is also in a canonical order, it follows that (note that we work on indices modulo $r$, e.g. if $i_1 = r$ then $i_1+1$ is understood to be $1$) $$F(X_{i_1})\cong X_{i_1+1}[d], \ \cdots, \ F(X_{i_{t-1}})\cong X_{i_{t-1}+1}[d], \ F(X_{i_t})\cong X_{i_t+1}[d].$$ Since $X$ is multiplicity-free, it follows that (considering indices modulo $r$) $$i_2 = i_1+1, \ \cdots, \ i_t = i_{t-1}+1, \ i_1 = i_t + 1,$$ hence $i_1 = i_1+t$, which means $r\mid t$. This is impossible since $1\le t< r.$
$\blacksquare$
\vskip10pt
\subsection{} Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category with Serre functor $F$. For each $d\in\Bbb Z$, consider the triangle-equivalence $G:=[-d]\circ F\cong F\circ [-d]: \mathcal A\longrightarrow \mathcal A$. For each indecomposable $M\in\mathcal A$, denote by $o(G_M)$ the relative order of $G$ with respect to $M$, that is, $r:=o(G_M)$ is the minimal positive integer such that $G^r(M)\cong M$ if such an integer exists; otherwise $o(G_M) = \infty$. Denote by $\operatorname{Aut}(\mathcal A)$ the group of the triangle-equivalences of $\mathcal A$, and by $\langle G\rangle$ the cyclic subgroup of $\operatorname{Aut}(\mathcal A)$ generated by $G$. Then $\langle G\rangle$ acts naturally on $\operatorname{Ind}(\mathcal A)$, the set of the isoclasses of indecomposables of $\mathcal A$. Denote by $\mathcal O_M$ the
$G$-orbit of an indecomposable $M$. Then $|\mathcal O_M| = o(G_M)$. If $|\mathcal O_M| < \infty$ then the set $\mathcal O_M$ is a finite $G$-orbit.
\vskip10pt
Denote by $\operatorname{Fin}\mathcal{O}(\mathcal A, d)$ the set of all the finite $G$-orbits of $\operatorname{Ind}(\mathcal A)$, and by $\operatorname{MinCY}(\mathcal A, d)$ the set of isoclasses of minimal $d$-th CY objects. We have the following
\vskip10pt
\begin {thm} \ \ Let $\mathcal{A}$ be a Hom-finite Krull-Schmidt triangulated $k$-category with Serre functor $F$. Then
\vskip10pt
$(i)$ \ Every $d$-th CY object is a direct sum of finitely many minimal $d$-th CY objects.
\vskip10pt
$(ii)$ \ With the notations above, for each $d\in\Bbb Z$ the map \begin{align}\mathcal O_M \ \mapsto \ \bigoplus\limits_{X\in \mathcal O_M} X = M\oplus G(M)\oplus\cdots \oplus G^{o(G_M)-1}(M)\end{align} \noindent gives a one-to-one correspondence between the sets $\operatorname{Fin}\mathcal{O}(\mathcal A, d)$ and $\operatorname{MinCY}(\mathcal A, d)$, where $G: = [-d]\circ F$.
Thus, a minimal $d$-th CY object is exactly the direct sum of all the objects in a finite $G$-orbit of $\operatorname{Ind}(\mathcal A)$.
\vskip10pt
$(iii)$ Non-isomorphic minimal $d$-th CY objects are disjoint, i.e. they have no isomorphic indecomposable direct summands. \end {thm}
\noindent {\bf Proof.}\quad Let $X$ be a $d$-th CY object. If $X$ is not minimal, then $X = Y\oplus Z$ with $Y\ne 0\ne Z$ such that $F(Y)\oplus F(Z) \cong Y[d]\oplus Z[d]$ and $F(Y)\cong Y[d]$. Since $\mathcal{A}$ is Krull-Schmidt, it follows that $F(Z) \cong Z[d]$, i.e. $Z$ is also a $d$-th CY object. Then $(i)$ follows by induction.
Thanks to Lemma 2.1, $(4.1)$ becomes $X_i = G^{i-1}(X_1), \ 1\le i\le r.$ Then $(ii)$ is a reformulation of Lemma 4.1. Moreover $(iii)$ follows from $(ii)$.
$\blacksquare$
\vskip10pt
\begin {cor} \ \ Let $A$ be a finite-dimensional self-injective algebra. Then $X$ is a minimal $d$-th CY module if and only if $X$ is of the form $X \cong \bigoplus \limits_{0\le i\le r-1}G^i(M)$, where $M$ is an indecomposable non-projective $A$-module with $r: = o(G_M)<\infty$, and $G: = \Omega^{d+1}\circ \mathcal N$. \end {cor}
\noindent {\bf Proof.}\quad Note that in this case $[-d]\circ F = \Omega^{d+1}\circ \mathcal N$.
$\blacksquare$
\vskip10pt
\subsection{} As an example, we describe all the Calabi-Yau objects in $D^b(kQ\mbox{-mod})$, the bounded derived category of $kQ\mbox{-mod}$, where $Q$ is a finite quiver without oriented cycles.
Note that indecomposable objects of $D^b(kQ\mbox{-mod})$ are exactly stalk complexes of indecomposable $kQ$-modules. The category $D^b(kQ\mbox{-mod})$ has Serre functor $F =[1]\circ \tau_D$, where $\tau_D$ is the Auslander-Reiten translation of $D^b(kQ\mbox{-mod})$. Recall that $\tau_D$ is given by $$\tau_D(M) = \begin{cases}\tau(M), & \ \mbox{if} \ M \ \mbox{is an indecomposable non-projective}; \\ I[-1], & \ \mbox{if} \ M = P \ \mbox{is an indecomposable projective}, \end{cases}$$ where $I$ is the indecomposable injective with \ $\operatorname{soc}(I) = P/\operatorname{rad} P$, and $\tau$ is the Auslander-Reiten translation of $kQ\mbox{-mod}$ ([Hap1], p.51). Note that $D^b(kQ\mbox{-mod})$ is {\em not} a Calabi-Yau category unless $Q$ is the trivial quiver with one vertex and no arrows. However, the cluster category $\mathcal C_{kQ}$ introduced in [BMRRT], which is the orbit category of $D^b(kQ\mbox{-mod})$ with respect to the functor $\tau_D^{-1}\circ [1]$, is a Calabi-Yau category of CY dimension $2$ ([BMRRT]).
Let $M$ be a minimal Calabi-Yau object of $D^b(kQ\mbox{-mod})$ of CY dimension $d$ (in this case $o([1]_M) = \infty$, hence $d$ is unique). By shifts we may assume that $M$ is a $kQ$-module. By $F(M)=\tau_D(M[1]) = \tau_D(M)[1] = M[d]$ we see $d = 1$ or $0$. Note that $kQ$ admits an indecomposable projective-injective module if and only if $Q$ is of type $A_n$ with the linear orientation. However, in this case the unique indecomposable projective-injective module $P = I$ does not satisfy the relation $\operatorname{soc}(I) = P/\operatorname{rad} P$. It follows that $d\ne 0$. Thus $d=1$ and $\tau(M) = M$. Consequently, $Q$ is an affine quiver and $M$ is a $\tau$-periodic (regular) module of period $1$. All such modules are well-known, by the classification of representations of affine quivers (see Dlab-Ringel [DR]). Thus we have
\vskip10pt
\begin{prop} \ \ $D^b(kQ\mbox{-mod})$ admits a Calabi-Yau object $M$ if and only if $Q$ is an affine quiver. In this case, $M$ is minimal if and only if $M$ is an indecomposable in a homogeneous tube of the Auslander-Reiten quiver of $kQ$-mod, or $M$ is the direct sum of all the indecomposables of the same quasi-length in a non-homogeneous tube of the Auslander-Reiten quiver of $kQ$-mod, up to shifts. Moreover, all such $M$'s have CY dimension $1$. \end{prop}
\vskip10pt
\section{\bf Calabi-Yau modules of self-injective Nakayama algebras}
The purpose of this section is to classify all the $d$-th CY modules of self-injective Nakayama algebras $\Lambda(n, t), \ n\ge 1, \ t\ge 2$, where $d$ is any given integer. By Theorem 4.2$(i)$ it suffices to consider the minimal $d$-th CY $\Lambda$-modules. By Corollary 4.3 this reduces to computing the relative order $o(G_M)$ of $G: = \Omega^{d+1}\circ \mathcal N$ with respect to any indecomposable $\Lambda(n, t)$-module $M=S_i^l, \ 1\le l\le t-1$, for any integer $d$.
\vskip10pt
\subsection{} Recall that $\Lambda(n, t)$ is the quotient of the path algebra of the cyclic quiver with $n$ vertices by the truncated ideal $J^t$, where $J$ is the two-sided ideal generated by the arrows. From now on we write $\Lambda$ instead of $\Lambda(n, t)$.
We keep the notations introduced in 2.5. Note that the indecomposable module $S^l_i$ \ ($i\in\Bbb Z/n\Bbb Z,$ \ $1\le l\le t-1$) \ has a natural $k$-basis, consisting of all the paths of quiver $\Bbb Z_n$ starting at the vertex $i+l-t$ and of lengths at least $t-l$: $$\gamma^{t-l}_{i+l-t}, \ \gamma^{t-l+1}_{i+l-t}, \ \cdots, \ \gamma^{t-1}_{i+l-t}.$$ For $1\le l\le t-1$ denote by $\sigma_{i}^{l}: S_{i}^{l}\longrightarrow S_{i-1}^{l+1}$ the inclusion by embedding the basis above; and for $2\le l\le t$ denote by $p_{i}^{l}: S_{i}^{l}\longrightarrow S_{i}^{l-1}$ the $\Lambda$-epimorphism given by the right multiplication by arrow $a_{i+l-t-1}$. These $\sigma_i^l$'s and $p_i^l$'s are all the irreducible maps of $\Lambda$-mod, up to scalars.
\vskip10pt
We need the explicit actions of functors $\mathcal N$ and $\Omega^{-(d+1)}$ of $\Lambda\underline {\mbox{-mod}}$. By the exact sequences $0 \longrightarrow S_{i}^l \longrightarrow I(i+l-1)\longrightarrow S_{i+l-t}^{t-l}\longrightarrow 0$ (with the canonical maps), via the basis above one has the actions of functor $\Omega^{-1}$ for $i\in\Bbb Z/n\Bbb Z, \ \ 1\le l \le t-1$: \begin{align*}\Omega^{-1}(S^l_i) = S_{i+l-t}^{t-l}, \ \ \ \ \Omega^{-1}(\sigma^l_i) = p_{i+l-t}^{t-l}, \ \ \ \ \Omega^{-1}(p^l_i) = \sigma_{i+l-t}^{t-l}.\end{align*} By induction one has in $\Lambda\underline {\mbox{-mod}}$ for any integer $m$ (even negative): \begin{align} \Omega^{-(2m-1)}(S_i^l) = S_{i+l-mt}^{t-l}; \ \ \ \ \Omega^{-2m}(S_i^l) = S_{i-mt}^{l};\end{align} and $$\Omega^{-(2m-1)}(\sigma_i^l) = p_{i+l-mt}^{t-l}, \ \ \ \ \Omega^{-(2m-1)}(p_i^l) = \sigma_{i+l-mt}^{t-l};\eqno(*)$$ and $$\Omega^{-2m}(\sigma_i^l) = \sigma_{i-mt}^{l}, \ \ \ \ \Omega^{-2m}(p_i^l) = p_{i-mt}^{l}.\eqno(**)$$ In particular, we have $$o([1]) = \begin{cases} n, & \ t = 2;\\ 2m, & \ t\ge 3,\end{cases}\eqno(***)$$ where $m$ is the minimal positive integer such that $n\mid mt.$
\vskip10pt
\subsection{} Again using the natural basis of $S_i^l$ one has the following commutative diagrams in $\Lambda\underline {\mbox{-mod}}$: \[\xymatrix{\mathcal N(S_{i}^l)\ar[rr]^{\mathcal N(\sigma_{i}^l)} \ar[d]_{\theta_i^l}^-{\wr} && \mathcal N(S_{i-1}^{l+1})\ar[d]_{\theta_{i-1}^{l+1}}^-{\wr}\\ S_{i+1-t}^l \ar[rr]^{\sigma_{i+1-t}^l} && S_{i-t}^{l+1};} \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \xymatrix{ \mathcal N(S_{i}^l)\ar[rr]^{\mathcal N(p_{i}^l)} \ar[d]_{\theta_i^l}^-{\wr} && \mathcal N(S_{i}^{l-1})\ar[d]_{\theta_i^{l-1}}^-{\wr}\\ S_{i+1-t}^l \ar[rr]^{p_{i+1-t}^l} && S_{i+1-t}^{l-1}.}\]
\vskip10pt We justify the commutative diagrams above. Note that for any finite quiver $Q$ the bimodule structure of $D(kQ)$ is given by (using the dual basis) $$p^*a = \begin{cases} b^*, \ & \mbox{if} \ ab = p, \\ 0, \ & \mbox{otherwise;}\end{cases} \ \ \ \ \ \ \ \ \ \ \ \mbox{and} \ \ \ \ \ \ \ \ \ \ \ ap^* = \begin{cases} b^*, \ & \mbox{if} \ ba = p, \\ 0, \ & \mbox{otherwise.}\end{cases}$$ for any paths $p$ and $a$. Note that $N(S_{i}^l) = D(\Lambda)\otimes_\Lambda S_{i}^l$ is spanned by $(\gamma_j^{l'})^*\otimes_\Lambda \gamma_{i+l-t}^{t-u}$, where $j\in\Bbb Z/n\Bbb Z, \ 1\le l'\le t-1, \ 1\le u\le l.$ By $(\gamma_j^{l'})^*\otimes_\Lambda \gamma_{i+l-t}^{t-u}= (\gamma_j^{l'})^*\gamma_{i+l-t}^{t-u}\otimes_\Lambda e_{i+l-t}$ we see that if $(\gamma_j^{l'})^*\otimes_\Lambda \gamma_{i+l-t}^{t-u}\ne 0$ then $j = i+l-l'-u$; and in this case we have $$(\gamma_{i+l-l'-u}^{l'})^*\otimes_\Lambda \gamma_{i+l-t}^{t-u}= (\gamma_{i+l-l'-u}^{l'})^* \gamma_{i+l-t}^{t-u}\otimes_\Lambda e_{i+l-t} =(\gamma_{i+l-(l'+u)}^{l'+u-t})^*\otimes_\Lambda e_{i+l-t}.$$ This makes sense only if $l'+u\ge t$. So we have a basis of $N(S_{i}^l)$: $$(\gamma_{i+l-t-v}^v)^*\otimes_\Lambda e_{i+l-t}, \ \ 0\le v\le l-1.$$ Using the natural basis of $S_{i+1-t}^l$ given in 5.1 we have a $\Lambda$-isomorphism $\theta_i^l: \ \mathcal N(S_{i}^l)\longrightarrow S_{i+1-t}^l$ for any $i\in\Bbb Z/n\Bbb Z$ and $1\le l\le t-1$: $$\theta_i^l: \ (\gamma_{i+l-t-v}^v)^*\otimes_\Lambda e_{i+l-t}\mapsto \gamma_{i+1+l-2t}^{t-(v+1)}, \ \ 0\le v\le l-1.$$ (One checks that this is indeed a left $\Lambda$-map.) 
Note that $N(\sigma_{i}^l): \ \mathcal N(S_{i}^l)\longrightarrow \mathcal N(S_{i-1}^{l+1})$ is a natural embedding given by $$(\gamma_{i+l-t-v}^v)^*\otimes_\Lambda e_{i+l-t}\mapsto (\gamma_{i+l-t-v}^v)^*\otimes_\Lambda e_{i+l-t}, \ \ 0\le v\le l-1;$$ and that $N(p_{i}^l): \ \mathcal N(S_{i}^l)\longrightarrow \mathcal N(S_{i}^{l-1})$ is a $\Lambda$-epimorphism given by $$(\gamma_{i+l-t-v}^v)^*\otimes_\Lambda e_{i+l-t}\mapsto (\gamma_{i+l-t-v}^{v-1})^*\otimes_\Lambda e_{i+l-1-t}, \ \ 0\le v\le l-1$$ where $\gamma_{i+l-1-t}^{-1}$ is understood to be $0$. Then one easily checks the following $$\sigma_{i+1-t}^l\circ \theta_i^l = \theta_{i-1}^{l+1}\circ \mathcal N(\sigma_i^l); \ \ \ \ p_{i+1-t}^l\circ \theta_i^l = \theta_{i}^{l-1}\circ \mathcal N(p_i^l).$$ This justifies the commutative diagrams.
Since all these $\theta_i^l$ depend only on $i$ and $l$, that is, they do not depend on the particular maps $\sigma_i^l$ or $p_i^l$ (this is important for the bi-naturality of a Calabi-Yau category), it follows, without loss of generality, that we can specialize these maps to identities. Thus we have \begin{align} \mathcal N(S^l_i) = S_{i+1-t}^{l}, \ \ \ \mathcal N(\sigma^l_i) = \sigma_{i+1-t}^{l}, \ \ \ \mathcal N(p^l_i) = p_{i+1-t}^{l}.\end{align}
\vskip10pt
\subsection{} By $(***)$ we have $o([1]) < \infty$; it follows, without loss of generality, that we can assume $d\ge 0$. For convenience, set $d(t): = 1 + \frac{(d-1)t}{2}\in \frac {1}{2}\Bbb Z$, regardless of whether $d$ is even or odd; denote by $N=N(d, n, t)$ the minimal positive integer such that \begin{align} \begin{cases} n\mid N d(t), \ &\mbox{if} \ (d-1)t \ \mbox{is even}; \\ n\mid N(2d(t)), \ &\mbox{if} \ (d-1)t \ \mbox{is odd}. \end{cases}\end{align} (When $2d(t)$ is odd, we will write it together in the following.) \vskip10pt By $(5.1)$ we have \ $\Omega^{(2m-1)}(S_i^l) = S_{i+l+(m-1)t}^{t-l}$ \ and \ $ \Omega^{2m}(S_i^l) = S_{i+mt}^{l}$, hence by $(5.2)$ we have (remember $G: = \Omega^{d+1}\circ \mathcal N$ and $d\ge 0$) \begin{align*} G(S_i^l) = S_{i+1+(m-1)t}^{l}, \ \mbox{if} \ d = 2m-1\end{align*} and \begin{align*} G(S_i^l) = S_{i+l+1+(m-1)t}^{t-l}, \ \mbox{if} \ d = 2m.\end{align*} Thus, by induction we have \begin{align} G^{m'}(S_i^l) = S_{i+m'(1+(m-1)t)}^{l} = S_{i+m'd(t)}^{l}, \ \mbox{if} \ d = 2m-1;\end{align} and \begin{align} G^{2m'}(S_i^l) = S_{i+m'(2+(2m-1)t)}^{l} = S_{i+m'(2d(t))}^{l}, \ \mbox{if} \ d = 2m, \end{align} and \begin{align} G^{2m'+1}(S_i^l) = S_{i+(m'+1)(2d(t))+l-1-mt}^{t-l}, \ \mbox{if} \ d = 2m, \end{align} for $m'\ge 0$.
\subsection{} If $d= 2m-1\ge1$, then by $(5.4)$ we see that $o(G_{S_i^l}) = N$, where $N$ is as given in $(5.3)$, i.e. $N$ is the minimal positive integer such that $n\mid N(1+(m-1)t)$. It follows from Corollary 4.3 and $(5.4)$ that we have
\vskip10pt
\begin{lem} Let $d = 2m-1\ge 1$. Then $M$ is a minimal $d$-th CY $\Lambda$-module if and only if $M$ is isomorphic to one of the following
\begin{align} S_i^l\oplus S_{i+d(t)}^l\oplus S_{i+2d(t)}^l\oplus \cdots \oplus S_{i + (N-1)d(t)}^l, \ \ 1\le l\le t-1, \ \ i\in\Bbb Z/n\Bbb Z.\end{align}
\vskip10pt In particular, all the minimal $d$-th CY modules have the same number $N = N(d, n, t)$ of indecomposable direct summands. \end{lem}
\vskip10pt
\subsection{} If $d = 2m\ge 0$ and $t$ is odd, then by $(5.6)$ we see $G^{2m'+1}(S_i^l)\ne S_i^l$ for any $i, l$ (since $t-l\ne l$). Note that in this case $(d-1)t$ is odd. It follows from $(5.5)$ that $o(G_{S_i^l}) = 2N$, where $N$ is as given in $(5.3)$, i.e. $N$ is the minimal positive integer such that $n\mid N(2d(t))$. It follows from Corollary 4.3, $(5.5)$ and $(5.6)$ that we have
\vskip10pt
\begin{lem} Let $t\ge 3$ be an odd integer and $d = 2m\ge 0$. Then $M$ is a minimal $d$-th CY $\Lambda$-module if and only if $M$ is isomorphic to one of the following \begin{align} S_i^l\oplus S_{i+l' + 2d(t)}^{t-l} \oplus S_{i+2d(t)}^l\oplus S_{i+l'+ 4d(t)}^{t-l} \oplus \cdots \oplus S_{i + 2d(t)(N-1)}^l\oplus S_{i + l'+ 2d(t)N}^{t-l}, \end{align} where $l': = l-1-mt,$ \ $1\le l\le t-1$ \ and \ $i\in\Bbb Z/n\Bbb Z$.
In particular, any minimal $d$-th CY module has $2N = 2N(d, n, t)$ indecomposable direct summands.\end{lem}
\vskip10pt
\subsection{} Let $d = 2m\ge 0$ and $t=2s$. Then $d(t) = 1+ (2m-1)s\in\Bbb Z$.
First, we consider $o(G_{S^s_i})$. In this case $(5.5)$ and $(5.6)$ can be written in a unified way: \begin{align} G^{m'}(S_i^s) = S_{i+m'd(t)}^{s}, \ m'\ge 0, \ \mbox{if} \ d = 2m, \ t=2s.\end{align} So we have $o(G_{S_i^s}) = N$, where $N$ is as given in $(5.3)$, i.e. $N$ is the minimal positive integer such that $n\mid N(1+(2m-1)s)$.
\vskip10pt
Now, we consider $o(G_{S^l_i})$ with $l\ne s, \ 1\le l\le t-1$. In this case $(5.5)$ and $(5.6)$ are written respectively as: \begin{align} G^{2m'}(S_i^l) = S_{i+2m'd(t)}^{l}, \ \mbox{if} \ d = 2m, \ t=2s, \ l\ne s,\end{align} and \begin{align} G^{2m'+1}(S_i^l) = S_{i+(2m'+1)d(t)+l-s}^{t-l}, \ \mbox{if} \ d = 2m, \ t=2s, \ l\ne s\end{align} for $m'\ge 0.$ Since $l\ne t-l$ for $l\ne s$, it follows that $G^{2m'+1}(S_i^l) \ne S_i^l$. So by $(5.10)$ we see $o(G_{S_i^l}) = 2N',$ where $N'$ is the minimal positive integer such that $n\mid 2N'd(t)$. In order to determine $N'$, we divide the argument into two cases.
{\em Case} 1. \ If $N = N(d, n, t) = N(2m, n, 2s)$ is even, then $o(G_{S_i^l}) = N$ for any $1\le l\le t-1$. It follows from Corollary 4.3, $(5.9)$, $(5.10)$, and $(5.11)$ that we have the following (note that in this case $(5.9)$ is exactly $(5.10)$ together with $(5.11)$, by taking $l=s$)
\vskip10pt
\begin{lem} Let $t = 2s$ and $d = 2m\ge 0$. Assume that $N = N(d, n, t)$ is even. Then $M$ is a minimal $d$-th CY $\Lambda$-module if and only if $M$ is isomorphic to one of the following \begin{align} S_i^l\oplus S_{i+d(t)+l-s}^{t-l} \oplus S_{i+2d(t)}^l\oplus S_{i+3d(t)+l-s}^{t-l}\oplus \cdots \oplus S_{i + (N-2)d(t)}^l\oplus S_{i + (N-1)d(t)+l-s}^{t-l}\end{align} where $1\le l\le t-1, \ i\in\Bbb Z/n\Bbb Z$.
\vskip10pt In particular, all the minimal $d$-th CY modules have the same number $N$ of indecomposable direct summands. \end{lem}
\vskip10pt
It remains to deal with
{\em Case} 2. Let $N = N(d, n, t) = N(2m, n, 2s)$ be odd. Since by definition $N$ is the minimal positive integer such that $n\mid Nd(t)$, it follows that $N < o(G_{S_i^l}) = 2N' \le 2N.$ It is easy to see $N'=N$: otherwise $1\le 2N' - N\le N-1$ and $n\mid (2N' - N)d(t)$, which contradicts the minimality of $N$. It follows from Corollary 4.3, $(5.9)$, $(5.10)$, and $(5.11)$ that we have
\vskip10pt
\begin{lem} Let $t = 2s$ and $d = 2m\ge 0$. Assume that $N=N(d, n, t)$ is odd. Then $M$ is a minimal $d$-th CY $\Lambda$-module if and only if $M$ is isomorphic to one of the following
\begin{align} S_i^s\oplus S_{i+d(t)}^{s} \oplus S_{i+2d(t)}^s\oplus \cdots \oplus S_{i + (N-1)d(t)}^{s}\end{align} where $i\in\Bbb Z_n$, and \begin{align}\begin{matrix} S_i^l \oplus & S_{i+d(t)+l-s}^{t-l} \oplus & S_{i+2d(t)}^l \oplus & S_{i+3d(t)+l-s}^{t-l} \oplus \cdots \oplus & S_{i + (N-1)d(t)}^{l}\\ \oplus S_{i+l-s}^{t-l} \oplus & S_{i+d(t)}^{l} \oplus & S_{i+2d(t)+l-s}^{t-l} \oplus & S_{i+3d(t)}^{l} \oplus \cdots \oplus & S_{i + (N-1)d(t)+l-s}^{t-l}\end{matrix}\end{align} where $l\ne s$, \ $1\le l\le t-1$ and $i\in\Bbb Z_n$.
\vskip10pt In particular, all the minimal $d$-th CY modules have either $N$, or $2N$ indecomposable direct summands. \end{lem}
\vskip10pt
\subsection{} By Lemmas 5.1-5.4 all the minimal $d$-th CY modules of self-injective Nakayama algebras have been classified, where $d$ is any given integer. The main result of this section is as follows.
\vskip10pt
\begin{thm} For any $n\ge 1, \ t\ge 2, \ d\ge 0$, let $N=N(d, n, t)$ be as in $(5.3)$. Then $M$ is a minimal $d$-th CY \ $\Lambda$-module if and only if $M$ is isomorphic to one of the following
\vskip10pt
$(i)$ \ \ The modules in $(5.7)$, when $d=2m-1$;
$(ii)$ \ \ The modules in $(5.8)$, when $d=2m$ and $t$ is odd;
$(iii)$ \ \ The modules in $(5.12)$, when $d=2m$, $t=2s$, and $N(d, n, t)$ is even;
$(iv)$ \ \ The modules in $(5.13)-(5.14)$, when $d=2m$, $t=2s$, and $N(d, n, t)$ is odd.
\vskip10pt
In particular, any minimal $d$-th CY $\Lambda$-module has either $N$ or $2N$ indecomposable direct summands; and
\begin{align}\operatorname{min} \{d\ge 0 \ |\ N = c(M), \mbox{or} \ 2N = c(M) \} \ \le \operatorname{CYdim}(M) < o([1]_M) \le 2n \end{align} for any minimal Calabi-Yau $\Lambda$-module $M$, where $c(M)$ is the number of indecomposable direct summands of $M$. \end{thm} \noindent {\bf Proof.}\ \ By Lemmas 5.1-5.4 the CY dimension $d$ of any minimal Calabi-Yau module $M$ satisfies $N(d, n, t) = c(M) \ \mbox{or} \ \ 2N(d, n, t) = c(M).$
$\blacksquare$
\vskip10pt
\begin{rem} The modules in $(5.7)-(5.8)$ and $(5.12)-(5.14)$ have overlaps. This is because $d$ is not uniquely determined by a minimal $d$-th CY module. A general formula of the CY dimensions of the minimal Calabi-Yau modules seems to be difficult to obtain.
Note that the inequality on the left hand side in $(5.15)$ cannot be an equality in general. For example, take $n = 2, \ t=4, \ m = 2, \ d=2m-1=3$. Then $d(t) = 5$, \ $N = N(3, 2, 4) = 2$, and $S_i^l\oplus S_{i+1}^l, \ 1\le l\le 3$, are all the minimal $3$-rd CY modules, of CY dimension $0$ if $l = 2$, and $1$ if $l=1, \ 3$. However, the left hand side in $(5.15)$ is $0$ since $N(0, 2, 4) = 2$. \end{rem}
\vskip10pt
\section{\bf Self-injective Nakayama algebras with indecomposable Calabi-Yau modules}
In this section we determine all the self-injective Nakayama algebras $\Lambda = \Lambda(n, t), \ n\ge 1, \ t\ge 2$, which admit indecomposable Calabi-Yau modules.
\vskip10pt
Note that Erdmann and Skowro\'nski have proved in $\S 2$ of [ES] that self-injective algebras $A$ such that $A\underline {\mbox{-mod}}$ is Calabi-Yau of CY dimension $0$ (resp. $1$) are the algebras Morita equivalent to $\Lambda(n, 2)$ for some $n\ge 1$ (resp. $\Lambda(1, t)$ for some $t\ge 3$). So we assume that $t\ge 3$.
\vskip10pt
\begin{thm} \ \ Let $t\ge 3$. Then $\Lambda$ has an indecomposable Calabi-Yau module if and only if $n$ and $t$ satisfy one of the following conditions
$(i)$ \ $g.c.d. \ (n, \ t) = 1$. This is exactly the case where $\Lambda\underline {\mbox{-mod}}$ is a Calabi-Yau category. In this case we have $\operatorname{CYdim}(\Lambda) = 2m-1$, where $m$ is the minimal positive integer such that $n\mid (m-1)t + 1$.
\vskip10pt
$(ii)$ \ $g.c.d. \ (n,\ t)\ne 1$, \ $t = 2s$, \ and \ $g.c.d. \ (n,\ s) = 1$. This is exactly the case where $\Lambda\underline {\mbox{-mod}}$ is not a Calabi-Yau category but admits indecomposable Calabi-Yau modules.
In this case, we have $g.c.d.\ (n,\ t) = 2$ and
$(a)$\ \ $S_i^s, \ i\in \Bbb Z/n\Bbb Z,$ \ are all the indecomposable Calabi-Yau modules;
$(b)$ \ \ $S_i^l\oplus S_{i+l-s}^{t-l}, \ 1\le l\le s-1, \ i\in\Bbb Z/n\Bbb Z,$ \ are all the decomposable minimal $2m$-th CY modules, where $m$ is the minimal non-negative integer such that $n\mid (2m-1)s + 1$;
$(c)$ \ All of these modules in $(a)$ and $(b)$ have the same CY dimension $2m$. \end{thm}
\vskip10pt
{\bf Remark.} Bialkowski and Skowro\'nski [BS] have classified representation-finite self-injective algebras whose stable categories are Calabi-Yau. This includes the assertion $(i)$ of Theorem 6.1.
\vskip10pt
\noindent {\bf Proof.}\ \ If $\Lambda$ has an indecomposable $d$-th CY module $S_i^l$, then $\mathcal N(S^l_i) \cong \Omega^{-(d+1)}(S^l_i)$. By $(5.2)$ and $(5.1)$ we have \begin{align}1-t\equiv -mt \ (\mbox{mod} \ n), \ \mbox{if} \ d+1 =2m,\end{align} or \begin{align} t= t-l, \ 1-t\equiv l-mt \ (\mbox{mod} \ n), \ \mbox{if} \ d+1 =2m-1.\end{align} In the first case we get $g.c.d.\ (n, \ t) = 1.$ In the second case we get $t=2s, \ l=s$ and $g.c.d.\ (n, \ s) = 1.$ Excluding the overlap situations we conclude that either $g.c.d.\ (n, \ t) = 1;$ \ or \ $g.c.d.\ (n, \ t) \ne 1, \ t= 2s, \ g.c.d.\ (n, \ s) = 1$.
\vskip10pt
Assume that $g.c.d. \ (n, \ t) = 1$. Then there exists an integer $m$ such that $n\mid (m-1)t+1$. We choose $m$ positive (otherwise, add $n(-m+2)t$ to $m$) and minimal. Set $d: = 2m-1$. Then the same computation shows that every indecomposable is a $d$-th CY module. We claim that $\Lambda\underline {\mbox{-mod}}$ is a Calabi-Yau category. For this, it remains to show $\mathcal N(f) = \Omega^{-(d+1)}(f) =\Omega^{-2m}(f)$ for any morphism $f$ between indecomposables of $\Lambda\underline {\mbox{-mod}}$. Since $n\mid (m-1)t +1$, it follows from $(**)$ in $\S 5$ that \begin{align}\mathcal N(\sigma_i^l) = \Omega^{-2m}(\sigma^l_i), \ \ \mathcal N(p_i^l) = \Omega^{-2m}(p^l_i).\end{align} Since $\Lambda$ is representation-finite, it follows that $f$ is a $k$-combination of compositions of irreducible maps $\sigma^l_i$'s and $p^l_i$'s, hence $\mathcal N(f) = \Omega^{-2m}(f)$ by $(6.3)$. We stress here that this argument relies on the fact that all the isomorphisms $\theta_i^l$ in 5.2 depend only on $i$ and $l$, that is, they do not depend on the particular maps $\sigma_i^l$ or $p_i^l$. Otherwise we cannot take them as identities, and then we cannot get the naturality for the Calabi-Yau category of this case.
This proves the claim, hence $\Lambda\underline {\mbox{-mod}}$ is a Calabi-Yau category of CY dimension $D$, with $0\le D \le d = 2m-1$. We claim $D=d$. In fact, since every indecomposable is a $D$-th CY module, and since we have $l$ such that $t\ne t-l$, it follows from $(6.1)$ and $(6.2)$ that $D = 2m'-1$ with $n\mid (m'-1)t+1$, hence $D = d$, by the minimality of $m$.
The argument above also proves that if $\Lambda\underline {\mbox{-mod}}$ is a Calabi-Yau category then $g.c.d. \ (n, \ t) = 1$.
\vskip10pt
Assume that $g.c.d. \ (n,\ t)\ne 1$, \ $t = 2s$, \ and $g.c.d. \ (n, \ s) = 1$. Then there exists an integer $m'$ such that $n\mid (m'-1)s+1$. We choose a positive $m'$. Since $g.c.d.\ (n, \ t) \ne 1$, it follows that $m'$ is even, say $m' = 2m$ with $m\ge 1$. Let $m$ be the minimal positive integer such that $n\mid (2m-1)s+1$, and set $d: = 2m$. Then the same computation shows that \ $S_i^s, \ i\in \Bbb Z/n\Bbb Z,$ \ are all the indecomposable Calabi-Yau modules. This proves $(a)$.
By applying Lemma 5.4 to $d$ given above (note that the corresponding $N = N(d, n, t) = 1$ in this case), we know that $S_i^l\oplus S_{i+l-s}^{t-l}, \ 1\le l\le t-1, \ l\ne s, \ i\in\Bbb Z/n\Bbb Z,$ \ are all the decomposable minimal $2m$-th CY modules. By symmetry one can consider $1\le l\le s-1:$ if $l>s$ then one can replace $l$ by $t-l$, since $i = (i+l-s) + (t-l)-s.$ This proves $(b)$.
It remains to prove $(c)$. Let $\operatorname{CYdim}(S^s_i) = d'$. Since $g.c.d. \ (n,\ t)\ne 1$, it follows that $d'$ has to be an even integer $2m'\ge 0$ with $n\mid (2m'-1)s +1$. It follows that $d' = d$, by the minimality of $m$. Let $\operatorname{CYdim}(S_i^l\oplus S_{i+l-s}^{t-l}) = d'$. Then we have $\mathcal N(S^l_i) = \Omega^{-(d'+1)}(S_{i+l-s}^{t-l})$. Since $l\ne s$ it follows from $(5.2)$ and $(5.1)$ that $d'$ has to be $2m'\ge 0$ with $n\mid (2m'-1)s+1$. Again by the minimality of $m$ we have $d' = d$. This completes the proof.
$\blacksquare$
\vskip10pt
\begin{rem} $(i)$ $\operatorname{CYdim}(X)$ usually differs from $\operatorname{CYdim}(\mathcal A)$ in a Calabi-Yau category $\mathcal A$.
For example, if $n=3, \ t=4$, then $o([1]) = 6$, $\operatorname{CYdim}(\Lambda) = 5$, while $\operatorname{CYdim}(S_i^2) = 2$ and $o([1]_{S_i^2}) = 3, \ \forall \ i\in\Bbb Z/3\Bbb Z$.
However, for $t\ge 3$ and \ $g.c.d.\ (n,\ t) = 1$, if $X$ is indecomposable and $\operatorname{CYdim}(X)$ is odd then $\operatorname{CYdim}(X) = \operatorname{CYdim}(\Lambda)$. In fact, since $o([1]) < \infty$ it follows that $\operatorname{CYdim}(X) = 2m'-1\ge 1$, and $n\mid 1+(m'-1)t$. By Theorem 6.1$(i)$ $\operatorname{CYdim}(\Lambda) = 2m-1$, where $m$ is the minimal positive integer such that $n\mid 1+(m-1)t$. It follows that $m'\ge m$, hence $\operatorname{CYdim}(X) \ge \operatorname{CYdim}(\Lambda)$. On the other hand we have $\operatorname{CYdim}(X) \le \operatorname{CYdim}(\Lambda)$ by definition.
$(ii)$ \ Consider the algebra $A(t): = kA_\infty ^\infty/J^t$ where $A_\infty^\infty$ is the infinite quiver $$ \cdots \longrightarrow \bullet \longrightarrow \bullet \longrightarrow \bullet \longrightarrow \bullet \longrightarrow \bullet\longrightarrow \cdots.$$ Then $A(t)\underline {\mbox{-mod}}$ has a Serre functor, and there is a natural covering functor $A(t)\underline {\mbox{-mod}}$ $\longrightarrow \Lambda(n, t)\underline {\mbox{-mod}}$ ([Gab], 2.8). But one can prove that in any case $A(t)\underline {\mbox{-mod}}$ is not a Calabi-Yau category. \end{rem}
\vskip10pt
{\bf Acknowledgements.} This work is done during a visit of the second named author at Universit\'e de Montpellier 2, supported by the CNRS of France. He thanks the first named author and his group for the warm hospitality, the D\'epartement de Math\'ematiques of Universit\'e de Montpellier 2 for the working facilities, and the CNRS for the support. We thank Bernhard Keller for helpful conversations.
\vskip30pt
\end{document} |
\begin{document}
\title[Some cohomologically similar manifolds and special generic maps]{New families of manifolds with similar cohomology rings admitting special generic maps} \author{Naoki Kitazawa} \keywords{Special generic maps. (Co)homology rings. Closed and simply-connected manifolds. \\ \indent {\it \textup{2020} Mathematics Subject Classification}: Primary~57R45. Secondary~57R19.} \address{Institute of Mathematics for Industry, Kyushu University, 744 Motooka, Nishi-ku Fukuoka 819-0395, Japan\\
TEL (Office): +81-92-802-4402 \\
FAX (Office): +81-92-802-4405 \\ } \email{[email protected]} \urladdr{https://naokikitazawa.github.io/NaokiKitazawa.html}
\begin{abstract}
As Reeb's theorem shows, Morse functions with exactly two singular points on closed manifolds are very simple and important. They characterize spheres whose dimensions are not $4$ topologically and the $4$-dimensional unit sphere.
{\it Special generic} maps are generalized versions of these maps. Canonical projections of unit spheres are special generic. Studies of Saeki and Sakuma since the 1990s, followed by Nishioka and Wrazidlo, show that the differentiable structures of the spheres and the homology groups of the manifolds (in several classes) are restricted. We see special generic maps are attractive.
Our paper studies the cohomology rings of manifolds admitting such maps. As our new result, we find a new family of manifolds whose cohomology rings are similar and find that the (non-)existence of special generic maps is closely related to the topologies. More explicitly, we have previously found related families and our new manifolds add to these discoveries.
\end{abstract}
\maketitle \section{Introduction.} \label{sec:1}
According to the so-called Reeb's theorem, Morse functions with exactly two singular points on closed manifolds characterize spheres whose dimensions are not $4$ topologically and the $4$-dimensional unit sphere.
{\it Special generic} maps are, in short, their higher dimensional versions.
We first define a {\it special} generic map between smooth manifolds with no boundaries.
Before that, we introduce fundamental and important terminologies, notions and notation.
Hereafter, for an integer $k>0$, ${\mathbb{R}}^k$ denotes the $k$-dimensional Euclidean space, which is a smooth manifold canonically and also a Riemannian manifold endowed with the standard Euclidean metric.
For ${\mathbb{R}}^1$, $\mathbb{R}$ is used in a natural way. This is also a commutative ring and $\mathbb{Z} \subset \mathbb{R}$ denotes the subring of all integers.
$||x|| \geq 0$ is for the distance between $x$ and the origin $0$ in ${\mathbb{R}}^k$.
$S^k\ {\rm (}D^{k+1}{\rm )}:=\{x \in {\mathbb{R}}^{k+1} \mid ||x||=1\ {\rm (}resp.\ ||x|| \leq 1{\rm )}\}$ is the $k$-dimensional unit sphere (resp. ($k+1$)-dimensional unit disk). A canonical projection of the unit sphere $S^k \subset {\mathbb{R}}^{k+1}$ into ${\mathbb{R}}^{k^{\prime}}$ with $k \geq k^{\prime}$ is defined as a map mapping $(x_1,x_2) \in {\mathbb{R}}^{k^{\prime}} \times {\mathbb{R}}^{k+1-k^{\prime}}$ to $x_1 \in {\mathbb{R}}^{k^{\prime}}$. This is also a simplest special generic map.
Hereafter, $\dim X$ is the dimension of a topological space regarded as a CW complex, which we can define uniquely. A manifold is always regarded as a CW complex. A smooth manifold always has the structure of a so-called {\it PL} manifold in a canonical way and this is a polyhedron.
For a smooth map $c:X \rightarrow Y$ between smooth manifolds, a point $p \in X$ is a {\it singular} point of $c$ if the rank of the differential ${dc}_p$ is smaller than $\min \{\dim X,\dim Y\}$. $S(c)$ denotes the set of all singular points of $c$ and we call this the {\it singular set} of $c$.
A {\it diffeomorphism} means a homeomorphism which is a smooth map with no singular points.
Two smooth manifolds are defined to be {\it diffeomorphic} if a diffeomorphism between the two manifolds exists.
A smooth manifold homeomorphic to a sphere is said to be a {\it homotopy sphere}. A homotopy sphere is a {\it standard} (an {\it exotic}) sphere if it is diffeomorphic to a unit sphere (resp. not diffeomorphic to any unit sphere).
\begin{Def}
A smooth map $c:X \rightarrow Y$ between manifolds with no boundaries satisfying $\dim X \geq \dim Y$ is said to be {\it special generic} if at each singular point $p$, there exist suitable local coordinates and $c$ is locally
represented by $(x_1,\cdots,x_{\dim X}) \rightarrow (x_1,\cdots,x_{\dim Y-1},{\Sigma}_{j=1}^{\dim X-\dim Y+1} {x_{\dim Y+j-1}}^2)$.
\end{Def}
Morse functions in the Reeb's theorem are special generic. As a kind of exercises on smooth manifolds and maps and the theory of Morse functions, we can see that canonical projections of unit spheres are special generic.
\begin{Prop}
The singular set $S(c)$ of the special generic map $c:X \rightarrow Y$ is a {\rm (}$\dim Y-1${\rm )}-dimensional smooth closed submanifold of $X$ and has no boundary. Furthermore, the restriction $c {\mid}_{S(c)}$ is a smooth immersion.
\end{Prop}
Other properties are presented in the next section. Studies of Saeki and Sakuma (\cite{saeki1,saeki2,saekisakuma,saekisakuma2,sakuma}) since the 1990s,
followed by Nishioka (\cite{nishioka}) and Wrazidlo (\cite{wrazidlo,wrazidlo2,wrazidlo3}), show that the differentiable structures of the spheres and the homology groups of the manifolds are restricted in several cases. This explicitly shows that special generic maps are attractive. Some of these results are also presented later.
Our paper studies the cohomology rings of manifolds admitting such maps. We also introduce terminologies and notation on homology groups, cohomology rings, characteristic classes of manifolds such as {\it Stiefel-Whitney classes} and {\it Pontrjagin classes} and other notions from algebraic topology and differential topology in the next section. We also expect that readers have some knowledge on them. \cite{hatcher} and \cite{milnorstasheff} explain about them systematically.
\begin{MainThm}
\label{mthm:1}
Let $m \geq 8$ be an arbitrary integer. Let $m^{\prime} \geq 6$ be another integer satisfying the condition $m-m^{\prime} \geq 2$. Assume also the existence of some integer $a$. Furthermore, the following conditions hold.
\begin{itemize}
\item $a=2$ or $a=m^{\prime}-3$.
\item $a+m^{\prime}-1 \leq m$.
\item $2m^{\prime}-m-a=1$.
\end{itemize}
Let $G$ be an arbitrary finite commutative group which is not the trivial group.
Then we have a closed and simply-connected manifold $M$ enjoying the following properties.
\begin{enumerate}
\item \label{mthm:1.1} There exists an $m^{\prime}$-dimensional closed and simply-connected manifold $M^{\prime}$ and $M$ is diffeomorphic to $M^{\prime} \times S^{m-m^{\prime}}$.
\item \label{mthm:1.2} The $j$-th homology group of $M^{\prime}$ whose coefficient ring is $\mathbb{Z}$ is isomorphic to $G$ if $j=2,m^{\prime}-3$, $\mathbb{Z}$ if $j=0,m^{\prime}$, and the trivial group, otherwise.
\item \label{mthm:1.3} $M$ admits a special generic map into ${\mathbb{R}}^n$ for $m^{\prime}<n\leq m$ whereas it admits no special generic maps into ${\mathbb{R}}^n$ for $1 \leq n \leq 4$ and $n=m^{\prime}$. Furthermore, the special generic maps can be obtained as ones whose restrictions to the singular sets are embeddings and {\rm product-organized} ones, defined in Definition \ref{def:2}, later.
\end{enumerate}
\end{MainThm}
\begin{MainThm}
\label{mthm:2}
Let $\{G_j\}_{j=1}^5$ be a sequence of free commutative groups such that $G_{j}$ and $G_{6-j}$ are isomorphic for $1 \leq j \leq 2$, that $G_{1}$ is not the trivial group and that the rank of $G_{3}$ is even. Let $G_{{\rm F},1}$ be an arbitrary finite commutative group and $G_{{\rm F},2}$ a finite commutative group which is not the trivial group.
Let $\{M_i\}_{i=1}^2$ be a pair of $8$-dimensional closed and simply-connected manifolds enjoying the following properties.
\begin{enumerate}
\item The $j$-th homology groups of these manifolds whose coefficient rings are $\mathbb{Z}$ are mutually isomorphic to $G_{1} \oplus G_{{\rm F},1}$ for $j=2$, the direct sum of $G_{2} \oplus G_{{\rm F},2}$ for $j=3$, the direct sum of $G_{3} \oplus G_{{\rm F},2}$ for $j=4$, the direct sum of $G_{4} \oplus G_{{\rm F},1}$ for $j=5$, and $G_{5}$ for $j=6$.
\item Consider the restrictions to the subgroups generated by all elements whose orders are infinite for these manifolds. They are regarded as subalgebras and isomorphic to the cohomology rings of manifolds represented as connected sums of finitely many manifolds diffeomorphic to the products of two homotopy spheres where the coefficient ring is $\mathbb{Z}$.
\item For an arbitrary positive integer $j>0$, the $j$-th Stiefel-Whitney classes and the $j$-th Pontrjagin classes of these manifolds are the zero elements of the $j$-th cohomology groups whose coefficient rings are $\mathbb{Z}/2\mathbb{Z}$, the ring of order $2$, and the $4j$-th cohomology groups whose coefficient rings are $\mathbb{Z}$, respectively.
\item $M_i$ does not admit special generic maps into ${\mathbb{R}}^n$ for $1 \leq n < i+5$ whereas it admits one into ${\mathbb{R}}^n$ for $i+5 \leq n \leq 8$.
Furthermore, the special generic maps can be obtained as ones whose restrictions to the singular sets are embeddings and {\rm product-organized} ones, defined in Definition \ref{def:2}, later.
\end{enumerate}
\end{MainThm}
Main Theorem \ref{mthm:2} is regarded as a variant of Main Theorem 4 of \cite{kitazawa2}.
In the next section, we present additional fundamental properties of special generic maps. The third section is devoted to our proof of Main Theorems. We also present existing studies on the homology groups and the cohomology rings of the manifolds. \\
\ \\
{\bf Conflict of Interest.} \\
The author was a member of the project supported by JSPS KAKENHI Grant Number JP17H06128 ``Innovative research of geometric topology and singularities of differentiable mappings''
(Principal investigator: Osamu Saeki) and
is also a member of the project JSPS KAKENHI Grant Number JP22K18267 ``Visualizing twists in data through monodromy'' (Principal Investigator: Osamu Saeki). The present study is also supported by these projects. \\
\ \\
{\bf Data availability.} \\
Data essentially supporting the present study are all in the present paper.
\section{Fundamental properties and existing studies on special generic maps and the manifolds.}
First, we review elementary algebraic topology and other notions, terminologies and notation we need.
Let $(X,X^{\prime})$ be a pair of topological spaces where these two spaces satisfy the relation $X^{\prime} \subset X$ and may be empty. The ({\it $k$-th}) {\it homology group} of this pair $(X,X^{\prime})$ whose coefficient ring is $A$ is denoted by $H_{k}(X,X^{\prime};A)$. In the case $X^{\prime}$ is empty, we may omit ",$X^{\prime}$" in the notation and we call the homology group (cohomology group) of $(X,X^{\prime})$ the {\it homology group} (resp. {\it cohomology group}) of $X$ in general.
For a topological space $X$ of suitable classes, the {\it $k$-th homotopy group} of $X$ is denoted by ${\pi}_k(X)$. The class of arcwise-connected cell complex, containing the class of arcwise-connected CW complexes for example, is one of such classes.
Let $(X,X^{\prime})$ and $(Y,Y^{\prime})$ be pairs of topological spaces where these spaces satisfy the relations $X^{\prime} \subset X$ and $Y^{\prime} \subset Y$ and may be empty. Given a continuous map $c:X \rightarrow Y$ satisfying $c(X^{\prime}) \subset Y^{\prime}$, then $c_{\ast}:H_{k}(X,X^{\prime};A) \rightarrow H_{k}(Y,Y^{\prime};A)$, $c^{\ast}:H^{k}(Y,Y^{\prime};A) \rightarrow H^{k}(X,X^{\prime};A)$ and $c_{\ast}:{\pi}_k(X) \rightarrow {\pi}_k(Y)$ denote the canonically induced homomorphisms where in considering homotopy groups the spaces satisfy the suitable conditions for example.
Let $X$ be a topological space and $A$ a commutative ring. Let $H^{\ast}(X;A)$ denote the direct sum ${\oplus}_{j=0}^{\infty} H^j(X;A)$ for all integers greater than or equal to $0$. For any sequence $\{a_j\}_{j=1}^l \subset H^{\ast}(X;A)$ of elements of length $l>0$, we can define the {\it cup product} ${\cup}_{j=1}^l a_j \in H^{\ast}(X;A)$ for general $l$. $a_1 \cup a_2$ is also used in the case $l=2$. We can have the structure of a graded commutative algebra $H^{\ast}(X;A)$. This is the {\it cohomology ring} of $X$ whose {\it coefficient ring} is $A$.
The {\it fundamental class} of a compact, connected and oriented manifold $Y$ is defined as the canonically and uniquely defined element of the ($\dim Y$)-th homology group, which is also a generator of the group $H_{\dim Y}(Y,\partial Y;\mathbb{Z})$, isomorphic to the group $\mathbb{Z}$. Let $i_{Y,X}:Y \rightarrow X$ be a smooth immersion satisfying $i_{Y,X}(\partial Y) \subset \partial X$ and $i_{Y,X}({\rm Int}\ Y) \subset {\rm Int}\ X$. In other words, $Y$ is smoothly and {\it properly} immersed or embedded into $X$.
Note that in considering other categories such as the PL category and the topology category, we consider suitable maps playing roles as the smooth immersions play. If for an element $h \in H_j(X,\partial X;A)$, the value of the homomorphism ${i_{Y,X}}_{\ast}$ induced by the map $i_{Y,X}:Y \rightarrow X$ at the fundamental class of $Y$ is $h$, then $h$ is {\it represented} by $Y$.
We also need several fundamental methods and theorems. For example, homology exact sequences for pairs of topological spaces, Mayer-Vietoris sequences, Poincar\'e duality (theorem) for compact and connected (orientable) manifolds, universal coefficient theorem and K\"unneth theorem for the products of topological spaces.
For such explanations, see \cite{hatcher} again for example.
The {\it diffeomorphism group} of a smooth manifold is the group of all diffeomorphisms from the manifold to itself endowed with the so-called {\it Whitney $C^{\infty}$ topology}.
A {\it smooth} bundle means a bundle whose fiber is a smooth manifold whose structure group is the diffeomorphism group. A {\it linear} bundle means a bundle whose fiber is a Euclidean space, unit sphere, or a unit disk and whose structure group consists of linear transformations. Note that a linear transformation here is defined in a natural and canonical way.
For general theory of bundles, see \cite{steenrod}. \cite{milnorstasheff} is for linear bundles. \begin{Prop}
\label{prop:2}
For a special generic map on an $m$-dimensional closed and connected manifold $M$ into ${\mathbb{R}}^n$, we have the following properties.
\begin{enumerate}
\item \label{prop:2.1}
There exists an $n$-dimensional compact and connected smooth manifold $W_f$ such that a smooth surjection $q_f:M \rightarrow W_f$ and a smooth immersion $\bar{f}:W_f \rightarrow {\mathbb{R}}^n$ enjoying the relation $f=\bar{f} \circ q_f$ exist.
\item \label{prop:2.2}
There exists a small collar neighborhood $N(\partial W_f)$ of the boundary $\partial W_f \subset W_f$ and the composition of the restriction of $f$ to the preimage with the canonical projection to $\partial W_f$ gives a linear bundle whose fiber is the {\rm (}$m-n+1${\rm )}-dimensional unit disk $D^{m-n+1}$.
\item \label{prop:2.3}
The restriction of $f$ to the preimage of $W_f-{\rm Int}\ N(\partial W_f)$ gives a smooth bundle whose fiber is an {\rm (}$m-n${\rm )}-dimensional standard sphere. It is regarded as a linear bundle in specific cases. For example, in the case $m-n=0,1,2,3$, the bundle is regarded as a linear bundle.
\end{enumerate} \end{Prop}
\begin{Prop}
\label{prop:3}
In Proposition \ref{prop:2}, we can have an {\rm (}$m+1${\rm )}-dimensional compact and connected topological {\rm (}PL{\rm )} manifold $W$ and a continuous {\rm (}resp. piecewise smooth{\rm )} surjection $r:W \rightarrow W_f$ enjoying the following properties where we abuse the notation.
\begin{enumerate}
\item $M$ is the boundary of $W$ and the restriction of $r$ to the boundary $M$ is $q_f$.
\item $r$ gives the structure of a bundle over $W_f$ whose fiber is homeomorphic {\rm (}resp. PL homeomorphic{\rm )} to the {\rm (}$m-n+1${\rm )}-dimensional unit disk $D^{m-n+1}$ and whose structure group consists of piecewise smooth homeomorphisms.
\item $W$ is decomposed into two {\rm (}$m+1${\rm )}-dimensional topological {\rm (}resp. PL{\rm )} manifolds $W_1$ and $W_2$.
\item We can consider the composition of the restriction of $r$ to $W_1$ with a canonical projection to $\partial W_f$. This gives a linear bundle whose fiber is the {\rm (}$m-n+2${\rm )}-dimensional unit disk $D^{m-n+2}$. Furthermore, this bundle is realized as a bundle whose subbundle obtained by considering the boundary of the unit disk and a suitable smoothly embedded copy of the unit disk $D^{m-n+1}$ there as its fiber is the linear bundle of Proposition \ref{prop:2} {\rm (}\ref{prop:2.2}{\rm )}.
\item The restriction of $r$ to $W_2$ gives a bundle whose fiber is diffeomorphic to the {\rm (}$m-n+1${\rm )}-dimensional unit disk $D^{m-n+1}$ {\rm (}resp. and whose structure group consists of piecewise smooth homeomorphisms{\rm )}. Furthermore, this bundle is realized as a bundle whose subbundle obtained by considering the boundary of the unit disk as its fiber is the smooth bundle of Proposition \ref{prop:2} {\rm (}\ref{prop:2.3}{\rm )}.
\item In the case $m-n=0,1,2,3$ for example, $W$ can be chosen as a smooth one and we can also do in such a way that $r$ gives a linear bundle over $W_f$.
\end{enumerate} \end{Prop} \begin{Ex}
\label{ex:1}
Let $l>0$ be an integer. Let $m \geq n \geq 2$ be integers.
Other than canonical projections of unit spheres, we present the simplest special generic maps. We consider a connected sum of $l>0$ manifolds in the family $\{S^{n_j} \times S^{m-n_j}\}_{j=1}^l$ in the smooth category where $1 \leq n_j \leq n-1$ is an integer for $1 \leq j \leq l$. We have a special generic map $f:M \rightarrow {\mathbb{R}}^n$ on the resulting manifold $M$ such that in Proposition \ref{prop:2}, the following properties are enjoyed.
\begin{enumerate}
\label{ex:1.1}
\item $\bar{f}$ is an embedding.
\item
\label{ex:1.2}
$W_f$ is represented as a boundary connected sum of $l>0$ manifolds each of which is diffeomorphic to each of $\{S^{n_j} \times D^{n-n_j}\}_{j=1}^l$ in the family. The boundary connected sum is considered in the smooth category.
\item \label{ex:1.3}
The bundles in Proposition \ref{prop:2} (\ref{prop:2.2}) and (\ref{prop:2.3}) are trivial.
\end{enumerate} \end{Ex} We introduce a result of \cite{saeki1} related to Example \ref{ex:1}.
\begin{Thm}[\cite{saeki1}]
\label{thm:1}
An $m$-dimensional closed and connected manifold $M$ admits a special generic map into ${\mathbb{R}}^2$ if and only if either of the following holds.
\begin{enumerate}
\item $M$ is a homotopy sphere which is not a $4$-dimensional exotic sphere.
\item A manifold
represented as a connected sum of smooth manifolds considered in the smooth category where each of the manifolds here is represented as either of the following manifolds. \begin{enumerate}
\item The total space of a smooth bundle over $S^1$ whose fiber is a homotopy sphere for $m \neq 5$.
\item The total space of a smooth bundle over $S^1$ whose fiber is a standard sphere for $m=5$. \end{enumerate} \end{enumerate} Furthermore, for each manifold here, we can construct a special generic map so that the properties {\rm (}\ref{ex:1.1}{\rm )} and {\rm (}\ref{ex:1.2}{\rm )} in Example \ref{ex:1} are enjoyed with $n_j=1$. \end{Thm} \begin{Prop}
\label{prop:4}
Let $A$ be a commutative ring.
In Proposition \ref{prop:2}, the homomorphisms ${q_f}_{\ast}:H_j(M;A) \rightarrow H_j(W_f;A)$, ${q_f}^{\ast}:H^j(W_f;A) \rightarrow H^j(M;A)$ and ${q_f}_{\ast}:{\pi}_j(M) \rightarrow {\pi}_j(W_f)$ are isomorphisms for $0 \leq j \leq m-n$.
\end{Prop} We can show the following theorem by applying Proposition \ref{prop:4} with Proposition \ref{prop:2}, some fundamental theory on $3$-dimensional compact manifolds and some previously presented facts. For a proof, see the
original paper \cite{saeki1} for example. \begin{Thm}[\cite{saeki1}]
\label{thm:2}
Let $m>3$ be an arbitrary integer.
\begin{enumerate}
\item If an $m$-dimensional closed and simply-connected manifold admits a special generic map into ${\mathbb{R}}^3$, then it is either of the following.
\begin{enumerate}
\item A homotopy sphere which is not a $4$-dimensional exotic sphere.
\item A manifold represented as a connected sum of smooth manifolds considered in the smooth category where each of the manifolds here is represented as the total space of a smooth bundle over $S^2$ whose fiber is a homotopy sphere. Moreover, the homotopy sphere of the fiber is also not a $4$-dimensional exotic sphere.
\end{enumerate}
\item
Furthermore, for each manifold here admitting a special generic map into ${\mathbb{R}}^3$, we can construct a special generic map so that the properties {\rm (}\ref{ex:1.1}{\rm )} and {\rm (}\ref{ex:1.2}{\rm )} in Example \ref{ex:1} are enjoyed with $n_j=2$.
\end{enumerate} \end{Thm}
The following is shown in \cite{nishioka} to show Theorem \ref{thm:3} for $(m,n)=(5,4)$ by applying a classification result of $5$-dimensional closed and simply-connected manifolds of \cite{barden}, Proposition \ref{prop:4} and some additional arguments on homology groups. \begin{Prop}[\cite{nishioka}]
\label{prop:5}
Let $k \geq 3$ be an integer. For a $k$-dimensional compact and connected manifold $X$ such that $H_1(X;\mathbb{Z})$ is the trivial group, $X$ is orientable and the homology group $H_{j}(X;\mathbb{Z})$ is free for $j=k-2,k-1$.
\end{Prop}
\begin{Thm}[\cite{saeki1} for $(m,n)=(4,3),(5,3),(6,3)$,\cite{nishioka} for $(m,n)=(5,4)$, and \cite{kitazawa5} for $(m,n)=(6,4)$] \label{thm:3}
For integers $(m,n)=(4,3),(5,3),(5,4),(6,4)$, an $m$-dimensional closed and simply-connected manifold $M$ admits a special generic map into ${\mathbb{R}}^n$ if and only if $M$ is as follows.
\begin{enumerate}
\item A standard sphere.
\item A manifold represented as a connected sum of smooth manifolds considered in the smooth category where each of the manifolds here is as follows.
\begin{enumerate}
\item The total space of a linear bundle over $S^2$ whose fiber is the unit sphere $S^{m-2}$.
\item Only in the case $(m,n)=(6,4)$, a manifold diffeomorphic to $S^3 \times S^3$.
\end{enumerate}
\end{enumerate}
Furthermore, for each manifold here, we can construct a special generic map so that the properties {\rm (}\ref{ex:1.1}{\rm )} and {\rm (}\ref{ex:1.2}{\rm )} in Example \ref{ex:1} are enjoyed with $n_j=2$ unless $(m,n)=(6,4)$. In the case $(m,n)=(6,4)$, we can do similarly with $n_j=2,3$. \end{Thm} \begin{Prop}
\label{prop:6}
Let $A$ be a commutative ring.
In Proposition \ref{prop:2}, the cup product for a finite sequence $\{u_j\}_{j=1}^{l} \subset H^{\ast}(M;A)$ of length $l>0$ consisting of elements whose degrees are at most $m-n$ is the zero element if the sum of all $l$ degrees is greater than or equal to $n$. \end{Prop} We introduce a proof of Propositions \ref{prop:4} and \ref{prop:6} referring to \cite{kitazawa1}. For terminologies from the PL category and similar categories, see \cite{hudson} for example. \begin{proof}[A proof of Propositions \ref{prop:4} and \ref{prop:6}]
This is based on an argument of \cite{saekisuzuoka} for example. We abuse the notation of some Propositions such as Proposition \ref{prop:2} and \ref{prop:3}.
$W$ is an ($m+1$)-dimensional compact and connected (PL) manifold whose boundary is $M$ and collapses to $W_f$, which is an $n$-dimensional compact smooth manifold immersed smoothly into ${\mathbb{R}}^n$. This has the (simple) homotopy type of an ($n-1$)-dimensional polyhedron and more precisely, collapses to an ($n-1$)-dimensional polyhedron. We can see that for $0 \leq j < m+1-(n-1)-1=m-n+1$, the three kinds of homomorphisms in Proposition \ref{prop:4} are isomorphisms.
In Proposition \ref{prop:6}, the cup product for the finite sequence is regarded as the value of ${q_f}^{\ast}:H^{\ast}(W_f;A) \rightarrow H^{\ast}(M;A)$ at the cup product of a sequence of length $l$ where ${q_f}^{\ast}:H^{\ast}(W_f;A) \rightarrow H^{\ast}(M;A)$ is defined in a canonical way from ${q_f}^{\ast}:H^{i}(W_f;A) \rightarrow H^{i}(M;A)$. This sequence of elements of $H^{\ast}(W_f;A)$ is defined in a canonical and unique way by Proposition \ref{prop:4}. The cup product is the zero element of $H^{\ast}(W_f;A)$. This follows from the fact that $W_f$ has the homotopy type of an ($n-1$)-dimensional polyhedron.
This completes the proof.
\end{proof} Hereafter, in the present section, we introduce arguments in \cite{kitazawa7} and present some new arguments.
\begin{Prop}[Partially discussed in \cite{kitazawa7}]
\label{prop:7} Let $m \geq n \geq 1$ be integers. Let ${\bar{f}}_N:\bar{N} \rightarrow {\mathbb{R}}^n$ be a smooth immersion of an $n$-dimensional compact, connected and orientable manifold $\bar{N}$. Then there exist a suitable closed and connected smooth manifold $M$ and a special generic map $f:M \rightarrow {\mathbb{R}}^n$ enjoying the following properties where we abuse the notation in Proposition \ref{prop:2}.
\begin{enumerate}
\item $W_f=\bar{N}$ and $\bar{f}={\bar{f}}_N$.
\item Two bundles in Proposition \ref{prop:2} {\rm (}\ref{prop:2.2}{\rm )} and {\rm (}\ref{prop:2.3}{\rm )} are trivial bundles.
\item Let $f_0:=f$. Then there exist a special generic map $f_j:M \rightarrow {\mathbb{R}}^{n+j}$ for $0 \leq j \leq m-n$ and a smooth immersion $f_j:M \rightarrow {\mathbb{R}}^{n+j}$ for $j \geq m-n$. If $f_N$ is an embedding, then each immersion can be obtained as an embedding. Furthermore, they can be constructed enjoying the relation $f_{j_1}={\pi}_{n+j_2,n+j_1} \circ f_{j_2}$ for any distinct integers $j_1<j_2$ and a canonical projection ${\pi}_{n+j_2,n+j_1};{\mathbb{R}}^{n+j_2} \rightarrow {\mathbb{R}}^{n+j_1}$. \end{enumerate} \end{Prop} \begin{proof}
Most of our proof is based on a proof in the original paper.
By considering the restriction of the canonical projection of a unit sphere to $\mathbb{R}$ to a suitable hemisphere, we have a Morse function on a copy of the unit disk. This function can be obtained enjoying the following properties.
\begin{itemize}
\item It has exactly one singular point and there the function attains its maximal value (resp. minimal value).
\item The preimage of the minimal (resp. maximal) value is the boundary of the disk.
\end{itemize}
We can prepare the product map of such a function and the identity map on the boundary $\partial \bar{N}$ and we have a surjection onto $N(\partial W_f)$, a suitable collar neighborhood of $\partial \bar{N}$, by restricting the manifold of the target of the product map and composing a suitable embedding.
We have a smooth trivial bundle over $\bar{N}-{\rm Int}\ N(\partial W_f)=W_f-{\rm Int}\ N(\partial W_f)$ whose fiber is an ($m-n$)-dimensional standard sphere. By gluing these maps in a suitable way we have a special generic map $f=f_0$ enjoying the first two properties.
\begin{itemize}
\item The diffeomorphism agreeing with the diffeomorphism used for identification between the connected component in the boundary of $\partial N(\partial W_f)$ and the boundary $\partial W_f -{\rm Int}\ N(\partial W_f)$.
\item The identity map on the fiber, which is the unit sphere $S^{m-n}$. Note that the fibers of the both trivial bundles can be regarded as the unit sphere $S^{m-n}$.
\end{itemize}
Let $0 \leq j \leq m-n$.
We replace the canonical projection of the unit sphere to $\mathbb{R}$ in the former product map by a suitable one onto ${\mathbb{R}}^{1+j}$. For the projection of the latter trivial smooth bundle, we replace the projection by the product map of a canonical projection of the unit sphere to ${\mathbb{R}}^j$ and the identity map on the base space. We glue in a natural and suitable way to have a desired map $f_j$.
In the case $j>m-n$, for construction of a desired map $f_j$, we consider the canonically defined embeddings of the unit spheres instead. This argument is a new one, presented first in the present paper.
This completes the proof of Proposition \ref{prop:7}, and by the construction, Proposition \ref{prop:8}, which is presented later. \end{proof} \begin{Def}[Partially defined in \cite{kitazawa2}]
\label{def:2}
We call the special generic map $f=f_0$ obtained in this way a {\it product-organized} special generic map. \end{Def}
\begin{Prop}
\label{prop:8}
For $0 \leq j \leq m-n$, $f_j$ in Proposition \ref{prop:7} can be also constructed as a product-organized special generic map. \end{Prop} We can generalize ${\mathbb{R}}^n$ here to a general connected smooth manifold with no boundary. However, we concentrate on the cases of ${\mathbb{R}}^n$ essentially.
For a linear bundle over a base space $X$, we can define the {\it $j$-th Stiefel-Whitney class} as an element of $H^j(X;\mathbb{Z}/2\mathbb{Z})$ and the {\it $j$-th Pontrjagin class} as an element of $H^{4j}(X;\mathbb{Z})$.
The tangent bundles of smooth manifolds are important linear bundles whose fibers are Euclidean spaces.
The {\it $j$-th Stiefel-Whitney class} of a smooth manifold $X$ is defined as that of its tangent bundle and an element of $H^j(X;\mathbb{Z}/2\mathbb{Z})$. The {\it $j$-th Pontrjagin class} of a smooth manifold $X$ is defined as that of its tangent bundle and an element of $H^{4j}(X;\mathbb{Z})$.
The following follows from elementary arguments related to this.
\begin{Prop}
\label{prop:9}
Let $j>0$ be an arbitrary integer.
For the manifold $M$ and any positive integer $j$, the $j$-th Stiefel-Whitney class and the $j$-th Pontrjagin class of $M$ are the zero elements of $H^j(M;\mathbb{Z}/2\mathbb{Z})$ and $H^{4j}(M;\mathbb{Z})$, respectively.
\end{Prop}
See \cite{milnorstasheff} for Stiefel-Whitney classes and Pontrjagin classes.
\section{Main Theorems and related arguments.} We prove Main Theorems. We need some arguments and results of the author. For example, ones in \cite{kitazawa2,kitazawa3,kitazawa4,kitazawa5,kitazawa6} are important.
\begin{Prop}[\cite{kitazawa2,kitazawa3}]
\label{prop:10}
Let $n \geq 5$ be an integer.
Let $G$ be a finite commutative group. We have an $n$-dimensional compact and simply-connected manifold ${\bar{N}}_{G,n}$ enjoying the following properties. \begin{enumerate}
\item ${\bar{N}}_{G,n}$ is smoothly embedded in ${\mathbb{R}}^n$.
\item The boundary $\partial {\bar{N}}_{G,n}$ is connected.
\item $H_{n-3}({\bar{N}}_{G,n};\mathbb{Z})$ is isomorphic to $G$.
\item $H_j({\bar{N}}_{G,n};\mathbb{Z})$ is the trivial group for $j \neq 0,n-3$. \end{enumerate} \end{Prop} \begin{proof}
We prove this referring to \cite{kitazawa3} and in a different way. However, the method is essentially the same.
First, let $G$ be a cyclic group.
We can choose a $3$-dimensional closed and connected manifold $Y_G$ smoothly embedded in ${\mathbb{R}}^n$ such that ${\pi}_1(Y_G)$ and $H_1(Y_G;\mathbb{Z})$ are isomorphic to $G$ and $H_2(Y_G;\mathbb{Z})$ is the trivial group. This is due to \cite{wall} and for example, we can choose a so-called {\it Lens space}.
We remove the interior of a small closed tubular neighborhood $N(Y_G)$ of $Y_G$ and ${S^n}_{N(Y_G)}$ denotes the resulting manifold. $N(Y_G)$ is regarded as the total space of a trivial linear bundle over $Y_G$ whose fiber is the ($n-3$)-dimensional unit disk $D^{n-3}$.
$\partial {S^{n}}_{N(Y_G)}$ is regarded as the total space of the subbundle of the previous bundle whose fiber is $\partial D^{n-3} \subset D^{n-3}$.
We have a Mayer-Vietoris sequence
$$\rightarrow H_j(\partial {S^n}_{N(Y_G)};\mathbb{Z})=H_j(\partial N(Y_G);\mathbb{Z}) \rightarrow H_j(N(Y_G);\mathbb{Z}) \oplus H_j({S^n}_{N(Y_G)};\mathbb{Z}) \rightarrow H_j(S^n;\mathbb{Z}) \rightarrow$$
\ \\
and the homomorphism from the first group to the second group is an isomorphism for $1 \leq j \leq n-1$.
This is due to the fundamental fact that $H_j(S^n;\mathbb{Z})$ is isomorphic to $\mathbb{Z}$ for $j=0,n$ and the trivial group for $1 \leq j \leq n-1$ and that the homomorphism from $H_n(S^n;\mathbb{Z})$ into $H_{n-1}(\partial {S^n}_{N(Y_G)};\mathbb{Z})$ is an isomorphism between groups isomorphic to $\mathbb{Z}$.
${S^n}_{N(Y_G)} \supset \partial {S^n}_{N(Y_G)}$ are a trivial linear bundle over $Y_G$ whose fiber is the unit disk $D^{n-3}$ and its subbundle obtained by considering the fiber $S^{n-4}=\partial D^{n-3} \subset D^{n-3}$, respectively.
${S^n}_{N(Y_G)}$ is a connected manifold.
$H_j({S^n}_{N(Y_G)};\mathbb{Z})$ is isomorphic to $H_{n-1}(Y_G;\mathbb{Z})$ for $1 \leq j \leq n-2$ and the trivial group for $j=n-1,n$.
${\pi}_1(\partial {S^n}_{N(Y_G)})$ is isomorphic to the direct sum $\mathbb{Z} \oplus G$ in the case $n=5$ and $G$ in the case $n>5$, and it is commutative. $S^n$ is simply-connected. For $S^n$ and the same submanifolds $N(Y_G)$ and ${S^n}_{N(Y_G)}$, we can apply the Seifert--van Kampen theorem. ${\pi}_1({S^n}_{N(Y_G)})$ is shown to be isomorphic to $\mathbb{Z}$.
We define
${\bar{N}}_G$ as a manifold in the following way. First attach a copy of $D^3 \times D^{n-3}$ so that this is the total space of the restriction of the linear bundle $N(Y_G)$ to a copy of the $3$-dimensional unit disk $D^3$ smoothly embedded in the base space $Y_G$. After that eliminate the corner.
We have a Mayer-Vietoris sequence for ${\bar{N}}_G$ and its decomposition into the two manifolds, one of which is ${S^n}_{N(Y_G)}$ and the other of which is the copy of $D^3 \times D^{n-3}$. More precisely, the resulting manifold is decomposed by an ($n-1$)-dimensional smoothly embedded manifold diffeomorphic to $D^3 \times \partial D^{n-3}$ into the two manifolds.
We can also apply the Seifert--van Kampen theorem to these manifolds.
Investigating our Mayer-Vietoris sequence and our argument using the Seifert--van Kampen theorem, we complete our proof in the case of a cyclic group $G$.
For a general finite commutative group $G$, we have a desired manifold by considering a boundary connected sum of finitely many such manifolds forming the family $\{{\bar{N}}_{G_j}\}_{j=1}^l$ such that each $G_j$ is a cyclic group and that the direct sum ${\oplus}_{j=1}^l G_j$ is isomorphic to $G$. We add that the boundary connected sum is taken in the smooth category.
This completes the proof. \end{proof}
We have the following by applying Proposition \ref{prop:10} and other arguments such as Propositions \ref{prop:7}, \ref{prop:8} and \ref{prop:9}.
\begin{Thm}[\cite{kitazawa5} ($(m,n)=(6,5)$)]
\label{thm:4}
Let $m \geq n+1$ be an integer. Let $G$ be a finite commutative group. We have an $m$-dimensional closed and simply-connected manifold $M_{G,n,m-n}$ and a product-organized special generic map $f_{G,n,m-n}:M_{G,n,m-n} \rightarrow {\mathbb{R}}^n$ enjoying the following properties.
\begin{enumerate}
\item \label{thm:4.1}
For any positive integer $j$, the $j$-th Stiefel-Whitney class and the $j$-th Pontrjagin class of $M_{G,n,m-n}$ are the zero elements of $H^j(M_{G,n,m-n};\mathbb{Z}/2\mathbb{Z})$ and $H^{4j}(M_{G,n,m-n};\mathbb{Z})$, respectively.
\item \label{thm:4.2}
The restriction of the map to the singular set is an embedding. The image is diffeomorphic to ${\bar{N}}_G$ in Proposition \ref{prop:10} and $W_{f_{G,n,m-n}}$ where $W_{f_{G,n,m-n}}$ abuses ``$W_f$ in Proposition \ref{prop:2}''.
\item \label{thm:4.3}
$H_j(M_{G,n,m-n};\mathbb{Z})$ is isomorphic to $G$ for $j=n-3,m-n+2$, $\mathbb{Z}$ for $j=0,m$, and the trivial group otherwise.
\end{enumerate} Furthermore, according to \cite{jupp,wall2,zhubr,zhubr2} for example, in a suitable case, the topology and the differentiable structure of the manifold $M_{G,5,1}$ is uniquely defined. \end{Thm} \begin{proof}
We have a product-organized special generic map $f_{G,n,m-n}:M_{G,n,m-n} \rightarrow {\mathbb{R}}^n$ on a suitable closed and simply-connected manifold $M_{G,n,m-n}$ enjoying (\ref{thm:4.1}) and (\ref{thm:4.2}) here by Propositions \ref{prop:7}, \ref{prop:8} and \ref{prop:9}
with Proposition \ref{prop:4}.
It is sufficient to show (\ref{thm:4.3}) here.
$W_{f_{G,n,m-n}}$ is simply-connected and by Proposition \ref{prop:10} collapses to an ($n-2$)-dimensional polyhedron.
By revising the proof of Propositions \ref{prop:4} and \ref{prop:6}, we can see that ${q_{f_{G,n,m-n}}}_{\ast}:H_j(M_{G,n,m-n};\mathbb{Z}) \rightarrow H_j(W_{f_{G,n,m-n}};\mathbb{Z})$ is an isomorphism for $0 \leq j \leq m-n+1$.
In the present proof, "Proposition \ref{prop:4}" means this isomorphism. \\
\\
Hereafter, $W_{G,n,m-n}$ denotes "$W$ in Propositions \ref{prop:2} and \ref{prop:3}" where $M_{G,n,m-n}$ is used instead of "$M$ in these propositions". This is ($m+1$)-dimensional, closed and simply-connected.
We also have a homology exact sequence
$$\rightarrow H_{j+1}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z}) \rightarrow H_j(M_{G,n,m-n};\mathbb{Z}) \rightarrow H_j(W_{G,n,m-n};\mathbb{Z}) \rightarrow $$ \quad $H_j(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z}) \rightarrow $ \\ \ \\
for $(W_{G,n,m-n},M_{G,n,m-n})$.\\
\ \\
Case 1 The case $m-n+1 \geq \frac{m}{2}$. \\
The resulting manifold $M_{G,n,m-n}$ is a desired manifold
by applying Proposition \ref{prop:4} and Poincar\'e duality theorem. \\
\ \\
Case 2 The case $m$ is even and $m-n+2=\frac{m}{2}$.\\
\\
We also have a homology exact sequence
$$\rightarrow H_{n-2}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z}) \rightarrow H_{n-3}(M_{G,n,m-n};\mathbb{Z}) \rightarrow H_{n-3}(W_{G,n,m-n};\mathbb{Z}) \rightarrow $$
$H_{n-3}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z}) \rightarrow $ \\
\ \\
for $(W_{G,n,m-n},M_{G,n,m-n})$. $H_{n-2}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z})$ is isomorphic to $H^{m-n+3}(W_{G,n,m-n};\mathbb{Z})$, $H^{m-n+3}(W_{f_{G,n,m-n}};\mathbb{Z})$, and $H_{2n-m-3}(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}};\mathbb{Z})$ by Proposition \ref{prop:3} and by virtue of Poincar\'e duality theorem for $W_{f_{G,n,m-n}}$. We have the relation $2n-m-4=0$ and the previous groups are isomorphic to $H_{1}(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}};\mathbb{Z})$. This is the trivial group since $W_{f_{G,n,m-n}}$ is simply-connected and $\partial W_{f_{G,n,m-n}}$ is connected. This is shown by the homology exact sequence for $(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}})$.
Similarly, $H_{n-1}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z})$ is isomorphic to $H_{2}(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}};\mathbb{Z})$ and $H^{n-2}(W_{f_{G,n,m-n}};\mathbb{Z})$ by Proposition \ref{prop:3} and by virtue of Poincar\'e duality theorem for $W_{f_{G,n,m-n}}$. This group is shown to be finite by Proposition \ref{prop:10} and universal coefficient theorem. Similarly, $H_{n-2}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z})$ is isomorphic to $H_{0}(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}};\mathbb{Z})$ and the trivial group. We have the relation $n-2=\frac{m}{2}$. By Proposition \ref{prop:4}, Proposition \ref{prop:10} and Poincar\'e duality theorem for $M_{G,n,m-n}$, this completes the proof for this case. \\
\ \\
Case 3 The case $m$ is odd and $m-n+\frac{3}{2}=\frac{m}{2}$.\\
\\ We also have a homology exact sequence
$$\rightarrow H_{n-2}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z}) \rightarrow H_{n-3}(M_{G,n,m-n};\mathbb{Z}) \rightarrow H_{n-3}(W_{G,n,m-n};\mathbb{Z}) \rightarrow $$ $H_{n-3}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z}) \rightarrow $ \\ \ \\
for $(W_{G,n,m-n},M_{G,n,m-n})$. $H_{n-2}(W_{G,n,m-n},M_{G,n,m-n};\mathbb{Z})$ is isomorphic to $H^{m-n+3}(W_{G,n,m-n};\mathbb{Z})$, $H^{m-n+3}(W_{f_{G,n,m-n}};\mathbb{Z})$, and $H_{2n-m-3}(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}};\mathbb{Z})$ by Proposition \ref{prop:3} and by virtue of Poincar\'e duality theorem for $W_{f_{G,n,m-n}}$. We have the relation $2n-m-3=0$ and the previous groups are isomorphic to $H_{0}(W_{f_{G,n,m-n}},\partial W_{f_{G,n,m-n}};\mathbb{Z})$ and the trivial group. We have the relation $n-2=\frac{m-1}{2}$. By Proposition \ref{prop:4}, Proposition \ref{prop:10} and Poincar\'e duality theorem for $M_{G,n,m-n}$, this completes the proof for this case. \\
\ \\
This completes the proof of (\ref{thm:4.3}). \\
This completes the proof.
\end{proof} \begin{Rem}
\cite{nishioka} has studied Case 3 of the proof of Theorem \ref{thm:4}. More precisely, for a positive integer $k>0$ and a ($2k+1$)-dimensional closed and connected manifold $M$ such that the group $H_1(M;\mathbb{Z})$ is trivial and which admits a special generic map into ${\mathbb{R}}^{k+2}$, the group $H_k(M;\mathbb{Z})$ has been shown to be free. The case $(m,n)=(5,4)$ of Theorem \ref{thm:3} concerns the case $k=2$ here. \end{Rem}
\begin{proof} [A proof of Main Theorem \ref{mthm:1}] First consider an $m^{\prime}$-dimensional closed and simply-connected manifold $M_{G,5,m^{\prime}-5}$ in Theorem \ref{thm:4} where $m^{\prime}$ denotes "$m$ of Theorem \ref{thm:4}". We can embed this smoothly into ${\mathbb{R}}^{m^{\prime}+1}$ by Proposition \ref{prop:7} for example.
We define $M$ as $M:=M_{G,5,m^{\prime}-5} \times S^{m-m^{\prime}}$. This completes our exposition on the properties (\ref{mthm:1.1}) and (\ref{mthm:1.2}).
By Propositions \ref{prop:2}, \ref{prop:4} and \ref{prop:5} for example, this does not admit special generic maps into ${\mathbb{R}}^n$ for $n=1,2,3,4$. By the previous argument, we can consider the product map of a Morse function with exactly two singular points on $S^{m-m^{\prime}}$ and the identity map on $M_{G,5,m^{\prime}-5}$ and embed the image smoothly in a suitable way into ${\mathbb{R}}^{m^{\prime}+1}$. This can be constructed as product-organized one. This implies the existence of special generic maps into ${\mathbb{R}}^n$ for $m^{\prime}<n \leq m$.
We prove the non-existence into ${\mathbb{R}}^{m^{\prime}}$. Suppose that such a special generic map $f$ on $M$ into ${\mathbb{R}}^{m^{\prime}}$ exists. From Proposition \ref{prop:4}, the homomorphism ${q_f}_{\ast}$ maps the homology group $H_{m-m^{\prime}}(M;\mathbb{Z})$ onto $H_{m-m^{\prime}}(W_f;\mathbb{Z})$ as an isomorphism. $m^{\prime}-1$ is the dimension of the boundary $\partial W_f \subset W_f$ and the relation $a+(m^{\prime}-1) \leq m$ is assumed as a condition. We can apply Poincar\'e duality or intersection theory for $M$. From this, the torsion subgroup of $H_{a}(M;\mathbb{Z})$, which is not the trivial group from the assumption and K\"unneth theorem for the product $M_{G,5,m^{\prime}-5} \times S^{m-m^{\prime}}$, is mapped onto $H_{a}(W_f;\mathbb{Z})$ by the homomorphism ${q_f}_{\ast}$ and this is an isomorphism.
Here, we apply fundamental methods used first in \cite{kitazawa4}, generalizing some methods used in \cite{saeki1}, and used also in \cite{kitazawa5,kitazawa6} for example.
We choose an element of $h_1 \in H_{m-m^{\prime}}(M;\mathbb{Z})$ and an element of $h_2 \in H_{a}(M;\mathbb{Z})$ which are not the zero elements. Furthermore, we choose $h_2$ as an element of a summand of a direct sum decomposition of the torsion subgroup of $H_{a}(M;\mathbb{Z})$ into cyclic groups. This decomposition is due to fundamental theorem on the structures of finitely generated commutative groups. Note that $a=2,m^{\prime}-3$ and that $M$ is the product of a manifold $M_{G,5,m^{\prime}-5}$ in Theorem \ref{thm:4} and $S^{m-m^{\prime}}$. By the isomorphisms before and Poincar\'e duality theorem for $W_f$, we have an element of $H_{m^{\prime}-(m-m^{\prime})}(W_f,\partial W_f;\mathbb{Z})$ which is not the zero element corresponding to the element ${q_f}_{\ast}(h_1)$, the value of the isomorphism presented before at $h_1$, in a canonical way uniquely. In a similar reason, there exists a suitable commutative group $G_{h_2}$ which is finite and cyclic and which is not the trivial group and we have an element of $H_{m^{\prime}-a}(W_f,\partial W_f;G_{h_2})$ which is not the zero element corresponding to the element of $H_{a}(W_f;G_{h_2})$, obtained by mapping $h_2$ by the isomorphism defined canonically by ${q_f}_{\ast}$ before and mapping by the canonically defined homomorphism defined naturally from the canonical quotient map from $\mathbb{Z}$ onto $G_{h_2}$, in a canonical way uniquely. For $h_1$, we consider the value of the canonically defined homomorphism defined naturally from the canonical quotient map from $\mathbb{Z}$ onto $G_{h_2}$ at the obtained element of $H_{m^{\prime}-(m-m^{\prime})}(W_f,\partial W_f;\mathbb{Z})$. We put these two elements by ${h_{1,G_{h_2}}}^{\prime} \in H_{m^{\prime}-(m-m^{\prime})}(W_f,\partial W_f;G_{h_2})$ and ${h_{2,G_{h_2}}}^{\prime} \in H_{m^{\prime}-a}(W_f,\partial W_f;G_{h_2})$. By considering a (so-called {\it generic}) {\it intersection}, we have an element of $H_1(W_f;\partial W_f;G_{h_2})$. 
The degree $1$ is due to the relation $(m^{\prime}-(m-m^{\prime}))+(m^{\prime}-a)-m^{\prime}=2m^{\prime}-m-a=1$ with the assumption on the integers. This is the sum of elements represented by closed intervals embedded smoothly and properly in $W_f$. Furthermore, two boundary points of the each interval is mapped into distinct connected components of $\partial W_f$ and the interior is embedded into the interior of $W_f$. Related to this, circles in (the interior of) $W_f$ are null-homotopic since $W_f$ is simply-connected. By respecting the bundle whose projection is $r:W \rightarrow M$ in Proposition \ref{prop:3} where we abuse the notation and considering a variant of so-called {\it Thom isomorphisms} or {\it prism-operators}, we have an element of $H_{m-m^{\prime}+2}(W,M;G_{h_2})$. By considering the boundary, we have an element of $H_{m-m^{\prime}+1}(M;G_{h_2})$. For this see \cite{saeki1} and for algebraic topological notions see \cite{hatcher,milnorstasheff} for example.
We go back to our arguments for the proof. If this element is not the zero element, then by fundamental arguments on Poincar\'e duality and intersection theory, this element must be obtained as an element which is the value of the canonically defined homomorphism induced naturally from the canonical quotient map from $\mathbb{Z}$ to $G_{h_2}$. By the structure of $M=M_{G,5,m^{\prime}-5} \times S^{m-m^{\prime}}$ and conditions on homology groups together with Poincar\'e duality and K\"unneth theorem for example, this is not the zero element and this is not an element which is the value of the canonically defined homomorphism induced naturally from the canonical quotient map from $\mathbb{Z}$ to $G_{h_2}$. We add that this homomorphism is one from $H_{m-m^{\prime}+1}(M;\mathbb{Z})$ into $H_{m-m^{\prime}+1}(M;G_{h_2})$ as a precise exposition.
We find a contradiction. We have (\ref{mthm:1.3}). This completes the proof.
\end{proof} \begin{proof}[A proof of Main Theorem \ref{mthm:2}]
$M_1$ is obtained as a connected sum of the following three manifolds taken in the smooth category.
\begin{itemize}
\item An $8$-dimensional manifold
admitting a product-organized special generic map into ${\mathbb{R}}^5$ in Theorem \ref{thm:4} by considering "$(m,n,G)=(8,5,G_{{\rm F},1})$ in the theorem".
\item An $8$-dimensional manifold
admitting a product-organized special generic map into ${\mathbb{R}}^6$ in Theorem \ref{thm:4} by considering "$(m,n,G)=(8,6,G_{{\rm F},2})$ in the theorem".
\item A manifold represented as a suitably chosen connected sum of manifolds diffeomorphic to $S^2 \times S^6$, $S^3 \times S^5$ or $S^4 \times S^4$, which admit special generic maps into ${\mathbb{R}}^6$ in Example \ref{ex:1}. The special generic maps discussed in Example \ref{ex:1} are also constructed as product-organized maps by fundamental arguments on special generic maps.
\end{itemize}
We can see the non-existence of special generic map on $M_1$ into ${\mathbb{R}}^n$ for $n=1,2,3,4,5$ from Propositions \ref{prop:2}, \ref{prop:4}, \ref{prop:5} and Theorem \ref{thm:1} for example.
By a fundamental argument for construction of special generic maps in \cite{saeki1}, we can construct a product-organized special generic map on $M_1$ into ${\mathbb{R}}^6$.
We give $M_2$. This is obtained as a connected sum of the following three manifolds taken in the smooth category.
\begin{itemize}
\item An $8$-dimensional manifold
admitting a product-organized special generic map into ${\mathbb{R}}^5$ in Theorem \ref{thm:4} by considering "$(m,n,G)=(8,5,G_{{\rm F},1})$ in the theorem".
\item
An $8$-dimensional closed and simply-connected manifold ``$M^{\prime} \times S^2$ in Main Theorem \ref{mthm:1}'', having the 3rd homology group $H_3(M^{\prime} \times S^2;\mathbb{Z})$ isomorphic to $G_{{\rm F},2}$. More precisely, we consider the case ``$(m,m^{\prime},a,G)=(8,6,3,G_{{\rm F},2})$ in Main Theorem \ref{mthm:1}''.
\item A manifold represented as a suitably chosen connected sum of manifolds diffeomorphic to $S^2 \times S^6$, $S^3 \times S^5$ or $S^4 \times S^4$, which admit special generic maps into ${\mathbb{R}}^7$ in Example \ref{ex:1}. These maps can be constructed as product-organized maps as before.
\end{itemize}
We can see the non-existence of special generic maps on $M_2$ into ${\mathbb{R}}^n$ for $n=1,2,3,4,5,6$. For $n=1,2,3,4,5$, for example, Propositions \ref{prop:2}, \ref{prop:4}, \ref{prop:5}, \ref{prop:6} and Theorem \ref{thm:1} complete the proof. For $n=6$ here, we apply Main Theorem \ref{mthm:1} directly or revise arguments in the proof of Main Theorem \ref{mthm:1} suitably if we need. In the case ($m=8$ and) $n=6$, $m-n=8-6=2$ and we do not have any sequence of elements of a finite length of $H^j(M;A)$ with $0 \leq j \leq m-n=2$ the cup product for which is not the zero element whose degree is greater than or equal to $6$. This means that we cannot apply Proposition \ref{prop:6} to give a proof for the case $n=6$. We can show the existence of special generic maps into ${\mathbb{R}}^n$ for $n=7,8$ similarly.
This completes the proof. \end{proof}
This adds to Main Theorem 4 of \cite{kitazawa2}, where the situations are a bit different. Finally, our new examples were discovered motivated especially by \cite{kitazawa6}. Compare those results to our new ones.
\end{document} |
\begin{document}
\markboth{J. Jansson, C. Johnson, and A. Logg}{Computational Modeling of Dynamical Systems}
\title{COMPUTATIONAL MODELING OF DYNAMICAL SYSTEMS}
\author{JOHAN JANSSON} \address{Department of Computational Mathematics,
Chalmers University of Technology, SE--412 96 G\"{o}teborg, Sweden,
\emph{email}: johanjan{\@@}math.chalmers.se.}
\author{CLAES JOHNSON} \address{Department of Computational Mathematics,
Chalmers University of Technology, SE--412 96 G\"{o}teborg, Sweden,
\emph{email}: claes{\@@}math.chalmers.se.}
\author{ANDERS LOGG} \address{Department of Computational Mathematics,
Chalmers University of Technology, SE--412 96 G\"{o}teborg, Sweden,
\emph{email}: logg{\@@}math.chalmers.se.}
\maketitle
\begin{abstract}
In this short note, we discuss the basic approach to computational
modeling of dynamical systems. If a dynamical system contains
multiple time scales, ranging from very fast to slow, computational
solution of the dynamical system can be very costly.
By resolving the fast time scales in a short time simulation, a
model for the effect of the small time scale variation on large time
scales can be determined, making solution possible on a long time interval.
This process of computational modeling can be completely automated.
Two examples are presented, including a simple model problem
oscillating at a time scale of $10^{-9}$ computed over the
time interval $[0,100]$, and a lattice
consisting of large and small point masses. \end{abstract}
\keywords{Modeling, dynamical system, reduced model, automation}
\section{Introduction}
We consider a dynamical system of the form \begin{equation}
\label{eq:u'=f}
\begin{array}{rcl}
\dot{u}(t) &=& f(u(t),t), \quad t \in (0,T], \\
u(0) &= u_0,
\end{array} \end{equation} where $u : [0,T] \rightarrow \mathbb{R}^N$ is the solution to be computed, $u_0 \in \mathbb{R}^N$ a given initial value, $T>0$ a given final time, and $f : \mathbb{R}^N \times (0,T] \rightarrow \mathbb{R}^N$ a given function that is Lipschitz-continuous in $u$ and bounded. We consider a situation where the exact solution $u$ varies on different time scales, ranging from very fast to slow. Typical examples include meteorological models for weather prediction, with fast time scales on the range of seconds and slow time scales on the range of years, protein folding represented by a molecular dynamics model of the form (\ref{eq:u'=f}), with fast time scales on the range of femtoseconds and slow time scales on the range of microseconds, or turbulent flow with a wide range of time scales.
To make computation feasible in a situation where computational resolution of the fast time scales would be prohibitive because of the small time steps required, the given model (\ref{eq:u'=f}) containing the fast time scales needs to be replaced with a \emph{reduced model} for the variation of the solution $u$ of (\ref{eq:u'=f}) on resolvable time scales. As discussed below, the key step is to correctly model the effect of the variation at the fast time scales on the variation on slow time scales.
The problem of model reduction is very general and various approaches have been taken\cite{RuhSko98,Kre91}. We present below a new approach to model reduction, based on resolving the fast time scales in a short time simulation and determining a model for the effect of the small time scale variation on large time scales. This process of computational modeling can be completely \emph{automated} and the validity of the reduced model can be evaluated a posteriori.
\section{A simple model problem}
We consider a simple example illustrating the basic aspects: Find $u=(u_1,u_2):[0,T] \rightarrow \mathbb{R}^2$, such that \begin{equation}
\label{eq:model}
\begin{array}{rcl}
\ddot{u}_1 + u_1 - u_2^2/2 &=& 0 \quad \mbox{on } (0,T],\\
\ddot{u}_2 + \kappa u_2 &=& 0 \quad \mbox{on } (0,T],\\
u(0) = (0,1) &&\quad \dot{u}(0) = (0,0),
\end{array} \end{equation} which models a moving unit point mass $M_1$ connected through a soft spring to another unit point mass $M_2$, with $M_2$ moving along a line perpendicular to the line of motion of $M_1$, see Figure \ref{fig:model}. The second point mass $M_2$ is connected to a fixed support through a very stiff spring with spring constant $\kappa = 10^{18}$ and oscillates rapidly on a time scale of size $1/\sqrt{\kappa} = 10^{-9}$. The oscillation of $M_2$ creates a force $\sim u_2^2$ on $M_1$ proportional to the elongation of the spring connecting $M_2$ to $M_1$ (neglecting terms of order $u_2^4$).
The short time scale of size $10^{-9}$ requires time steps of size $\sim 10^{-10}$ for full resolution. With $T = 100$, this means a total of $\sim 10^{12}$ time steps for solution of (\ref{eq:model}). However, by replacing (\ref{eq:model}) with a reduced model where the fast time scale has been removed, it is possible to compute the (averaged) solution of (\ref{eq:model}) with time steps of size $\sim 0.1$ and consequently only a total of $10^3$ time steps.
\begin{figure}
\caption{A simple mechanical system with large time scale $\sim 1$ and
small time scale $\sim 1/\sqrt{\kappa}$.}
\label{fig:model}
\end{figure}
\section{Taking averages to obtain the reduced model}
Having realized that point-wise resolution of the fast time scales of the exact solution $u$ of (\ref{eq:u'=f}) may sometimes be computationally very expensive or even impossible, we seek instead to compute a time average $\bar{u}$ of $u$, defined by \begin{equation}
\label{eq:average}
\bar{u}(t)= \frac{1}{\tau}\int_{-\tau/2}^{\tau/2}u(t+s)\, ds, \quad
t \in [\tau/2,T - \tau/2], \end{equation} where $\tau > 0$ is the size of the average. The average $\bar{u}$ can be extended to $[0,T]$ in various ways. We consider here a constant extension, i.e., we let $\bar{u}(t) = \bar{u}(\tau/2)$ for $t \in [0,\tau/2)$, and let $\bar{u}(t) = \bar{u}(T-\tau/2)$ for $t \in (T - \tau/2,T]$.
We now seek a dynamical system satisfied by the average $\bar{u}$ by taking the average of (\ref{eq:u'=f}). We obtain \begin{displaymath}
\dot{\bar{u}}(t) = \bar{\dot{u}}(t) = \overline{f(u,\cdot)}(t)
= f(\bar{u}(t),t) + (\overline{f(u,\cdot)}(t) - f(\bar{u}(t),t)), \end{displaymath} or \begin{equation}
\label{eq:u'=f,average}
\dot{\bar{u}}(t) = f(\bar{u}(t),t) + \bar{g}(u,t), \end{equation} where the \emph{variance} $\bar{g}(u,t) = \overline{f(u,\cdot)}(t) - f(\bar{u}(t),t)$ accounts for the effect of small scales on time scales larger than $\tau$. (Note that we may extend (\ref{eq:u'=f,average}) to $(0,T]$ by defining $\bar{g}(u,t) = - f(\bar{u}(t),t)$ on $(0,\tau/2] \cup (T-\tau/2,T]$.)
We now seek to model the variance $\bar{g}(u,t)$ in the form $\bar{g}(u,t)\approx \tilde{g}(\bar{u}(t),t)$ and replace (\ref{eq:u'=f,average}) and thus (\ref{eq:u'=f}) by \begin{equation}
\label{eq:reduced}
\begin{array}{rcl}
\dot{\tilde{u}}(t) &=& f(\tilde{u}(t),t) + \tilde{g}(\tilde{u}(t),t),
\quad t \in (0,T], \\
\tilde{u}(0) &=& \bar{u}_0,
\end{array} \end{equation} where $\bar{u}_0 = \bar{u}(0) = \bar{u}(\tau/2)$. We refer to this system as the \emph{reduced model} with \emph{subgrid model} $\tilde{g}$ corresponding to (\ref{eq:u'=f}).
To summarize, if the solution $u$ of the full dynamical system (\ref{eq:u'=f}) is computationally unresolvable, we aim at computing the average $\bar{u}$ of $u$. However, since the variance $\bar{g}$ in the averaged dynamical system (\ref{eq:u'=f,average}) is unknown, we need to solve the reduced model (\ref{eq:reduced}) for $\tilde{u} \approx \bar{u}$ with an approximate subgrid model $\tilde{g} \approx \bar{g}$. Solving the reduced model (\ref{eq:reduced}) using e.g. a Galerkin finite element method, we obtain an approximate solution $U \approx \tilde{u} \approx \bar{u}$. Note that we may not expect $U$ to be close to $u$ point-wise in time, while we hope that $U$ is close to $\bar{u}$ point-wise.
\section{Modeling the variance}
There are two basic approaches to the modeling of the variance $\bar{g}(u,t)$ in the form $\tilde{g}(\tilde{u}(t),t)$; (i) scale-extrapolation or (ii) local resolution. In (i), a sequence of solutions is computed with increasingly fine resolution, but without resolving the fastest time scales. A model for the effects of the fast unresolvable scales is then determined by extrapolation from the sequence of computed solutions\cite{Hof02}. In (ii), the approach followed below, the solution $u$ is computed accurately over a short time period, resolving the fastest time scales. The reduced model is then obtained by computing the variance \begin{equation}
\bar{g}(u,t) = \overline{f(u,\cdot)}(t) - f(\bar{u}(t),t) \end{equation} and then determining $\tilde{g}$ for the remainder of the time interval such that $\tilde{g}(\tilde{u}(t),t) \approx \bar{g}(u,t)$.
For the simple model problem (\ref{eq:model}), which we can write in the form (\ref{eq:u'=f}) by introducing the two new variables $u_3 = \dot{u}_1$ and $u_4 = \dot{u}_2$ with \begin{displaymath}
f(u,\cdot) = (u_3, u_4, -u_1 + u_2^2/2, -\kappa u_2), \end{displaymath} we note that $\bar{u}_2 \approx 0$ (for $\sqrt{\kappa} \tau$ large) while $\overline{u_2^2} \approx 1/2$. By the linearity of $f_1$, $f_2$, and $f_4$, the (approximate) reduced model takes the form \begin{equation}
\label{eq:model,reduced}
\begin{array}{rcl}
\ddot{\tilde{u}}_1 + \tilde{u}_1 - 1/4 &=& 0 \quad \mbox{on } (0,T],\\
\ddot{\tilde{u}}_2 + \kappa \tilde{u}_2 &=& 0 \quad \mbox{on } (0,T],\\
\tilde{u}(0) = (0,0), &&\quad \dot{\tilde{u}}(0) = (0,0),
\end{array} \end{equation} with solution $\tilde{u}(t) = (\frac{1}{4}(1 - \cos t),0)$.
In general, the reduced model is constructed with subgrid model $\tilde{g}$ varying on resolvable time scales. In the simplest case, it is enough to model $\tilde{g}$ with a constant and repeatedly checking the validity of the model by comparing the reduced model (\ref{eq:reduced}) with the full model (\ref{eq:u'=f}) in a short time simulation. Another possibility is to use a piecewise polynomial representation for the subgrid model $\tilde{g}$.
\section{Solving the reduced system}
Although the presence of small scales has been decreased in the reduced system (\ref{eq:reduced}), the small scale variation may still be present. This is not evident in the reduced system (\ref{eq:model,reduced}) for the simple model problem (\ref{eq:model}), where we made the approximation $\tilde{u}_2(0) = 0$. In practice, however, we compute $\tilde{u}_2(0) = \frac{1}{\tau}\int_0^\tau u_2(t) \, dt = \frac{1}{\tau}\int_0^\tau \cos(\sqrt{\kappa} t) \, dt \sim 1/(\sqrt{\kappa}\tau)$ and so $\tilde{u}_2$ oscillates at the fast time scale $1/\sqrt{\kappa}$ with amplitude $1/(\sqrt{\kappa}\tau)$.
To remove these oscillations, the reduced system needs to be stabilized by introducing damping of high frequencies. Following the general approach\cite{HofJoh04b}, a least squares stabilization is added in the Galerkin formulation of the reduced system (\ref{eq:reduced}) in the form of a modified test function. As a result, damping is introduced for high frequencies without affecting low frequencies.
Alternatively, components such as $u_2$ in (\ref{eq:model,reduced}) may be \emph{inactivated}, corresponding to a subgrid model of the form $\tilde{g}_2(\tilde{u},\cdot) = -f_2(\tilde{u},\cdot)$. We take this simple approach for the example problems presented below.
\section{Error analysis}
The validity of a proposed subgrid model may be checked a posteriori. To analyze the modeling error introduced by approximating the variance $\bar{g}$ with the subgrid model $\tilde{g}$, we introduce the \emph{dual problem} \begin{equation}
\label{eq:dual}
\begin{array}{rcl}
- \dot{\phi}(t) &=& J(\bar{u},U,t)^{\top} \phi(t), \quad t \in [0,T), \\
\phi(T) &=& \psi,
\end{array} \end{equation} where $J$ denotes the Jacobian of the right-hand side of the dynamical system (\ref{eq:u'=f}) evaluated at a mean value of the average $\bar{u}$ and the computed numerical (finite element) solution $U \approx \tilde{u}$ of the reduced system (\ref{eq:reduced}), \begin{equation}
J(\bar{u},U,t) = \int_0^1 \frac{\partial f}{\partial u}
(s \bar{u}(t) + (1-s) U(t),t) \, ds, \end{equation} and where $\psi$ is initial data for the backward dual problem.
To estimate the error $\bar{e} = U - \bar{u}$ at final time, we note that $\bar{e}(0) = 0$ and $\dot{\phi} + J(\bar{u},U,\cdot)^{\top} \phi = 0$, and write \begin{displaymath}
\begin{array}{rcl}
(\bar{e}(T),\psi)
&=& (\bar{e}(T),\psi) -
\int_0^T (\dot{\phi} + J(\bar{u},U,\cdot)^{\top} \phi,
\bar{e}) \, dt \\
&=& \int_0^T (\phi, \dot{\bar{e}} - J\bar{e}) \, dt
= \int_0^T (\phi, \dot{U} - \dot{\bar{u}} -
f(U,\cdot) + f(\bar{u},\cdot)) \, dt \\
&=& \int_0^T (\phi, \dot{U} - f(U,\cdot) - \tilde{g}(U,\cdot)) \, dt +
\int_0^T (\phi, \tilde{g}(U,\cdot) - \bar{g}(u,\cdot)) \, dt \\
&=& \int_0^T (\phi, \tilde{R}(U,\cdot)) \, dt +
\int_0^T (\phi, \tilde{g}(U,\cdot) - \bar{g}(u,\cdot)) \, dt.
\end{array} \end{displaymath} The first term, $\int_0^T (\phi, \tilde{R}(U,\cdot)) \, dt$, in this \emph{error representation} corresponds to the \emph{discretization error} $U - \tilde{u}$ for the numerical solution of (\ref{eq:reduced}). If a Galerkin finite element method is used\cite{EriEst95,EriEst96}, the \emph{Galerkin orthogonality} expressing the orthogonality of the residual $\tilde{R}(U,\cdot) = \dot{U} - f(U,\cdot) - \tilde{g}(U,\cdot)$ to a space of test functions can be used to subtract a test space interpolant $\pi \phi$ of the dual solution $\phi$. In the simplest case of the $\mathrm{cG}(1)$ method for a partition of the interval $(0,T]$ into $M$ subintervals $I_j = (t_{j-1},t_j]$, each of length $k_j = t_j - t_{j-1}$, we subtract a piecewise constant interpolant to obtain \begin{displaymath}
\begin{array}{rcl}
\int_0^T (\phi, \tilde{R}(U,\cdot)) \, dt
&=& \int_0^T (\phi - \pi \phi, \tilde{R}(U,\cdot)) \, dt
\leq \sum_{j=1}^M k_j \max_{I_j} \|\tilde{R}(U,\cdot)\|_{l_2}
\int_{I_j} \|\dot{\phi}\|_{l_2} \, dt \\
&\leq& S^{[1]}(T) \max_{[0,T]} \|k\tilde{R}(U,\cdot)\|_{l_2},
\end{array} \end{displaymath}
where the \emph{stability factor} $S^{[1]}(T) = \int_0^T \|\dot{\phi}\|_{l_2} \, dt$ measures the sensitivity to discretization errors for the given output quantity $(\bar{e}(T),\psi)$.
The second term, $\int_0^T (\phi, \tilde{g}(U,\cdot) - \bar{g}(u,\cdot)) \, dt$, in the error representation corresponds to the \emph{modeling error} $\tilde{u} - \bar{u}$. The sensitivity to modeling errors is measured by the stability factor $S^{[0]}(T) = \int_0^T \|\phi\|_{l_2} \, dt$. We notice in particular that if the stability factor $S^{[0]}(T)$ is of moderate size, a reduced model of the form (\ref{eq:reduced}) for $\tilde{u} \approx \bar{u}$ may be constructed.
We thus obtain the error estimate \begin{equation}
\vert (\bar{e}(T),\psi) \vert
\leq S^{[1]}(T) \max_{[0,T]} \|k\tilde{R}(U,\cdot)\|_{l_2} +
S^{[0]}(T) \max_{[0,T]} \|\tilde{g}(U,\cdot) - \bar{g}(u,\cdot)\|_{l_2}, \end{equation} including both discretization and modeling errors. The initial data $\psi$ for the dual problem (\ref{eq:dual}) is chosen to reflect the desired output quantity, e.g. $\psi = (1,0,\ldots,0)$ to measure the error in the first component of $U$.
To estimate the modeling error, we need to estimate the quantity $\tilde{g} - \bar{g}$. This estimate is obtained by repeatedly solving the full dynamical system (\ref{eq:u'=f}) at a number of control points and comparing the subgrid model $\tilde{g}$ with the computed variance $\bar{g}$. As initial data for the full system at a control point, we take the computed solution $U \approx \bar{u}$ at the control point and add a perturbation of appropriate size, with the size of the perturbation chosen to reflect the initial oscillation at the fastest time scale.
\section{Numerical results}
We present numerical results for two model problems, including the simple model problem (\ref{eq:model}), computed with DOLFIN\cite{logg:www:01} version 0.4.10. With the option \emph{automatic modeling} set, DOLFIN automatically creates the reduced model (\ref{eq:reduced}) for a given dynamical system of the form (\ref{eq:u'=f}) by resolving the full system in a short time simulation and then determining a constant subgrid model $\bar{g}$. Components with constant average, such as $u_2$ in (\ref{eq:model}), are automatically marked as inactive and are kept constant throughout the simulation. The automatic modeling implemented in DOLFIN is rudimentary and many improvements are possible, but it represents a first attempt at the automation of modeling, following the recently presented\cite{logg:thesis:03} directions for the \emph{automation of computational mathematical modeling}.
\subsection{The simple model problem}
The solution for the two components of the simple model problem (\ref{eq:model}) is shown in Figure \ref{fig:model,solution} for $\kappa = 10^{18}$ and $\tau = 10^{-7}$. The value of the subgrid model $\bar{g}_1$ is automatically determined to $0.2495 \approx 1/4$.
\begin{figure}
\caption{The solution of the simple model problem (\ref{eq:model})
on $[0,100]$ (above) and on $[0,4\cdot{10}^{-7}]$ (below). The automatic
modeling is activated at time $t = 2\tau = 2\cdot{10}^{-7}$.}
\label{fig:model,solution}
\end{figure}
\subsection{A lattice with internal vibrations}
The second example is a lattice consisting of a set of $p^2$ large and $(p-1)^2$ small point masses connected by springs of equal stiffness $\kappa = 1$, as shown in Figure \ref{fig:latticedetail} and Figure \ref{fig:lattice}. Each large point mass is of size $M = 100$ and each small point mass is of size $m = 10^{-12}$, giving a large time scale of size $\sim 10$ and a small time scale of size $\sim 10^{-6}$.
The fast oscillations of the small point masses make the initially stationary structure of large point masses contract. Without resolving the fast time scales and ignoring the subgrid model, the distance $D$ between the lower left large point mass at $x = (0,0)$ and the upper right large point mass at $x = (1,1)$ remains constant, $D = \sqrt{2}$. In Figure \ref{fig:lattice,solution2}, we show the computed solution with $\tau = 10^{-4}$, which manages to correctly capture the oscillation in the diameter $D$ of the lattice as a consequence of the internal vibrations at time scale $10^{-6}$.
With a constant subgrid model $\bar{g}$ as in the example, the reduced model stays accurate until the configuration of the lattice has changed sufficiently. When the change becomes too large, the reduced model can no longer give an accurate representation of the full system, as shown in Figure \ref{fig:lattice,solution1}. At this point, the reduced model needs to be reconstructed in a new short time simulation.
\begin{figure}
\caption{Detail of the lattice. The arrows indicate the direction
of vibration perpendicular to the springs connecting the small
mass to the large masses.}
\label{fig:latticedetail}
\end{figure}
\begin{figure}
\caption{Lattice consisting of $p^2$ large masses and $(p-1)^2$ small masses.}
\label{fig:lattice}
\end{figure}
\begin{figure}
\caption{The diameter $D$ of the lattice as function of time on
$[0,20]$ (left) and on $[0,100]$ (right) for $m = 10^{-4}$ and
$\tau = 1$. The solid line represents the diameter for the solution of the
reduced system (\ref{eq:reduced}) and the dashed line
represents the solution of the full system (\ref{eq:u'=f}).}
\label{fig:lattice,solution1}
\end{figure}
\begin{figure}
\caption{Distance $D$ between the lower left large mass and the upper
right large mass (above) and the distance $d$ between the lower
left large mass and the lower left small mass (below) as function of
time on $[0,10]$ and on $[0,4\cdot 10^{-4}]$, respectively.}
\label{fig:lattice,solution2}
\end{figure}
\end{document} |
\begin{document}
\title{Transformed CNNs: recasting pre-trained convolutional layers with self-attention} \begin{abstract} Vision Transformers (ViT) have recently emerged as a powerful alternative to convolutional networks (CNNs). Although hybrid models attempt to bridge the gap between these two architectures, the self-attention layers they rely on induce a strong computational bottleneck, especially at large spatial resolutions. In this work, we explore the idea of reducing the time spent training these layers by initializing them as convolutional layers. This enables us to transition smoothly from any pre-trained CNN to its functionally identical hybrid model, called Transformed CNN (T-CNN). With only 50 epochs of fine-tuning, the resulting T-CNNs demonstrate significant performance gains over the CNN (+2.2\% top-1 on ImageNet-1k for a ResNet50-RS) as well as substantially improved robustness (+11\% top-1 on ImageNet-C). We analyze the representations learnt by the T-CNN, providing deeper insights into the fruitful interplay between convolutions and self-attention. Finally, we experiment with initializing the T-CNN from a partially trained CNN, and find that it reaches better performance than the corresponding hybrid model trained from scratch, while reducing training time. \end{abstract}
\section*{Introduction}
Since the success of AlexNet in 2012~\cite{krizhevsky2017imagenet}, the field of Computer Vision has been dominated by Convolutional Neural Networks (CNNs)~\cite{lecun1998gradient,lecun1989backpropagation}. Their local receptive fields give them a strong inductive bias to exploit the spatial structure of natural images~\cite{scherer_evaluation_2010,schmidhuber_deep_2015,goodfellow_deep_2016}, while allowing them to scale to large resolutions seamlessly. Yet, this inductive bias limits their ability to capture long-range interactions.
In this regard, self-attention (SA) layers, originally introduced in language models~\cite{bahdanau2014neural,vaswani2017attention,devlin2018bert}, have gained interest as a building block for vision~\cite{ramachandran2019stand,zhao2020exploring}. Recently, they gave rise to a plethora of Vision Transformer (ViT) models, able to compete with state-of-the-art CNNs in various tasks~\cite{dosovitskiy2020image,touvron2020training,wu_visual_2020,touvron2021going,liu2021swin,heo2021rethinking} while demonstrating better robustness~\cite{bhojanapalli2021understanding,mao2021rethinking}. However, capturing long-range dependencies necessarily comes at the cost of quadratic complexity in input size, a computational burden which many recent directions have tried to alleviate~\cite{bello2021lambdanetworks,wang2020self,choromanski2020rethinking,katharopoulos2020transformers}. Additionally, ViTs are generally harder to train~\cite{zhang2019adaptive,liu2020understanding}, and require vast amounts of pre-training~\cite{dosovitskiy2020image} or distillation from a convolutional teacher~\cite{hinton2015distilling,jiang2021token,graham2021levit} to match the performance of CNNs.
Faced with the dilemma between efficient CNNs and powerful ViTs, several approaches have aimed to bridge the gap between these architectures. On one side, hybrid models append SA layers onto convolutional backbones ~\cite{chen20182,bello2019attention,graham2021levit,chen2021visformer,srinivas2021bottleneck}, and have already fueled successful results in a variety of tasks~\cite{carion2020end,hu2018relation,chen2020uniter,locatello2020object,sun2019videobert}. Conversely, a line of research has studied the benefit of introducing convolutional biases in Transformer architectures to ease learning~\cite{d2021convit,wu2021cvt,yuan2021incorporating}. Despite these interesting compromises, modelling long-range dependencies at low computational cost remains a challenge for practitioners.
\begin{figure}
\caption{\textbf{Transformed ResNets strike a strong accuracy-robustness balance.} Our models (red) significantly outperform the original ResNet-RS models (dark blue) they were initialized from when evaluated on ImageNet-1k. On various robustness benchmarks (ImageNet-C, A and R, from left to right), they narrow or close the gap with Transformer architectures.}
\label{fig:intro}
\end{figure}
\paragraph{Contributions} At a time when pre-training on vast datasets has become common practice, we ask the following question: does one need to train the SA layers during the whole learning process? Could one instead learn cheap components such as convolutions first, leaving the SA layers to be learnt at the end? In this paper, we take a step in this direction by presenting a method to fully reparameterize a pre-trained convolutional layer as a \textit{Gated Positional Self-Attention} (GPSA) layer~\cite{d2021convit}. The latter is initialized to reproduce the mapping of the convolutional layer, but is then encouraged to learn more general mappings which are not accessible to the CNN by adjusting positional gating parameters.
We leverage this method to reparametrize pre-trained CNNs as functionally equivalent hybrid models. After only 50 epochs of fine-tuning, the resulting Transformed CNNs (T-CNNs) boast significant performance and robustness improvements as shown in Fig.~\ref{fig:intro}, demonstrating the practical relevance of our method. We analyze the inner workings of the T-CNNs, showing how they learn more robust representations by combining convolutional heads and SA heads in a complementary way. Finally, we investigate how performance gains depend on the reparametrization epoch. Results suggest that reparametrizing at intermediate times is optimal in terms of speed-performance trade-offs.
\paragraph{Related work}
Our work mainly builds on two pillars. First, the idea that SA layers can express any convolution, introduced by~\citet{cordonnier2019relationship}. This idea was recently leveraged in~\citet{d2021convit}, which initialize the SA layers of the ViT as \textit{random} convolutions and observe performance gains compared to the standard initialization, especially in the low-data regime where inductive biases are most useful. Our approach is a natural follow-up of this idea: what happens if the SA layers are instead initialized as \textit{trained} convolutions?
Second, we exploit the following learning paradigm: train a simple and fast model, then reparameterize it as a more complex model for the final stages of learning. This approach was studied from a scientific point of view in~\citet{d2019finding}, which shows that reparameterizing a CNN as a fully-connected network (FCN) halfway through training can lead the FCN to outperform the CNN. Yet, the practical relevance of this method is limited by the vast increase in number of parameters required by the FCN to functionally represent the CNN. In contrast, our reparameterization hardly increases the parameter count of the CNN, making it easily applicable to any state-of-the-art CNN. Note that these reparameterization methods can be viewed as an informed version of dynamic architecture growing algorithms such as AutoGrow~\cite{wen2020autogrow}.
In the context of hybrid models, various works have studied the performance gains obtained by introducing MHSA layers in ResNets with minimal architectural changes~\cite{srinivas2021bottleneck,graham2021levit,chen2021visformer}. However, the MHSA layers used in these works are initialized randomly and need to be trained from scratch. Our approach is different, as it makes use of GPSA layers, which can be initialized to represent the same function as the convolutional layer it replaces. We emphasize that the novelty in our work is not in the architectures used, but in the unusual way they are blended together.
\section{Background}
\paragraph{Multi-head self-attention} \label{sec:mhsa}
The SA mechanism is based on a trainable associative memory with (key, query) vector pairs. To extract the semantic interdependencies between the $L$ elements of a sequence $\boldsymbol X\in\mathbb{R}^{L\times D_{in}}$, a sequence of ``query'' embeddings $\boldsymbol Q = \boldsymbol W_{qry} \boldsymbol X\in \mathbb{R}^{L\times D_{h}}$ is matched against another sequence of ``key'' embeddings $\boldsymbol K = \boldsymbol W_{key} \boldsymbol X\in\mathbb{R}^{L\times D_{h}}$ using inner products. The result is an attention matrix whose entry $(ij)$ quantifies how semantically relevant $\boldsymbol Q_i$ is to $\boldsymbol K_j$: \begin{equation}
\boldsymbol A = \operatorname{softmax}\left(\frac{\boldsymbol Q \boldsymbol K^\top}{\sqrt {D_{h}}}\right) \in \mathbb{R}^{L\times L}.
\label{eq:attention} \end{equation}
Multi-head SA layers use several SA heads in parallel to allow the learning of different kinds of dependencies: \begin{align}
\operatorname{MSA}(\boldsymbol{X}):=\sum_{h=1}^{N_h} \left[\text{SA}_{h}(\boldsymbol{X})\right] \boldsymbol{W}^h_{out}, \quad\quad \text{SA}_h(\boldsymbol{X}) := \boldsymbol{A}^h \boldsymbol{X} \boldsymbol{W}_{val}^h, \end{align} where $\boldsymbol W_\text{val}^h \in \mathbb{R}^{D_{in}\times D_{v}}$ and $\boldsymbol W_\text{out}^h \in \mathbb{R}^{D_{v}\times D_{out}}$ are two learnable projections.
To incorporate positional information, ViTs usually add absolute position information to the input at embedding time, before propagating it through the SA layers. Another possibility is to replace the vanilla SA with positional SA (PSA), including a position-dependent term in the softmax~\cite{ramachandran2019stand, shaw2018self}. Although there are several ways to parametrize the positional attention, we use encodings $\boldsymbol r_{ij}$ of the relative position of pixels $i$ and $j$ as in~\cite{cordonnier2019relationship,d2021convit}: \begin{align}
\boldsymbol{A}^h_{ij}:=\operatorname{softmax}\left(\boldsymbol Q^h_i \boldsymbol K^{h\top}_j+\boldsymbol{v}_{pos}^{h\top} \boldsymbol{r}_{ij}\right). \label{eq:local-attention} \end{align} Each attention head learns an embedding $\boldsymbol{v}_{pos}^h \in \mathbb R^{D_{pos}}$, and the relative positional encodings $\boldsymbol{r}_{ij}\in \mathbb R^{D_{pos}}$ only depend on the distance between pixels $i$ and $j$, denoted as a two-dimensional vector $\boldsymbol \delta_{ij}$.
\paragraph{Self-attention as a generalized convolution}
\citet{cordonnier2019relationship} shows that a multi-head PSA layer (Eq. \ref{eq:local-attention}) with $N_h$ heads and dimension $D_{pos}\geq 3$ can express any convolutional layer of filter size $\sqrt N_h$, with $D_{in}$ input channels and $\min(D_v, D_{out})$ output channels, by setting the following: \begin{align}
\begin{cases}
&\boldsymbol{v}_{pos}^{h}:=-\alpha^{h}\left(1,-2 \Delta_{1}^{h},-2 \Delta_{2}^{h}, 0,\ldots 0\right)\\ &\boldsymbol{r}_{\boldsymbol{\delta}}:=\left(\|\boldsymbol{\delta}\|^{2}, \delta_{1}, \delta_{2},0,\ldots 0\right)\\
&\boldsymbol{W}_{q r y}=\boldsymbol{W}_{k e y}:=\mathbf{0}
\end{cases}
\label{eq:local-init} \end{align}
In the above, the \emph{center of attention} $\boldsymbol \Delta^h\in\mathbb{R}^2$ is the position to which head $h$ pays most attention, relative to the query pixel, whereas the \emph{locality strength} $\alpha^h>0$ determines how focused the attention is around its center $\boldsymbol \Delta^h$. When $\alpha^h$ is large, the attention is focused only on the pixel located at $\boldsymbol \Delta^h$; when $\alpha^h$ is small, the attention is spread out into a larger area. Thus, the PSA layer can achieve a convolutional attention map by setting the centers of attention $\boldsymbol \Delta^h$ to each of the possible positional offsets of a $\sqrt{N_h}\times \sqrt{N_h}$ convolutional kernel, and sending the locality strengths $\alpha^h$ to some large value.
\section{Approach}
In this section, we introduce our method for mapping a convolutional layer to a functionally equivalent PSA layer with minimal increase in parameter count. To do this, we leverage the GPSA layers introduced in~\citet{d2021convit}.
\paragraph{Loading the filters}
We want each head $h$ of the PSA layer to functionally mimic the pixel $h$ of a convolutional filter $\boldsymbol W_\text{filter} \in \mathbb{R}^{N_h \times D_{in} \times D_{out}}$, where we typically have $D_{out}\geq D_{in}$. Rewriting the action of the MHSA operator in a more explicit form, we have \begin{equation}
\operatorname{MHSA}(\boldsymbol{X})=\sum_{h =1}^{N_h} \boldsymbol{A}^{h} \boldsymbol{X} \underbrace{\boldsymbol{W}_{\text {val }}^{h} \boldsymbol{W}_{\text {out }}^h}_{\boldsymbol{W}^{h}\in\mathbb{R}^{D_{in}\times D_{out}}} \end{equation} In the convolutional configuration of Eq.~\ref{eq:local-init}, $\boldsymbol A^h \boldsymbol X$ selects pixel $h$ of $\boldsymbol X$. Hence, we need to set $\boldsymbol W^h=\boldsymbol W_\text{filter}^h$. However, as a product of matrices, the rank of $\boldsymbol W^h$ is bottlenecked by $D_v$. To avoid this being a limitation, we need $D_v \geq D_{in}$ (since $D_{out}\geq D_{in}$). To achieve this with a minimal number of parameters, we choose $D_v = D_{in}$, and simply set the following initialization: \begin{align}
\boldsymbol W_\text{val}^h = \boldsymbol I, \quad\quad\quad \boldsymbol W_\text{out}^h = \boldsymbol W_\text{filter}^h. \end{align} Note that this differs from the usual choice made in SA layers, where $D_v = \lfloor D_{in}/N_h \rfloor$. However, to keep the parameter count the same, we share the same $\boldsymbol W_{val}^h$ across different heads $h$, since it plays a symmetric role at initialization.
Note that this reparameterization introduces three additional matrices compared to the convolutional filter: $\boldsymbol W_{qry}, \boldsymbol W_{key}, \boldsymbol W_{val}$, each containing $ D_{in} \times D_{in}$ parameters. However, since the convolutional filter contains $N_h \times D_{in} \times D_{out}$ parameters, where we typically have $N_h=9$ and $D_{out}\in\{D_{in},2D_{in}\}$, these additional matrices are much smaller than the filters and hardly increase the parameter count. This can be seen from the model sizes in Tab.~\ref{tab:robustness}.
\paragraph{Gated Positional self-attention} Recent work~\cite{d2021convit} has highlighted an issue with standard PSA: the fact that the content and positional terms in Eq.~\ref{eq:local-attention} are potentially of very different magnitudes, in which case the softmax ignores the smallest of the two. This can typically lead the PSA to adopt a greedy attitude: choosing the form of attention (content or positional) which is easiest at a given time then sticking to it.
To avoid this, the ConViT~\cite{d2021convit} uses GPSA layers which sum the content and positional terms \emph{after} the softmax, with their relative importances governed by a learnable \emph{gating} parameter $\lambda_h$ (one for each attention head). In GPSA layers, the attention is parametrized as follows: \begin{align}
\boldsymbol{A}^h_{ij}:=&\left(1-\sigma(\lambda_h)\right) \operatorname{softmax}\left(\boldsymbol Q^h_i \boldsymbol K^{h\top}_j\right) + \sigma(\lambda_h) \operatorname{softmax}\left(\boldsymbol{v}_{pos}^{h\top} \boldsymbol{r}_{ij}\right),
\label{eq:gating-param} \end{align} where $\sigma:x\mapsto \nicefrac{1}{\left(1+e^{-x}\right)}$ is the sigmoid function. In the positional part, the encodings $\boldsymbol r_{ij}$ are fixed rather than learnt (see Eq.~\ref{eq:local-init}), which makes changing input resolution straightforward (see SM.~\ref{app:resolution}) and leaves only 3 learnable parameters per head: $\boldsymbol \Delta_1, \boldsymbol \Delta_2$ and $\alpha$\footnote{Since $\alpha$ represents the temperature of the softmax, its value must stay positive at all times. To ensure this, we instead learn a rectified parameter $\tilde \alpha$ using the softplus function: $\alpha = \frac{1}{\beta} \log (1+e^{-\beta \tilde \alpha})$, with $\beta=5$.}.
\paragraph{How convolutional should the initialization be?}
The convolutional initialization of GPSA layers involves two parameters, determining how strictly convolutional the behavior is: the initial value of the \emph{locality strength} $\alpha$, which determines how focused each attention head is on its dedicated pixel, and the initial value of the \emph{gating parameters} $\lambda$, which determines the importance of the positional information versus content. If $\lambda_h\gg 0$ and $\alpha\gg 1$, the T-CNN will perfectly reproduce the input-output function of the CNN, but may stay stuck in the convolutional configuration. Conversely, if $\lambda_h\ll 0$ and $\alpha\ll 1$, the T-CNN will poorly reproduce the input-output function of the CNN. Hence, we choose $\alpha =1$ and $\lambda = 1$ to lie in between these two extremes. This puts the T-CNN ``on the verge of locality'', enabling it to escape locality effectively throughout training.
\paragraph{Architectural details} To make our setup as canonical as possible, we focus on ResNet architectures~\cite{he2016deep}, which contain 5 stages, with spatial resolution halved and number of channels doubled at each stage. Our method involves reparameterizing $3\times 3$ convolutions as GPSA layers with 9 attention heads. However, global SA is too costly in the first layers, where the spatial resolution is large. We therefore only reparameterize the last stage of the architecture, while replacing the first stride-2 convolution by a stride-1 convolution, exactly as in~\cite{srinivas2021bottleneck}. We also add explicit padding layers to account for the padding of the original convolutions.
\section{Performance of the Transformed CNNs} \label{sec:finetuning}
In this section, we apply our reparametrization to state-of-the-art CNNs, then fine-tune the resulting T-CNNs to learn better representations. This method allows us to fully disentangle the training of the SA layers from that of the convolutional backbone, which is of practical interest for two reasons. First, it minimizes the time spent training the SA layers, which typically have a slower throughput. Second, it separates the algorithmic choices of the CNN backbone from those of the SA layers, which are typically different; for example, CNNs are typically trained with SGD whereas SA layers perform much better with adaptive optimizers such as Adam~\cite{zhang2019adaptive}, an incompatibility which may limit the performance of usual hybrid models.
\paragraph{Training details} To minimize computational cost, we restrict the fine-tuning to 50 epochs\footnote{We study how performance depends on the number of fine-tuning epochs in SM.~\ref{app:epochs}.}. Following~\cite{zhang2019adaptive}, we use the AdamW optimizer, with a batch size of 1024\footnote{Confirming the results of~\cite{zhang2019adaptive}, we obtained worse results with SGD.}. The learning rate is warmed up to $10^{-4}$ then annealed using a cosine decay. To encourage the T-CNN to escape the convolutional configuration and learn content-based attention, we use a larger learning rate of 0.1 for the gating parameters of Eq.~\ref{eq:gating-param} (one could equivalently decrease the temperature of the sigmoid function).
We use the same data augmentation scheme as the DeiT~\cite{touvron2020training}, as well as rather large stochastic depth coefficients $d_r$ reported in Tab.~\ref{tab:finetune}. Hoping that our method could be used as an alternative to the commonly used practice of fine-tuning models at higher resolution, we also increase the resolution during fine-tuning~\cite{touvron2019fixing}. In this setting, a ResNet50 requires only 6 hours of fine-tuning on 16 V100 GPUs, compared to 33 hours for the original training. For our largest model (ResNet350-RS), the fine-tuning lasts 50 hours.
\paragraph{Performance gains}
\begin{figure}
\caption{ImageNet-1k}
\caption{Robustness benchmarks}
\caption{\textbf{T-CNNs present better speed-accuracy trade-offs than the CNNs they stem from.} Total training time (original training + finetuning) is normalized by the total training time of the ResNet50-RS. Inference throughput is the number of images processed per second on a V100 GPU at batch size 32.}
\label{fig:pareto}
\end{figure}
We applied our method to pre-trained ResNet-RS~\cite{bello2021revisiting} models, using the weights provided by the timm package~\cite{rw2019timm}. These models are derived from the original ResNet~\cite{he2016deep}, but use improved architectural features and training strategies, enabling them to reach better speed-accuracy trade-offs than EfficientNets. Results are presented in Tab.~\ref{tab:finetune}, where we also report the baseline improvement of fine-tuning in the same setting but without SA. In all cases, our fine-tuning improves top-1 accuracy, with a significant gap over the baseline. To demonstrate the wide applicability of our method, we report similar improvements for ResNet-D architectures in SM.~\ref{app:resnetd}.
Despite the extra fine-tuning epochs and their slower throughput, the resulting T-CNNs match the performance of the original CNNs at equal throughput, while significantly outperforming them at equal total training time, as shown in the Pareto curves of Fig.~\ref{fig:pareto}(a)\footnote{We estimated the training times of the original ResNet-RS models based on their throughput, for the same hardware as used for the T-ResNet-RS.}. However, the major benefit of the reparametrization is in terms of robustness, as shown in Fig.~\ref{fig:pareto}(b) and explained below.
\begin{table}[tb]
\centering
\begin{tabular}{{c|cc|cc|cc|cc|cc}}
\toprule
\multirow{3}{*}{\textbf{Backbone}} & \multicolumn{4}{c|}{Training} & \multicolumn{6}{c}{Fine-tuning} \\\cline{2-11}
\rule{0pt}{3ex}
&&&&&&& \multicolumn{2}{c|}{Without SA} & \multicolumn{2}{c}{With SA} \\
& Res. & $d_r$ & TTT & Top-1 & Res. & $d_r$ & TTT & Top-1 & TTT & Top-1 \\
\midrule
ResNet50-RS & 160 & 0.0 & 1 (ref.) & 78.8 & 224 & 0.1 & 1.16 & 80.4 & 1.30 & \textbf{81.0}\\
ResNet101-RS & 192 & 0.0 & 1.39 & 80.3 & 224 & 0.1 & 1.65 & 81.9 & 1.79 & \textbf{82.4}\\
ResNet152-RS & 256 & 0.0 & 3.08 & 81.2 & 320 & 0.2 & 3.75 & 83.4 & 4.13 & \textbf{83.7}\\
ResNet200-RS & 256 & 0.1 & 4.15 & 82.8 & 320 & 0.2 & 5.04 & 83.7 & 5.42 & \textbf{84.0}\\
ResNet270-RS & 256 & 0.1 & 6.19 & 83.8 & 320 & 0.2 & 7.49 & 83.9 & 7.98 & \textbf{84.3}\\
ResNet350-RS & 288 & 0.1 & 10.49 & 84.0 & 320 & 0.2 & 12.17 & 84.1 & 12.69& \textbf{84.5}\\
\bottomrule
\end{tabular}
\caption{\textbf{Statistics of the models considered, trained from scratch on ImageNet.} Top-1 accuracy is measured on ImageNet-1k validation set. ``TTT'' stands for total training time (including fine-tuning), normalized by the total training time of the ResNet50-RS. $d_r$ is the stochastic depth coefficient used for the various models.
}
\label{tab:finetune} \end{table}
\begin{figure}
\caption{\textbf{Robustness is most improved for strong and blurry corruption categories.} We report the relative improvement between the top-1 accuracy of the T-ResNet50-RS and that of the ResNet50-RS on ImageNet-C, averaging over the different corruption categories (left) and corruption severities (right).}
\label{fig:robustness}
\end{figure}
\newcolumntype{?}{!{\vrule width 1pt}} \begin{table}
\centering
\begin{tabular}{c|cccc|cccc}
\toprule
\textbf{Model} & Res. & Params & Speed & Flops & ImNet-1k & ImNet-C & ImNet-A & ImNet-R\\\midrule
\multicolumn{9}{c}{Transformers}\\\midrule
ViT-B/16 & 224 & 86 M & 182 & 16.9 & 77.9 & 52.2 & 7.0 & 21.9 \\
ViT-L/16 & 224 & 307 M & 55 & 59.7 & 76.5 & 49.3 & 6.1 & 17.9 \\\midrule
DeiT-S & 224 & 22 M & 544 & 4.6 & 79.9 & 55.4 & 18.9 & 31.0 \\
DeiT-B & 224 & 87 M & 182 & 17.6 & 82.0 & 60.7 & 27.4 & 34.6 \\\midrule
ConViT-S & 224 & 28 M & 296 & 5.4 & 81.5 & 59.5 & 24.5 & 34.0 \\
ConViT-B & 224 & 87 M & 139 & 17.7 & 82.4 & \textbf{61.9} & 29.0 & 36.9 \\\midrule
\multicolumn{9}{c}{CNNs}\\\midrule
ResNet50 & 224 & 25 M & 736 & 4.1 & 76.8 & 46.1 & 4.2 & 21.5 \\
ResNet101 & 224 & 45 M & 435 & 7.85 & 78.0 & 50.2 & 6.3 & 23.0 \\
ResNet101x3 & 224 & 207 M & 62 & 69.6 & 80.3 & 53.4 & 9.1 & 24.5 \\
ResNet152x4 & 224 & 965 M & 18 & 183.1& 80.4 & 54.5 & 11.6 & 25.8 \\
\midrule
ResNet50-RS & 160 & 36 M & 938 & 4.6& 78.8 & 36.8 & 5.7 & 39.1 \\
ResNet101-RS & 192 & 64 M & 674 & 12.1& 80.3 & 44.1 & 11.8 & 44.8 \\
ResNet152-RS & 256 & 87 M & 304 & 31.2& 81.2 & 49.9 & 23.4 & 45.9 \\
ResNet200-RS & 256 & 93 M & 225 & 40.4& 82.8 & 49.3 & 25.4 & 48.1 \\
ResNet270-RS & 256 & 130 M & 152 & 54.2& 83.8 & 53.6 & 26.6 & 48.7 \\
ResNet350-RS & 288 & 164 M & 89 & 87.5& 84.0 & 53.9 & 34.9 & 49.7 \\
\midrule
\multicolumn{9}{c}{Our transformed CNNs}\\\midrule
T-ResNet50-RS & 224 & 38 M & 447 & 17.6 & 81.0 & 48.0 & 18.7 & 42.9 \\
T-ResNet101-RS & 224 & 66 M & 334 & 25.1 & 82.4 & 52.9 & 27.7 & 47.8 \\
T-ResNet152-RS & 320 & 89 M & 128 & 65.8 & 83.7 & 54.5 & 39.8 & 50.6 \\
T-ResNet200-RS & 320 & 96 M & 105 & 80.2 & 84.0 & 57.0 & 41.2 & 51.1 \\
T-ResNet270-RS & 320 & 133 M & 75 & 107.2 & 84.3 & 58.6 & 43.7 & 51.4 \\
T-ResNet350-RS & 320 & 167 M & 61 & 130.5 & \textbf{84.5}& 59.2 & \textbf{44.8} & \textbf{53.8}\\
\bottomrule
\end{tabular}
\caption{\textbf{Accuracy of our models on various benchmarks.} Throughput is the number of images processed per second on a V100 GPU at batch size 32. The ViT and ResNet results are reported in~\cite{bhojanapalli2021understanding}. For ImageNet-C, we keep a resolution of 224 at test time to avoid distorting the corruptions.
}
\label{tab:robustness} \end{table}
\paragraph{Robustness gains}
Recent work~\cite{bhojanapalli2021understanding,mao2021rethinking} has shown that Transformer-based architectures are more robust to input perturbations than convolutional architectures. We therefore investigate whether our fine-tuning procedure brings robustness gains to the original CNNs. To do so, we consider three benchmarks. First, ImageNet-C~\cite{hendrycks2019robustness}, a dataset containing 15 sets of randomly generated corruptions, grouped into 4 categories: `noise', `blur', `weather', and `digital'. Each corruption type has five levels of severity, resulting in 75 distinct corruptions. Second, ImageNet-A~\cite{hendrycks2021nae}, a dataset containing naturally ``adversarial'' examples from ImageNet. Finally, we evaluate robustness to distribution shifts with ImageNet-R \cite{hendrycks2020many}, a dataset with various stylized ``renditions'' of ImageNet images ranging from paintings to embroidery, which strongly modify the local image statistics.
As shown in Tab.~\ref{tab:robustness} and illustrated in Fig.~\ref{fig:intro}, the T-ResNet-RS substantially outperforms the ResNet-RS on all three benchmarks. For example, our T-ResNet101-RS reaches similar or higher top-1 accuracy than the ResNet200-RS on each task, despite its lower top-1 accuracy on ImageNet-1k. This demonstrates that SA improves robustness more than it improves classification accuracy.
To better understand where the benefits come from, we decompose the improvement of the T-ResNet50-RS over the various corruption severities and categories of ImageNet-C in Fig.~\ref{fig:robustness}. We observe that improvement increases almost linearly with corruption severity. Although performance is higher in all corruption categories, there is a strong variability: the T-CNN shines particularly in tasks where the objects in the image are less sharp due to lack of contrast, bad weather or blurriness. We attribute this to the ability of SA to distinguish shapes in the image, as investigated in Sec.~\ref{sec:dissection}.
\section{Dissecting the Transformed CNNs} \label{sec:dissection}
In this section, we analyze various observables to understand how the representations of a T-ResNet270-RS evolve from those of the ResNet270-RS throughout training.
\begin{figure*}
\caption{\textbf{The later layers effectively escape the convolutional configuration.} \textbf{A:} top-1 accuracy throughout the 50 epochs of fine-tuning of a T-ResNet270-RS. \textbf{B:} size of the receptive field of the various heads $h$ (thin lines), calculated as $\alpha_h^{-1}$ (see Eq.~\ref{eq:local-attention}). Thick lines represent the average over the heads. \textbf{C:} depicts how much attention the various heads $h$ (thin lines) pay to positional information, through the value of $\sigma(\lambda_h)$ (see Eq.~\ref{eq:gating-param}). Thick lines represent the average over the heads.}
\label{fig:dynamics}
\end{figure*}
\begin{figure}
\caption{Input image}
\caption{Attention maps}
\caption{\textbf{GPSA layers combine local and global attention in a complementary way.} We depicted the attention maps of the four GPSA layers of the T-ResNet270-RS, obtained by feeding the image on the left through the convolutional backbone, then selecting a query pixel in the center of the image (red box). For each head $h$, we indicate the value of the gating parameter $\sigma(\lambda_h)$ in red (see Eq.~\ref{eq:gating-param}). In each layer, at least one of the heads learns to perform content-based attention ($\sigma(\lambda_h)=0$).}
\label{fig:attention}
\end{figure}
\paragraph{Unlearn to better relearn} In Fig.~\ref{fig:dynamics}A, we display the train and test accuracy throughout training\footnote{The train accuracy is lower than the test accuracy due to the heavy data augmentation used during fine-tuning.}. The dynamics decompose into two distinct phases: accuracy dips down during the learning rate warmup phase (first 5 epochs of training), then increases back up as the learning rate is decayed.
Interestingly, as shown in SM.~\ref{app:lr}, the depth of the dip depends on the learning rate. For too small learning rates, the dip is small, but the test accuracy increases too slowly after the dip; for too large learning rates, the test accuracy increases rapidly after the dip, but the dip is too deep to be compensated for. This suggests that the T-CNN needs to ``unlearn'' to some extent, a phenomenon reminiscent of the ``catapult'' mechanism of~\citet{lewkowycz2020large} which propels models out of sharp minima to land in wider minima.
\paragraph{Escaping the convolutional representation} In Fig.~\ref{fig:dynamics}B, we show the evolution of the ``attention span'' $1/\alpha_h$ (see Eq.~\ref{eq:local-init}), which reflects the size of the receptive field of attention head $h$. On average (thick lines), this quantity increases in the first three layers, showing that the attention span widens, but variability exists among different attention heads (thin lines): some broaden their receptive field, whereas others contract it.
In Fig.~\ref{fig:dynamics}C, we show the evolution of the gating parameters $\lambda^h$ of Eq.~\ref{eq:gating-param}, which reflect how much attention head $h$ pays to position versus content. Interestingly, the first layer stays strongly convolutional on average, as $\mathbb{E}_h \sigma(\lambda_h)$ rapidly becomes close to one (thick blue line). The other layers strongly escape locality, with most attention heads focusing on content information at the end of fine-tuning.
In Fig.~\ref{fig:attention}, we display the attention maps after fine-tuning. A clear divide appears between the ``convolutional'' attention heads, which remain close to their initialization, and the ``content-based'' attention heads, which learn more complex dependencies. Notice that the attention head initially focusing on the query pixel (head 5) stays convolutional in all layers. Throughout the layers, the shape of the central object is more and more clearly visible, as observed in~\cite{caron2021emerging}. This supports the hypothesis that robustness gains obtained for blurry corruptions (see Fig.~\ref{fig:robustness}) are partly due to the ability of the SA layers to isolate objects from the background.
\section{When should one start learning the self-attention layers?}
Previous sections have demonstrated the benefits of initializing T-CNNs from pre-trained CNNs, a very compelling procedure given the wide availability of pretrained models. But one may ask: how does this compare to training a hybrid model from scratch? More generally, given a computational budget, how long should the SA layers be trained compared to the convolutional backbone?
\paragraph{Transformed CNN versus hybrid models} To answer the first question, we consider a ResNet-50 trained on ImageNet for 400 epochs. We use SGD with momentum 0.9 and a batch size of 1024, warming up the learning rate for 5 epochs before a cosine decay. To achieve a strong baseline, we use the same augmentation scheme as in~\cite{touvron2020training} for the DeiT. Results are reported in Tab.~\ref{tab:tw}. In this modern training setting, the vanilla ResNet50 reaches a solid performance of 79.04\% on ImageNet, well above the 77\% usually reported in the literature.
\begin{table}
\centering
\begin{tabular}{c|c|c|c|c}
\toprule
\textbf{Name} & $t_1$ & $t_2$ & Train time & Top-1 \\\midrule
Vanilla CNN & 400 & 0 & 2.0k mn & 79.04 \\
Vanilla CNN$\uparrow$320 & 450 & 0 & 2.4k mn & \textbf{79.78}\\\midrule
T-CNN & 400 & 50 & 2.3k mn & 79.88 \\
T-CNN$\uparrow$320 & 400 & 50 & 2.7k mn & \textbf{80.84} \\\midrule
Vanilla hybrid & 0 & 400 & 2.8k mn & 79.95 \\%\midrule
T-CNN$^\star$ & 100 & 300 & 2.6k mn & \textbf{80.44} \\
T-CNN$^\star$ & 200 & 200 & 2.4k mn & 80.28 \\
T-CNN$^\star$ & 300 & 100 & 2.2k mn & 79.28 \\
\bottomrule
\end{tabular}
\caption{\textbf{The benefit of late reparametrization.} We report the top-1 accuracy of a ResNet-50 on ImageNet reparameterized at various times $t_1$ during training. $\uparrow$320 stands for fine-tuning at resolution 320. The models with a $\star$ keep the same optimizer after reparametrization, in contrast with the usual T-CNNs.}
\label{tab:tw}
\end{table}
The T-CNN obtained by fine-tuning the ResNet for 50 epochs at the same resolution obtains a top-1 accuracy of 79.88\%, with a 15\% increase in training time, and 80.84\% at resolution 320, with a 35\% increase in training time. In comparison, the hybrid model trained for 400 epochs in the same setting only reaches 79.95\%, in spite of a 40\% increase in training time. Hence, fine-tuning yields better results than training the hybrid model from scratch.
\paragraph{What is the best time to reparametrize?}
We now study a scenario between the two extreme cases: what happens if we reparametrize halfway through training? To investigate this question in a systematic way, we train the ResNet50 for $t_1$ epochs, then reparametrize and resume training for another $t_2$ epochs, ensuring that $t_1+t_2=400$ in all cases. Hence, $t_1=400$ amounts to the vanilla ResNet50, whereas $t_1=0$ corresponds to the hybrid model trained from scratch. To study how final performance depends on $t_1$ in a fair setting, we keep the same optimizer and learning rate after the reparametrization, in contrast with the fine-tuning procedure which uses a fresh optimizer.
Results are presented in Tab.~\ref{tab:tw}. Interestingly, the final performance evolves non-monotonically, reaching a maximum of $80.44$ for $t_1=100$, then decreasing back down as the SA layers have less and less time to learn. This non-monotonicity is remarkably similar to that observed in~\cite{d2019finding}, where reparameterizing a CNN as a FCN in the early stages of training enables the FCN to outperform the CNN. Crucially, this result suggests that reparametrizing during training not only saves time, but also helps the T-CNN find better solutions.
\section*{Discussion}
In this work, we showed that complex building blocks such as self-attention layers need not be trained from start. Instead, one can save in compute time while gaining in performance and robustness by initializing them from pre-trained convolutional layers. At a time where energy savings and robustness are key stakes, we believe this finding is important.
On the practical side, our fine-tuning method offers an interesting new direction for practitioners. One clear limitation of our method is the prohibitive cost of reparametrizing the early stages of CNNs. This cost could however be alleviated by using linear attention methods~\cite{wang2020self}, an important direction for future work. Note also that while our T-CNNs significantly improve the robustness of CNNs, they do not systematically reach the performance of end-to-end Transformers such as the DeiT (for example on ImageNet-C, see Fig.~\ref{fig:intro}). Bridging this gap is an important next step for hybrid models.
On the theoretical side, our results spark several interesting questions. First, why is it better to reparametrize at intermediate times? One natural hypothesis, which will be explored in future work, is that SA layers benefit from capturing meaningful dependencies between the features learnt by the CNN, rather than the random correlations which exist at initialization. Second, why are the representations learnt by the SA layers more robust? By inspecting the attention maps and the most improved corruption categories of ImageNet-C, we hypothesized that SA helps isolate objects from the background, but a more thorough analysis is yet to come. \paragraph{Acknowledgements} We thank Matthew Leavitt, Hugo Touvron, Hervé Jégou and Francisco Massa for helpful discussions. SD and GB acknowledge funding from the French government under management of Agence Nationale de la Recherche as part of the “Investissements d’avenir” program, reference ANR-19-P3IA-0001 (PRAIRIE 3IA Institute).
\printbibliography
\appendix
\numberwithin{equation}{section}
\appendix \setlength\intextsep{10pt}
\section{Changing the learning rate} \label{app:lr}
As shown in Fig.~\ref{fig:dynamics} of the main text, the learning dynamics decompose into two phases: the learning rate warmup phase, where the test accuracy drops, then the learning rate decay phase, where the test accuracy increases again. This could lead one to think that the maximal learning rate is too high, and the dip could be avoided by choosing a lower learning rate. Yet this is not the case, as shown in Fig.~\ref{fig:lr}. Reducing the maximal learning rate indeed reduces the dip, but it also slows down the increase in the second phase of learning. This confirms that the model needs to ``unlearn'' the right amount to find better solutions.
\begin{figure}
\caption{\textbf{The larger the learning rate, the lower the test accuracy dips, but the faster it climbs back up.} We show the dynamics of the ResNet50, fine-tuned for 50 epochs at resolution 224, for three different values of the maximal learning rate.}
\label{fig:lr}
\end{figure}
\section{Changing the test resolution} \label{app:resolution}
One advantage of the GPSA layers introduced by~\cite{d2021convit} is how easily they adapt to different image resolutions. Indeed, the positional embeddings they use are fixed rather than learnt. They simply consist of 3 values for each pair of pixels: their Euclidean distance $\Vert \boldsymbol \delta\Vert$, as well as their coordinate distances $\boldsymbol \delta_1, \boldsymbol \delta_2$ (see Eq.~\ref{eq:local-init}). Our implementation automatically adjusts these embeddings to the input image, allowing us to change the test resolution seamlessly.
In Fig.~\ref{fig:resolution}, we show how the top-1 accuracies of our T-ResNet-RS models compare to those of the ResNet-RS models finetuned at the same resolution but without SA. At test resolution 416, our T-ResNetRS-350 reaches an impressive top-1 accuracy of 84.9\%, beyond those of the best EfficientNets and BotNets~\cite{srinivas2021bottleneck}.
\begin{figure}
\caption{\textbf{Performance at different test-time resolutions, for the finetuned models with and without SA.} The ResNet50-RS and ResNet101-RS models are finetuned at resolution 224, and all other models are finetuned at resolution 320.}
\label{fig:resolution}
\end{figure}
\section{Changing the number of epochs} \label{app:epochs}
In Tab.~\ref{tab:epochs}, we show how the top-1 accuracy of the T-ResNet-RS model changes with the number of fine-tuning epochs. As expected, performance increases significantly as we fine-tune for longer, yet we chose to set a maximum of 50 fine-tuning epochs to keep the computational cost of fine-tuning well below that of the original training.
\begin{table}[h]
\centering
\begin{tabular}{c|c|c}
\toprule
\textbf{Model} & Epochs & Top-1 acc \\\midrule
ResNet50-RS & 0 & 79.91 \\
T-ResNet50-RS & 10 & 80.11 \\
T-ResNet50-RS & 20 & 80.51 \\
T-ResNet50-RS & 50 & \textbf{81.02} \\\midrule
ResNet101-RS & 0 & 81.70 \\
T-ResNet101-RS & 10 & 81.54 \\
T-ResNet101-RS & 20 & 81.90 \\
T-ResNet101-RS & 50 & \textbf{82.39} \\
\bottomrule
\end{tabular}
\caption{\textbf{Longer fine-tuning increases final performance.} We report the top-1 accuracies of our models on ImageNet-1k at resolution 224.}
\label{tab:epochs} \end{table}
\section{Changing the architecture} \label{app:resnetd}
Our framework, which builds on the timm package, makes changing the original CNN architecture very easy. We applied our fine-tuning procedure to the ResNet-D models~\cite{he2019bag} with the exact same hyperparameters, and observed substantial performance gains, similar to the ones obtained for ResNet-RS, see Tab.~\ref{tab:finetune-resnetd}. This suggests the wide applicability of our method.
\begin{table}[h]
\centering
\begin{tabular}{c|c|c|c|c|c}
\toprule
\textbf{Model} & Original res. & Original acc. & Fine-tune res. & Fine-tune acc. & Gain\\
\midrule
T-ResNet50-D & 224 & 80.6 & 320 & 81.6 & +1.0\\
T-ResNet101-D & 320 & 82.3 & 384 & 83.1 & +0.8\\
T-ResNet152-D & 320 & 83.1 & 384 & 83.8 & +0.7\\
T-ResNet200-D & 320 & 83.2 & 384 & \textbf{83.9} & +0.7\\
\midrule
T-ResNet50-RS & 160 & 78.8 & 224 & 81.0 & +2.8\\
T-ResNet101-RS & 192 & 81.2 & 224 & 82.4 & +1.2\\
T-ResNet152-RS & 256 & 83.0 & 320 & 83.7 & +0.7\\
T-ResNet200-RS & 256 & 83.4 & 320 & \textbf{84.0} & +0.6\\
\bottomrule
\end{tabular}
\caption{\textbf{Comparing the performance gains of the ResNet-RS and ResNet-D architectures.} Top-1 accuracy is measured on ImageNet-1k validation set. The pre-trained models are all taken from the timm library~\cite{rw2019timm}.
}
\label{tab:finetune-resnetd} \end{table}
\section{More attention maps}
\begin{figure}
\caption{Attention maps}
\caption{Attention maps}
\caption{\textbf{GPSA layers combine local and global attention in a complementary way.} We depicted the attention maps of the four GPSA layers of the T-ResNet270-RS, obtained by feeding the image on the left through the convolutional backbone, then selecting a query pixel in the center of the image (red box). For each head $h$, we indicate the value of the gating parameter $\sigma(\lambda_h)$ in red (see Eq.~\ref{eq:gating-param}). In each layer, at least one of the heads learns to perform content-based attention ($\sigma(\lambda_h)=0$).}
\label{fig:more-attention}
\end{figure}
\end{document} |
\begin{document}
\title{Faster and more accurate computation of the $\Hinf$ norm via optimization} \begin{abstract} In this paper, we propose an improved method for computing the ${\hinfsym_\infty}$ norm of linear dynamical systems that results in a code that is often several times faster than existing methods. By using standard optimization tools to rebalance the work load of the standard algorithm due to Boyd, Bala\-krish\-nan, Bru\-insma, and Stein\-buch, we aim to minimize the number of expensive eigenvalue computations that must be performed. Unlike the standard algorithm, our modified approach can also calculate the ${\hinfsym_\infty}$ norm to full precision with little extra work, and also offers more opportunity to further accelerate its performance via parallelization. Finally, we demonstrate that the local optimization we have employed to speed up the standard globally-convergent algorithm can also be an effective strategy on its own for approximating the ${\hinfsym_\infty}$ norm of large-scale systems. \end{abstract}
\section{Introduction} \label{sec:intro}
Consider the continuous-time linear dynamical system \begin{subequations} \label{eq:lti_cont} \begin{align} E\dot{x} &= Ax + Bu \\ y &= Cx + Du, \end{align} \end{subequations} where $A \in \Cmn{n}{n}$, $B \in \Cmn{n}{m}$, $C \in \Cmn{p}{n}$, $D \in \Cmn{p}{m}$, and $E \in \Cmn{n}{n}$. The system defined by \eqref{eq:lti_cont} arises in many engineering applications and as a consequence, there has been a strong motivation for fast methods to compute properties that measure the sensitivity of the system or its robustness to noise. Specifically, a quantity of great interest is the ${\hinfsym_\infty}$ norm, which is defined as \begin{equation}
\label{eq:hinf_cont}
\|G\|_{{\hinfsym_\infty}} \coloneqq \sup_{\omega \in \mathbb R} \| G({\bf i} \omega)\|_2, \end{equation} where \begin{equation}
G(\lambda) = C(\lambda E - A)^{-1} B + D \end{equation} is the associated \emph{transfer function} of the system given by \eqref{eq:lti_cont}. The ${\hinfsym_\infty}$ norm measures the maximum sensitivity of the system; in other words, the higher the value of the ${\hinfsym_\infty}$ norm, the less robust the system is, an intuitive interpretation considering that the ${\hinfsym_\infty}$ norm is in fact the reciprocal of the \emph{complex stability radius}, which itself is a generalization of the \emph{distance to instability} \cite[Section 5.3]{HinP05}. The ${\hinfsym_\infty}$ norm is also a key metric for assessing the quality of reduced-order models that attempt to capture/mimic the dynamical behavior of large-scale systems, see, e.g., \cite{morAnt05,morBenCOW17}. Before continuing, as various matrix pencils of the form $\lambda B - A$ will feature frequently in this work, we use notation $(A,B)$ to abbreviate them.
When $E = I$, the ${\hinfsym_\infty}$ norm is finite as long as $A$ is stable, whereas an unstable system would be considered infinitely sensitive. If $E\ne I$, then \eqref{eq:lti_cont} is called a \emph{descriptor system}. Assuming that $E$ is singular but $(A,E)$ is regular and at most index 1, then \eqref{eq:hinf_cont} still yields a finite value, provided that all the controllable and observable eigenvalues of $(A,E)$ are finite and in the open left half plane, where for $\lambda$ an eigenvalue of $(A,E)$ with right and left eigenvectors $x$ and $y$, $\lambda$ is considered \emph{uncontrollable} if $B^*y = 0$ and \emph{unobservable} if $Cx=0$. However, the focus of this paper is not about detecting when \eqref{eq:hinf_cont} is infinite or finite, but to introduce an improved method for computing the ${\hinfsym_\infty}$ norm when it is finite. Thus, for conciseness in presenting our improved method, we will assume in this paper that any system provided to an algorithm has a finite ${\hinfsym_\infty}$ norm, as checking whether it is infinite can be considered a preprocessing step.\footnote{We note that while our proposed improvements are also directly applicable for computing the ${\linfsym_\infty}$ norm, we will restrict the discussion here to just the ${\hinfsym_\infty}$ norm for brevity.}
While the first algorithms \cite{BoyBK89, BoyB90,BruS90} for computing the ${\hinfsym_\infty}$ norm date back nearly 30 years, there has been continued interest in improved methods, particularly as the state-of-the-art methods remain quite expensive with respect to the dimension $n$, meaning that computing the ${\hinfsym_\infty}$ norm is generally only possible for rather small-dimensional systems. In 1998, \cite{GenVV98} proposed an interpolation refinement to the existing algorithm of \cite{BoyB90,BruS90} to accelerate its rate of convergence. In the following year, for the special case of the distance to instability where $B=C=E=I$ and $D=0$, \cite{HeW99} used an inverse iteration to successively obtain increasingly better locally optimal approximations as a way of reducing the number of expensive Hamiltonian eigenvalue decompositions; as we will discuss in Section~\ref{sec:hybrid_opt}, this method shares some similarity with the approach we propose here. More recently, in 2011, \cite{BelP11} presented an entirely different approach to computing the ${\hinfsym_\infty}$ norm, by finding isolated common zeros of two certain bivariate polynomials. While they showed that their method was much faster than an implementation of \cite{BoyB90,BruS90} on two SISO examples (single-input, single-output, that is, $m=p=1$), more comprehensive benchmarking does not appear to have been done yet. Shortly thereafter, \cite{BenSV12} extended the now standard algorithm of \cite{BoyB90,BruS90} to descriptor systems. There also has been a very recent surge of interest in efficient ${\hinfsym_\infty}$ norm approximation methods for large-scale systems. These methods fall into two broad categories: those that are applicable for descriptor systems with possibly singular $E$ matrices but require solving linear systems \cite{BenV14,FreSV14,AliBMetal17} and those that do not solve linear systems but require that $E=I$ or that $E$ is at least cheaply inverted \cite{GugGO13,MitO16}.
Our contribution in this paper is twofold. First, we improve upon the exact algorithms of \cite{BoyB90,BruS90,GenVV98} to not only compute the ${\hinfsym_\infty}$ norm significantly faster but also obtain its value to machine precision with negligible additional expense (a notable difference compared to these earlier methods). This is accomplished by incorporating local optimization techniques within these algorithms, a change that also makes our new approach more amenable to additional acceleration via parallelization. Second, we show that standard local optimization can even be used on its own to efficiently obtain locally optimal approximations to the ${\hinfsym_\infty}$ norm of large-scale systems, a simple and direct approach that has surprisingly not yet been considered and is even embarrassingly parallelizable.
The paper is organized as follows. In Sections~\ref{sec:bbbs} and \ref{sec:costs}, we describe the standard algorithms for computing the ${\hinfsym_\infty}$ norm and then give an overview of their computational costs. In Section~\ref{sec:hybrid_opt}, we introduce our new approach to computing the ${\hinfsym_\infty}$ norm via leveraging local optimization techniques. Section~\ref{sec:discrete} describes how the results and algorithms are adapted for discrete-time problems. We present numerical results in Section~\ref{sec:numerical} for both continuous- and discrete-time problems. Section~\ref{sec:hinf_approx} provides the additional experiments demonstrating how local optimization can also be a viable strategy for approximating the ${\hinfsym_\infty}$ norm of large-scale systems. Finally, in Section~\ref{sec:parallel} we discuss how significant speedups can be obtained for some problems when using parallel processing with our new approach, in contrast to the standard algorithms, which benefit very little from multiple cores. Concluding remarks are given in Section~\ref{sec:wrapup}.
\section{The standard algorithm for computing the ${\hinfsym_\infty}$ norm} \label{sec:bbbs} We begin by presenting a key theorem relating the singular values of the transfer function to purely imaginary eigenvalues of an associated matrix pencil. For the case of simple ODEs, where $B=C=E=I$ and $D=0$, the result goes back to \cite[Theorem 1]{Bye88}, and was first extended to linear dynamical systems with input and output with $E=I$ in \cite[Theorem 1]{BoyBK89}, and then most recently generalized to systems where $E\ne I$ in \cite[Theorem 1]{BenSV12}. We state the theorem without the proof, since it is readily available in \cite{BenSV12}.
\begin{theo} \label{thm:eigsing_cont} Let $\lambda E - A$ be regular with no finite eigenvalues on the imaginary axis, $\gamma > 0$ not a singular value of $D$, and $\omega \in \mathbb R$. Consider the matrix pencil $(\Mc,\Nc)$, where \begin{equation}
\label{eq:MNpencil_cont}
\mathcal{M}_\gamma \coloneqq \begin{bmatrix} A - BR^{-1}D^*C & -\gamma BR^{-1}B^* \\
\gamma C^*S^{-1}C & -(A - BR^{-1}D^*C)^* \end{bmatrix}
~\text{and}~
\mathcal{N} \coloneqq \begin{bmatrix} E & 0\\ 0 & E^*\end{bmatrix}
\end{equation} and $R = D^*D - \gamma^2 I$ and $S = DD^* - \gamma^2 I$. Then ${\bf i} \omega$ is an eigenvalue of matrix pencil $(\Mc,\Nc)$ if and only if $\gamma$ is a singular value of $G({\bf i} \omega)$. \end{theo}
Theorem~\ref{thm:eigsing_cont} immediately leads to an algorithm for computing the ${\hinfsym_\infty}$ norm based on computing the imaginary eigenvalues, if any, of the associated matrix pencil \eqref{eq:MNpencil_cont}. For brevity in this section, we assume that $\max \|G({\bf i} \omega)\|_2$ is not attained at $\omega = \infty$, in which case the ${\hinfsym_\infty}$ norm would be $\|D\|_2$.
Evaluating the norm of the transfer function for any finite frequency along the imaginary axis immediately gives a lower bound to the ${\hinfsym_\infty}$ norm while an upper bound can be obtained by successively increasing $\gamma$ until the matrix pencil given by \eqref{eq:MNpencil_cont} no longer has any purely imaginary eigenvalues. Then, it is straightforward to compute the ${\hinfsym_\infty}$ norm using bisection, as first proposed in \cite{BoyBK89}, which was inspired by the breakthrough result of \cite{Bye88} for computing the \emph{distance to instability} (i.e., the reciprocal of the ${\hinfsym_\infty}$ norm for the special case of $B=C=E=I$ and $D=0$).
As Theorem~\ref{thm:eigsing_cont} provides a way to calculate all the frequencies where $\|G({\bf i} \omega)\|_2 = \gamma$, it was shortly thereafter proposed in \cite{BoyB90, BruS90} that instead of computing an upper bound and then using bisection, the initial lower bound could be successively increased in a monotonic fashion to the value of the ${\hinfsym_\infty}$ norm. For convenience, it will be helpful to establish the following notation for the transfer function and its largest singular value, both as functions of the frequency $\omega$: \begin{align}
\label{eq:tf_cont}
G_\mathrm{c}(\omega) {}& \coloneqq G({\bf i} \omega) \\
\label{eq:ntf_cont}
g_\mathrm{c}(\omega) {}& \coloneqq \| G({\bf i} \omega) \|_2 = \| G_\mathrm{c}(\omega) \|_2. \end{align} Let $\{ \omega_1,\ldots,\omega_l\}$ be the set of imaginary parts of the purely imaginary eigenvalues of \eqref{eq:MNpencil_cont} for the initial value $\gamma$, sorted in increasing order. Considering the intervals $I_k = [\omega_k,\omega_{k+1}]$, \cite{BruS90} proposed increasing $\gamma$ via: \begin{equation}
\label{eq:gamma_mp}
\gamma_\mathrm{mp} = \max g_\mathrm{c}(\hat\omega_k)
\qquad \text{where} \qquad
\hat\omega_k ~\text{are the midpoints of the intervals}~ I_k. \end{equation} Simultaneously and independently, a similar algorithm was proposed by \cite{BoyB90}, with the additional results that (a) it was possible to calculate which intervals $I_k$ satisfied $g_\mathrm{c}(\omega) \ge \gamma$ for all $\omega \in I_k$, thus reducing the number of evaluations of $g_\mathrm{c}(\omega)$ needed at every iteration, and (b) this midpoint scheme actually had a local quadratic rate of convergence, greatly improving upon the linear rate of convergence of the earlier, bisection-based method. This midpoint-based method, which we refer to as the BBBS algorithm for its authors Boyd, Bala\-krish\-nan, Bru\-insma, and Stein\-buch, is now considered the standard algorithm for computing the ${\hinfsym_\infty}$ norm and it is the algorithm implemented in the {MATLAB}\ Robust Control Toolbox, e.g. routine \texttt{hinfnorm}. Algorithm~\ref{alg:bbbs} provides a high-level pseudocode description for the standard BBBS algorithm while Figure~\ref{fig:bbbs_std} provides a corresponding pictorial description of how the method works.
\begin{algfloat} \begin{algorithm}[H] \floatname{algorithm}{Algorithm} \caption{The Standard BBBS Algorithm} \label{alg:bbbs} \begin{algorithmic}[1]
\REQUIRE{
$A \in \Cmn{n}{n}$, $B \in \Cmn{n}{m}$, $C \in \Cmn{p}{n}$, $D \in \Cmn{p}{m}$,
$E \in \Cmn{n}{n}$ and $\omega_0 \in \mathbb R$.
}
\ENSURE{
$\gamma = \| G \|_{\hinfsym_\infty}$ and $\omega$ such that
$\gamma = g_\mathrm{c}(\omega)$.
\\ \quad
}
\STATE $\gamma = g_\mathrm{c}(\omega_0)$
\WHILE {not converged}
\STATE \COMMENT{Compute the intervals that lie under $g_\mathrm{c}(\omega)$ using
eigenvalues of the pencil:}
\STATE Compute $\Lambda_\mathrm{I} =
\{ \Im \lambda : \lambda \in \Lambda(\Mc,\Nc) ~\text{and}~ \Re \lambda = 0\}$.
\STATE Index and sort $\Lambda_\mathrm{I} = \{\omega_1,...,\omega_l\}$ s.t.
$\omega_j \le \omega_{j+1}$.
\STATE Form all intervals $I_k = [\omega_k, \omega_{k+1}]$ s.t.
each interval at height $\gamma$ is below $g_\mathrm{c}(\omega)$.
\label{algline:bbbs_ints}
\STATE \COMMENT{Compute candidate frequencies of the level-set intervals $I_k$:}
\STATE Compute midpoints $\hat\omega_k = 0.5(\omega_k +\omega_{k+1})$ for each interval $I_k$.
\label{algline:bbbs_points}
\STATE \COMMENT{Update to the highest gain evaluated at these candidate frequencies:}
\STATE $\omega = \argmax g_\mathrm{c}(\hat\omega_k)$.
\STATE $\gamma = g_\mathrm{c}(\omega)$.
\ENDWHILE \end{algorithmic} \end{algorithm} \algnote{ The quartically converging variant proposed by \cite{GenVV98} replaces the midpoints of $I_k$ with the maximizing frequencies of Hermite cubic interpolants, which are uniquely determined by interpolating the values of $g_\mathrm{c}(\omega)$ and $g_\mathrm{c}^\prime(\omega)$ at both endpoints of each interval $I_k$. } \end{algfloat}
\begin{figure}
\caption{The blue curves show the value of $g_\mathrm{c}(\omega) = \|G({\bf i} \omega)\|_2$ while the red circles mark the frequencies $\{\omega_1,\ldots,\omega_l\}$ where $g_\mathrm{c}(\omega) = \gamma = 1.4$, computed by taking the imaginary parts of the purely imaginary eigenvalues of \eqref{eq:MNpencil_cont} on a sample problem. The left plot shows the midpoint scheme of the standard BBBS algorithm to obtain an increased value for $\gamma$, depicted by the dashed horizontal line. The right plot shows the cubic interpolation refinement of \cite{GenVV98} for the same problem and initial value of $\gamma$. The dashed black curves depict the cubic Hermite interpolants for each interval while the dotted vertical lines show their respective maximizing frequencies. As can be seen, the cubic interpolation scheme results in a larger increase in $\gamma$ (again represented by the dashed horizontal line) compared to the standard BBBS method on this iterate for this problem. }
\label{fig:bbbs_std}
\label{fig:bbbs_interp}
\label{fig:bbbs_plots}
\end{figure}
In \cite{GenVV98}, a refinement to the BBBS algorithm was proposed which increased its local quadratic rate of convergence to quartic. This was done by evaluating $g_\mathrm{c}(\omega)$ at the maximizing frequencies of the unique cubic Hermite interpolants for each level-set interval, instead of at the midpoints. That is, for each interval $I_k = [\omega_k,\omega_{k+1}]$, the unique interpolant $c_k(\omega) = c_3\omega^3 + c_2\omega^2 + c_1\omega + c_0$ is constructed so that \begin{equation} \label{eq:interp_cont} \begin{aligned}
c_k(\omega_k) &= g_\mathrm{c}(\omega_k) \\
c_k(\omega_{k+1}) &= g_\mathrm{c}(\omega_{k+1}) \end{aligned} \qquad \text{and} \qquad \begin{aligned}
c^\prime_k(\omega_k) &= g_\mathrm{c}^\prime(\omega_k) \\
c^\prime_k(\omega_{k+1}) &= g_\mathrm{c}^\prime(\omega_{k+1}). \end{aligned} \end{equation} Then, $\gamma$ is updated via \begin{equation}
\label{eq:gamma_cubic}
\gamma_\mathrm{cubic} = \max g_\mathrm{c}(\hat\omega_k)
\qquad \text{where} \qquad
\hat\omega_k = \argmax_{\omega \in I_k} c_k(\omega), \end{equation} that is, $\hat\omega_k$ is now the maximizing value of interpolant $c_k(\omega)$ on its interval $I_k$, which of course can be cheaply and explicitly computed. In \cite{GenVV98}, the single numerical example shows the concrete benefit of this interpolation scheme, where the standard BBBS algorithm required six eigenvalue decompositions of \eqref{eq:MNpencil_cont} to converge, while their new method only required four. As only the selection of the $\omega_k$ values is different, the pseudocode for this improved version of the BBBS algorithm remains largely the same, as mentioned in the note of Algorithm~\ref{alg:bbbs}. Figure~\ref{fig:bbbs_interp} provides a corresponding pictorial description of the cubic-interpolant-based refinement.
As it turns out, computing the derivatives in \eqref{eq:interp_cont} for the Hermite interpolations of each interval can also be done with little extra work. Let $u(\omega)$ and $v(\omega)$ be the associated left and right singular vectors corresponding to $g_\mathrm{c}(\omega)$, recalling that $g_\mathrm{c}(\omega)$ is the largest singular value of $G_\mathrm{c}(\omega)$, and assume that $g_\mathrm{c}(\hat\omega)$ is a simple singular value for some value $\hat\omega \in \mathbb R$. By standard perturbation theory (exploiting the equivalence of singular values of a matrix $A$ and eigenvalues of $\left[ \begin{smallmatrix} 0 & A \\ A^* & 0\end{smallmatrix} \right]$ and applying \cite[Theorem 5]{Lan64}), it then follows that \begin{equation}
\label{eq:ntfcprime_cont}
g_\mathrm{c}^\prime(\omega) \Big\rvert_{\omega=\hat\omega} = \Real{ u(\hat\omega)^* G_\mathrm{c}^\prime(\hat\omega) v(\hat\omega) }, \end{equation} where, by standard matrix differentiation rules with respect to parameter $\omega$, \begin{equation}
G_\mathrm{c}^\prime(\omega) = - {\bf i} C \left({\bf i} \omega E - A \right)^{-1} E \left({\bf i} \omega E - A \right)^{-1} B. \end{equation} As shown in \cite{SreVT95}, it is actually fairly cheap to compute \eqref{eq:ntfcprime_cont} if the eigenvectors corresponding to the purely imaginary eigenvalues of \eqref{eq:MNpencil_cont} have also been computed, as there is a correspondence between these eigenvectors and the associated singular vectors for $\gamma$. For $\gamma = g_\mathrm{c}(\hat\omega)$, if $\left[ \begin{smallmatrix} q \\ s \end{smallmatrix} \right]$ is an eigenvector of \eqref{eq:MNpencil_cont} for imaginary eigenvalue ${\bf i} \hat\omega$, then the equivalences \begin{equation}
\label{eq:qs_vecs}
q = \left( {\bf i} \hat\omega E - A \right)^{-1}Bv(\hat\omega)
\quad \text{and} \quad
s = \left( {\bf i} \hat\omega E - A \right)^{-*}C^*u(\hat\omega) \end{equation} both hold, where $u(\hat\omega)$ and $v(\hat\omega)$ are left and right singular vectors associated with singular value $g_\mathrm{c}(\hat\omega)$. (To see why these equivalences hold, we refer the reader to the proof of Theorem~\ref{thm:eigsing_disc} for the discrete-time analog result.) Thus, \eqref{eq:ntfcprime_cont} may be rewritten as follows: \begin{align}
\label{eq:ntfcprime_defn_cont}
g_\mathrm{c}^\prime(\omega)\Big\rvert_{\omega=\hat\omega} &=
\Real{ u(\hat\omega)^* G_\mathrm{c}^\prime(\hat\omega) v(\hat\omega)} \\
\label{eq:ntfcprime_direct_cont}
&= -\Real{ u(\hat\omega)^* {\bf i} C \left({\bf i} \hat\omega E - A \right)^{-1} E \left({\bf i} \hat \omega E - A \right)^{-1} B v(\hat\omega)} \\
\label{eq:ntfcprime_eig_cont}
&= -\Real{{\bf i} s^* E q}, \end{align} and it is thus clear that \eqref{eq:ntfcprime_cont} is cheaply computable for all the endpoints of the intervals $I_k$, provided that the eigenvalue decomposition of \eqref{eq:MNpencil_cont} has already been computed.
\section{The computational costs involved in the BBBS algorithm} \label{sec:costs} The main drawback of the BBBS algorithm is its algorithmic complexity, which is $\mathcal{O}(n^3)$ work per iteration. This not only limits the tractability of computing the ${\hinfsym_\infty}$ norm to rather low-dimensional (in $n$) systems but can also make computing the ${\hinfsym_\infty}$ norm to full precision an expensive proposition for even moderately-sized systems. In fact, the default tolerance for \texttt{hinfnorm} in {MATLAB}\ is set quite large, 0.01, presumably to keep its runtime as fast as possible, at the expense of sacrificing accuracy. In Table~\ref{table:hinfnorm_tol}, we report the relative error of computing the ${\hinfsym_\infty}$ norm when using \texttt{hinfnorm}'s default tolerance of $0.01$ compared to $10^{-14}$, along with the respective runtimes for several test problems, observing that computing the ${\hinfsym_\infty}$ norm to near full precision can often take between two and three times longer. While computing only a handful of the most significant digits of the ${\hinfsym_\infty}$ norm may be sufficient for some applications, this is certainly not true in general. Indeed, the source code for HIFOO \cite{BurHLetal06}, which designs ${\hinfsym_\infty}$ norm fixed-order optimizing controllers for a given open-loop system via nonsmooth optimization, specifically contains the comment regarding \texttt{hinfnorm}: ``default is .01, which is too crude''. In HIFOO, the ${\hinfsym_\infty}$ norm is minimized by updating the controller variables at every iteration but the optimization method assumes that the objective function is continuous; if the ${\hinfsym_\infty}$ norm is not calculated sufficiently accurately, then it may appear to be discontinuous, which can cause the underlying optimization method to break down.
Thus there is motivation to not only improve the overall runtime of computing the ${\hinfsym_\infty}$ norm for large tolerances, but also to make the computation as fast as possible when computing the ${\hinfsym_\infty}$ norm to full precision.
\begin{table} \centering
\begin{tabular}{ l | rrr | c | c | SS } \toprule \multicolumn{8}{c}{\texttt{$h$ = hinfnorm($\cdot$,1e-14)} versus \texttt{$\hat h$ = hinfnorm($\cdot$,0.01)}}\\ \midrule \multicolumn{1}{c}{} & \multicolumn{3}{c}{Dimensions} & \multicolumn{1}{c}{} & \multicolumn{1}{c}{Relative Error} & \multicolumn{2}{c}{Wall-clock time (sec.)}\\ \cmidrule(lr){2-4} \cmidrule(lr){6-6} \cmidrule(lr){7-8} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{$n$} &
\multicolumn{1}{c}{$m$} &
\multicolumn{1}{c}{$p$} &
\multicolumn{1}{c}{$E=I$} &
\multicolumn{1}{c}{$ \frac{\hat h - h}{h}$} &
\multicolumn{1}{c}{\texttt{tol=1e-14}} &
\multicolumn{1}{c}{\texttt{tol=0.01}} \\ \midrule \texttt{CSE2} & 63 & 1 & 32 & Y & $-2.47 \times 10^{-4}$ & 0.137 & 0.022 \\ \texttt{CM3} & 123 & 1 & 3 & Y & $-2.75 \times 10^{-3}$ & 0.148 & 0.049 \\ \texttt{CM4} & 243 & 1 & 3 & Y & $-4.70 \times 10^{-3}$ & 1.645 & 0.695 \\ \texttt{ISS} & 270 & 3 & 3 & Y & $-1.04 \times 10^{-6}$ & 0.765 & 0.391 \\ \texttt{CBM} & 351 & 1 & 2 & Y & $-4.20 \times 10^{-5}$ & 3.165 & 1.532 \\ \texttt{randn 1} & 500 & 300 & 300 & Y & 0 & 21.084 & 30.049 \\ \texttt{randn 2} & 600 & 150 & 150 & N & $-6.10 \times 10^{-8}$ & 31.728 & 16.199 \\ \texttt{FOM} & 1006 & 1 & 1 & Y & $-1.83 \times 10^{-5}$ & 128.397 & 36.529 \\ \midrule \texttt{LAHd} & 58 & 1 & 3 & Y & $-7.36 \times 10^{-3}$ & 0.031 & 0.015 \\ \texttt{BDT2d} & 92 & 2 & 4 & Y & $-7.67 \times 10^{-4}$ & 0.070 & 0.031 \\ \texttt{EB6d} & 170 & 2 & 2 & Y & $-5.47 \times 10^{-7}$ & 0.192 & 0.122 \\ \texttt{ISS1d} & 280 & 1 & 273 & Y & $-1.53 \times 10^{-3}$ & 16.495 & 3.930 \\ \texttt{CBMd} & 358 & 1 & 2 & Y & $-2.45 \times 10^{-6}$ & 1.411 & 0.773 \\ \texttt{CM5d} & 490 & 1 & 3 & Y & $-7.38 \times 10^{-3}$ & 10.802 & 2.966 \\ \bottomrule \end{tabular} \caption{For various problems, the relative error of computing the ${\hinfsym_\infty}$ norm using \texttt{hinfnorm} with its quite loose default tolerance is shown. The first eight are continuous time problems while the last six, ending in \texttt{d}, are discrete time. As can be seen, using \texttt{hinfnorm} to compute the ${\hinfsym_\infty}$ norm to near machine accuracy can often take between two to four times longer, and that this penalty is not necessarily related to dimension: for example, the running times are increased by factors of 3.02 and 3.51 for \texttt{CM3} and \texttt{FOM}, respectively, despite that \texttt{FOM} is nearly ten times larger in dimension. } \label{table:hinfnorm_tol} \end{table}
The dominant cost of the BBBS algorithm is computing the eigenvalues of \eqref{eq:MNpencil_cont} at every iteration. Even though the method converges quadratically, and quartically when using the cubic interpolation refinement, the eigenvalues of $(\Mc,\Nc)$ will still generally be computed for multiple values of $\gamma$ before convergence, for either variant of the algorithm. Furthermore, pencil $(\Mc,\Nc)$ is $2n \times 2n$, meaning that the ${\cal O}(n^3)$ work per iteration also contains a significantly larger constant factor; computing the eigenvalues of a $2n \times 2n$ problem typically takes at least eight times longer than an $n \times n$ one. If cubic interpolation is used, computing the derivatives \eqref{eq:ntfcprime_cont} via the eigenvectors of $(\Mc,\Nc)$, as proposed by \cite{GenVV98} using the equivalences in \eqref{eq:qs_vecs} and \eqref{eq:ntfcprime_eig_cont}, can sometimes be quite expensive as well. If on a particular iteration, the number of purely imaginary eigenvalues of $(\Mc,\Nc)$ is close to $n$, say $\hat n$, then assuming 64-bit computation, an additional $4 \hat{n}^2$ doubles of memory would be required to store these eigenvectors.\footnote{ Although computing eigenvectors with \texttt{eig} in {MATLAB}\ is currently an all or none affair, LAPACK does provide the user the option to only compute certain eigenvectors, so that all $2n$ eigenvectors would not always need to be computed.} Finally, computing the purely imaginary eigenvalues of $(\Mc,\Nc)$ using the regular QZ algorithm can be ill advised; in practice, rounding error in the real parts of the eigenvalues can make it difficult to detect which of the computed eigenvalues are supposed to be the purely imaginary ones and which are merely just close to the imaginary axis. Indeed, purely imaginary eigenvalues can easily be perturbed off of the imaginary axis when using standard QZ; \cite[Figure 4]{BenSV16} illustrates this issue particularly well.
Failure to properly identify the purely imaginary eigenvalues can cause the BBBS algorithm to return incorrect results. As such, it is instead recommended \cite[Section II.D]{BenSV12} to use the specialized Hamiltonian-structure-preserving eigensolvers of \cite{BenBMetal02,BenSV16} to avoid this problem. However, doing so can be even more expensive as it requires computing the eigenvalues of a related matrix pencil that is even larger: $(2n+m+p)\times(2n+m+p)$.
On the other hand, computing \eqref{eq:ntf_cont}, the norm of the transfer function, is typically rather inexpensive, at least relative to computing the imaginary eigenvalues of the matrix pencil \eqref{eq:MNpencil_cont}; Table~\ref{table:ntf_vs_pencil} presents data on how much faster computing the singular value decomposition of $G({\bf i} \omega)$ can be compared to computing the eigenvalues of $(\Mc,\Nc)$ (using regular QZ), using randomly-generated systems composed of dense matrices of various dimensions. In the first row of Table~\ref{table:ntf_vs_pencil}, we see that computing the eigenvalues of \eqref{eq:MNpencil_cont} for tiny systems ($n=m=p=20$) can take up to two-and-a-half times longer than computing the SVD of $G({\bf i} \omega)$ on modern hardware and this disparity quickly grows larger as the dimensions are all increased (up to 36.8 times faster for $n=m=p=400$). Furthermore, for moderately-sized systems where $m,p \ll n$ (the typical case in practice), the performance gap dramatically widens to up to 119 times faster to compute the SVD of $G({\bf i} \omega)$ versus the eigenvalues of $(\Mc,\Nc)$ (the last row of Table~\ref{table:ntf_vs_pencil}). Of course, this disparity in runtime speeds is not surprising. Computing the eigenvalues of \eqref{eq:MNpencil_cont} involves working with a $2n \times 2n$ (or larger when using structure-preserving eigensolvers) matrix pencil while the main costs to evaluate the norm of the transfer function at a particular frequency involve first solving a linear system of dimension $n$ to compute either the $({\bf i} \omega E - A)^{-1}B$ or $C({\bf i} \omega E - A)^{-1}$ term in $G({\bf i} \omega)$ and then computing the maximum singular value of $G({\bf i} \omega)$, which is $p \times m$. If $\max(m,p)$ is small, the cost to compute the largest singular value is negligible and even if $\max(m,p)$ is not small, the largest singular value can still typically be computed easily and efficiently using sparse methods.
Solving the $n$-dimensional linear system is typically going to be much cheaper than computing the eigenvalues of the $2n \times 2n$ pencil, and more so if $A$ and $E$ are not dense and $({\bf i} \omega E - A)$ permits a fast (sparse) LU decomposition.
\begin{table} \setlength{\tabcolsep}{8pt} \centering
\begin{tabular}{ccc|cc} \toprule
\multicolumn{5}{c}{Computing $\|G({\bf i} \omega)\|_2$ versus \texttt{eig}($\Mc,\Nc$)}\\ \midrule \multicolumn{3}{c}{} & \multicolumn{2}{c}{Times faster}\\ \cmidrule(lr){4-5} $n$ & $m$ & $p$ & min & max \\ \midrule 20 & 20 & 20 & 0.71 & 2.47 \\ 100 & 100 & 100 & 6.34 & 10.2 \\ 400 & 400 & 400 & 19.2 & 36.8 \\ 400 & 10 & 10 & 78.5 & 119.0\\ \bottomrule \end{tabular} \caption{ For each set of dimensions (given in the leftmost three columns),
five different systems were randomly generated and the running times to compute $\|G({\bf i} \omega)\|_2$ and \texttt{eig}($\Mc,\Nc$) were recorded to form the ratios of these five pairs of values.
Ratios greater than one indicate that it is faster to compute $\|G({\bf i} \omega)\|_2$ than \texttt{eig}($\Mc,\Nc$), and by how much, while ratios less than one indicate the opposite. The rightmost two columns of the table give the smallest and largest of the five ratios observed per set of dimensions. } \label{table:ntf_vs_pencil} \end{table}
\section{The improved algorithm} \label{sec:hybrid_opt} Recall that computing the ${\hinfsym_\infty}$ norm is done by maximizing $g_\mathrm{c}(\omega)$ over $\omega \in \mathbb R$ but that the BBBS algorithm (and the cubic interpolation refinement) actually converges to a global maximum of $g_\mathrm{c}(\omega)$ by iteratively computing the eigenvalues of the large matrix pencil $(\Mc,\Nc)$ for successively larger values of $\gamma$. However, we could alternatively consider a more direct approach of finding maximizers of $g_\mathrm{c}(\omega)$, which as discussed above, is a much cheaper function to evaluate numerically. Computing such maximizers could allow larger increases in $\gamma$ to be obtained on each iteration, compared to just evaluating $g_\mathrm{c}(\omega)$ at the midpoints or maximizers of the cubic interpolants. This in turn should reduce the number of times that the eigenvalues of $(\Mc,\Nc)$ must be computed and thus speed up the overall running time of the algorithm; given the performance data in Table~\ref{table:ntf_vs_pencil}, the additional cost of any evaluations of $g_\mathrm{c}(\omega)$ needed to find maximizers seems like it should be more than offset by fewer eigenvalue decompositions of
$(\Mc,\Nc)$. Of course, computing the eigenvalues of $(\Mc,\Nc)$ at each iteration
cannot be eliminated completely, as it is still necessary for asserting
whether or not any of the maximizers was a global maximizer
(in which case, the ${\hinfsym_\infty}$ norm has been computed), or
if not, to provide the remaining level set intervals where a global maximizer lies
so the computation can continue.
As alluded to in the introduction, for the special case of the distance to instability, a similar cost-balancing strategy has been considered before in \cite{HeW99} but the authors themselves noted that the inverse iteration scheme they employed to find locally optimal solutions could sometimes have very slow convergence and expressed concern that other optimization methods could suffer similarly. Of course, in this paper we are considering the more general case of computing the ${\hinfsym_\infty}$ norm and, as we will observe in our later experimental evaluation, the first- and second-order optimization techniques we now propose do in fact seem to work well in practice.
Though $g_\mathrm{c}(\omega)$ is typically nonconvex, standard optimization methods should generally still be able to find local maximizers, if not always global maximizers, provided that $g_\mathrm{c}(\omega)$ is sufficiently smooth. Since $g_\mathrm{c}(\omega)$ is the maximum singular value of $G({\bf i}\omega)$, it is locally Lipschitz (e.g. \cite[Corollary 8.6.2]{GolV13}). Furthermore, in proving the quadratic convergence of the midpoint-based BBBS algorithm, it was shown that at local maximizers, the second derivative of $g_\mathrm{c}(\omega)$ not only exists but is even locally Lipschitz \cite[Theorem~2.3]{BoyB90}. The direct consequence is that Newton's method for optimization can be expected to converge quadratically when it is used to find a local maximizer of $g_\mathrm{c}(\omega)$. Since there is only one optimization variable, namely $\omega$, there is also the benefit that we need only work with first and second derivatives, instead of gradients and Hessians, respectively. Furthermore, if $g_\mathrm{c}^{\prime\prime}(\omega)$ is expensive to compute, one can instead resort to the secant method (which is a quasi-Newton method in one variable) and still obtain superlinear convergence. Given the large disparity in costs to compute the eigenvalues of $(\Mc,\Nc)$ and $g_\mathrm{c}(\omega)$, it seems likely that even just superlinear convergence could still be sufficient to significantly accelerate the computation of the ${\hinfsym_\infty}$ norm. Of course, when $g_\mathrm{c}^{\prime\prime}(\omega)$ is relatively cheap to compute, additional acceleration is likely to be obtained when using Newton's method. Note that many alternative optimization strategies could also be employed here, potentially with additional efficiencies. But, for the sake of simplicity, we will just restrict the discussion in this paper to the secant method and Newton's method, particularly since conceptually there is no difference.
Since $g_\mathrm{c}(\omega)$ will now need to be evaluated at any point requested by an optimization method, we will need to compute its first and possibly second derivatives directly; recall that using the eigenvectors of the purely imaginary eigenvalues of \eqref{eq:MNpencil_cont} with the equivalences in \eqref{eq:qs_vecs} and \eqref{eq:ntfcprime_eig_cont} only allows us to obtain the first derivatives at the end points of the level-set intervals. However, as long as we also compute the associated left and right singular vectors $u(\omega)$ and $v(\omega)$ when computing $g_\mathrm{c}(\omega)$, the value of the first derivative $g_\mathrm{c}^\prime(\omega)$ can be computed via the direct formulation given in \eqref{eq:ntfcprime_direct_cont} and without much additional cost over computing $g_\mathrm{c}(\omega)$ itself. For each frequency $\omega$ of interest, an LU factorization of $({\bf i} \omega E - A)$ can be done once and reused to solve the linear systems due to the presence of $({\bf i} \omega E - A)^{-1}$, which appears once in $g_\mathrm{c}(\omega)$ and twice in $g_\mathrm{c}^\prime(\omega)$.
To compute $g_\mathrm{c}^{\prime\prime}(\omega)$, we will need the following result for second derivatives of eigenvalues, which can be found in various forms in \cite{Lan64}, \cite{OveW95}, and \cite{Kat82}. \begin{theo} \label{thm:eig2ndderiv} For $t \in \mathbb R$, let $H(t)$ be a twice-differentiable $n \times n$ Hermitian matrix family with distinct eigenvalues at $t=0$, where $(\lambda_k,x_k)$ denotes the $k$th such eigenpair, each eigenvector $x_k$ has unit norm, and the eigenvalues are ordered $\lambda_1 > \ldots > \lambda_n$. Then: \[
\lambda_1''(t) \bigg|_{t=0}= x_1^* H''(0) x_1 + 2 \sum_{k = 2}^{n} \frac{| x_1^* H'(0) x_k |^2}{\lambda_1 - \lambda_k}. \] \end{theo}
Since $g_\mathrm{c}(\omega)$ is the largest singular value of $G({\bf i} \omega)$, it is also the largest eigenvalue of the matrix: \begin{equation}
\label{eq:eigderiv_mat}
H(\omega) =
\begin{bmatrix} 0 & G_\mathrm{c}(\omega) \\
G_\mathrm{c}(\omega)^* & 0
\end{bmatrix}, \end{equation} which has first and second derivatives \begin{equation}
\label{eq:eigderiv12_mat}
H^\prime(\omega) =
\begin{bmatrix} 0 & G_\mathrm{c}^\prime(\omega) \\
G_\mathrm{c}^\prime(\omega)^* & 0
\end{bmatrix}
\quad \text{and} \quad
H^{\prime\prime}(\omega) =
\begin{bmatrix} 0 & G_\mathrm{c}^{\prime\prime}(\omega) \\
G_\mathrm{c}^{\prime\prime}(\omega)^* & 0
\end{bmatrix}. \end{equation} The formula for $G_\mathrm{c}^\prime(\omega)$ is given by \eqref{eq:ntfcprime_direct_cont} while the corresponding second derivative is obtained by straightforward application of matrix differentiation rules: \begin{equation}
\label{eq:tfc2_cont}
G_\mathrm{c}^{\prime\prime}(\omega) =
-2 C({\bf i} \omega E - A)^{-1}E({\bf i} \omega E - A)^{-1}E({\bf i} \omega E - A)^{-1} B. \end{equation} Furthermore, the eigenvalues and eigenvectors of \eqref{eq:eigderiv_mat} needed to apply Theorem~\ref{thm:eig2ndderiv} are essentially directly available from just the full SVD of $G_\mathrm{c}(\omega)$. Let $\sigma_k$ be the $k$th singular value of $G_\mathrm{c}(\omega)$, along with associated left and right singular vectors $u_k$ and $v_k$, respectively. Then $\pm\sigma_k$ is an eigenvalue of \eqref{eq:eigderiv_mat} with eigenvector $\left[ \begin{smallmatrix} u_k \\ v_k \end{smallmatrix} \right]$ for $\sigma_k$ and eigenvector $\left[ \begin{smallmatrix} u_k \\ -v_k \end{smallmatrix}\right]$ for $-\sigma_k$. When $\sigma_k = 0$, the corresponding eigenvector is either $\left[\begin{smallmatrix} u_k \\ \mathbf{0} \end{smallmatrix} \right]$ if $p > m$ or $\left[\begin{smallmatrix} \mathbf{0} \\ v_k \end{smallmatrix} \right]$ if $p < m$, where $\mathbf{0}$ denotes a column of $m$ or $p$ zeros, respectively. Given the full SVD of $G_\mathrm{c}(\omega)$, computing $g_\mathrm{c}^{\prime\prime}(\omega)$ can also be done with relatively little additional cost. The stored LU factorization of $({\bf i} \omega E - A)$ used to obtain $G_\mathrm{c}(\omega)$ can again be reused to quickly compute the $G_\mathrm{c}^\prime(\omega)$ and $G_\mathrm{c}^{\prime\prime}(\omega)$ terms in \eqref{eq:eigderiv12_mat}. If obtaining the full SVD is particularly expensive, i.e. for systems with many inputs/outputs, as mentioned above, sparse methods can still be used to efficiently obtain the largest singular value and its associated right/left singular vectors, in order to at least calculate $g_\mathrm{c}^{\prime}(\omega)$, if not $g_\mathrm{c}^{\prime\prime}(\omega)$ as well.
\begin{rema} On a more theoretical point, by invoking Theorem~\ref{thm:eig2ndderiv} to compute $g_\mathrm{c}^{\prime\prime}(\omega)$, we are also assuming that the singular values of $G_\mathrm{c}(\omega)$ are unique as well. However, in practice, this will almost certainly hold numerically, and to adversely impact the convergence rate of Newton's method, it would have to frequently fail to hold, which seems an exceptionally unlikely scenario. As such, we feel that this additional assumption is not of practical concern. \end{rema}
Thus, our new proposed improvement to the BBBS algorithm is to not settle for the increase in $\gamma$ provided by the standard midpoint or cubic interpolation schemes, but to increase $\gamma$ \emph{as far as possible} on every iteration using standard optimization techniques applied to $g_\mathrm{c}(\omega)$. Assume that $\gamma$ is still less than the value of the ${\hinfsym_\infty}$ norm and let \[
\hat\omega_j = \argmax_{k} g_\mathrm{c}(\hat \omega_k), \] where the finite set of $\hat\omega_k$ values are the midpoints of the level-set intervals $I_k$ or the maximizers of the cubic interpolants on these intervals, respectively defined in \eqref{eq:gamma_mp} or \eqref{eq:gamma_cubic}. Thus the solution $\hat\omega_j \in I_j$ is the frequency that provides the updated value $\gamma_\mathrm{mp}$ or $\gamma_\mathrm{cubic}$ in the standard algorithms. Now consider applying either Newton's method or the secant method (the choice of which one will be more efficient can be made more or less automatically depending on how $m,p$ compares to $n$) to the following optimization problem with a simple box constraint: \begin{equation}
\label{eq:gamma_opt}
\max_{\omega \in I_j} g_\mathrm{c}(\omega). \end{equation} If the optimization method is initialized at $\hat\omega_j$, then even if $\omega_\mathrm{opt}$, a \emph{computed} solution to \eqref{eq:gamma_opt}, is actually just a \emph{local} maximizer (a possibility since \eqref{eq:gamma_opt} could be nonconvex), it is still guaranteed that \[
g_\mathrm{c}(\omega_\mathrm{opt}) >
\begin{cases}
\gamma_\mathrm{mp} & \text{initial point $\hat \omega_j$ is a midpoint of $I_j$} \\
\gamma_\mathrm{cubic} & \text{initial point $\hat \omega_j$ is a maximizer of interpolant $c_j(\omega)$}
\end{cases} \] holds, provided that $\hat\omega_j$ does not happen to be a stationary point of $g_\mathrm{c}(\omega)$. Furthermore, \eqref{eq:gamma_opt} can only have more than one maximizer when the current estimate $\gamma$ of the ${\hinfsym_\infty}$ norm is so low that there are multiple peaks above level-set interval $I_j$. Consequently, as the algorithm converges, computed maximizers of \eqref{eq:gamma_opt} will be assured to be globally optimal over $I_j$ and in the limit, over all frequencies along the entire imaginary axis. By setting tight tolerances for the optimization code, maximizers of \eqref{eq:gamma_opt} can also be computed to full precision with little to no penalty,
due to the superlinear or quadratic rate of convergence we can expect from the secant method or Newton's method, respectively. If the computed optimizer of \eqref{eq:gamma_opt} also happens to be a global maximizer of $g_\mathrm{c}(\omega)$, for all $\omega \in \mathbb R$, then the ${\hinfsym_\infty}$ norm has indeed been computed to full precision, but the algorithm must still verify this by computing the imaginary eigenvalues of $(\Mc,\Nc)$ just one more time. However, if a global optimizer has not yet been found, then the algorithm must compute the imaginary eigenvalues of $(\Mc,\Nc)$ at least two times more: one or more times as the algorithm increases $\gamma$ to the globally optimal value, and then a final evaluation to verify that the computed value is indeed globally optimal. Figure~\ref{fig:bbbs_opt} shows a pictorial comparison of optimizing $g_\mathrm{c}(\omega)$ compared to the midpoint and cubic-interpolant-based updating methods.
\begin{figure}
\caption{For the same example as Figure~\ref{fig:bbbs_plots}, the larger increase in $\gamma$ attained by optimizing \eqref{eq:gamma_opt}
is shown by the red dashed line going through the red circle at the top of the leftmost peak of $\| G({\bf i} \omega)\|_2$. By comparison, the BBBS midpoint (red dotted vertical lines and x's) and the cubic-interpolant-based schemes (black dotted vertical lines and x's) only provide suboptimal increases in $\gamma$.}
\label{fig:opt_comparison}
\label{fig:bbbs_opt}
\end{figure}
In the above discussion, we have so far only considered applying optimization over the single level-set interval $I_j$ but we certainly could attempt to solve \eqref{eq:gamma_opt} for other level-set intervals as well. Let $\phi = 1,2,\ldots$ be the max number of level-set intervals to optimize over per iteration and $q$ be the number of level-set intervals for the current value of $\gamma$. Compared to just optimizing over $I_j$, optimizing over all $q$ of the level-set intervals could yield an even larger increase in the estimate $\gamma$ but would be the most expensive option computationally. If we do not optimize over all the level-set intervals, i.e. $\phi < q$, there is the question of which intervals should be prioritized for optimization. In our experiments, we found that prioritizing by first evaluating $g_\mathrm{c}(\omega)$ at all the $\hat\omega_k$ values and then choosing the intervals $I_k$ to optimize where $g_\mathrm{c}(\hat\omega_k)$ takes on the largest values seems to be a good strategy. However, with a serial MATLAB code, we have observed that just optimizing over the most promising interval, i.e. $\phi = 1$ so just $I_j$, is generally more efficient in terms of running time than trying to optimize over more intervals. For discussion about optimizing over multiple intervals in parallel, see Section~\ref{sec:parallel}.
Finally, if a global maximizer of $g_\mathrm{c}(\omega)$ can potentially be found before ever computing eigenvalues of $(\Mc,\Nc)$ even once, then only one expensive eigenvalue decomposition of $(\Mc,\Nc)$ will be incurred, just to verify that the initial maximizer is indeed a global one. Thus, we also propose initializing the algorithm at a maximizer of $g_\mathrm{c}(\omega)$,
obtained via applying standard optimization techniques to \begin{equation}
\label{eq:gamma_init}
\max g_\mathrm{c}(\omega), \end{equation} which is just \eqref{eq:gamma_opt} without the box constraint. In the best case, the computed maximizer will be a global one, but even a local maximizer will still provide a higher initial estimate of the ${\hinfsym_\infty}$ norm compared to initializing at a guess that may not even be locally optimal. Of course, finding maximizers of \eqref{eq:gamma_init} by starting from multiple initial guesses can also be done in parallel; we again refer to Section~\ref{sec:parallel} for more details.
\begin{algfloat} \begin{algorithm}[H] \floatname{algorithm}{Algorithm} \caption{The Improved Algorithm Using Local Optimization} \label{alg:hybrid_opt} \begin{algorithmic}[1]
\REQUIRE{
Matrices $A \in \Cmn{n}{n}$, $B \in \Cmn{n}{m}$, $C \in \Cmn{p}{n}$, $D \in \Cmn{p}{m}$,
and $E \in \Cmn{n}{n}$,
initial frequency guesses $\{\omega_1,\ldots,\omega_q\} \in \mathbb R$ and
$\phi$, a positive integer indicating the number of intervals/frequencies to optimize per round.
}
\ENSURE{
$\gamma = \| G \|_{\hinfsym_\infty}$ and $\omega$ such that
$\gamma = g_\mathrm{c}(\omega)$.
\\ \quad
}
\STATE \COMMENT{Initialization:}
\STATE Compute $[\gamma_1,\ldots,\gamma_q] = [g_\mathrm{c}(\omega_1),\ldots,g_\mathrm{c}(\omega_q)]$.
\label{algline:init_start}
\STATE Reorder $\{\omega_1,\ldots,\omega_q\}$ s.t. $\gamma_j \ge \gamma_{j+1}$.
\STATE Find maximal values $[\gamma_1,\ldots,\gamma_\phi]$ of \eqref{eq:gamma_init}, using initial points
$\omega_j$, $j=1,\ldots,\phi$.
\label{algline:init_opt}
\STATE $\gamma = \max([\gamma_1,\ldots,\gamma_\phi])$.
\label{algline:gamma_init}
\STATE $\omega = \omega_j$ for $j$ s.t. $\gamma = \gamma_j$.
\label{algline:init_end}
\STATE \COMMENT{Convergent Phase:}
\WHILE {not converged}
\STATE \COMMENT{Compute the intervals that lie under $g_\mathrm{c}(\omega)$ using
eigenvalues of the pencil:}
\STATE Compute $\Lambda_\mathrm{I} =
\{ \Im \lambda : \lambda \in \Lambda(\Mc,\Nc) ~\text{and}~ \Re \lambda = 0\}$.
\label{algline:pencil}
\STATE Index and sort $\Lambda_\mathrm{I} = \{\omega_1,...,\omega_l\}$ s.t.
$\omega_j \le \omega_{j+1}$.
\label{algline:frequencies}
\STATE Form all intervals $I_k = [\omega_k, \omega_{k+1}]$ s.t.
each interval at height $\gamma$ is below $g_\mathrm{c}(\omega)$.
\label{algline:hyopt_ints}
\STATE \COMMENT{Compute candidate frequencies of the level-set intervals $I_k$ ($q$ of them):}
\STATE Compute all $\hat\omega_k$ using either \eqref{eq:gamma_mp} or \eqref{eq:gamma_cubic}.
\label{algline:hyopt_points}
\STATE \COMMENT{Run box-constrained optimization on the $\phi$ most promising frequencies:}
\STATE Compute $[\gamma_1,\ldots,\gamma_q] = [g_\mathrm{c}(\hat\omega_1),\ldots,g_\mathrm{c}(\hat\omega_q)]$.
\STATE Reorder $\{\hat\omega_1,\ldots,\hat\omega_q\}$ and intervals $ I_k$ s.t. $\gamma_j \ge \gamma_{j+1}$.
\label{algline:hyopt_reorder}
\STATE Find maximal values $[\gamma_1,\ldots,\gamma_\phi]$ of \eqref{eq:gamma_opt} using initial points $\hat\omega_j$, $j = 1,\ldots,\phi$.
\label{algline:hyopt_opt}
\STATE \COMMENT{Update to the highest gain computed:}
\STATE $\gamma = \max([\gamma_1,\ldots,\gamma_\phi])$.
\STATE $\omega = \omega_j$ for $j$ s.t. $\gamma = \gamma_j$.
\ENDWHILE
\STATE \COMMENT{Check whether the maximizing frequency is at infinity (continuous-time only)}
\IF { $\gamma < \|D\|_2$}
\STATE $\gamma = \|D\|_2$.
\STATE $\omega = \infty$.
\ENDIF \end{algorithmic} \end{algorithm} \algnote{ For details on how embarrassingly parallel processing can be used to further improve the algorithm, see Section~\ref{sec:parallel}. } \end{algfloat}
Algorithm~\ref{alg:hybrid_opt} provides a high-level pseudocode description of our improved method. As a final step in the algorithm, it is necessary to check whether or not the value of the ${\hinfsym_\infty}$ norm is attained at $\omega = \infty$. We check this case after the convergent phase has computed a global maximizer over the union of all intervals it has considered. The only possibility that the ${\hinfsym_\infty}$ norm may be attained at $\omega = \infty$ in Algorithm~\ref{alg:hybrid_opt} is when the initial value of $\gamma$ computed in
line~\ref{algline:gamma_init} is less than $\|D\|_2$. As the assumptions of Theorem~\ref{thm:eigsing_cont} require
that $\gamma$ not be a singular value of $D$, it is not valid to use $\gamma = \|D\|_2$ in $(\Mc,\Nc)$ to check if this pencil has any imaginary eigenvalues. However, if the optimizer computed by the convergent phase of Algorithm~\ref{alg:hybrid_opt} yields a
$\gamma$ value less than $\|D\|_2$, then it is clear that the optimizing frequency is at $\omega = \infty$.
\section{Handling discrete-time systems} \label{sec:discrete} Now consider the discrete-time linear dynamical system \begin{subequations} \label{eq:lti_disc} \begin{align} Ex_{k+1} &= Ax_k + Bu_k \\ y_k & = Cx_k + Du_k, \end{align} \end{subequations} where the matrices are defined as before in \eqref{eq:lti_cont}. In this case, the ${\hinfsym_\infty}$ norm is defined as \begin{equation}
\label{eq:hinf_disc}
\|G\|_{{\hinfsym_\infty}} \coloneqq \max_{\theta \in [0,2\pi)} \| G(e^{\imagunit \theta}) \|_2, \end{equation} again assuming that pencil $(A,E)$ is at most index one. If all finite eigenvalues are either strictly inside the unit disk centered at the origin or are uncontrollable or unobservable, then \eqref{eq:hinf_disc} is finite and the ${\hinfsym_\infty}$ norm is attained at some $\theta \in [0,2\pi)$. Otherwise, it is infinite.
We now show the analogous version of Theorem~\ref{thm:eigsing_cont} for discrete-time systems. The $D=0$ and $E=I$ case was considered in \cite[Section 3]{HinS91} while the more specific $B=C=E=I$ and $D=0$ case was given in \cite[Theorem 4]{Bye88}.\footnote{Note that equation (10) in \cite{Bye88} has a typo: $A^H - e^{\imagunit \theta} I$ in the lower left block of $K(\theta)$ should actually be $A^H - e^{-\imagunit \theta} I$.} These results relate singular values of the transfer function for discrete-time systems to eigenvalues with modulus one of associated matrix pencils. Although the following more general result is already known, the proof, to the best of our knowledge, is not in the literature so we include it in full here. The proof follows a similar argumentation as the proof of Theorem~\ref{thm:eigsing_cont}. \begin{theo} \label{thm:eigsing_disc} Let $\lambda E - A$ be regular with no finite eigenvalues on the unit circle, $\gamma > 0$ not a singular value of $D$, and $\theta \in [0,2\pi)$. Consider the matrix pencil $(\Md,\Nd)$, where \begin{equation}
\label{eq:MNpencil_disc} \begin{aligned}
\mathcal{S}_\gamma \coloneqq {}& \begin{bmatrix} A - BR^{-1}D^*C & -\gamma BR^{-1}B^* \\
0 & E^* \end{bmatrix}, \\
\mathcal{T}_\gamma \coloneqq {}& \begin{bmatrix} E & 0\\
-\gamma C^*S^{-1}C & (A - BR^{-1}D^*C)^*
\end{bmatrix}, \end{aligned} \end{equation} $R = D^*D - \gamma^2 I$ and $S = DD^* - \gamma^2 I$. Then $e^{{\bf i} \theta}$ is an eigenvalue of matrix pencil $(\Md,\Nd)$ if and only if $\gamma$ is a singular value of $G(e^{{\bf i} \theta})$. \end{theo}
\begin{proof} Let $\gamma$ be a singular value of $G(e^{\imagunit \theta})$ with left and right singular vectors $u$ and $v$, that is, so that $G(e^{\imagunit \theta})v = \gamma u$ and $G(e^{\imagunit \theta})^*u = \gamma v$. Using the expanded versions of these two equivalences \begin{equation}
\label{eq:tfsv_equiv_disc}
\left( \tfs{e^{\imagunit \theta}} \right) v = \gamma u
\quad \text{and} \quad
\left( \tfs{e^{\imagunit \theta}} \right)^* u = \gamma v, \end{equation} we define \begin{equation}
\label{eq:qs_disc}
q = \left( e^{\imagunit \theta} E - A \right)^{-1}Bv
\quad \text{and} \quad
s = \left( e^{-\imagunit \theta} E^* - A^* \right)^{-1}C^*u. \end{equation} Rewriting \eqref{eq:tfsv_equiv_disc} using \eqref{eq:qs_disc} yields the following matrix equation: \begin{equation}
\label{eq:uv_disc}
\begin{bmatrix} C & 0 \\ 0 & B^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix}
=
\begin{bmatrix} -D & \gamma I \\ \gamma I & -D^* \end{bmatrix}
\begin{bmatrix} v \\ u \end{bmatrix}
~\Longrightarrow~
\begin{bmatrix} v \\ u \end{bmatrix}
=
\begin{bmatrix} -D & \gamma I \\ \gamma I & -D^* \end{bmatrix}^{-1}
\begin{bmatrix} C & 0 \\ 0 & B^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix} , \end{equation} where \begin{equation}
\label{eq:Dgamma_inv}
\begin{bmatrix} -D & \gamma I \\ \gamma I & -D^* \end{bmatrix}^{-1}
=
\begin{bmatrix} -R^{-1}D^* & -\gamma R^{-1} \\ -\gamma S^{-1} & -DR^{-1} \end{bmatrix}
\quad \text{and} \quad
\begin{bmatrix} q \\ s \end{bmatrix} \ne 0. \end{equation} Rewriting \eqref{eq:qs_disc} as: \begin{equation}
\label{eq:EAqs1_disc}
\left(
\begin{bmatrix} e^{\imagunit \theta} E & 0 \\ 0 & e^{-\imagunit \theta} E^* \end{bmatrix} -
\begin{bmatrix} A & 0 \\ 0 & A^* \end{bmatrix}
\right)
\begin{bmatrix} q \\ s \end{bmatrix}
=
\begin{bmatrix} B & 0 \\ 0 & C^* \end{bmatrix}
\begin{bmatrix} v \\ u \end{bmatrix} , \end{equation} and then substituting in \eqref{eq:uv_disc} for the rightmost term of \eqref{eq:EAqs1_disc} yields \begin{equation}
\label{eq:EAqs2_disc}
\begingroup
\setlength\arraycolsep{3pt}
\left(
\begin{bmatrix} e^{\imagunit \theta} E & 0 \\ 0 & e^{-\imagunit \theta} E^* \end{bmatrix} -
\begin{bmatrix} A & 0 \\ 0 & A^* \end{bmatrix}
\right)
\begin{bmatrix} q \\ s \end{bmatrix}
=
\begin{bmatrix} B & 0 \\ 0 & C^* \end{bmatrix}
\begin{bmatrix} -D & \gamma I \\ \gamma I & -D^* \end{bmatrix}^{-1}
\begin{bmatrix} C & 0 \\ 0 & B^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix} .
\endgroup \end{equation} Multiplying the above on the left by \[
\begin{bmatrix} I & 0 \\ 0 & -e^{\imagunit \theta} I \end{bmatrix} \] and then rearranging terms, we have \begin{equation*}
\label{eq:EAqs3_disc}
e^{\imagunit \theta} \begin{bmatrix} E & 0 \\ 0 & A^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix}
=
\begin{bmatrix} A & 0 \\ 0 & E^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix}
+
\begin{bmatrix} B & 0 \\ 0 & -e^{\imagunit \theta} C^* \end{bmatrix}
\begin{bmatrix} -D & \gamma I \\ \gamma I & -D^* \end{bmatrix}^{-1}
\begin{bmatrix} C & 0 \\ 0 & B^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix} . \end{equation*} Substituting the inverse in \eqref{eq:Dgamma_inv} for its explicit form and multiplying terms yields: \begin{equation*}
e^{\imagunit \theta} \begin{bmatrix} E & 0 \\ 0 & A^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix}
=
\begin{bmatrix} A & 0 \\ 0 & E^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix}
+
\begin{bmatrix} B & 0 \\ 0 & -e^{\imagunit \theta} C^* \end{bmatrix}
\begin{bmatrix} -R^{-1}D^*C & -\gamma R^{-1}B^* \\
-\gamma S^{-1}C & -DR^{-1}B^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix} . \end{equation*} Finally, multiplying terms further, separating out the $-e^{\imagunit \theta}$ terms to bring them over to the left hand side, and then recombining, we have that \begin{equation*}
e^{\imagunit \theta} \begin{bmatrix} E & 0 \\ -\gamma C^*S^{-1}C & A^* - C^*DR^{-1}B^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix}
=
\begin{bmatrix} A - BR^{-1}D^*C & -\gamma BR^{-1}B^* \\ 0 & E^* \end{bmatrix}
\begin{bmatrix} q \\ s \end{bmatrix} . \end{equation*} It is now clear that $e^{\imagunit \theta}$ is an eigenvalue of pencil $(\Md,\Nd)$.
Now suppose that $e^{\imagunit \theta}$ is an eigenvalue of pencil $(\Md,\Nd)$ with eigenvector given by $q$ and $s$ as above. Then it follows that \eqref{eq:EAqs2_disc} holds, which can be rewritten as \eqref{eq:EAqs1_disc} by defining $u$ and $v$ using the right-hand side equation of \eqref{eq:uv_disc}, noting that neither can be identically zero. It is then clear that the pair of equivalences in \eqref{eq:qs_disc} hold. Finally, substituting \eqref{eq:qs_disc} into the left-hand side equation of \eqref{eq:uv_disc}, it is clear that $\gamma$ is a singular value of $G(e^{\imagunit \theta})$, with left and right singular vectors $u$ and $v$. \end{proof}
Adapting Algorithm~\ref{alg:hybrid_opt} to the discrete-time case is straightforward. First, all instances of $g_\mathrm{c}(\omega)$ must be replaced with \[
g_\mathrm{d}(\theta) \coloneqq \|G(e^{\imagunit \theta})\|_2. \] To calculate its first and second derivatives, we will need the first and second derivatives of $G_\mathrm{d}(\theta) \coloneqq G(e^{\imagunit \theta})$ and for notational brevity, it will be convenient to define $Z(\theta) \coloneqq (e^{\imagunit \theta} E - A)$. Then \begin{equation}
\label{eq:tfd1st}
G_\mathrm{d}^\prime(\theta) = - \imagunit e^{\imagunit \theta} C Z(\theta)^{-1} E Z(\theta)^{-1} B \end{equation} and \begin{equation}
\label{eq:tfd2nd}
G_\mathrm{d}^{\prime\prime}(\theta) =
e^{\imagunit \theta} C Z(\theta)^{-1} E Z(\theta)^{-1} B
- 2 e^{2\imagunit \theta} C Z(\theta)^{-1} E Z(\theta)^{-1} E Z(\theta)^{-1} B. \end{equation} The first derivative of $g_\mathrm{d}(\theta)$ can thus be calculated using \eqref{eq:ntfcprime_defn_cont}, where $\omega$, $g_\mathrm{c}(\omega)$, and $G_\mathrm{c}^\prime(\omega)$ are replaced by $\theta$, $g_\mathrm{d}(\theta)$, and $G_\mathrm{d}^\prime(\theta)$ using \eqref{eq:tfd1st}. The second derivative of $g_\mathrm{d}(\theta)$ can be calculated using Theorem~\ref{thm:eig2ndderiv} using \eqref{eq:tfd1st} and \eqref{eq:tfd2nd} to define $H(\theta)$, the analog of \eqref{eq:eigderiv_mat}. Line~\ref{algline:pencil} must be changed to instead compute the eigenvalues of unit modulus of \eqref{eq:MNpencil_disc}. Line~\ref{algline:frequencies} must instead index and sort the angles $\{\theta_1,\ldots,\theta_l\}$ of these unit modulus eigenvalues in ascending order. Due to the periodic nature of \eqref{eq:hinf_disc}, line~\ref{algline:hyopt_ints} must additionally consider the ``wrap-around'' interval $[\theta_l,\theta_1+2\pi]$.
\section{Numerical experiments} \label{sec:numerical} We implemented Algorithm~\ref{alg:hybrid_opt} in {MATLAB}, for both continuous-time and discrete-time cases. Since we can only get timing information from \texttt{hinfnorm} and we wished to verify that our new method does indeed reduce the number of times the eigenvalues of $(\Mc,\Nc)$ and $(\Md,\Nd)$ are computed, we also designed our code so that it can run just using the standard BBBS algorithm or the cubic-interpolant scheme. For our new optimization-based approach, we used \texttt{fmincon} for both the unconstrained optimization calls needed for the initialization phase and for the box-constrained optimization calls needed in the convergent phase; \texttt{fmincon}'s optimality and constraint tolerances were set to $10^{-14}$ in order to find maximizers to near machine precision. Our code supports starting the latter optimization calls from either the midpoints of the BBBS algorithm \eqref{eq:gamma_mp} or the maximizing frequencies calculated from the cubic-interpolant method \eqref{eq:gamma_cubic}. Furthermore, the optimizations may be done using either
the secant method (first-order information only) or with Newton's method using second derivatives, thus leading
to four variants of our proposed method to test. Our code has a user-settable parameter that determines
when $m,p$ should be considered too large relative to $n$, and thus when it is likely that using the secant method
will actually be faster than Newton's method, due to the additional expense of computing the second derivative
of the norm of the transfer function.
For initial frequency guesses, our code simply tests zero and the imaginary part of the rightmost eigenvalue of $(A,E)$, excluding eigenvalues that are either infinite, uncontrollable, or unobservable.
Eigenvalues are deemed uncontrollable or unobservable if $\|B^*y\|_2$ or $\|Cx\|_2$ are respectively below a user-set tolerance, where $x$ and $y$ are respectively the right and left eigenvectors for a given eigenvalue of $(A,E)$. In the discrete-time case, the default initial guesses are zero, $\pi$, and the angle for the largest modulus eigenvalue.\footnote{ For producing a production-quality implementation, see \cite{BruS90} for more sophisticated initial guesses that can be used, \cite[Section III]{BenV11} for dealing with testing properness of the transfer function, and \cite{Var90a} for filtering out uncontrollable/unobservable eigenvalues of $(A,E)$ when it has index higher than one.}
For efficiency of implementing our method and conducting these experiments, our code does not yet take advantage of structure-preserving eigensolvers. Instead, it uses the regular QZ algorithm (\texttt{eig} in {MATLAB}) to compute the eigenvalues of $(\Mc,\Nc)$ and $(\Md,\Nd)$. To help mitigate issues due to rounding errors, we consider any eigenvalue $\lambda$ imaginary or of unit modulus if it lies within a margin of width $10^{-8}$ on either side of the imaginary axis or unit circle. Taking the imaginary parts of these nearly imaginary eigenvalues forms the initial set of candidate frequencies, or the angles of these nearly unit modulus eigenvalues for the discrete-time case. Then we simply form all the consecutive intervals, including the wrap-around interval for the discrete-time case, even though not all of them will be level-set intervals, and some intervals may only be a portion of a level-set interval (e.g. if the use of QZ causes spurious candidate frequencies). The reason we do this is because we can easily sort which of the intervals at height $\gamma$ are below $g_\mathrm{c}(\omega)$ or $g_\mathrm{d}(\theta)$ just by evaluating these functions at the midpoint or the maximizer of the cubic interpolant for each interval. This is less expensive because we need to evaluate these interior points regardless, so also evaluating the norm of the transfer function at all these endpoints just adds additional cost. However, for the cubic interpolant refinement, we nonetheless still evaluate $g_\mathrm{c}(\omega)$ or $g_\mathrm{d}(\theta)$ at the endpoints since we need the corresponding derivatives there to construct the cubic interpolants; we do not use the eigenvectors of $(\Mc,\Nc)$ or $(\Md,\Nd)$ to bypass this additional cost as \texttt{eig} in {MATLAB}\ does not currently provide a way to only compute selected eigenvectors, i.e. those corresponding to the imaginary (unit-modulus) eigenvalues.
Note that while this strategy is sufficient for our experimental comparisons here, it certainly does not negate the need for structure-preserving eigenvalue solvers.
We evaluated our code on several continuous- and discrete-time problems up to moderate dimensions, all listed with dimensions in Table~\ref{table:hinfnorm_tol}. For the continuous-time problems, we chose four problems from the test set used in \cite{GugGO13} (\texttt{CBM}, \texttt{CSE2}, \texttt{CM3}, \texttt{CM4}), two from the SLICOT benchmark examples\footnote{Available at \url{http://slicot.org/20-site/126-benchmark-examples-for-model-reduction}} (\texttt{ISS} and \texttt{FOM}), and two new randomly-generated examples using \texttt{randn()} with a relatively large number of inputs and outputs. Notably, the four problems from \cite{GugGO13} were generated via taking open-loop systems from $COMPL_eib$\ \cite{compleib} and then designing controllers to minimize the ${\hinfsym_\infty}$ norm of the corresponding closed-loop systems via {\sc hifoo}\ \cite{BurHLetal06}. Such systems can be interesting benchmark examples because $g_\mathrm{c}(\omega)$ will often have several peaks, and multiple peaks may attain the value of the ${\hinfsym_\infty}$ norm, or at least be similar in height. Since the discrete-time problems from \cite{GugGO13} were all very small scale (the largest order in that test set is only 16) and SLICOT only offers a single discrete-time benchmark example, we instead elected to take additional open-loop systems from $COMPL_eib$\ and obtain usable test examples by minimizing the discrete-time ${\hinfsym_\infty}$ norm of their respective closed-loop systems, via optimizing controllers using {\sc hifoo}d\ \cite{PopWM10}, a fork of {\sc hifoo}\ for discrete-time systems. On all examples, the ${\hinfsym_\infty}$ norm values computed by our local-optimization-enhanced code (in all its variants) agreed on average to 13 digits with the results provided by \texttt{hinfnorm}, when used with the tight tolerance of $10^{-14}$, with the worst discrepancy being only 11 digits of agreement. However, our improved method often found slightly larger values, i.e. 
more accurate values, since it optimizes $g_\mathrm{c}(\omega)$ and $g_\mathrm{d}(\omega)$ directly.
All experiments were performed using {MATLAB}\ R2016b running on a Macbook Pro with an Intel i7-6567U dual-core CPU, 16GB of RAM, and Mac OS X v10.12.
\subsection{Continuous-time examples}
\begin{table}[!t] \center
\begin{tabular}{ l | cccc | cc } \toprule \multicolumn{7}{c}{Small-scale examples (continuous time)}\\ \midrule \multicolumn{1}{c}{} &
\multicolumn{4}{c}{Hybrid Optimization} & \multicolumn{2}{c}{Standard Algs.} \\ \cmidrule(lr){2-5} \cmidrule(lr){6-7} \multicolumn{1}{c}{} &
\multicolumn{2}{c}{Newton} & \multicolumn{2}{c}{Secant} & \multicolumn{2}{c}{} \\ \cmidrule(lr){2-3} \cmidrule(lr){4-5} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} & \multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{BBBS} \\ \midrule \multicolumn{7}{c}{Number of Eigenvalue Computations of $(\Mc,\Nc)$}\\ \midrule \texttt{CSE2} & 2 & 3 & 1 & 1 & 2 & 3 \\ \texttt{CM3} & 2 & 3 & 2 & 2 & 3 & 5 \\ \texttt{CM4} & 2 & 2 & 2 & 2 & 4 & 6 \\ \texttt{ISS} & 1 & 1 & 1 & 1 & 3 & 4 \\ \texttt{CBM} & 2 & 2 & 2 & 2 & 5 & 7 \\ \texttt{randn 1} & 1 & 1 & 1 & 1 & 1 & 1 \\ \texttt{randn 2} & 1 & 1 & 1 & 1 & 2 & 2 \\ \texttt{FOM} & 1 & 1 & 1 & 1 & 2 & 2 \\ \midrule \multicolumn{7}{c}{Number of Evaluations of $g_\mathrm{c}(\omega)$}\\ \midrule \texttt{CSE2} & 10 & 7 & 10 & 10 & 9 & 8 \\ \texttt{CM3} & 31 & 26 & 53 & 45 & 31 & 24 \\ \texttt{CM4} & 19 & 17 & 44 & 43 & 46 & 36 \\ \texttt{ISS} & 12 & 12 & 22 & 22 & 39 & 27 \\ \texttt{CBM} & 34 & 28 & 59 & 55 & 46 & 36 \\ \texttt{randn 1} & 1 & 1 & 1 & 1 & 1 & 1 \\ \texttt{randn 2} & 4 & 4 & 17 & 17 & 6 & 4 \\ \texttt{FOM} & 4 & 4 & 16 & 16 & 7 & 5 \\ \bottomrule \end{tabular} \caption{The top half of the table reports the number of times the eigenvalues of $(\Mc,\Nc)$ were computed in order to compute the ${\hinfsym_\infty}$ norm to near machine precision. From left to right, the methods are our hybrid optimization approach using Newton's method and the secant method, the cubic-interpolant scheme (column `Interp') and the standard BBBS method, all implemented by our single configurable code. The subcolumns `Interp.' and 'MP' of our methods respectively indicate that the optimization routines were initialized at the points from the cubic-interpolant scheme and the BBBS midpoint scheme. The bottom half of the table reports the number of times it was necessary to evaluate the norm of the transfer function (with or without its derivatives). The problems are listed in increasing order of their state-space sizes $n$; for their exact dimensions, see Table~\ref{table:hinfnorm_tol}. } \label{table:evals_dense} \end{table}
In Table~\ref{table:evals_dense}, we list the number of times the eigenvalues of $(\Mc,\Nc)$ were computed and the number of evaluations of $g_\mathrm{c}(\omega)$ for our new method compared to our implementations of the existing BBBS algorithm and its interpolation-based refinement. As can be seen, our new method typically limited the number of required eigenvalue computations of $(\Mc,\Nc)$ to just two, and often it only required one (in the cases where our method found a global optimizer of $g_\mathrm{c}(\omega)$ in the initialization phase). In contrast, the standard BBBS algorithm and its interpolation-based refinement had to evaluate the eigenvalues of $(\Mc,\Nc)$ more times; for example, on problem \texttt{CBM}, the BBBS algorithm needed seven evaluations while its interpolation-based refinement still needed five. Though our new method sometimes required more evaluations of $g_\mathrm{c}(\omega)$ than the standard algorithms, often the number of evaluations of $g_\mathrm{c}(\omega)$ was actually less with our new method, presumably due to its fewer iterations and particularly when using the Newton's method variants. Even when our method required more evaluations of $g_\mathrm{c}(\omega)$ than the standard methods, the increases were not too significant (e.g. the secant method variants of our method on problems \texttt{CM4}, \texttt{CBM}, \texttt{randn 2}, and \texttt{FOM}). Indeed, the larger number of evaluations of $g_\mathrm{c}(\omega)$ when employing the secant method in lieu of Newton's method was still generally quite low.
\begin{table}[!t] \setlength{\tabcolsep}{3pt} \robustify\bfseries \center
\begin{tabular}{ l | SSSS | SS | SS } \toprule \multicolumn{9}{c}{Small-scale examples (continuous time)} \\ \midrule \multicolumn{1}{c}{} &
\multicolumn{4}{c}{Hybrid Optimization} & \multicolumn{2}{c}{Standard Algs.} & \multicolumn{2}{c}{\texttt{hinfnorm($\cdot$,tol)}}\\ \cmidrule(lr){2-5} \cmidrule(lr){6-7} \cmidrule(lr){8-9} \multicolumn{1}{c}{} &
\multicolumn{2}{c}{Newton} & \multicolumn{2}{c}{Secant} & \multicolumn{2}{c}{} & \multicolumn{2}{c}{\texttt{tol}}\\ \cmidrule(lr){2-3} \cmidrule(lr){4-5} \cmidrule(lr){8-9} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} & \multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{BBBS} &
\multicolumn{1}{c}{\texttt{1e-14}} & \multicolumn{1}{c}{\texttt{0.01}} \\ \midrule \multicolumn{9}{c}{Wall-clock running times in seconds}\\ \midrule \texttt{CSE2} & 0.042 & 0.060 & 0.036 & 0.032 & 0.043 & \bfseries 0.031 & 0.137 & 0.022 \\ \texttt{CM3} & \bfseries 0.125 & 0.190 & 0.198 & 0.167 & 0.170 & 0.167 & 0.148 & 0.049 \\ \texttt{CM4} & \bfseries 0.318 & 0.415 & 0.540 & 0.550 & 0.712 & 0.811 & 1.645 & 0.695 \\ \texttt{ISS} & 0.316 & 0.328 & 0.379 & \bfseries 0.303 & 0.709 & 0.757 & 0.765 & 0.391 \\ \texttt{CBM} & 0.744 & \bfseries 0.671 & 1.102 & 1.071 & 1.649 & 1.757 & 3.165 & 1.532 \\ \texttt{randn 1} & 0.771 & 0.868 & 1.006 & 0.871 & \bfseries 0.700 & 0.756 & 21.084 & 30.049 \\ \texttt{randn 2} & \bfseries 9.551 & 9.746 & 9.953 & 11.275 & 14.645 & 15.939 & 31.728 & 16.199 \\ \texttt{FOM} & \bfseries 3.039 & 3.426 & 4.418 & 4.176 & 5.509 & 5.182 & 128.397 & 36.529 \\ \midrule \multicolumn{9}{c}{Running times relative to hybrid optimization (Newton with `Interp.')}\\ \midrule \texttt{CSE2} & 1 & 1.42 & 0.86 & 0.75 & 1.02 & 0.75 & 3.24 & 0.53 \\ \texttt{CM3} & 1 & 1.52 & 1.59 & 1.34 & 1.36 & 1.34 & 1.19 & 0.39 \\ \texttt{CM4} & 1 & 1.31 & 1.70 & 1.73 & 2.24 & 2.55 & 5.18 & 2.19 \\ \texttt{ISS} & 1 & 1.04 & 1.20 & 0.96 & 2.24 & 2.39 & 2.42 & 1.24 \\ \texttt{CBM} & 1 & 0.90 & 1.48 & 1.44 & 2.22 & 2.36 & 4.26 & 2.06 \\ \texttt{randn 1} & 1 & 1.13 & 1.31 & 1.13 & 0.91 & 0.98 & 27.36 & 38.99 \\ \texttt{randn 2} & 1 & 1.02 & 1.04 & 1.18 & 1.53 & 1.67 & 3.32 & 1.70 \\ \texttt{FOM} & 1 & 1.13 & 1.45 & 1.37 & 1.81 & 1.71 & 42.25 & 12.02 \\ \midrule Average & 1 & 1.18 & 1.33 & 1.24 & 1.67 & 1.72 & 11.15 & 7.39 \\ \bottomrule \end{tabular} \caption{ In the top half of the table, the running times (fastest in bold) are reported in seconds for the same methods and configurations as in Table~\ref{table:evals_dense}, with the running times of \texttt{hinfnorm} additionally listed in the rightmost two columns, for a tolerance of $10^{-14}$ (as used by the other methods) and 
its default value of $0.01$. The bottom half of the table normalizes all the times relative to the running times for our hybrid optimization method (Newton and `Interp.'), along with the overall averages relative to this variant. } \label{table:times_dense} \end{table}
In Table~\ref{table:times_dense}, we compare the corresponding wall-clock times, and for convenience, we replicate the timing results of \texttt{hinfnorm} from Table~\ref{table:hinfnorm_tol} on the same problems. We observe that our new method was fastest on six out of the eight test problems, often significantly so. Compared to our own implementation of the BBBS algorithm, our new method was on average 1.72 times as fast and on three problems, 2.36--2.55 times faster. We see similar speedups compared to the cubic-interpolation refinement method as well. Our method was even faster when compared to \texttt{hinfnorm}, which had the advantage of being a compiled code rather than interpreted like our code. Our new method was over eleven times faster than \texttt{hinfnorm} overall, but this was largely due to the two problems (\texttt{FOM} and \texttt{randn 1}) where our code was 27--42 times faster. We suspect that this large performance gap on these problems was not necessarily due to a correspondingly dramatic reduction in the number of times that the eigenvalues of $(\Mc,\Nc)$ were computed but rather that the structure-preserving eigensolver \texttt{hinfnorm} employed sometimes has a steep performance penalty compared to standard QZ. However, it is difficult to verify this as \texttt{hinfnorm} is not open source. We also see that for the variants of our method, there was about a 24--33\% penalty on average in the runtime when resorting to the secant method instead of Newton's method. Nonetheless, even the slower secant-method-based version of our hybrid optimization approach was still typically much faster than BBBS or the cubic-interpolation scheme.
The only exception to this was problem \texttt{CSE2}, where our secant method variants were actually faster than our Newton's method variants; the reason for this was because during initialization, the Newton's method optimization just happened to find worse initial local maximizers than the secant method approach, which led to more eigenvalue computations of $(\Mc,\Nc)$.
The two problems where the variants of our new method were not fastest were \texttt{CSE2} and \texttt{randn 1}. However, for \texttt{CSE2}, our secant method variant using midpoints was essentially as fast as the standard algorithm. As mentioned above, the Newton's method variants ended up being slower since they found worse initial local maximizers. For \texttt{randn 1}, all methods only required a single evaluation of $g_\mathrm{c}(\omega)$ and computing the eigenvalues of $(\Mc,\Nc)$; in other words, their respective initial guesses were all actually a global maximizer. As such, the differences in running times for \texttt{randn 1} seem likely attributable to the variability of interpreted {MATLAB}\ code.
\subsection{Discrete-time examples} We now present corresponding experiments for the six discrete-time examples listed in Table~\ref{table:hinfnorm_tol}. In Table~\ref{table:evals_dense_disc}, we see that our new approach on discrete-time problems also reduces the number of expensive eigenvalue computations of $(\Md,\Nd)$ compared to the standard methods and that in the worst cases, there is only a moderate increase in the number of evaluations of $g_\mathrm{d}(\theta)$ and often, even a reduction, similar to what we saw in Table~\ref{table:evals_dense} for the continuous-time problems.
Wall-clock running times are reported in Table~\ref{table:times_dense_disc}, and show similar results, if not identical, to those in Table~\ref{table:times_dense} for the continuous-time comparison. We see that our Newton's method variants are, on average, 1.66 and 1.41 times faster, respectively, than the BBBS and cubic-interpolation refinement algorithms. Our algorithms are often up to two times faster than these two standard methods and were even up to 25.2 times faster on \texttt{ISS1d} compared to \texttt{hinfnorm} using \texttt{tol=1e-14}. For three of the six problems, our approach was not fastest but these three problems (\texttt{LAHd}, \texttt{BDT2d}, \texttt{EB6d}) also had the smallest orders among the discrete-time examples ($n=58,92,170$, respectively). This underscores that our approach is likely most beneficial for all but rather small-scale problems, where there is generally an insufficient cost gap between computing $g_\mathrm{d}(\theta)$ and the eigenvalues of $(\Md,\Nd)$. However, for \texttt{LAHd} and \texttt{EB6d}, it was actually \texttt{hinfnorm} that was fastest, where we are comparing a compiled code to our own pure {MATLAB}\ interpreted code. Furthermore, on these two problems, our approach was nevertheless not dramatically slower than \texttt{hinfnorm} and for \texttt{EB6d}, was actually faster than our own implementation of the standard algorithms. Finally, on \texttt{BDT2d}, the fastest version of our approach essentially matched the performance of our BBBS implementation, if not the cubic-interpolation refinement.
\begin{table}[!t] \center
\begin{tabular}{ l | cccc | cc } \toprule \multicolumn{7}{c}{Small-scale examples (discrete time)} \\ \midrule \multicolumn{1}{c}{} &
\multicolumn{4}{c}{Hybrid Optimization} & \multicolumn{2}{c}{Standard Algs.} \\ \cmidrule(lr){2-5} \cmidrule(lr){6-7} \multicolumn{1}{c}{} &
\multicolumn{2}{c}{Newton} & \multicolumn{2}{c}{Secant} & \multicolumn{2}{c}{} \\ \cmidrule(lr){2-3} \cmidrule(lr){4-5} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} & \multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{BBBS} \\ \midrule \multicolumn{7}{c}{Number of Eigenvalue Computations of $(\Md,\Nd)$}\\ \midrule \texttt{LAHd} & 2 & 2 & 1 & 1 & 3 & 4 \\ \texttt{BDT2d} & 2 & 3 & 2 & 2 & 3 & 4 \\ \texttt{EB6d} & 1 & 1 & 2 & 1 & 3 & 5 \\ \texttt{ISS1d} & 1 & 1 & 1 & 1 & 2 & 2 \\ \texttt{CBMd} & 1 & 1 & 1 & 1 & 3 & 2 \\ \texttt{CM5d} & 2 & 2 & 2 & 2 & 3 & 4 \\ \midrule \multicolumn{7}{c}{Number of Evaluations of $g_\mathrm{d}(\omega)$}\\ \midrule \texttt{LAHd} & 13 & 11 & 24 & 24 & 17 & 15 \\ \texttt{BDT2d} & 17 & 18 & 43 & 40 & 18 & 17 \\ \texttt{EB6d} & 22 & 22 & 37 & 34 & 32 & 32 \\ \texttt{ISS1d} & 5 & 5 & 24 & 24 & 7 & 6 \\ \texttt{CBMd} & 5 & 5 & 26 & 26 & 12 & 6 \\ \texttt{CM5d} & 20 & 16 & 27 & 27 & 22 & 18 \\ \bottomrule \end{tabular} \caption{The column headers remain as described for Table~\ref{table:evals_dense}. } \label{table:evals_dense_disc} \end{table}
\begin{table}[!t] \setlength{\tabcolsep}{3pt} \robustify\bfseries \center
\begin{tabular}{ l | SSSS | SS | SS } \toprule \multicolumn{9}{c}{Small-scale examples (discrete time)} \\ \midrule \multicolumn{1}{c}{} &
\multicolumn{4}{c}{Hybrid Optimization} & \multicolumn{2}{c}{Standard Algs.} & \multicolumn{2}{c}{\texttt{hinfnorm($\cdot$,tol)}}\\ \cmidrule(lr){2-5} \cmidrule(lr){6-7} \cmidrule(lr){8-9} \multicolumn{1}{c}{} &
\multicolumn{2}{c}{Newton} & \multicolumn{2}{c}{Secant} & \multicolumn{2}{c}{} & \multicolumn{2}{c}{\texttt{tol}}\\ \cmidrule(lr){2-3} \cmidrule(lr){4-5} \cmidrule(lr){8-9} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} & \multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{MP} &
\multicolumn{1}{c}{Interp.} & \multicolumn{1}{c}{BBBS} &
\multicolumn{1}{c}{\texttt{1e-14}} & \multicolumn{1}{c}{\texttt{0.01}} \\ \midrule \multicolumn{9}{c}{Wall-clock running times in seconds}\\ \midrule \texttt{LAHd} & 0.051 & 0.038 & 0.056 & 0.056 & 0.034 & 0.040 & \bfseries 0.031 & 0.015 \\ \texttt{BDT2d} & 0.075 & 0.123 & 0.146 & 0.191 & \bfseries 0.057 & 0.076 & 0.070 & 0.031 \\ \texttt{EB6d} & 0.271 & 0.312 & 0.409 & 0.296 & 0.469 & 0.732 & \bfseries 0.192 & 0.122 \\ \texttt{ISS1d} & 0.654 & \bfseries 0.636 & 0.828 & 0.880 & 1.168 & 1.291 & 16.495 & 3.930 \\ \texttt{CBMd} & 0.898 & \bfseries 0.795 & 0.999 & 1.640 & 2.015 & 1.420 & 1.411 & 0.773 \\ \texttt{CM5d} & \bfseries 7.502 & 8.022 & 9.887 & 8.391 & 9.458 & 14.207 & 10.802 & 2.966 \\ \midrule \multicolumn{9}{c}{Running times relative to hybrid optimization (Newton with `Interp.')}\\ \midrule \texttt{LAHd} & 1 & 0.74 & 1.09 & 1.11 & 0.67 & 0.78 & 0.60 & 0.29 \\ \texttt{BDT2d} & 1 & 1.65 & 1.96 & 2.55 & 0.76 & 1.01 & 0.93 & 0.41 \\ \texttt{EB6d} & 1 & 1.15 & 1.51 & 1.09 & 1.73 & 2.70 & 0.71 & 0.45 \\ \texttt{ISS1d} & 1 & 0.97 & 1.27 & 1.34 & 1.79 & 1.97 & 25.21 & 6.01 \\ \texttt{CBMd} & 1 & 0.89 & 1.11 & 1.83 & 2.24 & 1.58 & 1.57 & 0.86 \\ \texttt{CM5d} & 1 & 1.07 & 1.32 & 1.12 & 1.26 & 1.89 & 1.44 & 0.40 \\ \midrule Avg. & 1 & 1.08 & 1.38 & 1.51 & 1.41 & 1.66 & 5.08 & 1.40 \\ \bottomrule \end{tabular} \caption{The column headers remain as described for Table~\ref{table:times_dense}. } \label{table:times_dense_disc} \end{table}
\section{Local optimization for ${\hinfsym_\infty}$ norm approximation} \label{sec:hinf_approx} Unfortunately, the $\mathcal{O}(n^3)$ work necessary to compute all the imaginary eigenvalues of $(\Mc,\Nc)$ restricts the usage of the level-set ideas from \cite{Bye88,BoyBK89} to rather small-dimensional problems. The same computational limitation of course also holds for obtaining all of the unit-modulus eigenvalues of $(\Md,\Nd)$ in the discrete-time case. Currently there is no known alternative technique that would guarantee convergence to a global maximizer of $g_\mathrm{c}(\omega)$ or $g_\mathrm{d}(\theta)$, to thus ensure exact computation of the ${\hinfsym_\infty}$ norm, while also having more favorable scaling properties. Indeed, the aforementioned scalable methods of \cite{GugGO13,BenV14,FreSV14,MitO16,AliBMetal17} for approximating the ${\hinfsym_\infty}$ norm of large-scale systems all forgo the expensive operation of computing all the eigenvalues of $(\Mc,\Nc)$ and $(\Md,\Nd)$, and consequently, the most that any of them can guarantee in terms of accuracy is that they converge to a local maximizer of $g_\mathrm{c}(\omega)$ or $g_\mathrm{d}(\theta)$. However, a direct consequence of our work here to accelerate the exact computation of the ${\hinfsym_\infty}$ norm is that the straightforward application of optimization techniques to compute local maximizers of either $g_\mathrm{c}(\omega)$ or $g_\mathrm{d}(\theta)$ can itself be considered an efficient and scalable approach for approximating the ${\hinfsym_\infty}$ norm of large-scale systems. It is perhaps a bit staggering that such a simple and direct approach seems to have been until now overlooked, particularly given the sophistication of the existing ${\hinfsym_\infty}$ norm approximation methods.
In more detail, recall that the initialization phase of Algorithm~\ref{alg:hybrid_opt}, lines~\ref{algline:init_start}-\ref{algline:init_end}, is simply just applying unconstrained optimization to find one or more maximizers of $g_\mathrm{c}(\omega)$. Provided that $({\bf i} \omega E - A)$ permits fast linear solves, e.g. a sparse LU decomposition, there is no reason why this cannot also be done for large-scale systems. In fact, the methods of \cite{BenV14,FreSV14,AliBMetal17} for approximating the ${\hinfsym_\infty}$ norm all require that such fast solves are possible (while the methods of \cite{GugGO13,MitO16} only require fast matrix-vector products with the system matrices). When $m,p \ll n$, it is still efficient to calculate second derivatives of $g_\mathrm{c}(\omega)$ to obtain a quadratic rate of convergence via Newton's method. Even if $m,p \ll n$ does not hold, first derivatives of $g_\mathrm{c}(\omega)$ can still be computed using sparse methods for computing the largest singular value (and its singular vectors) and thus the secant method can be employed to at least get superlinear convergence. As such, the local convergence and superlinear/quadratic convergence rate guarantees of the existing methods are at least matched by the guarantees of direct optimization. For example, while the superlinearly-convergent method of \cite{AliBMetal17} requires that $m,p \ll n$, our direct optimization approach remains efficient even if $m,p \approx n$, when it also has superlinear convergence, and it has quadratic convergence in the more usual case of $m,p \ll n$.
Of course, there is also the question of whether there are differences in approximation quality between the methods. This is a difficult question to address since beyond local optimality guarantees, there are no other theoretical results concerning the quality of the computed solutions. Actual errors can only be measured when running the methods on small-scale systems, where the exact value of the ${\hinfsym_\infty}$ norm can be computed, while for large-scale problems, only relative differences between the methods' respective approximations can be observed. Furthermore, any of these observations may not be predictive of performance on other problems. For nonconvex optimization, the quality of a locally optimal computed solution is often dependent on the starting point, which will be a strong consideration for the direct optimization approach. On the other hand, it is certainly plausible that the sophistication of the existing ${\hinfsym_\infty}$ norm algorithms may favorably bias them to converge to better (higher) maximizers more frequently than direct optimization would, particularly if only randomly selected starting points were used. With such complexities, in this paper we do not attempt to do a comprehensive benchmark with respect to existing ${\hinfsym_\infty}$ norm approximation methods but only attempt to demonstrate that direct optimization is a potentially viable alternative.
We implemented lines~\ref{algline:init_start}-\ref{algline:init_end} of Algorithm~\ref{alg:hybrid_opt} in a second, standalone routine, with the necessary addition for the continuous-time case that
the value of $\|D\|_2$ is returned if the computed local maximizers of $g_\mathrm{c}(\omega)$ only yield lower function values than $\|D\|_2$. Since we assume that the choice of starting points will be critical, we initialized our sparse routine using starting frequencies computed by \texttt{samdp}, a {MATLAB}\ code that implements the subspace-accelerated dominant pole algorithm of \cite{RomM06}. Correspondingly, we compared our approach to the {MATLAB}\ code \texttt{hinorm}, which implements the spectral-value-set-based method using dominant poles of \cite{BenV14} and also uses \texttt{samdp} (to compute dominant poles at each iteration). We tested \texttt{hinorm} using its default settings, and since it initially computes 20 dominant poles to find a good starting point, we also chose to compute 20 dominant poles via \texttt{samdp} to obtain 20 initial frequency guesses for optimization.\footnote{Note that these are not necessarily the same 20 dominant poles, since \cite{BenV14} must first transform a system if the original system has nonzero $D$ matrix.} Like our small-scale experiments, we also ensured zero was always included as an initial guess and reused the same choices for \texttt{fmincon} parameter values. We tested our optimization approach by optimizing $\phi=1,5,10$ of the most promising frequencies, again using a serial MATLAB code. Since we used LU decompositions to solve the linear systems, we tested our code in two configurations: with and without permutations, i.e. for some matrix given by variable \texttt{A}, \texttt{[L,U,p,q] = lu(A,'vector')} and \texttt{[L,U] = lu(A)}, respectively.
Table~\ref{table:problems_large} shows our selection of large-scale test problems, all continuous-time since \texttt{hinorm} does not support discrete-time problems (in contrast to our optimization-based approach which supports both). Problems \texttt{dwave} and \texttt{markov} are from the large-scale test set used in \cite{GugGO13} while the remaining problems are freely available from the website of Joost Rommes\footnote{Available at \url{https://sites.google.com/site/rommes/software}}. As $m,p \ll n$ holds in all of these examples, we just present results for our code when using Newton's method. For all problems, our code produced ${\hinfsym_\infty}$ norm approximations that agreed to at least 12 digits with \texttt{hinorm}, meaning that the additional optimization calls done with $\phi=5$ and $\phi=10$ did not produce better maximizers than what was found with $\phi=1$ and thus, only added to the serial computation running time.
\begin{table}[!t] \centering
\begin{tabular}{ l | rrr | c } \toprule \multicolumn{5}{c}{Large-scale examples (continuous time)}\\ \midrule \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{$n$} &
\multicolumn{1}{c}{$p$} &
\multicolumn{1}{c}{$m$} &
\multicolumn{1}{c}{$E=I$} \\ \midrule \texttt{dwave} & 2048 & 4 & 6 & Y \\ \texttt{markov} & 5050 & 4 & 6 & Y \\ \texttt{bips98\_1450} & 11305 & 4 & 4 & N \\ \texttt{bips07\_1693} & 13275 & 4 & 4 & N \\ \texttt{bips07\_1998} & 15066 & 4 & 4 & N \\ \texttt{bips07\_2476} & 16861 & 4 & 4 & N \\ \texttt{descriptor\_xingo6u} & 20738 & 1 & 6 & N \\ \texttt{mimo8x8\_system} & 13309 & 8 & 8 & N \\ \texttt{mimo28x28\_system} & 13251 & 28 & 28 & N \\ \texttt{ww\_vref\_6405} & 13251 & 1 & 1 & N \\ \texttt{xingo\_afonso\_itaipu} & 13250 & 1 & 1 & N \\ \bottomrule \end{tabular} \caption{The list of test problems for the large-scale ${\hinfsym_\infty}$-norm approximation comparing direct local optimization against \texttt{hinorm}, along with the corresponding problem dimensions and whether they are standard state-space systems ($E=I$) or descriptor systems $(E \ne I)$.} \label{table:problems_large} \end{table}
\begin{table}[!t] \setlength{\tabcolsep}{3pt} \robustify\bfseries \center
\begin{tabular}{ l | SSS | SSS | S } \toprule \multicolumn{8}{c}{Large-scale examples (continuous time)}\\ \midrule \multicolumn{1}{c}{} &
\multicolumn{6}{c}{Direct optimization: $\phi=1,5,10$ } & \multicolumn{1}{c}{\texttt{hinorm}} \\ \cmidrule(lr){2-7} \cmidrule(lr){8-8} \multicolumn{1}{c}{} & \multicolumn{3}{c}{\texttt{lu} with permutations} & \multicolumn{3}{c}{\texttt{lu} without permutations} & \multicolumn{1}{c}{} \\ \cmidrule(lr){2-4} \cmidrule(lr){5-7} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{1} & \multicolumn{1}{c}{5} & \multicolumn{1}{c}{10} &
\multicolumn{1}{c}{1} & \multicolumn{1}{c}{5} & \multicolumn{1}{c}{10} & \multicolumn{1}{c}{} \\ \midrule \multicolumn{8}{c}{Wall-clock running times in seconds (initialized via \texttt{samdp})}\\ \midrule \texttt{dwave} & 1.979 & 1.981 & 1.997 & 5.536 & 5.154 & 5.543 & \bfseries 1.861 \\ \texttt{markov} & \bfseries 3.499 & 3.615 & 3.593 & 26.734 & 26.898 & 27.219 & 3.703 \\ \texttt{bips98\_1450} & \bfseries 6.914 & 8.333 & 10.005 & 14.559 & 17.157 & 19.876 & 31.087 \\ \texttt{bips07\_1693} & \bfseries 8.051 & 9.155 & 11.594 & 18.351 & 21.367 & 24.322 & 75.413 \\ \texttt{bips07\_1998} & \bfseries 10.344 & 11.669 & 13.881 & 50.059 & 56.097 & 59.972 & 51.497 \\ \texttt{bips07\_2476} & \bfseries 14.944 & 16.717 & 18.942 & 65.227 & 70.920 & 73.206 & 76.697 \\ \texttt{descriptor\_xingo6u} & 13.716 & 15.328 & 16.997 & \bfseries 7.907 & 9.225 & 11.133 & 36.775 \\ \texttt{mimo8x8\_system} & 7.566 & 8.934 & 11.211 & \bfseries 6.162 & 7.562 & 9.321 & 30.110 \\ \texttt{mimo28x28\_system} & 12.606 & 17.767 & 20.488 & \bfseries 10.815 & 16.591 & 21.645 & 33.107 \\ \texttt{ww\_vref\_6405} & 7.353 & 6.785 & 7.552 & \bfseries 4.542 & 5.076 & 5.437 & 18.553 \\ \texttt{xingo\_afonso\_itaipu} & 5.780 & 6.048 & 7.772 & \bfseries 4.676 & 4.975 & 5.573 & 16.928 \\ \midrule \multicolumn{8}{c}{Running times relative to direct optimization (\texttt{lu} with permutations and $\phi =1$)}\\ \midrule \texttt{dwave} & 1 & 1.00 & 1.01 & 2.80 & 2.60 & 2.80 & \bfseries 0.94 \\ \texttt{markov} & \bfseries 1 & 1.03 & 1.03 & 7.64 & 7.69 & 7.78 & 1.06 \\ \texttt{bips98\_1450} & \bfseries 1 & 1.21 & 1.45 & 2.11 & 2.48 & 2.87 & 4.50 \\ \texttt{bips07\_1693} & \bfseries 1 & 1.14 & 1.44 & 2.28 & 2.65 & 3.02 & 9.37 \\ \texttt{bips07\_1998} & \bfseries 1 & 1.13 & 1.34 & 4.84 & 5.42 & 5.80 & 4.98 \\ \texttt{bips07\_2476} & \bfseries 1 & 1.12 & 1.27 & 4.36 & 4.75 & 4.90 & 5.13 \\ \texttt{descriptor\_xingo6u} & 1 & 1.12 & 1.24 & \bfseries 0.58 & 0.67 & 0.81 & 2.68 \\ \texttt{mimo8x8\_system} & 1 & 1.18 & 1.48 
& \bfseries 0.81 & 1.00 & 1.23 & 3.98 \\ \texttt{mimo28x28\_system} & 1 & 1.41 & 1.63 & \bfseries 0.86 & 1.32 & 1.72 & 2.63 \\ \texttt{ww\_vref\_6405} & 1 & 0.92 & 1.03 & \bfseries 0.62 & 0.69 & 0.74 & 2.52 \\ \texttt{xingo\_afonso\_itaipu} & 1 & 1.05 & 1.34 & \bfseries 0.81 & 0.86 & 0.96 & 2.93 \\ \midrule Average & 1 & 1.12 & 1.30 & 2.52 & 2.74 & 2.97 & 3.70 \\ \bottomrule \end{tabular} \caption{In the top half of the table, the running times (fastest in bold) are reported in seconds for our direct Newton-method-based optimization approach in two configurations (\texttt{lu} with and without permutations) and \texttt{hinorm}. Each configuration of our approach optimizes the norm of the transfer function for up to $\phi$ different starting frequencies ($\phi=1,5,10$), done sequentially. The bottom half of the table normalizes all the times relative to the running times for our optimization method using \texttt{lu} with permutations and $\phi=1$, along with the overall averages relative to this variant. } \label{table:times_large} \end{table}
In Table~\ref{table:times_large}, we present the running times of the codes and configurations. First, we observe that for our direct optimization code, using \texttt{lu} with permutations is two to eight times faster than without permutations; on average, using \texttt{lu} with permutations is typically 2.5 times faster. Interestingly, on the last five problems, using \texttt{lu} without permutations was actually best, but using permutations was typically only about 25\% slower and at worst, about 1.7 times slower (\texttt{descriptor\_xingo6u}). We found that our direct optimization approach, using just one starting frequency ($\phi=1$) was typically 3.7 times faster than \texttt{hinorm} on average and almost up to 10 times faster on problem \texttt{bips07\_1693}. Only on problem \texttt{dwave} was direct optimization actually slower than \texttt{hinorm} and only by a negligible amount. Interestingly, optimizing just one initial frequency versus running optimization for ten frequencies ($\phi=10$) typically only increased the total running time of our code by 20--30\%. This strongly suggested that the dominant cost of running our code is actually just calling \texttt{samdp} to compute the 20 initial dominant poles to obtain starting guesses. As such, in Table~\ref{table:times_samdp}, we report the percentage of the overall running time for each variant/method that was due to their initial calls to \texttt{samdp}. Indeed, our optimization code's single call to \texttt{samdp} accounted for 81.5--99.3\% of its running time (\texttt{lu} with permutations and $\phi=1$). In contrast, \texttt{hinorm}'s initial call to \texttt{samdp} usually accounted for about only a quarter of its running time on average, excluding \texttt{dwave} and \texttt{markov} as exceptional cases. In other words, the convergent phase of direct optimization is actually even faster relative to the convergent phase of \texttt{hinorm} than Table~\ref{table:times_large} appears to indicate.
On problem \texttt{bips07\_1693}, we see that our proposal to use Newton's method to optimize $g_\mathrm{c}(\omega)$ directly is actually over 53 times faster than \texttt{hinorm}'s convergent phase.
\begin{table}[!t] \center
\begin{tabular}{ l | SSS | SSS | S } \toprule \multicolumn{8}{c}{Large-scale examples (continuous time)}\\ \midrule \multicolumn{8}{c}{Percentage of time just to compute 20 initial dominant poles (first call to \texttt{samdp})}\\ \midrule \multicolumn{1}{c}{} &
\multicolumn{6}{c}{Direct optimization: $\phi=1,5,10$} & \multicolumn{1}{c}{\texttt{hinorm}} \\ \cmidrule(lr){2-7} \cmidrule(lr){8-8} \multicolumn{1}{c}{} & \multicolumn{3}{c}{\texttt{lu} with permutations} & \multicolumn{3}{c}{\texttt{lu} without permutations} & \multicolumn{1}{c}{} \\ \cmidrule(lr){2-4} \cmidrule(lr){5-7} \multicolumn{1}{l}{Problem} &
\multicolumn{1}{c}{1} & \multicolumn{1}{c}{5} & \multicolumn{1}{c}{10} &
\multicolumn{1}{c}{1} & \multicolumn{1}{c}{5} & \multicolumn{1}{c}{10} & \multicolumn{1}{c}{} \\ \midrule \texttt{dwave} & 99.3 & 99.3 & 99.2 & 99.3 & 99.6 & 99.6 & 98.0 \\ \texttt{markov} & 99.2 & 99.1 & 99.1 & 99.6 & 99.6 & 99.6 & 98.1 \\ \texttt{bips98\_1450} & 84.1 & 72.7 & 60.4 & 84.9 & 73.0 & 61.8 & 21.7 \\ \texttt{bips07\_1693} & 84.2 & 75.1 & 61.6 & 85.6 & 77.1 & 64.4 & 10.1 \\ \texttt{bips07\_1998} & 85.2 & 77.1 & 66.0 & 88.5 & 81.5 & 72.8 & 18.3 \\ \texttt{bips07\_2476} & 88.0 & 80.2 & 71.4 & 89.2 & 83.6 & 73.0 & 18.6 \\ \texttt{descriptor\_xingo6u} & 90.2 & 82.8 & 75.0 & 89.3 & 79.9 & 66.8 & 38.4 \\ \texttt{mimo8x8\_system} & 84.6 & 74.4 & 59.1 & 84.1 & 67.7 & 55.0 & 24.2 \\ \texttt{mimo28x28\_system} & 81.5 & 59.4 & 51.2 & 75.5 & 46.9 & 39.1 & 40.9 \\ \texttt{ww\_vref\_6405} & 95.1 & 79.6 & 63.9 & 91.9 & 78.3 & 75.1 & 33.7 \\ \texttt{xingo\_afonso\_itaipu} & 90.2 & 84.4 & 76.8 & 89.3 & 83.7 & 74.7 & 33.6 \\ \bottomrule \end{tabular} \caption{The column headings are the same as in Table~\ref{table:times_large}.} \label{table:times_samdp} \end{table}
\section{Parallelizing the algorithms} \label{sec:parallel} The original BBBS algorithm, and the cubic-interpolation refinement, only provide little opportunity for parallelization at the \emph{algorithmic} level, i.e. when not considering that the underlying basic linear algebra operations may be parallelized themselves when running on a single shared-memory multi-core machine. Once the imaginary eigenvalues\footnote{ For conciseness, our discussion in Section~\ref{sec:parallel} will be with respect to the continuous-time case but note that it applies equally to the discrete-time case as well.} have been computed, constructing the level-set intervals (line~\ref{algline:bbbs_ints} of Algorithm~\ref{alg:bbbs}) and calculating $g_\mathrm{c}(\omega)$ at their midpoints or cubic-interpolant-derived maximizers (line~\ref{algline:bbbs_points} of Algorithm~\ref{alg:bbbs}) can both be done in an embarrassingly parallel manner, e.g. across nodes on a cluster. However, as we have discussed to motivate our improved algorithm, evaluating $g_\mathrm{c}(\omega)$ is a rather cheap operation compared to computing the eigenvalues of $(\Mc,\Nc)$. Crucially, parallelizing these two steps does not result in an improved (higher) value of $\gamma$ found per iteration and so the number of expensive eigenvalue computations of $(\Mc,\Nc)$ remains the same.
For our new method, we certainly can (and should) also parallelize the construction of the level-set intervals and the evaluations of their midpoints or cubic-interpolants-derived maximizers (lines~\ref{algline:hyopt_ints} and \ref{algline:hyopt_points} in Algorithm~\ref{alg:hybrid_opt}), despite that we do not expect large gains to be had here. However, optimizing over the intervals (line~\ref{algline:hyopt_opt} in Algorithm~\ref{alg:hybrid_opt}) is also an embarrassingly parallel task and here significant speedups can be obtained. As mentioned earlier, with serial computation (at the algorithmic level), we typically recommend only optimizing over a single level-set interval ($\phi=1$) out of the $q$ candidates (the most promising one, as determined by line~\ref{algline:hyopt_reorder} in Algorithm~\ref{alg:hybrid_opt}); otherwise, the increased number of evaluations of $g_\mathrm{c}(\omega)$ can start to outweigh the benefits of performing the local optimization. By optimizing over more intervals in parallel, e.g. again across nodes on a cluster, we increase the chances on every iteration of finding even higher peaks of $g_\mathrm{c}(\omega)$, and possibly a global maximum, \emph{without any increased time penalty} (besides communication latency).\footnote{Note that distributing the starting points for optimization and taking the max of the resulting optimizers involves very little data being communicated on any iteration.} In turn, larger steps in $\gamma$ can be taken, potentially reducing the number of expensive eigenvalue computations of $(\Mc,\Nc)$ incurred. Furthermore, parallelization can also be applied to the initialization stage to optimize from as many starting points as possible without time penalty (lines~\ref{algline:init_start} and \ref{algline:init_opt} in Algorithm~\ref{alg:hybrid_opt}), a standard technique for nonconvex optimization problems. 
Finding a global maximum of $g_\mathrm{c}(\omega)$ during initialization means that the algorithm will need to compute the eigenvalues of $(\Mc,\Nc)$ just once, to assert that the maximum found is indeed a global one.
When using direct local optimization techniques for ${\hinfsym_\infty}$ approximation, as discussed in Section~\ref{sec:parallel}, optimizing from as many starting points as possible of course also increases the chances of finding the true value of the ${\hinfsym_\infty}$ norm, or at least better approximations than just starting from one point. With parallelization, these additional starting points can also be tried without any time penalty (also lines~\ref{algline:init_start} and \ref{algline:init_opt} in Algorithm~\ref{alg:hybrid_opt}), unlike the experiments we reported in Section~\ref{sec:hinf_approx} where we optimized using $\phi=1,5,10$ starting guesses with a serial MATLAB code and therefore incurred longer running times as $\phi$ was increased.
For final remarks on parallelization, first note that there will generally be fewer benefits when using more than $n$ parallel optimization calls, since there are at most $n$ peaks of $g_\mathrm{c}(\omega)$. However, for initialization, one could simply try as many starting guesses as there are parallel nodes available (even if the number of nodes is greater than $n$) to maximize the chances of finding a high peak of $g_\mathrm{c}(\omega)$ or a global maximizer. Second, the number of level-set intervals encountered by the algorithm at each iteration may be significantly less than $n$, particularly if good starting guesses are used. Indeed, it is not entirely uncommon for the algorithm to only encounter one or two level-set intervals on each iteration. On the other hand, for applications where $g_\mathrm{c}(\omega)$ has many similarly high peaks, such as controller design where the ${\hinfsym_\infty}$ norm is minimized, our new algorithm may consistently benefit from parallelization with a higher number of parallel optimization calls.
\section{Conclusion and outlook} \label{sec:wrapup} We have presented an improved algorithm that significantly reduces the time necessary to compute the ${\hinfsym_\infty}$ norm of linear control systems compared to existing algorithms. Furthermore, our proposed hybrid optimization approach also allows the ${\hinfsym_\infty}$ norm to be computed to machine precision with relatively little extra work, unlike earlier methods. We have also demonstrated that approximating the ${\hinfsym_\infty}$ norm of large-scale problems via directly optimizing the norm of the transfer function is not only viable but can be quite efficient. In contrast to the standard BBBS and cubic-interpolation refinement algorithms, our new approaches for ${\hinfsym_\infty}$ norm computation and approximation also can benefit significantly more from parallelization. Work is ongoing to add implementations of our new algorithms to a future release of the open-source library ROSTAPACK: RObust STAbility PACKage.\footnote{Available at \url{http://www.timmitchell.com/software/ROSTAPACK}} This is being done in coordination with our efforts to also add implementations of our new methods for computing the spectral value set abscissa and radius, proposed in \cite{BenM17a} and which use related ideas to those in this paper. The current v1.0 release of ROSTAPACK contains implementations of scalable algorithms for approximating all of these aforementioned measures \cite{GugO11,GugGO13,MitO16}, as well as variants where the uncertainties are restricted to be real valued \cite{GugGMetal17}.
Regarding our experimental observations, the sometimes excessively longer compute times for \texttt{hinfnorm} compared to all other methods we evaluated possibly indicates that the structure-preserving eigensolver that it uses can sometimes be much slower than QZ. This certainly warrants further investigation, and if confirmed, suggests that optimizing the code/algorithm of the structure-preserving eigensolver could be a worthwhile pursuit. In the large-scale setting, we have observed that the dominant cost for our direct optimization approach is actually due to obtaining the starting frequency guesses via computing dominant poles. If the process of obtaining good initial guesses can be accelerated, then approximating the ${\hinfsym_\infty}$ norm via direct optimization could be significantly sped up even more.
\end{document} |
\begin{document}
\title{Learning Deterministic Finite Automata Decompositions from Examples and Demonstrations \thanks{This work was partially supported by NSF grants 1545126 (VeHICaL) and 1837132, by the DARPA contracts FA8750-18-C-0101 (Assured Autonomy) and FA8750-20-C-0156 (SDCPS), by Berkeley Deep Drive, by Toyota under the iCyPhy center, and by Toyota Research Institute.}}
\author{
\IEEEauthorblockN{
Niklas Lauffer\thanks{\textsuperscript{\IEEEauthorrefmark{1}}Equal contribution}\textsuperscript{\IEEEauthorrefmark{1}}\IEEEauthorrefmark{2},
Beyazit Yalcinkaya\textsuperscript{\IEEEauthorrefmark{1}}\IEEEauthorrefmark{2},
Marcell Vazquez-Chanlatte\IEEEauthorrefmark{2},
Ameesh Shah\IEEEauthorrefmark{2} and
Sanjit A. Seshia\IEEEauthorrefmark{2}
}\\
\IEEEauthorblockA{\IEEEauthorrefmark{2}University of California, Berkeley} }
\maketitle
\begin{abstract} The identification of a \emph{deterministic finite automaton} (DFA) from labeled examples is a well-studied problem in the literature; however, prior work focuses on the identification of monolithic DFAs. Although monolithic DFAs provide accurate descriptions of systems’ behavior, they lack simplicity and interpretability; moreover, they fail to capture sub-tasks realized by the system and introduce inductive biases away from the inherent decomposition of the overall task. In this paper, we present an algorithm for learning conjunctions of DFAs from labeled examples. Our approach extends an existing SAT-based method to systematically enumerate Pareto-optimal candidate solutions. We highlight the utility of our approach by integrating it with a state-of-the-art algorithm for learning DFAs from demonstrations. Our experiments show that the algorithm learns sub-tasks realized by the labeled examples, and it is scalable in the domains of interest. \end{abstract}
\section{Introduction}
Grammatical inference is a mature and well-studied field with many application domains ranging from various computer science fields, e.g., machine learning, to areas of natural sciences, e.g. computational biology~\cite{de2005bibliographical}. The identification of a minimum size \emph{deterministic finite automaton} (DFA) from labeled examples is one of the most well-investigated problems in this field. Furthermore, with the increase in computational power in recent years, the problem can be solved efficiently by various tools available in the literature (e.g.,~\cite{verwer2017flexfringe,zakirzyanov2019efficient}).
Existing work on DFA identification primarily focuses on the monolithic case, i.e., learning a single DFA from examples. Although such DFAs capture a language consistent with the examples, they may lack simplicity and interpretability. Furthermore, complex tasks often decompose into independent sub-tasks; hence, the system traces implicitly reflect this behavior. However, monolithic DFA identification fails to capture the natural decomposition of the system behavior, introducing an inductive bias away from the inherent decomposition of the overall task. In this paper, we present an algorithm for learning \emph{DFA decompositions} from examples by reducing the problem to graph coloring in SAT and a Pareto-optimal solution search over candidate solutions. A DFA decomposition is a set of DFAs such that the \emph{intersection} of their languages is the language of the system, which implicitly defines a conjunction of simpler specifications realized by the overall system. We present an application of our algorithm to a state-of-the-art method for learning task specifications from unlabeled demonstrations~\cite{marcell2021DISS} to showcase a domain of interest for DFA decompositions.
\textbf{Related Work.} Existing work considers the problem of minimal DFA identification from labeled examples~\cite{de2005bibliographical}. It is shown that the DFA identification problem with a given upper bound on the number of states is an NP-complete problem~\cite{gold1978complexity}. Another work shows that this problem cannot be efficiently approximated~\cite{pitt1993minimum}. Fortunately, practical methods exist in the literature. A common approach is to apply the evidence driven state-merging algorithm~\cite{lang1998results,lang1999faster,bugalho2005inference}, which is a greedy algorithm that aims to find a good local optimum. Other works for learning DFAs use evolutionary computation~\cite{dupont1994regular,luke1999genetic}, later improved by multi-start random hill climbing~\cite{lucas2003learning}.
A different approach to the monolithic DFA identification is to leverage highly-optimized modern SAT solvers by encoding the problem in SAT~\cite{heule2010exact}. In follow up works, several symmetry breaking predicates are proposed for the SAT encoding to reduce the search space \cite{zakirzyanov2019efficient,ulyantsev2015bfs,ulyantsev2016symmetry,zakirzyanov2017finding}. However, to the best of our knowledge, no work considers directly learning DFA decompositions from examples and demonstrations.
This work also relates to the problem of decomposing a known automaton. Ashar et al.~\cite{ashar1992finite} explore computing cascade and general decomposition of finite state machines. The Krohn–Rhodes theorem \cite{rhodes2010applications} reduces a finite automaton into a cascade of irreducible automata.
Kupferman \& Mosheiff~\cite{kupferman2015prime} present various complexity results for DFA decomposability.
Finally, the problem of learning objectives from demonstrations of an expert dates back to the problem of Inverse Optimal Control~\cite{kalman1964linear} and, more recently in the artificial intelligence community, the problem of Inverse Reinforcement Learning (IRL)~\cite{ng2000irl}. The goal in IRL is to recover the unknown reward function that an expert agent is trying to maximize based on observations of that expert. Recently, several works have considered a version of the IRL problem in which the expert agent is trying to maximize the satisfaction of a Boolean task specification~\cite{kasenberg2017apprenticeship,chou2020explaining, marcell2021DISS}. However, no work considers learning \emph{decompositions} of specifications from demonstrations.
\section{Problem Formulation}
Let $\mathcal{D}$ denote the set of DFAs over some fixed alphabet $\Sigma$. An $(m_1, \ldots, m_n)$-\emph{DFA decomposition} is a tuple of $n$ DFAs $({\mathcal{A}}_1, \dots, {\mathcal{A}}_n) \in \mathcal{D}^n$ where ${\mathcal{A}}_i$ has $m_i$ states and $m_1 \leq m_2 \leq \dots \leq m_n$. We associate a partial order $\prec$ on DFA decompositions using the standard product order on the number of states. That is, $({\mathcal{A}}_1', \dots, {\mathcal{A}}_n') \prec ({\mathcal{A}}_1, \dots, {\mathcal{A}}_n)$, if $m_i' \leq m_i$ for all $i \in [n]$ and $m_j' < m_j$ for some $j \in [n]$. In this case, we say $({\mathcal{A}}_1', \dots, {\mathcal{A}}_n')$ \emph{dominates} $({\mathcal{A}}_1, \dots, {\mathcal{A}}_n)$. A DFA decomposition $({\mathcal{A}}_1, \ldots, {\mathcal{A}}_n)$ \emph{accepts} a string $w$ iff all ${\mathcal{A}}_i$ accept $w$. A string that is not accepted is \emph{rejected}. The \emph{language} of a decomposition, ${\mathcal{L}}({\mathcal{A}}_1, \ldots, {\mathcal{A}}_n)$, is the set of accepting strings, i.e., the intersection of all DFA languages.
We study the problem of finding a DFA decomposition from a set of positive and negative labeled examples such that the decomposition accepts the positive examples and rejects the negative examples. Next, we formally define \emph{the DFA decomposition identification problem} (\texttt{DFA-DIP}\xspace), and then present an overview of the proposed approach.
\begin{problembox}{The Deterministic Finite Automaton Decomposition Identification Problem (\texttt{DFA-DIP}\xspace).} Given positive examples, $D_+$ and negative examples, $D_-$, and a natural number $n \in \mathbb{N}$, find a $(m_1, \ldots, m_n)$-DFA decomposition $({\mathcal{A}}_1, \dots, {\mathcal{A}}_n)$ satisfying the following conditions. \begin{enumerate}[label=\textbf{(C\arabic*)}]
\item\label{problem:language_condition} The decomposition is consistent with $(D_+, D_-)$:
\[
\begin{split}
&D_+ \subseteq {\mathcal{L}}({\mathcal{A}}_1, {\mathcal{A}}_2, \dots, {\mathcal{A}}_n),\\
&D_- \subseteq \Sigma^* \setminus {\mathcal{L}}({\mathcal{A}}_1, {\mathcal{A}}_2, \dots, {\mathcal{A}}_n).
\end{split}
\]
\item\label{problem:numbers_of_states_condition} There does not exist a DFA decomposition that \emph{dominates} $({\mathcal{A}}_1, \dots, {\mathcal{A}}_n)$ and satisfies \ref{problem:language_condition}.
\end{enumerate} \end{problembox}
We refer to the set of DFA decompositions that solve an instance of \texttt{DFA-DIP}\xspace as the Pareto-optimal frontier of solutions. Note that for $n=1$, \texttt{DFA-DIP}\xspace reduces to monolithic DFA identification. We propose finding the set of DFA decompositions that solve \texttt{DFA-DIP}\xspace by reduction to graph coloring in SAT and a breadth first search in solution space. Specifically, we extend the existing work on SAT-based monolithic DFA identification~\cite{heule2010exact,ulyantsev2016symmetry} to finding $n$ DFAs with $m_1, \dots, m_n$ states such that the intersection of their languages is consistent with the given examples. On top of this SAT-based approach, we develop a search strategy over the numbers of states passed to the SAT solver as these values are not known a priori.
\section{Learning DFAs from Examples}
In this section, we present the proposed approach. We start with the SAT encoding of the DFA decomposition problem and continue with the Pareto frontier search in the solution space. We then showcase an example of learning conjunctions of DFAs from labeled examples. Finally, we present experimental results and evaluate the scalability of our method.
\subsection{Encoding \texttt{DFA-DIP}\xspace in SAT}
We extend the SAT encoding for monolithic DFA identification presented in~\cite{heule2010exact,ulyantsev2016symmetry}, which solves a graph coloring problem, to finding $n$ DFAs with $m_1, m_2, \dots, m_n$ states.
The extension relies on the observation that for conjunctions of DFAs, we need to enforce that a positive example must be accepted by \emph{all} DFAs, and a negative example must be rejected by \emph{at least} one of the DFAs. Due to space limitations, we only present the modified clauses of the encoding, and invite the reader to \Cref{sec:appendix} for further details.
The encoding works on an \emph{augmented prefix tree acceptor} (APTA), a tree-shaped automaton constructed from given examples, which has paths for each example leading to accepting or rejecting states based on the example's label; therefore, an APTA defines $D_+$ and $D_-$ which then constrains the accepting states, rejecting states, and the transition function of the unknown DFAs. For each DFA, ${\mathcal{A}}_i$, the encoding will associate the APTA states with one of the $m_i$ colors for DFA ${\mathcal{A}}_i$, subject to the constraints imposed by $D_+$ and $D_-$. APTA states with the same (DFA-indexed) color will be the same state in the corresponding DFA. We refer to states of an APTA as $V$, its accepting states as $V_{+}$, and its rejecting states as $V_{-}$. Given $n$ for the number of DFAs and $m_1, \dots, m_n$ for the number of states of DFAs, the SAT encoding uses three types of variables: \begin{enumerate}
\item \emph{color} variables $x^{k}_{v,i} \equiv 1$ ($k \in [n]$; $v \in V$; $i \in [m_k]$) iff APTA state $v$ has color $i$ in DFA $k$,
\item \emph{parent relation} variables $y^{k}_{l,i,j} \equiv 1$ ($k \in [n]$; $l \in \Sigma$, where $\Sigma$ is the alphabet; $i, j \in [m_k]$) iff DFA $k$ transitions with symbol $l$ from state $i$ to state $j$, and
\item \emph{accepting color} variables $z^{k}_{i} \equiv 1$ ($k \in [n]$; $i \in [m_k]$) iff state $i$ of DFA $k$ is an accepting state. \end{enumerate} The encoding for the monolithic DFA identification also uses the same variable types; however, in our encoding, we also index variables over $n$ DFAs instead of a single DFA. With this extension, one can trivially instantiate the encoding presented in \cite{heule2010exact,ulyantsev2016symmetry}. Below, we list the new rules we define for our problem. For the complete list of rules, see~\Cref{sec:appendix}. \begin{enumerate}[label=\textbf{(R\arabic*)},leftmargin=23pt]
\item\label{rule1} A negative example must be rejected by \emph{at least} one DFA:
\[
\bigwedge_{v \in V_{-}} \bigvee_{k \in [n]} \bigwedge_{i \in [m_k]} x^{k}_{v, i} \implies \neg z^{k}_{i}.
\]
\item\label{rule2} Accepting and rejecting states of APTA cannot be merged:
\[
\bigwedge_{v_{-} \in V_{-}} \bigwedge_{v_{+} \in V_{+}} \bigwedge_{k \in [n]} \bigwedge_{i \in [m_k]} (x^{k}_{v_{-}, i} \wedge \neg z^{k}_{i}) \implies \neg x^{k}_{v_{+}, i}.
\] \end{enumerate} In the encoding of \cite{heule2010exact,ulyantsev2016symmetry}, we replace the rule stating that the resulting DFA must reject all negative examples with \ref{rule1}, and \ref{rule2} is used instead of the original rule stating that accepting and rejecting states of APTA cannot be merged. Notice that since a rejecting state of APTA is not necessarily a rejecting state of a DFA $k$, we need to use the new rule~\ref{rule2}.
\begin{theorem}\label{thm:sat} Given labeled examples with $n$ and $m_1, \dots, m_n$, a solution to our SAT encoding is a solution to \texttt{DFA-DIP}\xspace. \end{theorem}
See \Cref{appendix:proof_main} for the proof of \Cref{thm:sat}.
\begin{figure*}
\caption{Experiment results evaluating the scalability of our algorithm w.r.t. (a) number of DFAs implied by the examples and (b) number of labeled examples.}
\label{fig:vary_dfas}
\label{fig:vary_examples}
\label{fig:exps}
\end{figure*}
\subsection{Pareto Frontier Search}
\texttt{DFA-DIP}\xspace requires finding a conjunction of $n$ DFAs that identify a language. There may exist multiple DFA decompositions that solve the problem with varying number of states $m_1, m_2, \dots, m_n$. With only a single DFA, the notion of minimal size is well-captured by the number of states. However, with multiple DFAs in the decomposition, the notion of a minimal solution is less clear. For example, there may exist a decomposition of two size three DFAs, and a separate decomposition of a size two and a size four DFA, both identifying the given set of labeled examples. Neither solution is strictly smaller than the other, and either solution might be preferred in different scenarios. Therefore, the set of solutions to \texttt{DFA-DIP}\xspace form a Pareto-optimal frontier in solution space.
Our proposed Pareto frontier enumeration algorithm is a breadth-first search (BFS) over DFA decomposition size tuples that skips tuples that are dominated by an existing solution. This BFS is over a directed acyclic graph $G = (V,E)$ formed in the following way. There is a vertex in the graph for every ordered tuple of state sizes. There is an edge from $(m_1, m_2, \dots, m_n)$ to $(m'_1, m'_2, \dots, m'_n)$ if there exists some $j \in [n]$ such that:
\begin{equation*}
m'_i =
\begin{cases}
m_i + 1 & \text{if $i = j$;} \\
m_i & \text{otherwise.}
\end{cases} \end{equation*} A size tuple $(m_1, \dots, m_n)$ is a sink, i.e., the search does not continue past this vertex, if there exists a $(m_1, \dots, m_n)$-decomposition that solves \texttt{DFA-DIP}\xspace or the size tuple is dominated by a previously traversed solution. In the prior case, the associated DFA decomposition is also returned as a solution on the Pareto-optimal frontier. The BFS starts from $m_1 = m_2 = \dots = m_n = 1$, and performs the search as explained. See \Cref{appendix:pareto_search} for the details of the algorithm.
\begin{theorem}\label{thm:pareto} The described BFS is sound and complete; it outputs the full Pareto-optimal frontier of solutions without returning any dominated solutions. \end{theorem}
See \Cref{appendix:proof_pareto} for the proof of \Cref{thm:pareto}.
\subsection{Example: Learning Partially-Ordered Tasks}\label{subsec:toy_example}
We continue with a toy example showcasing the capabilities of the proposed approach. Later, we use the same class of decompositions to evaluate the scalability of our algorithm. \setlength{\columnsep}{10pt} \begin{wrapfigure}[13]{l}{0.25\textwidth} \centering
\subfloat[Learned DFA recognizing the ordering between ${\color{yellow}\blacksquare}$ and ${\color{redTileColor}\blacksquare}$.\label{fig:toy_example_dfa1}]{
\includegraphics[width=\linewidth]{figures/dfa1.pdf}}
\subfloat[Learned DFA recognizing the ordering between ${\color{blueTileColor}\blacksquare}$ and ${\color{brownTileColor}\blacksquare}$.\label{fig:toy_example_dfa2}]{
\includegraphics[width=\linewidth]{figures/dfa2.pdf}}
\caption{Learned DFA decomposition.} \label{fig:toy_example} \end{wrapfigure} Inspired from the multi-task reinforcement learning literature~\cite{vaezipoor2021ltl2action}, our example focuses on partially-ordered temporal tasks executed in parallel. Specifically, consider a case where an agent is performing two ordering tasks in parallel: \begin{enumerate*}[label=(\roman*)]
\item observe ${\color{yellow}\blacksquare}$ before ${\color{redTileColor}\blacksquare}$, and
\item observe ${\color{blueTileColor}\blacksquare}$ before ${\color{brownTileColor}\blacksquare}$. \end{enumerate*} A positive example of such behavior is simply any sequence of observations ensuring both of the given orderings, e.g. ${\color{yellow}\blacksquare}{\color{redTileColor}\blacksquare}{\color{blueTileColor}\blacksquare}{\color{brownTileColor}\blacksquare}$, and a negative example is any sequence that fails to satisfy both orderings, e.g. ${\color{yellow}\blacksquare}{\color{redTileColor}\blacksquare}{\color{brownTileColor}\blacksquare}{\color{blueTileColor}\blacksquare}$. We generate such positive and negative examples and feed them to our algorithm. \Cref{fig:toy_example} presents the learned DFAs recognizing ordering sub-tasks of the example. The intersection of their languages is consistent with the given observations, and their conjunction is the overall task realized by the system generating the traces. The monolithic DFA recognizing the same language has nine states, and is more complicated, see \Cref{fig:monolithic_dfa} in \Cref{appendix:figs}.
\subsection{Experimental Evaluation}\label{subsec:dfa_eval}
We evaluate the scalability of our algorithm through experiments with changing sizes of partially-ordered tasks introduced in \Cref{subsec:toy_example}. In our evaluation, we aim to answer two questions: \begin{enumerate*}[label=\textbf{(Q\arabic*)}]
\item\label{q1} ``How does solving time scale with the number of ordering tasks?'', and
\item\label{q2} ``How does solving time scale with the number of labeled examples?''.
\end{enumerate*} We implement our algorithm in Python with PySAT~\cite{imms-sat18}, and we use Glucose4~\cite{een2003extensible} as the SAT solver. Our baseline is an implementation of the monolithic DFA identification encoding from~\cite{heule2010exact,ulyantsev2016symmetry} with the same software as our implementation. Experiments are performed on a Quad-Core Intel i7 processor clocked at 2.3 GHz and a 32 GB main memory.
To evaluate the scalability, we randomly generate positive and negative examples with varying problem sizes. For \ref{q1}, we generate 10 (half of which is positive and half of which is negative) partially-ordered task examples with (i) 2 symbols, and (ii) 4 symbols, and we vary the number of DFAs from 2 to 12. For \ref{q2}, we generate 10 to 20 partially-ordered task examples with (i) 2 symbols and 4 DFAs, and (ii) 4 symbols and 2 DFAs. Half of these examples are positive and the other half is negative. Since the examples are generated randomly, we run the experiments for 10 different random seeds and report the average. We set the timeout limit to 10 minutes, and stop when our algorithm timeouts for all random seeds.
\begin{figure*}
\caption{\Cref{fig:gridworld} shows the stochastic grid world environment. \Cref{fig:diss_exp:examples} shows the positive and negative examples of the expert's behavior conjectured by DISS and \Cref{fig:dfa_diss1,fig:dfa_diss2,fig:dfa_diss3} showcases the associated DFA decomposition identified by our algorithm. \Cref{fig:diss} shows the monolithic DFA learned in~\cite{marcell2021DISS}.}
\label{fig:diss_exp}
\label{fig:gridworld}
\label{fig:diss_exp:examples}
\label{fig:dfa_diss1}
\label{fig:dfa_diss2}
\label{fig:dfa_diss3}
\label{fig:diss}
\end{figure*}
\Cref{fig:vary_dfas} presents the experiment results answering \ref{q1}, where we vary the number of DFAs implied by the given examples. For partially-ordered tasks with 2 symbols, green solid line is the (monolithic DFA) baseline and the blue solid is our algorithm. Similarly, for partially-ordered tasks with 4 symbols, pink dashed line is the baseline and the red dashed line is our algorithm. \Cref{fig:vary_examples} presents the experiment results answering \ref{q2}, where we vary the number of examples. For partially-ordered tasks with 2 symbols and 4 DFAs, green solid line is the baseline and the blue solid is our algorithm; for partially-ordered tasks with 4 symbols and 2 DFAs, pink dashed line is the baseline and the red dashed line is our algorithm. As expected, the baseline scales better than our algorithm as we also search for the Pareto frontier and solve an inherently harder problem. Notice that given 10 examples, our algorithm is able to scale up to 11 DFAs for tasks with 2 symbols, and 8 DFAs for tasks with 4 symbols; for 2 symbols and 4 DFAs, it is able to scale up to 60 examples, and for 4 symbols and 2 DFAs, it is able to scale up to 190 examples. As we demonstrate in the next section, these limits for scalability are practically useful in certain domains.
\section{Learning DFAs from Demonstrations}
Next, we show how our algorithm can be incorporated into Demonstration Informed Specification Search (DISS) - a framework for learning languages from expert demonstrations~\cite{marcell2021DISS}. For our purposes a \emph{demonstration} is an unlabeled path through a workspace that maps to a string and is biased towards being accepting by some unknown language. For example, we ran our implementation of DISS using demonstrations produced by an expert attempting to accomplish a task in a stochastic grid world environment, the same example used in \cite{marcell2021DISS} and shown in~\Cref{fig:gridworld}. At each step, the agent can move in any of the four cardinal directions, but because of wind blowing from the north to the south, with some probability, the agent will transition to the space south of it in spite of its chosen action. Two demonstrations of the task ``Reach ${\color{yellow}\blacksquare}$ while avoiding ${\color{redTileColor}\blacksquare}$. If it ever touches ${\color{blueTileColor}\blacksquare}$, it must then touch ${\color{brownTileColor}\blacksquare}$ before reaching ${\color{yellow}\blacksquare}$.'' are shown in~\Cref{fig:gridworld}.
In order to efficiently search for tasks, DISS reduces the learning from demonstrations problem into a series of identification problems to be solved by a black-box identification algorithm. The goal of DISS is to find a task that minimizes the joint description length, called the energy, of the task and the demonstrations assuming the agent were performing said task. The energy is measured in bits to encode an object.
Below, we reproduce the results from~\cite{marcell2021DISS}, but using our algorithm as the task identifier rather than the monolithic DFA identifier provided. The use of DFA decompositions biases DISS to conjecture concepts that are \emph{simpler} to express in terms of a DFA decomposition. To define the description length of DFA decompositions, we adapt the DFA encoding used in~\cite{marcell2021DISS} by expressing a decomposition as the concatenation of the encodings of the individual DFAs. To remove unnecessary redundancy two optimizations were performed. First common headers, e.g. indicating the alphabet size, were combined. Second, as the DFAs in a decomposition are ordered by size, we expressed changes in size rather than absolute size, see \Cref{appendix:encode_sizes} for details.
\subsection{Experimental Evaluation}\label{subsec:diss_eval}
In~\Cref{fig:dfa_diss1,fig:dfa_diss2,fig:dfa_diss3} we present the learned DFA decomposition along with the corresponding labeled examples (\Cref{fig:diss_exp:examples}) conjectured by DISS to explain the expert behavior. Importantly, this decomposition exactly captures the demonstrated task. We note that this is in contrast to the DFA learned in~\cite{marcell2021DISS}, shown in~\Cref{fig:diss}, which allows visiting ${\color{redTileColor}\blacksquare}$ after visiting ${\color{yellow}\blacksquare}$. Further, we remark that the time required to learn the monolithic and decomposed DFAs was comparable. In particular, the number of labeled examples was less than 60 and, as with the monolithic baseline, most of the time is not spent in task identification, but instead conjecturing the labeled examples. As we saw in~\Cref{subsec:dfa_eval}, this number of examples is easily handled by our SAT-based identification algorithm. Finally, the number of labeled examples that needed to be conjectured to find low energy tasks was similar for both implementations (see~\Cref{fig:diss_exp:plot,fig:diss_exp:inc_plot} in~\Cref{appendix:figs}). Thus, our variant of DISS performed similarly to the monolithic variant, while finding DFAs that exactly represented the task.
\section{Conclusion} To the best of our knowledge, this work presents the first approach for solving \texttt{DFA-DIP}\xspace. Our algorithm works by reducing the problem to a Pareto-optimal search over the space of the numbers of states in a DFA decomposition, with a SAT call in the inner loop. The SAT-based encoding is based on an efficient reduction to graph coloring. We demonstrated the scalability of our algorithm on a class of problems inspired by the multi-task reinforcement learning literature and showed that the additional computational cost for identifying DFA decompositions over monolithic DFAs is not prohibitive. Finally, we showed how identifying DFA decompositions can provide a useful inductive bias while learning from demonstrations.
\appendix\label{sec:appendix}
\subsection{Complete SAT Encoding of \texttt{DFA-DIP}\xspace} Below, we list the complete SAT encoding of the \texttt{DFA-DIP}\xspace. Observe that the encoding extends the SAT encoding for monolithic DFA identification presented in~\cite{heule2010exact,ulyantsev2016symmetry}. We refer to the root node (i.e., the initial state) of an APTA as $v_r$. For $v \in V \setminus \{v_r\}$, $l(v)$ denotes the symbol on the incoming transition of $v$, and $p(v)$ is the parent node (i.e., the previous state) of $v$; as an APTA is a tree-like automaton, $p(v)$ is unique.
\begin{enumerate}
\item A positive example must be accepted by \emph{all} DFAs:
\[
\bigwedge_{v \in V_{+}} \bigwedge_{k \in [n]} \bigwedge_{i \in [m_k]} x^{k}_{v, i} \implies z^{k}_{i}.
\]
\item A negative example must be rejected by \emph{at least} one DFA:
\[
\bigwedge_{v \in V_{-}} \bigvee_{k \in [n]} \bigwedge_{i \in [m_k]} x^{k}_{v, i} \implies \neg z^{k}_{i}.
\]
\item Each state of APTA has at least one color for each DFA:
\[
\bigwedge_{v \in V} \bigwedge_{k \in [n]} \bigwedge_{i \in [m_k]} x^{k}_{v, i}.
\]
\item A transition of a DFA is set when a state and its parent are both colored:
\[
\bigwedge_{v \in V \setminus \{v_r\}} \bigwedge_{k \in [n]} \bigwedge_{i,j \in [m_k]} (x^{k}_{p(v), i} \wedge x^{k}_{v, j}) \implies y^{k}_{l(v), i, j}.
\]
\item A transition of a DFA targets at most one state:
\[
\bigwedge_{l \in \Sigma} \bigwedge_{k \in [n]} \bigwedge_{\substack{i, j, t \in [m_k]\\j < t}} y^{k}_{l, i, j} \implies \neg y^{k}_{l, i, t}.
\]
\item Each state of APTA has at most one color for each DFA:
\[
\bigwedge_{v \in V} \bigwedge_{k \in [n]} \bigwedge_{i, j \in [m_k]} \neg x^{k}_{v, i} \vee \neg x^{k}_{v, j}.
\]
\item A transition of a DFA targets at least one state:
\[
\bigwedge_{l \in \Sigma} \bigwedge_{k \in [n]} \bigwedge_{i,j \in [m_k]} y^{k}_{l, i, j}.
\]
\item For each DFA, a node color is set when the color of the parent node and the transition between them are set:
\[
\bigwedge_{v \in V \setminus \{v_r\}} \bigwedge_{k \in [n]} \bigwedge_{i, j \in [m_k]} (x^{k}_{p(v), i} \wedge y^{k}_{l(v), i, j}) \implies x^{k}_{v, j}.
\]
\item Accepting-rejecting nodes of APTA cannot be merged:
\[
\bigwedge_{v_{-} \in V_{-}} \bigwedge_{v_{+} \in V_{+}} \bigwedge_{k \in [n]} \bigwedge_{i \in [m_k]} (x^{k}_{v_{-}, i} \wedge \neg z^{k}_{i}) \implies \neg x^{k}_{v_{+}, i}.
\] \end{enumerate}
The next set of constraints encodes the symmetry breaking clauses introduced in~\cite{ulyantsev2016symmetry} to avoid consideration of isomorphic DFAs. The main idea of the symmetry breaking clauses is to enforce individual DFA states to be enumerated in a depth-first search (DFS) order. See~\cite{ulyantsev2016symmetry} for more details. The symmetry breaking clauses make use of new auxiliary variables $p^k_{j,i}$ and $t^k_{i,j}$ for $k \in [n], i,j \in [m_k]$ and $m^k_{l,i,j}$ for $k \in [n], l \in \Sigma, i,j \in [m_k]$. Let $\Sigma = \{l_1, \dots, l_L\}$. \begin{enumerate}
\item Each state must have a smaller parent in the DFS order:
\[
\bigwedge_{k \in [n]} \bigwedge_{i \in [2,m_k]} ( p^{k}_{i, 1} \lor \dots \lor p^{k}_{i,i-1} ).
\]
\item Define $p^k_{j,i}$ in terms of auxilliary variable $t^k_{i,j}$:
\[
\bigwedge_{k \in [n]} \bigwedge_{\substack{i,j \in [m_k] \\ i < j}} (p^k_{j,i} \iff t^k_{i,j} \land \lnot t^k_{i+1,j} \land \dots \land \lnot t^k_{j-1,j} )
\]
\item Define $t^k_{i,j}$ in terms of $y^k_{l,i,j}$:
\[
\bigwedge_{k \in [n]} \bigwedge_{\substack{i,j \in [m_k] \\ i < j}} (t^k_{i,j} \iff y^k_{l_1,i,j} \lor \dots \lor y^k_{l_L,i,j})
\]
\item The parent relationship follows the DFS order:
\[
\bigwedge_{k \in [n]} \bigwedge_{\substack{i,j,p,q \in [m_k] \\ i < p < j < q}} (p^k_{j,i} \implies \lnot t^k_{p,q})
\]
\item Define $m^k_{l,i,j}$ in terms of $y^k_{l,i,j}$:
\[
\bigwedge_{k \in [n]} \bigwedge_{\substack{i,j \in [m_k] \\ i < j}} \bigwedge_{l_r \in \Sigma} (m^k_{l_r,i,j} \iff y^k_{l_r,i,j} \land \lnot y^k_{l_{r-1},i,j} \land
\dots \land \lnot y^k_{l_1,i,j})
\]
\item Enforce DFAs to be DFS-enumerated in the order of symbols on transitions:
\[
\bigwedge_{k \in [n]} \bigwedge_{\substack{i,j,q \in [m_k] \\ i < j < q}} \bigwedge_{\substack{l_r, l_s \in \Sigma \\ r < s}} (p^k_{j,i} \land p^k_{q,i} \land m^k_{l_s,i,j} \implies \lnot m^k_{l_r,i,q})
\] \end{enumerate}
\subsection{Proof of \Cref{thm:sat}} \label{appendix:proof_main}
\begin{theorem_repeat} Given labeled examples with $n$ and $m_1, \dots, m_n$, a solution to our SAT encoding is a solution to \texttt{DFA-DIP}\xspace. \end{theorem_repeat}
\begin{IEEEproof}
We assume that the SAT-based reduction to graph coloring for monolithic DFA identification given in \cite{heule2010exact} is correct.
Constraints \ref{rule1} and \ref{rule2} replace similar constraints in the monolithic encoding given in \cite{heule2010exact}: \begin{enumerate}[label=\textbf{(R\arabic*')},leftmargin=26pt]
\item\label{rule1_prime} a negative example must be rejected by the DFA:
\[
\bigwedge_{v \in V_{-}} \bigwedge_{i \in [m_k]} x_{v, i} \implies \neg z_{i}, \text{ and}
\]
\item\label{rule2_prime} accepting and rejecting states of the APTA cannot be merged:
\[
\bigwedge_{v_{-} \in V_{-}} \bigwedge_{v_{+} \in V_{+}} \bigwedge_{i \in [m_k]} x_{v_{-}, i} \implies \neg x_{v_{+}, i}.
\] \end{enumerate} In the monolithic DFA case, there is only a single DFA, so for ease of notation, we drop the index $k$. First notice that constraints \ref{rule1_prime} and \ref{rule2_prime} have no bearing on whether the DFA accepts each positive example. Therefore, our encoding automatically requires that each DFA in the DFA decomposition accepts all of the positive examples and is not constrained to unnecessarily accept any unspecified examples.
Constraint \ref{rule1_prime} ensures that the resulting monolithic DFA rejects every negative example by making the color of the node in the APTA associated with the negative example rejecting. Constraint \ref{rule1} replaces this and ensures that at least one of the DFAs in the DFA decomposition rejects a negative example by making the color of the node in the APTA associated with the negative example rejecting in at least one of the $n$ DFAs in the decomposition. Thus, the language intersection of the resulting decomposition correctly rejects negative examples.
Constraint \ref{rule2_prime} ensures that all pairs of rejecting and accepting nodes of the APTA cannot be assigned the same color (i.e., merged) in the resulting DFAs. Constraint \ref{rule2}, which replaces \ref{rule2_prime}, ensures that for each DFA in the decomposition, the pair $(x_{v_{-},i}^k, x_{v_{+},i}^k)$ of accepting and rejecting nodes of the APTA cannot be assigned the same color only if DFA $k$ is rejecting the negative example associated with $x_{v_{-},i}^k$ (which is handled by constraint \ref{rule1}). This allows all but one DFA in the DFA decomposition to accept negative examples. Therefore, no DFA in the decomposition is constrained to unnecessarily reject a negative example if some other DFA in the DFA decomposition already does so. Therefore, the language intersection of the DFAs in the DFA decomposition is not constrained to reject any unspecified examples.
So, if there exists a DFA decomposition with the specified number of states such that all DFAs accept the positive examples and at least one DFA in the decomposition rejects each rejecting example, our encoding will find it. \end{IEEEproof}
\subsection{Details of the Pareto Frontier Search Algorithm} \label{appendix:pareto_search}
\Cref{alg:pareto_frontier} presents the details of the BFS performed in the solution space for finding the Pareto-optimal frontier.
\begin{algorithm}[h]
\caption{Pareto frontier enumeration algorithm.
\label{alg:pareto_frontier}}
\begin{algorithmic}[1]
\Require{Positive $D_{+}$ and negative $D_{-}$ labeled examples and positive integer $n$.}
\State $P^{\star} \gets \emptyset$ \Comment{Maintains the Pareto frontier}
\State $Q \gets \{(2,\dots,2)\}$ \Comment{Initialize the queue}
\While{$Q \neq \emptyset$}
\State $m \gets Q.dequeue()$
\If{$\nexists \hat{m} \in P^{\star} \text{ s.t. } \hat{m} \prec m$}
\State $SAT, \mathcal{A} \gets \textproc{Solve}(n, m, D_{+}, D_{-})$
\If{$SAT$}
\State $P^{\star} = P^{\star} \cup \mathcal{A}$ \Comment{Add to the Pareto frontier}
\Else
\For{$k=1,\dots,n$}
\State $m' \gets m$
\State $m'_k \gets m'_k + 1$
\If{$\text{ordered}(m')$}
\State $Q.enqueue(m')$
\EndIf
\EndFor
\EndIf
\EndIf
\EndWhile
\State \Return{$P^{\star}$}
\end{algorithmic} \end{algorithm}
\subsection{Proof of \Cref{thm:pareto}} \label{appendix:proof_pareto}
\begin{theorem_repeat} The described BFS is sound and complete; it outputs the full Pareto-optimal frontier of solutions without returning any dominated solutions. \end{theorem_repeat}
\begin{IEEEproof} The described BFS enumerates the number of states of DFA decomposition in product order. Therefore, before reaching a vertex with number of states $(m_1, m_2, \dots, m_n)$, it explores all number of states of DFA decompositions that dominate the DFA decomposition with $(m_1, m_2, \dots, m_n)$ states. If any of these number of states admit a solution to \texttt{DFA-DIP}\xspace, then the DFA decomposition associated with number of states $(m_1, m_2, \dots, m_n)$ will be marked as a sink and not returned on the Pareto-optimal frontier. Therefore, the described BFS is sound.
If none of those number of states admit a solution to \texttt{DFA-DIP}\xspace, then none of them are sinks, so $(m_1, m_2, \dots, m_n)$ will be reached, and if $(m_1, m_2, \dots, m_n)$ does admit a solution to \texttt{DFA-DIP}\xspace, it will correctly be returned as a solution on the Pareto-optimal frontier. Thus, the described BFS is complete. \end{IEEEproof}
\subsection{DFA Encoding Sizes for DISS} \label{appendix:encode_sizes} When sampling a concept identifying a set of labeled examples, DISS algorithms are exponentially more likely to sample concepts with smaller \textit{size complexity}, i.e., number of bits required to represent the concept. We define the \textit{size} of a DFA decomposition $\mathcal{A} = (\mathcal{A}_1, \dots, \mathcal{A}_n)$ with number of states $m_1, \dots, m_n$ based on the size of the underlying DFAs as: \begin{equation}
\sum_{i=1}^n\text{size}(\mathcal{A}_i) - (n - 1)(2\ln(\Sigma) + 1) - 2(n - 1)\ln(m_1) \end{equation}
where the size of each underlying DFA is given by $\text{size}(\mathcal{A}_i) = 3 + 2 \ln(m_i) + 2 \ln(\Sigma) + (|F_i| + 1) \ln(m_i) + z ( \ln(\Sigma) + 2 \ln(m_i) )$ where $F_i$ are the accepting states of $\mathcal{A}_i$ and $z$ is the number of non-stuttering transitions of $\mathcal{A}_i$.
\subsection{Extra Figures} \label{appendix:figs}
\begin{figure}
\caption{Monolithic DFA for the example presented in \Cref{subsec:toy_example}.}
\label{fig:monolithic_dfa}
\end{figure}
\begin{figure}
\caption{How quickly DISS finds explanatory DFA decompositions compared to the enumeration baselines for a setting without prior partial knowledge. The temperature $\beta$ controls the degree of likelihood to which better explaining DFA decompositions are sampled.
}
\label{fig:diss_exp:plot}
\end{figure}
\begin{figure}
\caption{How quickly DISS finds explanatory DFA decompositions compared to the enumeration baselines for a setting with prior partial knowledge. The temperature $\beta$ controls the degree of likelihood to which better explaining DFA decompositions are sampled.
}
\label{fig:diss_exp:inc_plot}
\end{figure}
\begin{figure}
\caption{This plot compares the scalability of our algorithm to the monolithic DFA identification baseline. Each point in the plot represents a problem instance that is in the experiments presented in \Cref{subsec:dfa_eval}. The dashed black line is the $x = y$ line. As expected, our algorithm requires more time to solve same problem instances. However, we should note that the relationship between the solution times is not far away from the $x = y$ line.}
\label{fig:all}
\end{figure}
\end{document} |
\begin{document}
\title{The duality of computation under focus} \begin{abstract} We review the close relationship between abstract machines for (call-by-name or call-by-value) $\lambda$-calculi (extended with Felleisen's $\cal C$) and sequent calculus, reintroducing on the way Curien-Herbelin's syntactic kit expressing the duality of computation. We use this kit to provide a term language for a presentation of $\mathsf{LK}$ (with conjunction, disjunction, and negation), and to transcribe cut elimination as (non confluent) rewriting. A key slogan here, which may appear here in print for the first time, is that commutative cut elimination rules are explicit substitution propagation rules. We then describe the focalised proof search discipline (in the classical setting), and narrow down the language and the rewriting rules to a confluent calculus (a variant of the second author's focalising system $\mathsf{L}$).
We then define a game of patterns and counterpatterns, leading us to a fully focalised finitary syntax for a synthetic presentation of classical logic, that provides a quotient on (focalised) proofs, abstracting out the order of decomposition of negative connectives.\footnote{A slighlty shorter version appears in the Proceedings of the Conference IFIP TCS, Brisbane, Sept. 2010, published as a Springer LNCS volume., With respect to the published conference version, the present version corrects some minor mistakes in the last section, and develops a bit further the material of Section 5.}
\end{abstract}
\section{Introduction} \label{introduction}
This paper on one hand has an expository purpose and on the other hand pushes further the syntactic investigations on the duality of computation undertaken in \cite{CH2000}.
Section \ref{intro-sec} discusses the relation between familiar {\em abstract machines} for the $\lambda$-calculus (extended with control) and (classical) {\em sequent calculus}. Section \ref{LK-proofs-sec} presents a faithful language (with a one-to-one correspondence between well-typed terms and proof trees) for a presentation of LK that anticipates a key ingredient of focalisation, by choosing a dissymetric presentation for the conjunction on one side and the disjunction on the other side of sequents. We recall the non-confluence of unconstrained classical cut-elimination.
In Section \ref{LKQ-sec}, we present the {\em focalised proof search discipline} (for {\em classical logic}), and adapt the syntactic framework of Section \ref{LK-proofs-sec} to get a confluent system whose normal forms are precisely the terms denoting (cut-free) focalised proofs. The system we arrive at from these proof-search motivations is (a variant of) the second author's {\em focalising system $\mathsf{L}$} ($\mathsf{L_{\textrm{foc}}}$) \cite{Munch2009}
We prove the completeness of $\mathsf{L_{\textrm{foc}}}$ with respect to $\mathsf{LK}$ for provability.
In Section \ref{encodings-sec}, we define some simple encodings having $\mathsf{L_{\textrm{foc}}}$ as source or target, indicating its suitability as an intermediate language (between languages and their execution or compilation).
Finally, in Section \ref{LKQS-sec}, we further reinforce the focalisation discipline, which leads us to {\em synthetic system $\mathsf{L}$} ($\mathsf{L_{\textrm{synth}}}$), a logic of synthetic connectives in the spirit of Girard's ludics and Zeilberger's CU, for which we offer a syntactic account based on a simple game of patterns and counterpatterns that can be seen as another manifestation of dualities of computation. We show that the synthetic system $\mathsf{L}$ is complete with respect to focalising system $\mathsf{L}$.
\noindent {\em Notation.} We shall write $\Subimpl{t}{x}{v}$ the result of substituting $v$ for $x$ at all (free) occurrences of $x$ in $t$, and $\Sub{t}{x}{v}$ for an explicit operator \cite{ACCL} added to the language together with rules propagating it. Explicit substitutions are relevant here because they account for the commutative cut rules (see Section \ref{LK-proofs-sec}).
\section{Abstract machines and sequent calculus} \label{intro-sec}
In this section, we would like to convey the idea that sequent calculus could have arisen from the goal of providing a typing system for the states of an abstract machine for the ``mechanical evaluation of expressions'' (to quote the title of Peter Landin's pioneering paper \cite{Landin64}).
Here is a simple device for executing a (closed) $\lambda$-term in call-by-name (Krivine machine \cite{KrivineMach}): \begin{center} \fbox{$\begin{array}{lllllllll} \coupe{MN}{E} & \longrightarrow& \coupe{M}{N\cdot E} \quad &&& \quad \coupe{\lambda x.M}{N\cdot E} & \longrightarrow & \coupe{\Subimpl{M}{x}{N}}{E} \end{array}$} \end{center} A state of the machine is thus a pair $\coupe{M}{E}$ where
$M$ is ``where the computation is currently active'', and
$E$ is the stack of things that are waiting to be done in the future, or the continuation, or the evaluation context. In $\lambda$-calculus litterature, contexts are more traditionally presented as terms with a hole: with this tradition, $\coupe{M}{E}$ (resp. $M\cdot E$) reads as $E[M]$ (resp. $E[[]M]$), or ``fill the hole of $E$ with $M$ (resp. $[]M$)''.
How can we type the components of this machine? We have three categories of terms and of typing judgements: $$\begin{array}{ccccccccc} \mbox{Expressions} &&& \mbox{Contexts} &&& \mbox{Commands} \\
M::= x \,\mbox{\large\boldmath$\mid$}\, \lambda x.M \,\mbox{\large\boldmath$\mid$}\, MM \quad&&&\quad
E::= [\:] \,\mbox{\large\boldmath$\mid$}\, M\cdot E\quad &&& \quad
c::=\coupe{M}{E}\\
(\lkc{\Gamma}{}{}{M:A})\quad&&&\quad (\lke{\Gamma}{E:A}{}{R})
\quad &&& \quad c:(\lkc{\Gamma}{}{}{R}) \end{array}$$
where $R$ is a (fixed) type of {\em final results}. The type of an expression (resp. a context) is the type of the value that it is producing (resp. expecting). The typing rules for contexts and commands are as follows: $$\seq{}{\lke{\Gamma}{ [\:]:R}{}{R}}\quad\quad \seq{ \lkc{\Gamma}{}{}{ M:A}\quad \lke{\Gamma}{ E:B}{}{R}} {\lke{\Gamma}{M\cdot E:A\rightarrow B}{}{R}}\quad\quad \seq{\lkc{\Gamma}{}{}{ M:A}\quad\lke{\Gamma}{ E:A}{}{R}} {\coupe{M}{E}:(\lkc{\Gamma}{}{}{R})}$$ and the typing rules for expressions are the usual ones for simply typed $\lambda$-calculus. Stripping up the term information, the second and third rules are rules of {\em sequent calculus} (left introduction of implication and cut).
We next review Griffin's typing of Felleisen's control operator ${\cal C}$. As a matter of fact, the behaviour of this constructor is best expressed at the level of an abstract machine: \begin{center} \fbox{$\begin{array}{lllllllll} \coupe{{\cal C}(M)}{E} & \longrightarrow &\coupe{M}{\reify{E}\cdot[\:]} \quad &&& \quad \coupe{\reify{E}}{N\cdot E'} &\longrightarrow& \coupe{N}{E} \end{array}$} \end{center} The first rule explains how the continuation $E$ gets {\em captured}, and the second rule how it gets {\em restored}.
Griffin \cite{Griffin90} observed that the typing constraints induced by the well-typing of these four commands are met when ${\cal C}(M)$ and $\reify{E}$ are typed as follows:
$$\begin{array}{cccc}
\seq{\lkc{\Gamma}{}{}{M: (A\rightarrow R)\rightarrow R}} {\lkc{\Gamma}{}{}{{\cal C}(M): A}} \quad&&& \quad\seq{\lke{\Gamma}{E: A}{}{R}}{\lkc{\Gamma}{}{}{\reify{E}: A\rightarrow R}} \end{array}$$ These are the rules that one adds to intuitionistic natural deduction to make it classical, if we interpret $R$ as $\bot$ (false), and if we encode $\neg A$ as $A\rightarrow R$.
Hence, Griffin got no less than {\em Curry-Howard for classical logic}! But how does this sound in sequent calculus style?
In classical sequent calculus, sequents have several formulas on the right and $\lkc{\Gamma}{}{}{\Delta}$ reads as ``if all formulas in $\Gamma$ hold, then at least one formula of $\Delta$ holds''. Then it is natural to associate continuation variables with the formulas in $\Delta$: a term will depend on its input variables, and on its output continuations. With this in mind, we can read the operational rule for ${\cal C}(M)$ as `` ${\cal C}(M)$ is a map $E \mapsto \coupe{M}{\reify{E}\cdot[\:]}$'', and write it with a new binder (that comes from \cite{Parigot92}): $${\cal C}(M) =\mu\beta.\coupe{M}{\reify{\beta}\cdot[\:]}$$ where $[\:]$ is now a continuation variable (of ``top-level'' type $R$). Likewise, we synthesise $\reify{E}=\lambda x.\mu\alpha.\coupe{x}{E}$, with $\alpha,x$ fresh, from the operational rules for $\reify{E}$ and for $\lambda x.M$.
The typing judgements are now: $
(\lkv{\Gamma}{}{M:A}{\Delta})$, $(\lke{\Gamma}{E:A}{}{\Delta})$, and $c:(\lkc{\Gamma}{}{}{\Delta}) $. The two relevant new typing rules are (axiom, right {\em activation}): \begin{center} $\begin{array}{llll}
\seq{}{\lke{\Gamma}{\alpha:A}{}{\alpha:A,\Delta}} &&& \seq{c:(\lkc{\Gamma}{}{}{\alpha:A,\Delta})}{ \lkv{\Gamma}{}{\mu\alpha.c:A}{\Delta}} \end{array}$ \end{center} plus a reduction rule: $\coupe{\mu\alpha.c}{E}\longrightarrow\Subimpl{c}{\alpha}{E}$.
Note that in this setting, there is no more need to ``reify'' a context $E$ into an expression $\reify{E}$, as it can be directly substituted for a continuation variable.
Similarly, we can read off a (call-by-name) definition of $MN$ from its operational rule:
$MN=\mu\beta.\coupe{M}{N.\beta}$. Hence we can
remove application from the syntax and arrive at a system in sequent calculus style {\em only} (no more elimination rule). This yields Herbelin's $\overline{\lambda}\mu$-calculus \cite{Her95}: $$ \mbox{Expressions} \;\; M::= x \,\mbox{\large\boldmath$\mid$}\, \lambda x.M \,\mbox{\large\boldmath$\mid$}\, \mu\alpha.c\quad\quad \mbox{Contexts} \;\; E::= \alpha \,\mbox{\large\boldmath$\mid$}\, M\cdot E \quad\quad \mbox{Commands} \;\; c::=\coupe{M}{E} $$
which combines the first two milestones above: ``sequent calculus'', ``classical''.
Let us step back to the $\lambda$-calculus. The following describes a call-by-value version of the Krivine machine:
\begin{center} \fbox{$\begin{array}{llllllll} \coupe{MN}{e} & \longrightarrow & \coupe{N}{M\odot e} \quad&&&\quad \coupe{V}{M\odot e}& \longrightarrow & \coupe{M}{V\cdot e} \end{array}$} \end{center} (the operational rule for $\lambda x.M$ is unchanged)\footnote{The reason for switching notation from $E$ to $e$ will become clear in Section \ref{encodings-sec}.}. Here, $V$ is a {\em value}, defined as being either a variable or an abstraction (this goes back to \cite{PlotkinCBNV}). Again, we can read $M\odot e$ as ``a map $V\mapsto \coupe{M}{V\cdot e}$'', or, introducing a new binder $\tilde\mu$ (binding now ordinary variables): $$M\odot e = \tilde\mu x. \coupe{M}{x\cdot e}$$ The typing rule for this operator is (left activation): \begin{center} $\begin{array}{c}
\seq{ c:(\lkc{\Gamma,x:A}{}{}{\Delta})}{\lke{\Gamma}{\tilde\mu x.c:A}{}{\Delta}} \end{array}$ \end{center} and the operational rule is $\coupe{V}{\tilde\mu x.c}\longrightarrow \Subimpl{c}{x}{V}\;\;(V\mbox{ value})$.
Finally, we get from the rule for $MN$ a call-by-value definition of application: $MN=\mu\alpha.\coupe{N}{\tilde\mu x.\coupe{M}{x\cdot \alpha}}$.
We have arrived at Curien and Herbelin's $\overline\lbd\mu\tilde\mu_Q$-calculus \cite{CH2000}: \begin{center}
\begin{tabular}{|cc|cc|cc|c|} \hline Expressions $ M::= \Vtov{V} \,\mbox{\large\boldmath$\mid$}\, \mu\alpha.c $ && \:Values $\; V::= x \,\mbox{\large\boldmath$\mid$}\, \lambda x.M$ && \:Contexts $\; e::= \alpha \,\mbox{\large\boldmath$\mid$}\, V\cdot e \,\mbox{\large\boldmath$\mid$}\, \tilde\mu x.c$ && \:Commands $\; c::=\coupe{M}{e}$\\
$ \lkv{\Gamma}{}{M:A}{\Delta}$ && $\lkV{\Gamma}{}{V:A}{\Delta}$ &&
$\lke{\Gamma}{e:A}{}{\Delta} $
&&
$c:(\lkc{\Gamma}{}{}{\Delta})$\\
\hline \end{tabular} \end{center} with a new judgement for values (more on this later) and an
explicit coercion from values to expressions. The syntax for contexts is both extended ($\tilde\mu x.c$) and restricted ($V\cdot e$ instead of $M\cdot e$). The reduction rules are as follows: \begin{center} \fbox{$\begin{array}{lllllllll} \coupe{\Vtov{(\lambda x.M)}}{V\cdot e}\longrightarrow \coupe{\Subimpl{M}{x}{V}}{e} \quad&&\quad \coupe{\mu\alpha.c}{e}\longrightarrow \Subimpl{c}{\alpha}{e}\quad &&\quad \coupe{\Vtov{V}}{\tilde\mu x.c}\longrightarrow \Subimpl{c}{x}{V} \end{array}$} \end{center} \section{A language for $\mathsf{LK}$ proofs} \label{LK-proofs-sec} In this section, we use some of the kit of the previous section to give a term language for classical sequent calculus $\mathsf{LK}$, with negation, conjunction, and disjunction as connectives. Our term language is as follows: $$\begin{array}{lllll} \mbox{Commands} &&&& c::=\coupe{x}{\alpha} \,\mbox{\large\boldmath$\mid$}\, \coupe{v}{\alpha} \,\mbox{\large\boldmath$\mid$}\, \coupe{x}{e} \,\mbox{\large\boldmath$\mid$}\, \coupe{\mu\alpha.c}{\tilde\mu x.c}\\ \mbox{Expressions} &&&& v::= (\tilde\mu x.c)^\bullet \,\mbox{\large\boldmath$\mid$}\, (\mu\alpha.c,\mu\alpha.c) \,\mbox{\large\boldmath$\mid$}\, \inl{\mu\alpha.c} \,\mbox{\large\boldmath$\mid$}\, \inr{\mu\alpha.c}\\
\mbox{Contexts} &&&& e::= \tilde\mu\alpha^\bullet.c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu (x_1,x_2).c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2} \end{array}$$ (In $\coupe{v}{\alpha}$ (resp. $\coupe{x}{e}$), we suppose $\alpha$ (resp. $x$) fresh for $v$ (resp. $e$).) A {\em term} $t$ is a command, an expression, or a context. As in section \ref{intro-sec}, we have three kinds of sequents: $(\Gamma\vdash\Delta)$, $(\lkv{\Gamma}{}{A}{\Delta})$, and $(\lke{\Gamma}{A}{}{\Delta})$. We decorate $\mathsf{LK}$'s inference rules with terms, yielding the following typing system (one term construction for each rule of $\mathsf{LK}$):
\begin{center} {\small (axiom and cut/contraction) $\quad\quad \seq{}{\coupe{x}{\alpha}:(\Gamma,x:A\vdash \alpha:A,\Delta)}\quad\quad \seq{c:(\Gamma\vdash \alpha: A,\Delta)\quad\quad d:(\Gamma, x:A\vdash\Delta)} {\coupe{\mu\alpha.c}{\tilde\mu x.d}:(\Gamma\vdash\Delta)}$
$(\mbox{right})\quad\quad\quad\quad\quad\seq{c:(\Gamma,x:A\vdash\Delta)}{\lkv{\Gamma}{}{(\tilde\mu x.c)^\bullet:\neg A}{\Delta}}\quad\quad\seq{c_1:(\Gamma\vdash \alpha_1:A_1,\Delta) \quad\quad c_2:(\Gamma\vdash \alpha_2:A_2,\Delta)} {\lkv{\Gamma}{}{(\mu\alpha_1.c_1,\mu\alpha_2.c_2):A_1\wedge A_2}{\Delta}}$
$ \seq{c_1:(\Gamma\vdash \alpha_1:A_1,\Delta)}{\lkv{\Gamma}{}{\inl{\mu\alpha_1.c_1}:A_1\vee A_2}{\Delta}}\quad\quad \seq{c_2:(\Gamma\vdash \alpha_2:A_2,\Delta)}{\lkv{\Gamma}{}{\inr{\mu\alpha_2.c_2}:A_1\vee A_2}{\Delta}}$
$(\mbox{left})\quad \seq{c:(\Gamma\vdash \alpha:A,\Delta)}{\lke{\Gamma}{\tilde\mu\alpha^\bullet.c:\neg A}{}{\Delta}}\quad\quad \seq{c:(\Gamma,x_1:A_1,x_2:A_2\vdash\Delta)}{\lke{\Gamma}{\tilde\mu(x_1,x_2).c:A_1\wedge A_2}{}{\Delta}}\quad\quad\seq{c_1:(\Gamma,x_1:A_1\vdash\Delta)\quad\quad c_2:(\Gamma,x_2:A_2\vdash\Delta)}
{\lke{\Gamma}{\tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2}:A_1\vee A_2}{}{\Delta}}$
$(\mbox{deactivation})\quad\quad\seq{\lkv{\Gamma}{}{v:A}{\Delta}}{\coupe{v}{\alpha}:(\Gamma\vdash\alpha:A,\Delta)}\quad\quad \quad\quad\seq{\lke{\Gamma}{e:A}{}{\Delta}}{\coupe{x}{e}:(\Gamma,x:A\vdash\Delta)}$} \end{center} Note that the activation rules are packaged in the introduction rules and in the cut rule. As for the underlying sequent calculus rules, we have made the following choices: \begin{enumerate} \item We have preferred {\em additive} formulations for the cut rule and for the right introduction of conjunction (to stay in tune with the tradition of typed $\lambda$-calculi) over a multiplicative one where the three occurrences of $\Gamma$ would be resp. $\Gamma_1$, $\Gamma_2$, and $\Gamma_1,\Gamma_2$ (idem for $\Delta$). An important consequence of this choice is that contraction is a derived rule of our system, whence the name of {\em cut/contraction} rule above\footnote{In usual syntactic accounts of contraction, one says that if, say $t$ denotes a proof of $\Gamma,x:A,y:A\vdash\Delta$, then $\Subm{t}{\Subi{x}{z},\Subi{y}{z}}$ denotes a proof of $\Gamma,z:A\vdash\Delta$. Note that if this substitution is explicit, then we are back to an overloading of cut and contraction.}: \begin{center} $\seq{\seq{}{\lkc{\Gamma,A}{}{}{A,\Delta}}\quad\quad \lkc{\Gamma,A,A}{}{}{\Delta}}{\lkc{\Gamma,A}{}{}{\Delta}}\quad\quad\quad\seq{\lkc{\Gamma}{}{}{A,A,\Delta}\quad\quad \seq{}{\lkc{\Gamma,A}{}{}{A,\Delta}}}{\lkc{\Gamma}{}{}{A,\Delta}}$ \end{center} \item Still in the $\lambda$-calculus tradition, weakening is ``transparent''. If $c:\Gamma\vdash\Delta$ is well-typed, then $c:(\Gamma,\Gamma'\vdash\Delta,\Delta')$ is well-typed (idem $v,e$). (Also, we recall that all free variables of $c$ are among the ones declared in $\Gamma,\Delta$.) \item More importantly, we have adopted {\em irreversible} rules for right introduction of disjunction. 
On the other hand, we have given a {\em reversible} rule for left introduction of conjunction: the premise is derivable from the conclusion. {\em This choice prepares the ground for the next section on focalisation.}\footnote{For the same reason, we have chosen to take three connectives instead of just two, say, $\vee$ and $\neg$, because in the focalised setting $\neg(\neg A\vee \neg B)$ is only equivalent to $A\wedge B$ at the level of {\em provability}.} \end{enumerate} The relation between our typed terms and $\mathsf{LK}$ proofs is as follows.
\noindent - Every typing proof induces a proof tree of $\mathsf{LK}$ (one erases variables naming assumptions and conclusions, terms, the distinction between the three kinds of sequents, and the application of the deactivation rules).
\noindent - If bound variables are explicitly typed (which we shall refrain from doing in the sequel),
then every provable typing judgement, say $\lke{\Gamma}{e:A}{}{\Delta}$, has a unique typing proof, i.e. all information is in $\Gamma$, $A$, $\Delta$, $e$.
\noindent - If $\Pi$ is an $\mathsf{LK}$ proof tree of $(A_1,\ldots,A_m\vdash B_1,\ldots,B_n)$, and if names $x_1,\ldots,x_m$, $\alpha_1,\ldots,\alpha_n$ are provided, then there exists a unique command $c:(\lkc{x_1:A_1,\ldots,x_m:A_m}{}{}{\alpha_1:B_1,\ldots,\alpha_n:B_n})$, whose (unique) typing proof gives back $\Pi$ by erasing.
With this syntax, we can express the cut-elimination rules of $\mathsf{LK}$ as {\em rewriting rules}:
Logical rules (redexes of the form $\coupe{\mu\alpha.\coupe{v}{\alpha}}{\tilde\mu x.\coupe{x}{e}}$): \begin{center} $\begin{array}{l} \coupe{\mu\alpha.\coupe{(\tilde\mu x.c)^\bullet}{\alpha}} {\tilde\mu y.\coupe{y}{\tilde\mu\alpha^\bullet.d}} \longrightarrow \coupe{\mu\alpha.d}{\tilde\mu x.c}\quad\quad (\mbox{similar rules for conjunction and disjunction})
\end{array}$ \end{center}
Commutative rules (going ``up left'', redexes of the form $\coupe{\mu\alpha.\coupe{v}{\beta}}{\tilde\mu x.c}$): $$\begin{array}{l}
\coupe{\mu\alpha.\coupe{(\tilde\mu y.c)^\bullet}{\beta}}{\tilde\mu x.d} \longrightarrow \coupe{\mu\beta'.\coupe{(\tilde\mu y.\coupe{\mu\alpha.c}{\tilde\mu x.d})^\bullet}{\beta'}}{\tilde\mu y.\coupe{y}{\beta}} \quad(\neg\mbox{ right})\\ (\mbox{similar rules of commutation with the other right introduction rules and with the left introduction rules})\\
\coupe{\mu\alpha.\coupe{\mu\beta.\coupe{y}{\beta}}{\tilde\mu y'.c}}{\tilde\mu x.d}\longrightarrow \coupe{\mu\beta.\coupe{y}{\beta}}{\tilde\mu y'.\coupe{\mu\alpha.c}{\tilde\mu x.d}} \quad(\mbox{contraction right})\\ \coupe{\mu\alpha.\coupe{\mu\beta'.c}{\tilde\mu y.\coupe{y}{\beta}}}{\tilde\mu x.d} \longrightarrow \coupe{\mu\beta'.\coupe{\mu\alpha.c}{\tilde\mu x.d}}{\tilde\mu y.\coupe{y}{\beta}} \quad (\mbox{contraction left})\\ \coupe{\mu\alpha.\coupe{\mu\alpha'.c}{\tilde\mu x'.\coupe{x'}{\alpha}}}{\tilde\mu x.d} \longrightarrow \coupe{\mu\alpha.\coupe{\mu\alpha'.c}{\tilde\mu x.d}}{\tilde\mu x.d} \quad(\mbox{duplication})\\ \coupe{\mu\alpha.\coupe{y}{\beta}}{\tilde\mu x.d} \longrightarrow \coupe{y}{\beta} \quad (\mbox{erasing}) \end{array}$$
Commutative rules (going ``up right'', redexes of the form $\coupe{\mu\alpha.c}{\tilde\mu x.\coupe{y}{e}}$ ): similar rules.
The (only?) merit of this syntax is its tight fit with proof trees and traditional cut elimination defined as transformations of undecorated proof trees. If we accept to loosen this,
we arrive at the following more ``atomic'' syntax: $$\begin{array}{lllll} \mbox{Commands} &&&& c::=\coupe{v}{e} \,\mbox{\large\boldmath$\mid$}\, \Subm{c}{\sigma}\\ \mbox{Expressions} &&&& v::= x \,\mbox{\large\boldmath$\mid$}\, \mu\alpha.c \,\mbox{\large\boldmath$\mid$}\, e^\bullet \,\mbox{\large\boldmath$\mid$}\, (v,v) \,\mbox{\large\boldmath$\mid$}\, \inl{v} \,\mbox{\large\boldmath$\mid$}\, \inr{v} \,\mbox{\large\boldmath$\mid$}\, \Subm{v}{\sigma}\\
\mbox{Contexts} &&&& e::= \alpha \,\mbox{\large\boldmath$\mid$}\, \tilde\mu x.c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu\alpha^\bullet.c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu (x_1,x_2).c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2} \,\mbox{\large\boldmath$\mid$}\, \Subm{e}{\sigma} \end{array}$$ where $\sigma$ is a list $\Subi{x_1}{v_1},\ldots,\Subi{x_m}{v_m},\Subi{\alpha_1}{e_1},\ldots, \Subi{\alpha_n}{e_n}$.
In this syntax, activation becomes ``first class'', and two versions of the axiom are now present ($x$, $\alpha$, which give back the axiom of the previous syntax by deactivation).
The typing rules are as follows (we omit the rules for $\tilde\mu x.c$, $\tilde\mu\alpha^\bullet.c$, $\tilde\mu (x_1,x_2).c$, $\tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2}$, which are unchanged): {\small $$\seq{}{\lkv{\Gamma\:,\:x:A}{}{x:A}{\Delta}}\quad\quad \seq{}{\lke{\Gamma}{\alpha:A}{}{\alpha:A\:,\:\Delta}}\quad\quad \seq{\lkv{\Gamma}{}{v:A}{\Delta}\quad\quad \lke{\Gamma}{e:A}{}{\Delta}} {\coupe{v}{e}:(\lkc{\Gamma}{}{}{\Delta})} $$ $$ \seq{c:(\lkc{\Gamma\:,\:x:A}{}{}{\Delta})}{\lke{\Gamma}{\tilde\mu x.c:A}{}{\Delta}} \quad\quad \seq{c:(\lkc{\Gamma}{}{}{\alpha:A\:,\:\Delta})}{\lkv{\Gamma}{}{\mu\alpha.c:A}{\Delta}} $$ $$\seq{\lke{\Gamma}{e:A}{}{\Delta}}{\lkv{\Gamma}{}{e^\bullet:\neg A}{\Delta}}\quad\quad\seq{\lkv{\Gamma}{}{v_1:A_1}{\Delta} \quad\quad \lkv{\Gamma}{}{v_2:A_2}{\Delta}} {\lkv{\Gamma}{}{(v_1,v_2):A_1\wedge A_2}{\Delta}}\quad\quad \seq{\lkv{\Gamma}{}{v_1:A_1}{\Delta}}{\lkv{\Gamma}{}{\inl{v_1}:A_1\vee A_2}{\Delta}}\quad\quad \seq{\lkv{\Gamma}{}{v_2:A_2}{\Delta}}{\lkv{\Gamma}{}{\inr{v_2}:A_1\vee A_2}{\Delta}}$$ $$\seq{c:(\lkc{\Gamma,x_1:A_1,\ldots,x_m:A_m}{}{}{\alpha_1:B_1,\ldots,\alpha_n:B_n})\;\ldots\;\lkv{\Gamma}{}{v_i:A_i}{\Delta}\;\ldots\;\ldots\;\lke{\Gamma}{e_j:B_j}{}{\Delta}\;\ldots}{\Subm{c}{\Subi{x_1}{v_1},\ldots,\Subi{x_m}{v_m},\Subi{\alpha_1}{e_1},\ldots, \Subi{\alpha_n}{e_n}}:(\lkc{\Gamma}{}{}{\Delta})} \quad (\mbox{idem } \Subm{v}{\sigma}, \Subm{e}{\sigma})$$}
Note that we also have now {\em explicit substitutions}
$\Subm{t}{\sigma}$, which feature a form of (multi-)cut where the receiver $t$'s active formula, if any, is not among the cut formulas, in contrast with the construct $\coupe{v}{e}$ where the cut formula is active on both sides.
It is still the case that, by erasing, a well-typed term of this new syntax induces a proof of $\mathsf{LK}$, and that all proofs of $\mathsf{LK}$ are reached (although not injectively anymore), since all terms of the previous syntax are terms of the new syntax.
The rewriting rules divide now in {\em three} groups: $$\begin{array}{lllll} (\mbox{control}) && \coupe{\mu\alpha.c}{e} \longrightarrow \Sub{c}{\alpha}{e} \quad\quad\quad\quad\quad\quad
\coupe{v}{\tilde\mu x.c} \longrightarrow \Sub{c}{x}{v}\\
(\mbox{logical}) &&\coupe{e^\bullet}{\tilde\mu \alpha^\bullet.c} \longrightarrow \Sub{c}{\alpha}{e} \quad\quad\quad\quad\quad \coupe{(v_1,v_2)}{\tilde\mu(x_1,x_2).c} \longrightarrow \Subm{c}{\Subi{x_1}{v_1},\Subi{x_2}{v_2}}\\
&& \coupe{\inl{v_1}}{\tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2}} \longrightarrow \Sub{c_1}{x_1}{v_1}\quad (\mbox{idem }{\it inr})\\ (\mbox{commutation}) && \Subm{\coupe{v}{e}}{\sigma}\longrightarrow \coupe{\Subm{v}{\sigma}}{\Subm{e}{\sigma}}\\ && \Subm{x}{\sigma}\longrightarrow x \;\; (x\mbox{ not declared in }\sigma)\quad\quad \Subm{x}{\Subi{x}{v},\sigma}\longrightarrow v \quad (\mbox{idem }\Subm{\alpha}{\sigma})\\ && \Subm{(\mu\alpha.c)}{\sigma} \longrightarrow \mu\alpha.(\Subm{c}{\sigma}) \quad(\mbox{idem } \Subm{(\tilde\mu x.c)}{\sigma}) \quad(\mbox{capture avoiding})\\ && (\mbox{etc, no rule for composing substitutions})
\end{array}$$ The {\em control rules} mark the decision to launch a substitution (and, in this section, of the direction in which to go, see below). The {\em logical rules} provide the interesting cases of cut elimination, corresponding to cuts where the active formula has been just introduced on both sides. The {\em commutative cuts} are now accounted for ``trivially'' by means of the {\em explicit substitution machinery} that carries substitution progressively inside terms towards their variable occurrences. Summarising, by liberalising the syntax, we have gained considerably in readability of the cut elimination rules\footnote{The precise relation with the previous rules is as follows: for all $s_1,s_2$ such that $s_1\longrightarrow s_2$ in the first system, there exists $s$ such that $s_1\longrightarrow^* s {}^*\longleftarrow s_2$ in the new system, e.g.,
for ($\neg$ right) $\coupe{\mu\alpha.\coupe{(\tilde\mu y.c)^\bullet}{\beta}}{e}$ $\longrightarrow^*$ $\coupe{(\tilde\mu y.(\Sub{c}{\alpha}{e}))^\bullet}{\beta}$ $ {}^*\!\!\longleftarrow$ $\coupe{\mu\beta'.\coupe{(\tilde\mu y.\coupe{\mu\alpha.c}{e})^\bullet}{\beta'}}{\tilde\mu y.\coupe{y}{\beta}}$.}.
\begin{remark} In the ``atomic'' syntax, contractions are transcribed as terms of the form $\coupe{v}{\beta}$ where $\beta$ occurs free in $v$, or of the form $\coupe{x}{e}$ where $x$ occurs freely in $e$. If $\beta$ (resp. $x$) does not occur free in $v$ (resp. $e$), then the command expresses a simple deactivation.
\end{remark}
The problem with classical logic viewed as a computational system is its wild non confluence, as captured by Lafont's critical pair \cite{Girard89,DJSDec}, for which the $\mu\tilde\mu$ kit offers a crisp formulation. For any $c_1,c_2$ both of type $(\Gamma\vdash\Delta)$, we have (with $\alpha,x$ fresh for $c_1,c_2$, respectively): \begin{center} $ c_1\quad{}^*\!\longleftarrow \quad \coupe{\mu\alpha.c_1}{\tilde\mu x.c_2}\quad\longrightarrow^*\quad c_2$ \end{center} So, all proofs are identified... {\em Focalisation}, discussed in the next section, will guide us to solve this dilemma.
\section{A syntax for focalised classical logic} \label{LKQ-sec} In this section, we adapt the
{\em focalisation discipline} (originally introduced by \cite{Andreoli92} in the setting of linear logic) to $\mathsf{LK}$.
A focalised proof search
alternates between right and left phases, as follows:
\noindent - {\it Left phase}: Decompose (copies of) formulas on the left, in any order. Every decomposition of a negation on the left feeds the right part of the sequent. At any moment, one can change the phase from left to right.
\noindent - {\it Right phase}: Choose a formula $A$ on the right, and {\em hereditarily} decompose a copy of it in all branches of the proof search. This {\em focusing} in any branch can only end with an axiom (which ends the proof search in that branch), or with a decomposition of a negation, which prompts a phase change back to the left. Etc\ldots
Note the irreversible (or {\em positive, active}) character of the whole right phase, by the choice of $A$, by the choice of the left or right summand of a disjunction. One takes the risk of not being able to eventually end a proof search branch with an axiom. In contrast, all the choices on the left are reversible (or {\em negative, passive}). This strategy is not only complete (see below), it also guides us to design a disciplined logic whose behaviour will not collapse all the proofs.
To account for right focalisation, we introduce a fourth kind of judgement and a fourth syntactic category of terms: the {\em values}, typed as $(\lkV{\Gamma}{}{V:A}{\Delta})$ (the zone between the turnstile and the semicolon is called the {\em stoup}, after \cite{GirardLC}). We also make official the existence of two disjunctions (since the behaviours of the conjunction on the left and of the disjunction on the right are different) and two conjunctions, by renaming $\wedge,\vee,\neg$ as $\otimes,\oplus,\notP{}$, respectively. Of course, this choice of linear-logic-like notation is not fortuitous. Note however that the source of distinction is not based here on the use of resources like in the founding work on linear logic, which divides the line between {\em additive} and {\em multiplicative} connectives. In contrast, our motivating dividing line here is that between {\em irreversible} and {\em reversible} connectives, and hopefully this provides additional motivation for the two conjunctions and the two disjunctions. Our formulas are thus defined by the following syntax: \begin{center} $P::=X \,\mbox{\large\boldmath$\mid$}\, P\otimes P \,\mbox{\large\boldmath$\mid$}\, P\oplus P \,\mbox{\large\boldmath$\mid$}\, \notP{P}$ \end{center} These formulas are called positive. We can define their De Morgan duals as follows: \begin{center} $\overline{P_1\otimes P_2}=\overline{P_1}\parr\overline{P_2}\quad\quad \overline{P_1\oplus P_2}=\overline{P_1}\with\overline{P_2}\quad\quad \overline{\notP{P}}=\notN{\overline{P}}$ \end{center} These duals are {\em negative} formulas: $N::=\overline{X} \,\mbox{\large\boldmath$\mid$}\, N\parr N \,\mbox{\large\boldmath$\mid$}\, N\with N \,\mbox{\large\boldmath$\mid$}\, \notN{N}$. They restore the duality of connectives, and are implicit in the presentation that follows (think of $P$ on the left as being a $\overline{P}$ in a unilateral sequent $\vdash \overline{\Gamma},\Delta$).
We are now ready to give the syntax of our calculus, which is a variant of the one given by the second author in \cite{Munch2009}\footnote{The main differences with the system presented in \cite{Munch2009} are that we have here an explicit syntax of values, with an associated form of typing judgement, while focalisation is dealt with at the level of the reduction semantics in \cite{Munch2009} (see also Remark \ref{LK-LKQ-not-red-refl}). Also, the present system is bilateral but limited to positive formulas on both sides, it thus corresponds to the positive subsystem of the bilateral version of $\mathsf{L}_{\textrm{foc}}$ as presented in \cite[long version, Appendix A]{Munch2009}.}.
\begin{center} \fbox{$\begin{array}{lllllll} \mbox{Commands} && c::=\coupe{v}{e} \,\mbox{\large\boldmath$\mid$}\, \Subm{c}{\sigma}&&\\ \mbox{Expressions} && v::= \Vtov{V} \,\mbox{\large\boldmath$\mid$}\, \mu\alpha.c \,\mbox{\large\boldmath$\mid$}\, \Subm{v}{\sigma}\\ \mbox{Values} && V::= x \,\mbox{\large\boldmath$\mid$}\, (V,V) \,\mbox{\large\boldmath$\mid$}\, \inl{V} \,\mbox{\large\boldmath$\mid$}\, \inr{V} \,\mbox{\large\boldmath$\mid$}\, e^\bullet \,\mbox{\large\boldmath$\mid$}\, \Subm{V}{\sigma}&&
\\
\mbox{Contexts} && e::=\alpha \,\mbox{\large\boldmath$\mid$}\, \tilde\mu x.c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu\alpha^\bullet.c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu (x_1,x_2).c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2} \,\mbox{\large\boldmath$\mid$}\, \Subm{e}{\sigma}&&
\end{array}$} \end{center} The typing rules are given in Figure~\ref{LKQ-fig}. Henceforth, we shall refer to the calculus of this section (syntax + rewriting rules) as $\mathsf{L}_{\textrm{foc}}$, and to the typing system as $\mathsf{LKQ}$ (after \cite{DJSDec}).
Here are examples of proof terms in $\mathsf{LKQ}$.
\begin{example} \label{LKQ-proofs-ex} \begin{itemize} \item[] $(\lkV{}{}{(\tilde\mu(x,\alpha^\bullet).\coupe{\Vtov{x}}{\alpha})^\bullet:\notP{(P\otimes \notP{P})}}{}$), where $\tilde\mu(x,\alpha^\bullet).c$ is an abbreviation for $\tilde\mu(x,y).\coupe{\Vtov{y}}{\tilde\mu\alpha^\bullet.c}$.
\item[]
$\coupe{\Vtov{\inr{(\tilde\mu x.\coupe{\Vtov{\inl{x}}}{\alpha})^\bullet}}}{\alpha}:(\lkc{}{}{}{\alpha:P\oplus\notP{P}})$. \item[]
$(\lke{}{\tilde\mu(x_2,x_1).\coupe{\Vtov{(x_1,x_2)}}{\alpha}:P_2\otimes P_1}{}{\alpha:P_1\otimes P_2}$). \end{itemize} \end{example} \begin{figure}
\caption{System $\mathsf{LKQ}$}
\label{LKQ-fig}
\end{figure} \begin{proposition} \label{LKQ-complete}
If $\Gamma\vdash\Delta$ is provable in $\mathsf{LK}$, then it is provable in $\mathsf{LKQ}$. \end{proposition}
\noindent {\sc Proof}. Since we have defined a syntax for $\mathsf{LK}$ proofs in section \ref{LK-proofs-sec}, all we have to do is to translate this syntax into the focalised one. All cases are obvious (only inserting the coercion from values to expressions where appropriate) except for the introduction of $\otimes$ and $\oplus$ on the right, for which we can define $\inl{\mu\alpha_1.c_1}$ as \begin{center} $ \lkv{\Gamma}{}{\mu\alpha.\coupe{\mu\alpha_1.c_1}{\tilde\mu x_1.\coupe{\Vtov{(\inl{x_1})}} {\alpha}}:P_1\oplus P_2}{\Delta} \quad\quad(\mbox{idem {\it inr}})$ \end{center} and $(\mu\alpha_1.c_1,\mu\alpha_2.c_2)$ as $(\lkv{\Gamma}{}{\mu\alpha.\coupe{\mu\alpha_2.c_2} {\tilde\mu x_2.\coupe{\mu\alpha_1.c_1} {\tilde\mu x_1.\coupe{\Vtov{(x_1,x_2)}} {\alpha}}}:P_1\otimes P_2}{\Delta})$. \qed
We make two observations on the translation involved in the proof of Proposition \ref{LKQ-complete}.
\noindent \begin{remark} \label{LK-LKQ-choice} The translation {\em introduces cuts}: in particular, a cut-free proof is translated to a proof with (lots of) cuts. It also {\em fixes an order of evaluation}: one should read the translation of right introduction as a protocol prescribing the evaluation of the second element of a pair and then of the first (the pair is thus in particular {\em strict}, as observed in \cite{Munch2009}) (see also
\cite{ZeilbergerCU,Levy2004}). An equally reasonable choice would have been to permute the two $\tilde\mu$s: that would have encoded a left-to-right order of evaluation. This non-determinism of the translation has been known ever since Girard's seminal work \cite{GirardLC}. \end{remark} \begin{remark} \label{LK-LKQ-not-red-refl} The translation is not reduction-preserving, which is expected (since focalisation induces restrictions on the possible reductions), but it is not reduction-reflecting either, in the sense that new reductions are possible on the translated terms. Here is an example (where, say $\mu\_.c$ indicates a binding with a dummy (i.e., fresh) variable). The translation of $\coupe{(\mu\_.c_1,\mu\_.c_2)}{\tilde\mu x.c_3}$ rewrites to (the translation of) $c_2$: \begin{center} $\coupe{\mu\alpha.\coupe{\mu\_.c_2} {\tilde\mu x_2.\coupe{\mu\_.c_1} {\tilde\mu x_1.\coupe{\Vtov{(x_1,x_2)}} {\alpha}}}}{\tilde\mu x.c_3}\quad\longrightarrow^*\quad \coupe{\mu\alpha.c_2}{\tilde\mu x.c_3}\quad\longrightarrow^*\quad c_2 $ \end{center} while the source term is blocked. If we wanted to cure this, we could turn Proposition \ref{LKQ-complete}'s encodings into additional rewriting rules in the source language. We refrain from doing so, since we were merely interested in the source syntax as a stepping stone for the focalised one, and we are content that on one hand the rewriting system of Section \ref{LK-proofs-sec} was good enough to eliminate cuts, and that on the other hand the focalised system is complete with respect to provability. But we note that the same additional rules {\em do} appear in the {\em target} language (and are called $\varsigma$-rules, after \cite{Wadlerdual}) in \cite{Munch2009}. This is because in the focalised syntax proposed in \cite{Munch2009} there is no restriction on the terms of the language, hence $(\mu\_.c_1,\mu\_.c_2)$ is a legal term. \end{remark}
We move on to cut elimination, which (cf. Section \ref{LK-proofs-sec}) is expressed by means of three sets of rewriting rules, given in Figure 2.
Note that we now have only one way to reduce $\coupe{\mu\alpha.c_1}{\tilde\mu x.c_2}$ (no more critical pair).
As already stressed in Section~\ref{LK-proofs-sec}, the commutation rules
are the usual rules defining (capture-avoiding) substitution.
The overall operational semantics features call-by-value by the fact that variables $x$ receive values,
and features also call-by-name (through symmetry, see the logic $\mathsf{LKT}$ in Section \ref{encodings-sec}) by the fact that continuation variables $\alpha$ receive contexts.
\begin{figure}\label{LKQ-cut-elim-rules}
\end{figure}
The reduction system presented in Figure 2 is {\em confluent}, as it is an orthogonal system in the sense of higher-order rewriting systems (left-linear rules, no critical pairs) \cite{NipkHORS}.
\begin{remark} \label{mu-cosmetic} {\it About $\mu$}: we note that an expression $\mu\beta.c$ is used only in a command $\coupe{\mu\beta.c}{e}$, and in such a context it can be expressed as $\coupe{\Vtov{(e^\bullet)}}{\tilde\mu\beta^\bullet.c}$, which indeed reduces to $\Sub{c}{\beta}{e}$. However, using such an encoding would mean to shift from a direct to an indirect style for terms of the form $\mu\alpha.c$. \end{remark} \begin{proposition} \label{LKQ-cut-elim-prop} Cut-elimination holds in $\mathsf{LKQ}$. \end{proposition} \noindent {\sc Proof}. This is an easy consequence of the following three properties:
\noindent 1) {\it Subject reduction.} This is checked as usual rule by rule.
\noindent 2) {\it Weak normalisation.} One first gets rid of the redexes $\coupe{\mu\alpha.c}{e}$ by reducing them all (no such redex is ever created by the other reduction rules). As usual, one measures cuts by the size of the cut formula, called the degree of the redex, and at each step of normalisation, one chooses a redex of maximal degree all of whose subredexes have strictly lower degree. We then package reductions by considering $\coupe{\Vtov{V}}{\tilde\mu x.c}\longrightarrow \Subwn{c}{\Subi{x}{V}}$
(idem for the logical rules) as a single step, where $\Subwn{c}{\sigma}$ is an augmented (implicit) substitution, defined by induction as usual, except for $\coupe{v}{e}$: $$\begin{array}{l} \Subwn{\coupe{\Vtov{x}}{\tilde\mu\alpha^\bullet.c}}{\Subi{x}{e^\bullet},\sigma} = \Subwn{c}{\Subi{\alpha}{e},\Subi{x}{e^\bullet},\sigma}\\ \Subwn{\coupe{\Vtov{x}}{\tilde\mu (x_1,x_2).c}}{\Subi{x}{(V_1,V_2)},\sigma}= \Subwn{c}{\Subi{x_1}{V_1},\Subi{x_2}{V_2},\Subi{x}{(V_1,V_2)},\sigma}\\
\Subwn{\coupe{\Vtov{x}}{\tilde\mu[\inl{x_1}.c_1|\inr{x_2}.c_2]}}{\Subi{x}{\inl{V_1}},\sigma}= \Subwn{c_1}{\Subi{x_1}{V_1},\Subi{x}{\inl{V_1}},\sigma} \quad(\mbox{idem {\it inr}})\\ \Subwn{\coupe{v}{e}}{\sigma} = \coupe{\Subwn{v}{\sigma}}{\Subwn{e}{\sigma}} \quad\mbox{otherwise} \end{array}$$ This is clearly a well-founded definition, by induction on the term in which substitution is performed (whatever the substitution is). This new notion of reduction ensures the following property: if $t_1 \longrightarrow t_2$ is obtained by reducing $R_1$ in $t_1$ and if $R_2$ is a redex created in $t_2$ by this (packaged) reduction, then $R_1$ is of the form $\coupe{e^\bullet}{\tilde\mu\alpha^\bullet.c}$, where $c$ contains a subterm $\coupe{\Vtov{V}}{\alpha}$, which becomes $R_2$ in $t_2$. The key property is then that the degree of the created redex (the size of some formula $P$) is strictly smaller than the degree of the creating one (the size of $\notP{P}$)\footnote{If we had not packaged reduction, we would have had to deal with the creation of redexes, say by substitution of some $V$ for $x$, where the substitution could have been launched by firing a redex of the same degree as the created one.}. The other useful property is that residuals of redexes preserve their degree. Then the argument is easily concluded by associating to each term as global measure the multiset of the degrees of its redexes. This measure strictly decreases at each step (for the multiset extension of the ordering on natural numbers).
\noindent 3) {\it Characterisation of normal forms.} A command in normal form has one of the following shapes (all contractions): $$\coupe{\Vtov{V}}{\alpha} \quad\quad\quad \coupe{\Vtov{x}}{\tilde\mu\alpha^\bullet.c}\quad\quad \coupe{\Vtov{x}}{\tilde\mu(x_1,x_2).c}\quad\quad
\coupe{\Vtov{x}}{\tilde\mu[\inl{x_1}.c_1|\inr{x_2}.c_2]} \quad\quad\quad\quad\qed$$
\begin{corollary} Every sequent $\Gamma\vdash\Delta$ that is provable in $\mathsf{LK}$ admits a (cut-free) proof respecting the focalised discipline. \end{corollary} \noindent {\sc Proof}. Let $\pi$ be a proof of $\Gamma\vdash\Delta$. By Proposition \ref{LKQ-complete}, $\pi$ translates to a command $c:(\lkc{\Gamma}{}{}{\Delta})$, which by Proposition \ref{LKQ-cut-elim-prop} reduces to a term denoting a cut-free proof. The $\mathsf{LK}$ proof obtained by erasing meets the requirement\footnote{This argument of focalisation via normalisation goes back to \cite{GirardLC} (see also \cite{LaurentFoc} for a detailed proof in the case of linear logic).}. \qed
Also, by confluence and weak normalisation, $\mathsf{LKQ}$ is computationally coherent: $(\lkV{x:P,y:P}{}{x:P}{})$ and $(\lkV{x:P,y:P}{}{y:P}{})$ are not provably equal, being distinct normal forms.
Our syntactic choices in this paper have been guided by the phases of focalisation. Indeed, with our syntax, the focalised proof search cycle can be represented as follows (following a branch from the root): {\small $$\begin{array}{lllllll} (\mbox{right phase}) && \coupe{\Vtov{V}}{\alpha}:(\lkc{\Gamma}{}{}{\alpha:P,\Delta}) \;\rightsquigarrow_{-/+} \;\lkV{\Gamma}{}{V:P}{\alpha:P,\Delta}\; \rightsquigarrow_{+}^*\; \lkV{\Gamma}{}{(\tilde\mu x.c)^\bullet:\notP{Q}}{\Delta}\\ &&\quad \rightsquigarrow_{+/-}\; \lke{\Gamma}{\tilde\mu x.c:Q}{}{\Delta}\; \rightsquigarrow_{-} \; c:(\lkc{\Gamma,x:Q}{}{}{\Delta}) \quad\quad(\mbox{idem other $\tilde\mu$ binders})\\ (\mbox{left phase}) && \coupe{\Vtov{x}}{\tilde\mu\alpha^\bullet.c}:(\lkc{\Gamma,x:\notP{P}}{}{}{\Delta})
\; \rightsquigarrow_{-}^* \; c:(\lkc{\Gamma,x:\notP{P}}{}{}{\alpha:P,\Delta}) \\ && \coupe{\Vtov{x}}{\tilde\mu(x_1,x_2).c}:(\lkc{\Gamma,x:P_1\otimes P_2}{}{}{\Delta})
\; \rightsquigarrow_{-}^* \; c:(\lkc{\Gamma,x_1:P_1,x_2:P_2,x:P_1\otimes P_2}{}{}{\Delta})\\ &&
\coupe{\Vtov{x}}{\tilde\mu[\inl{x_1}.c_1|\inr{x_2}.c_2]}:(\lkc{\Gamma,x:P_1\oplus P_2}{}{}{\Delta})
\; \rightsquigarrow_{-}^* \; c_1:(\lkc{\Gamma,x_1:P_1,x:P_1\oplus P_2}{}{}{\Delta})\\
&&
\coupe{\Vtov{x}}{\tilde\mu[\inl{x_1}.c_1|\inr{x_2}.c_2]}:(\lkc{\Gamma,x:P_1\oplus P_2}{}{}{\Delta})
\; \rightsquigarrow_{-}^* \; c_2:(\lkc{\Gamma,x_2:P_2,x:P_1\oplus P_2}{}{}{\Delta}) \end{array} $$} Note that values and commands correspond to positive and negative phases, respectively. The other two categories of terms act as intermediates.
We can also add $\eta$-equivalences (or expansion rules, when read from right to left) to the system, as follows (where all mentioned variables are fresh for the mentioned terms): \begin{center} $\begin{array}{lll} \mu\alpha.\coupe{v}{\alpha}=v &\quad\quad\quad & \tilde\mu(x_1,x_2).\coupe{\Vtov{(x_1,x_2)}}{e}=e\\
\tilde\mu x.\coupe{\Vtov{x}}{e}=e &\quad\quad\quad & \tilde\mu[\inl{x_1}.\coupe{\Vtov{\inl{x_1}}}{e}|\inr{x_2}.\coupe{\Vtov{\inr{x_2}}}{e}]=e\\ &\quad\quad\quad& \tilde\mu\alpha^\bullet.\coupe{\Vtov{(\alpha^\bullet)}}{e} =e \end{array}$ \end{center} The rules in the left column allow us to cancel a deactivation followed by an activation (the control rules do the job for the sequence in the reverse order), while the rules in the right column express the reversibility of the negative rules.
\begin{example} \label{LKQ-exp-ex} We relate $(\notP{P_1})\otimes(\notP{P_2})$ and $\notP{(P_1\oplus P_2)}$ (cf. the well-known isomorphism of linear logic, reading $\notP{P}$ as $!\overline{P}$).
There exist $$\begin{array}{l} c_1:(\lkc{y:\notP{(P_1\oplus P_2)}}{}{}{\alpha:\notP{P_1}\otimes\notP{P_2}})\quad\quad c_2:(\lkc{x:\notP{P_1}\otimes\notP{P_2}}{}{}{\gamma:\notP{(P_1\oplus P_2)}}) \end{array}$$ such that, say\footnote{In the $\lambda$-calculus a provable isomorphism is a pair $(x:A\vdash v:B)$, $(y:B\vdash w:A)$ such that $\Subimpl{w}{y}{v}$ reduces to $x$ (and conversely). Here, we express this substitution (where $v,w$ are not values) as $\mu\alpha.\coupe{v}{\tilde\mu y.\coupe{w}{\alpha}}$, and the reduction as $\coupe{v}{\tilde\mu y.\coupe{w}{\alpha}}\longrightarrow^*\coupe{\Vtov{x}}{\alpha}$. } $\coupe{\mu\gamma.c_2}{\tilde\mu y.c_1}$ reduces to
$\coupe{\Vtov{x}}{\alpha}:(\lkc{x:\notP{P_1}\otimes\notP{P_2}}{}{}{\alpha:\notP{P_1}\otimes\notP{P_2}})$ . We set $$\begin{array}{lll} V_1=((\tilde\mu y'_1.\coupe{\Vtov{\inl{y'_1}}}{\beta})^\bullet,(\tilde\mu y'_2.\coupe{\Vtov{\inr{y'_2}}}{\beta})^\bullet) &\quad\quad\quad& \lkV{}{}{V_1:\notP{P_1}\otimes\notP{P_2}}{\beta:P_1\oplus P_2}\\
V_2=(\tilde\mu\copaireP{\inl{y_1}.\coupe{\Vtov{y_1}}{\alpha_1}|\inr{y_2}.\coupe{\Vtov{y_2}}{\alpha_2}})^\bullet &\quad\quad&
\lkV{}{}{V_2:\notP{(P_1\oplus P_2)}}{\alpha_1:P_1,\alpha_2:P_2} \end{array}$$ We take $c_1=\coupe{\Vtov{y}}{\tilde\mu\beta^\bullet. \coupe{\Vtov{V_1}}{\alpha}}$ and $c_2=\coupe{\Vtov{x}}{\tilde\mu(\alpha_1^\bullet,\alpha_2^\bullet). \coupe{\Vtov{V_2}}{\gamma}}$, where $\tilde\mu(\alpha_1^\bullet,\alpha_2^\bullet).c$ is defined as a shorthand for, say $\tilde\mu(x_1,x_2).\coupe{\Vtov{x_2}}{\tilde\mu\alpha_2^\bullet.\coupe{\Vtov{x_1}}{\tilde\mu\alpha_1^\bullet .c}}$. We have: $$\begin{array}{lll} \coupe{\mu\gamma.c_2}{\tilde\mu y.c_1} & \longrightarrow^* & \coupe{\Vtov{x}}{\tilde\mu(\alpha_1^\bullet,\alpha_2^\bullet). \coupe{\Vtov{((\tilde\mu y'_1.\coupe{\Vtov{y'_1}}{\alpha_1})^\bullet,(\tilde\mu y'_2.\coupe{\Vtov{y'_2}}{\alpha_2})^\bullet)}}{\alpha}}\\ && = \coupe{\Vtov{x}}{\tilde\mu(\alpha_1^\bullet,\alpha_2^\bullet). \coupe{\Vtov{(\alpha_1^\bullet,\alpha_2^\bullet)}}{\alpha}}\\ && = \coupe{\Vtov{x}}{\alpha} \end{array}$$
\end{example}
We end the section with a lemma that will be useful in Section \ref{LKQS-sec}. \begin{lemma} \label{e-subst-lemma} \begin{itemize} \item If $\lke{\Gamma,x:\notP{P}}{e:Q}{}{\Delta}$, then $\lke{\Gamma}{\Subimpl{e}{x}{\alpha^\bullet}:Q}{}{\alpha:P,\Delta}$. \item If $\lke{\Gamma,x:P_1\otimes P_2}{e:Q}{}{\Delta}$, then $\lke{\Gamma,x_1:P_1,x_2:P_2}{\Subimpl{e}{x}{(x_1,x_2)}:Q}{}{\Delta}$. \item If $\lke{\Gamma,x:P_1\oplus P_2}{e:Q}{}{\Delta}$, then $\lke{\Gamma,x_1:P_1}{\Subimpl{e}{x}{\inl{x_1}}:Q}{}{\Delta}$ and $\lke{\Gamma,x_2:P_2}{\Subimpl{e}{x}{\inr{x_2}}:Q}{}{\Delta}$. \end{itemize} (and similarly for $c,V,v$), where $\Subimpl{t}{x}{V}$ (resp. $\Subimpl{t}{\alpha}{e}$) denotes the usual substitution (cf. Section \ref{introduction}). \end{lemma}
\section{Encodings} \label{encodings-sec}
\noindent{\bf Encoding CBV $\lambda$($\mu$)-calculus into $\mathsf{LKQ}$.} We are now in a position to hook up with the material of Section \ref{intro-sec}. We
can encode the call-by-value $\lambda$-calculus, by defining the following derived CBV implication and terms: $$\begin{array}{c} P\rightarrow^v Q=\notP{(P\:\otimes\:\notP{Q})}\\ \lambda x.v=\Vtov{((\tilde\mu(x,\alpha^\bullet).\coupe{v}{\alpha})^\bullet)}\quad\quad\quad v_1v_2=\mu\alpha.\coupe{v_2}{\tilde\mu x.\coupe{v_1}{\Vtoe{(\Vtov{(x,\alpha^\bullet)})}}} \end{array}$$ where $\tilde\mu(x,\alpha^\bullet).c$ is the abbreviation used in Example \ref{LKQ-proofs-ex} and where $\Vtoe{V}$ stands for $\tilde\mu\alpha^\bullet.\coupe{\Vtov{V}}{\alpha}$. These definitions provide us with a translation, which extends to (call-by-value) $\lambda\mu$-calculus \cite{OngStew,Rocheteau}, and factors through $\overline\lbd\mu\tilde\mu_Q$-calculus (cf. Section \ref{intro-sec}), defining $V\cdot e$ as $\Vtoe{(V,e^\bullet)}$\footnote{In \cite{CH2000} we also had a difference operator $B-A$ (dual to implication), and two associated introduction operations, whose encodings in the present syntax are $\beta\lambda.e=\tilde\mu(\beta^\bullet,x).\coupe{\Vtov{x}}{e}$ and $e\cdot V=(e^\bullet,V)$.}. The translation also makes sense in the untyped setting, as the following example shows. \begin{example} \label{Delta-Delta} Let $\Delta=\lambda x.xx$. We have $\CBVP{\Delta\Delta}=\mu\gamma.c$, and $c\longrightarrow^* c$, with
$$c=\coupe{\Vtov{(e^\bullet)}}{\tilde\mu z.\coupe{\Vtov{(e^\bullet)}}{\Vtoe{(z,\gamma^\bullet)}}}\quad\mbox{ and }\quad e=\tilde\mu(x,\alpha^\bullet).\coupe{\Vtov{x}}{\tilde\mu y.\coupe{\Vtov{x}}{\Vtoe{(y,\alpha^\bullet)}}}$$ \end{example}
\noindent {\bf Encoding CBN $\lambda$($\mu$)-calculus.} What about CBN? We can translate it to $\mathsf{LKQ}$, but at the price of translating terms to contexts, which is a violence to our goal of giving an intuitive semantics to the first abstract machine presented in Section \ref{intro-sec}. Instead, we spell out the system dual to $\mathsf{LKQ}$, which is known as $\mathsf{LKT}$, in which expressions and contexts will have negative types, and in which we shall be able to express CBN $\lambda$-terms as expressions. Our syntax for $\mathsf{LKT}$ is a mirror image of that for $\mathsf{LKQ}$: it exchanges the $\mu$ and $\tilde\mu$, the $x$'s and the $\alpha$'s, etc..., and renames {\it inl}, {\it inr} as {\it fst}, {\it snd}, which are naturally associated with $\with$ while the latter were naturally associated with $\oplus$: $$\begin{array}{lll} \mbox{Commands} && c::=\coupe{v}{e}\\ \mbox{Covalues} && E::= \alpha \,\mbox{\large\boldmath$\mid$}\, [E,E] \,\mbox{\large\boldmath$\mid$}\, \fst{E} \,\mbox{\large\boldmath$\mid$}\, \snd{E} \,\mbox{\large\boldmath$\mid$}\, v^\bullet\\ \mbox{Contexts} && e::= \Etoe{E} \,\mbox{\large\boldmath$\mid$}\, \tilde\mu x.c\\ \mbox{Expressions} && v::=x \,\mbox{\large\boldmath$\mid$}\, \mu \alpha.c \,\mbox{\large\boldmath$\mid$}\, \mu x^\bullet.c \,\mbox{\large\boldmath$\mid$}\, \ldots \end{array}$$ Note that focalisation is now on the left, giving rise to a syntactic category of {\em covalues} (that were called applicative contexts in \cite{CH2000}).\footnote{Note also that the duality sends a command $\coupe{v}{e}$ to a command $\coupe{e'}{v'}$ where $v'$, $e'$ are the mirror images of $v$, $e$.}
The rules are all obtained from $\mathsf{LKQ}$ by duality: $$\begin{array}{c} \seq{}{\lkE{\Gamma}{\alpha:N}{}{\Delta\:,\:\alpha:N}}\quad\quad \seq{\lkE{\Gamma}{E_1:N_1}{}{\Delta}}{\lkE{\Gamma}{\fst{E_1}:N_1\with N_2}{}{\Delta}} \\\\ \seq{}{\lkv{\Gamma\:,\:x:N}{}{x:N}{\Delta}}\quad\quad\seq{\lkv{\Gamma}{}{v:N}{\Delta}\quad\quad \lke{\Gamma}{e:N}{}{\Delta}} {\coupe{v}{e}:(\lkc{\Gamma}{}{}{\Delta})} \quad\quad\quad \ldots \end{array}$$
We would have arrived at this logic naturally if we had chosen in Section \ref{LK-proofs-sec} to present $\mathsf{LK}$ with a reversible disjunction on the right and an irreversible conjunction on the left, and in Section \ref{LKQ-sec} to present a focalisation discipline with focusing on formulas on the left.
In $\mathsf{LKT}$ we can define the following derived CBN implication and terms: $$\begin{array}{c} M\rightarrow^n N=(\notN{M})\:\parr\:N\\ \lambda x.v=\mu( x^\bullet,\alpha).\coupe{v}{\Vtov{\alpha}}\quad\quad\quad v_1v_2=\mu\alpha.\coupe{v_1}{\Vtov{(v_2^\bullet,\alpha)}} \end{array}$$ The translation extends to $\lambda\mu$-calculus \cite{Parigot92} and factors through the $\overline\lbd\mu\tilde\mu_T$-calculus of \cite{CH2000}, defining $v \cdot E$ as $(v^\bullet,E)$. Note that the covalues involved in executing call-by-name $\lambda$-calculus are just {\em stacks} of expressions (cf. Section \ref{intro-sec}).
With these definitions, we have: $$\begin{array}{lll}
\coupe{\lambda x.v_1}{\Vtov{(v_2\cdot E)}} =
\coupe{\mu( x^\bullet,\alpha).\coupe{v_1}{\Vtov{\alpha}}}{\Vtov{(v_2^\bullet,E)}}
\longrightarrow \coupe{\Sub{v_1}{x}{v_2}}{\Vtov{E}}\\
\coupe{v_1v_2}{\Vtov{E}} = \coupe{\mu\alpha.\coupe{v_1}{\Vtov{(v_2^\bullet,\alpha)}}}{\Vtov{E}} \longrightarrow \coupe{v_1}{\Vtov{(v_2^\bullet,E)}} =
\coupe{v_1}{\Vtov{(v_2\cdot E)}} \end{array}$$ We are thus back on our feet (cf. Section \ref{intro-sec})!
\noindent {\bf Translating $\mathsf{LKQ}$ into $\mathsf{NJ}$.}
Figure~\ref{LKQ-to-NJ}
presents a translation from $\mathsf{LKQ}$ to intuitionistic natural deduction $\mathsf{NJ}$, or, via Curry-Howard, to $\lambda$-calculus extended with products and sums. In the translation, $R$ is a fixed target formula (cf. Section \ref{intro-sec}). We translate $(\notP{\_})$ as ``$\_$ implies $R$'' (cf. \cite{Krivine91,LRS93}).
We write $B^A$ for function types / intuitionistic implications. The rules of $\mathsf{L}_{\textrm{foc}}$ are simulated by $\beta$-reductions. One may think of the source $\mathsf{L}_{\textrm{foc}}$ terms as a description of the target ones ``in direct style'' (cf. \cite{DanvyBack}).
\begin{figure}
\caption{Translation of $\mathsf{LKQ}$ into the $\lambda$-calculus / $\mathsf{NJ}$}
\label{LKQ-to-NJ}
\end{figure} \begin{proposition} We set $\CPSP{\Gamma}=\setc{x:\CPSP{P}}{x:P\in\Gamma}\quad\quad R^{\CPSP{\Delta}}=\setc{\contvartovar{\alpha}:R^{\CPSP{P}}}{\alpha:P\in\Delta}$. We have:
$$\begin{array}{|cc|cc|cc|c|} \hline c:(\lkc{\Gamma}{}{}{\Delta}) && \lkV{\Gamma}{}{V:P}{\Delta} && \lkv{\Gamma}{}{v:P}{\Delta} && \lke{\Gamma}{e:P}{}{\Delta}\\ \Downarrow &&\Downarrow &&\Downarrow &&\Downarrow \\ \CPSP{\Gamma}\:,\:R^{\CPSP{\Delta}}\vdash \CPSP{c}:R && \CPSP{\Gamma}\:,\:R^{\CPSP{\Delta}}\vdash \CPSP{V}:\CPSP{P} && \CPSP{\Gamma}\:,\:R^{\CPSP{\Delta}}\vdash \CPSP{v}:R^{R^{\CPSP{P}}} && \CPSP{\Gamma}\:,\:R^{\CPSP{\Delta}}\vdash \CPSP{e}:R^{\CPSP{P}}\\ \hline \end{array}$$
Moreover, the translation preserves reduction: if $t\longrightarrow t'$, then $\CPSP{t}\longrightarrow^*\CPSP{(t')}$. \end{proposition} Composing the previous translations, from CBN $\lambda\mu$-calculus to $\mathsf{LKT}$ then through duality to $\mathsf{LKQ}$ then to $\mathsf{NJ}$, what we obtain is the CPS translation due to Lafont, Reus, and Streicher \cite{LRS93} (LRS translation, for short).
\noindent {\bf Translating $\mathsf{LKQ}$ into $\mathsf{LLP}$.} The translation just given from $\mathsf{LKQ}$ to $\mathsf{NJ}$ does actually two transformations for the price of one: {\em from classical to intuitionistic}, and {\em from sequent calculus style to natural deduction style}. The intermediate target and source of this decomposition is nothing but a subsystem of Laurent's polarised linear logic $\mathsf{LLP}$ \cite{LaurentTh} \footnote{Specifically, no positive formula is allowed on the right in the rules for $\parr$ and $\with$ and in the right premise of the cut rule.}. We adopt a presentation of $\mathsf{LLP}$ in which all negative formulas are handled as positive formulas on the left, and hence in which $!N$ and $?P$ are replaced by $\notP{}$ with the appropriate change of side. With these conventions, $\mathsf{LLP}$ is nothing but the system called $\mathsf{LJ}_0$ in \cite{Laurent2009}. For the purpose of giving a system $\mathsf{L}$ term syntax, we distinguish three kinds of sequents for our subsystem of $\mathsf{LLP}$: $$(\lkc{\Gamma}{}{}{})\quad\quad(\lkV{\Gamma}{}{P}{})\quad\quad (\lke{\Gamma}{P}{}{})$$ The syntax, the computation rules, and the typing rules are as follows (omitting explicit substitutions): $$\begin{array}{l}
c::=\coupe{V}{e}\quad V::= x \,\mbox{\large\boldmath$\mid$}\, e^\bullet \,\mbox{\large\boldmath$\mid$}\, (V,V) \,\mbox{\large\boldmath$\mid$}\, \inl{V} \,\mbox{\large\boldmath$\mid$}\, \inr{V}\\ e::=\derel{V} \,\mbox{\large\boldmath$\mid$}\, \tilde\mu x.c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu(x_1,x_2).c \,\mbox{\large\boldmath$\mid$}\, \tilde\mu[\inl{x_1}.c_1|\inr{x_2}.c_2] \end{array} $$
$$\begin{array}{lllll} \coupe{V}{\tilde\mu x.c} \longrightarrow \Sub{c}{x}{V}\\ \coupe{e^\bullet}{\derel{V}} \longrightarrow \coupe{V}{e} \\ \coupe{(V_1,V_2)}{\tilde\mu(x_1,x_2).c} \longrightarrow \Subm{c}{\Subi{x_1}{V_1},\Subi{x_2}{V_2}}\\
\coupe{\inl{V_1}}{\tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2}} \longrightarrow \Sub{c_1}{x_1}{V_1}
\quad\quad \coupe{\inr{V_2}}{\tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2}} \longrightarrow \Sub{c_2}{x_2}{V_2}
\end{array}$$
$$\seq{}{\lkV{\Gamma\:,\:x:P}{}{x:P}{}}\quad\quad \seq{\lkV{\Gamma}{}{V:P}{}\quad\quad \lke{\Gamma}{e:P}{}{}} {\coupe{V}{e}:(\lkc{\Gamma}{}{}{})}\quad\quad \seq{c:(\lkc{\Gamma\:,\:x:P}{}{}{})}{\lke{\Gamma}{\tilde\mu x.c:P}{}{}} $$ $$\seq{\lke{\Gamma}{e:P}{}{}}{\lkV{\Gamma}{}{e^\bullet:\notP{P}}{}}\quad\quad\seq{\lkV{\Gamma}{}{V_1:P_1}{} \quad\quad \lkV{\Gamma}{}{V_2:P_2}{}} {\lkV{\Gamma}{}{(V_1,V_2):P_1\otimes P_2}{}}\quad\quad \seq{\lkV{\Gamma}{}{V_1:P_1}{}}{\lkV{\Gamma}{}{\inl{V_1}:P_1\oplus P_2}{}}\quad\quad \seq{\lkV{\Gamma}{}{V_2:P_2}{}}{\lkV{\Gamma}{}{\inr{V_2}:P_1\oplus P_2}{}}$$ $$\quad\seq{\lkV{\Gamma}{}{V:P}{}}{\lke{\Gamma}{\derel{V}:\notP{P}}{}{}}\quad\quad \seq{c:(\Gamma,x_1:P_1,x_2:P_2\vdash)}{\lke{\Gamma}{\tilde\mu(x_1,x_2).c:P_1\otimes P_2}{}{}}\quad\quad \seq{c_1:(\Gamma,x_1:P_1\vdash)\quad\quad c_2:(\Gamma,x_2:P_2\vdash)}
{\lke{\Gamma}{\tilde\mu\copaireP{\inl{x_1}.c_1|\inr{x_2}.c_2}:P_1\oplus P_2}{}{}}$$
\noindent
The constructs $e^\bullet$ and $\derel{V}$ transcribe $\mathsf{LLP}$'s {\em promotion} and {\em dereliction} rule, respectively. The compilation from $\mathsf{LKQ}$ to (the subsystem of) $\mathsf{LLP}$
turns every $\alpha:P$ on the right to a $\contvartovar{\alpha}:\notP{P}$ on the left. We write
$\notP{(\ldots,\alpha:P,\ldots)}= (\ldots,k_\alpha:\notP{P},\ldots)$.
The translation is as follows (we give only the non straightforward cases):
$$\begin{array}{l}
\coupe{v}{e}_{\mathsf{LLP}}=\coupe{(e_{\mathsf{LLP}})^\bullet}{v_{\mathsf{LLP}}}\\
(\mu\alpha.c)_{\mathsf{LLP}}=\tilde\mu\contvartovar{\alpha}.c_{\mathsf{LLP}}\quad\quad
(\Vtov{V})_{\mathsf{LLP}}= \derel{(V_{\mathsf{LLP}})}\\
\alpha_{\mathsf{LLP}}=\tilde\mu x.\coupe{\contvartovar{\alpha}}{\derel{x}}\quad (\tilde\mu\alpha^\bullet.c)_{\mathsf{LLP}}=\tilde\mu\contvartovar{\alpha}.(c_{\mathsf{LLP}})
\end{array}$$
Note that we can optimise the translation of $\coupe{\Vtov{V}}{e}$, and (up to an expansion rule) of $\Vtoe{V}=\tilde\mu\alpha^\bullet.\coupe{\Vtov{V}}{\alpha}$:
$$\begin{array}{lll}
\coupe{\Vtov{V}}{e}_{\mathsf{LLP}}=\coupe{(e_{\mathsf{LLP}})^\bullet}{\derel{(V_{\mathsf{LLP}})}}
\longrightarrow\coupe{V_{\mathsf{LLP}}}{e_{\mathsf{LLP}}}\\
(\Vtoe{V})_{\mathsf{LLP}} =
\tilde\mu \contvartovar{\alpha}.\coupe{V_{\mathsf{LLP}}}{\alpha_{\mathsf{LLP}}}
\;= \tilde\mu \contvartovar{\alpha}.\coupe{V_{\mathsf{LLP}}}{\tilde\mu x.\coupe{\contvartovar{\alpha}}{\derel{x}}} \longrightarrow \tilde\mu \contvartovar{\alpha}.\coupe{\contvartovar{\alpha}}{\derel{(V_{\mathsf{LLP}})}} = \derel{(V_{\mathsf{LLP}})} \end{array} $$ These optimisations allow us to define a right inverse to ${}_{\mathsf{LLP}}$ (that maps $\Vtov{V}$ to $\Vtoe{V}$), i.e.: \begin{center} {\em $\mathsf{LLP}$ (restricted as above) appears as a retract of $\mathsf{LKQ}$}. \end{center}
The translation simulates reductions and is well typed:
$$\begin{array}{lll}
c:(\lkc{\Gamma}{}{}{\Delta}) & \quad\Rightarrow\quad & c_{\mathsf{LLP}}: (\lkc{\Gamma,\notP{\Delta}}{}{}{})\\
\lkv{\Gamma}{}{v:P}{\Delta} & \quad\Rightarrow\quad & \lke{\Gamma,\notP{\Delta}}{v_{\mathsf{LLP}}:\notP{P}}{}{}\\
\lkV{\Gamma}{}{V:P}{\Delta} & \quad\Rightarrow\quad & \lkV{\Gamma,\notP{\Delta}}{}{V_{\mathsf{LLP}}:P}{}\\ \lke{\Gamma}{e:P}{}{\Delta} & \quad\Rightarrow\quad & \lke{\Gamma,\notP{\Delta}}{e_{\mathsf{LLP}}:P}{}{}
\end{array}$$
We note that this compilation blurs the distinction between a continuation variable and an ordinary variable (like in the classical CPS translations).
\begin{example} The classical proof $(\lke{}{\tilde\mu\beta^\bullet.\coupe{\Vtov{(\alpha^\bullet)}}{\beta}:\notP{\notP{P}}}{}{\alpha:P})$
is translated (using the above optimisation) to the intuitionistic proof
$(\lke{\contvartovar{\alpha}:\notP{P}}{\derel{((\tilde\mu x.\coupe{\contvartovar{\alpha}}{\derel{x}})^\bullet)}:\notP{\notP{P}}}{}{})$. Without term decorations, we have turned a proof of the classically-only provable sequent
$(\lke{}{\notP{\notP{P}}}{}{P})$ into an intuitionistic proof of $(\lke{\notP{P}}{\notP{\notP{P}}}{}{})$. \end{example}
All that is left to do in order to reach $\mathsf{NJ}$\footnote{In fact, the target of the translation uses only implications of the form $R^P$. Seeing $R$ as ``false'', this means that the target is in fact intuitionistic logic with conjunction, disjunction and negation in natural deduction style.} from (our subsystem of) $\mathsf{LLP}$ is to turn contexts $e:P$ into values of type $\notP{P}$, and to rename $\notP{\_}$, $\otimes$, and $\oplus$ as $R^{\_},\times$, and $+$, respectively. More precisely, we describe the target syntax (a subset of a $\lambda$-calculus with sums and products) as follows: $$\begin{array}{l} c::= VV\quad\quad V::= x \,\mbox{\large\boldmath$\mid$}\, (V,V) \,\mbox{\large\boldmath$\mid$}\, \inl{V} \,\mbox{\large\boldmath$\mid$}\, \inr{V} \,\mbox{\large\boldmath$\mid$}\, \lambda x.c \,\mbox{\large\boldmath$\mid$}\, \lambda(x_1,x_2).c \,\mbox{\large\boldmath$\mid$}\, \lambda z.\fcase{z}{\fcasei{\inl{x_1}}{c_1,\fcasei{\inr{x_2}}{c_2}}} \end{array}$$ Again, we give only the non trivial cases of the translation: $$\begin{array}{l} \coupe{V}{e}_{\mathsf{NJ}}= (e_{\mathsf{NJ}})(V_{\mathsf{NJ}})\quad\quad (e^\bullet)_{\mathsf{NJ}}=e_{\mathsf{NJ}}\quad\quad (\derel{V})_{\mathsf{NJ}}=\lambda k.k(V_{\mathsf{NJ}})\\(\tilde\mu x.c)_{\mathsf{NJ}}=\lambda x.(c_{\mathsf{NJ}})\quad (\tilde\mu(x_1,x_2).c)_{\mathsf{NJ}}=\lambda(x_1,x_2).(c_{\mathsf{NJ}})\\
(\tilde\mu[\inl{x_1}.c_1|\inr{x_2}.c_2])_{\mathsf{NJ}}=\lambda z.\fcase{z}{\fcasei{\inl{x_1}}{((c_1)_{\mathsf{NJ}}),\fcasei{\inr{x_2}}{(c_2)_{\mathsf{NJ}}}}} \end{array}$$
\begin{proposition} \label{factor-cps} For all $\mathsf{L}_{\textrm{foc}}$ term $t$ (where $t::= c \,\mbox{\large\boldmath$\mid$}\, V \,\mbox{\large\boldmath$\mid$}\, v \,\mbox{\large\boldmath$\mid$}\, e$), we have: $$\CPSP{t}=_{\beta\eta} (t_{\mathsf{LLP}})_{\mathsf{NJ}}\;.$$ \end{proposition} \noindent {\sc Proof}. We treat the non trivial cases: $$\begin{array}{lll} \coupe{v}{e}_{\mathsf{LLP},\mathsf{NJ}} = \coupe{(e_{\mathsf{LLP}})^\bullet}{v_{\mathsf{LLP}}}_{\mathsf{NJ}} = (v_{\mathsf{LLP},\mathsf{NJ}})(((e_{\mathsf{LLP}})^\bullet)_{\mathsf{NJ}}) = (v_{\mathsf{LLP},\mathsf{NJ}})(e_{\mathsf{LLP},\mathsf{NJ}})\\
(\Vtov{V})_{\mathsf{LLP},\mathsf{NJ}} = ( \derel{(V_{\mathsf{LLP}})})_{\mathsf{NJ}} =\lambda k.k(V_{\mathsf{LLP},\mathsf{NJ}} )\\ \alpha_{\mathsf{LLP},\mathsf{NJ}} = (\tilde\mu x.\coupe{\contvartovar{\alpha}}{\derel{x}})_{\mathsf{NJ}} = \lambda x.(\coupe{\contvartovar{\alpha}}{\derel{x}}_{\mathsf{NJ}})=\lambda x.((\derel{x})_{\mathsf{NJ}})\contvartovar{\alpha}=\lambda x.(\lambda k.kx)\contvartovar{\alpha}=_\beta\lambda x.\contvartovar{\alpha}x=_\eta \contvartovar{\alpha}\;\qed \end{array}$$ Note that, in the proof of the above proposition, the $\beta$ step is a typical ``administrative reduction'', so that morally the statement of the proposition holds with $\eta$ only.
\noindent {\bf A short foray into linear logic.} We end this section by placing the so-called Girard's translation of the call-by-name $\lambda$-calculus to linear logic in perspective. The target of this translation is in fact the polarised fragment $\mathsf{LL}_{\textrm{pol}}$ of linear logic, obtained by restriction to the polarised formulas: $$P::= X \,\mbox{\large\boldmath$\mid$}\, P\otimes P \,\mbox{\large\boldmath$\mid$}\, P\oplus P \,\mbox{\large\boldmath$\mid$}\, !N\quad\quad\quad N::= X^\bot \,\mbox{\large\boldmath$\mid$}\, N\parr N \,\mbox{\large\boldmath$\mid$}\, N\with N \,\mbox{\large\boldmath$\mid$}\, ?P$$ This fragment is also a fragment of $\mathsf{LLP}$ (cf. \cite{LaurentTh}), up to the change of notation for the formulas: we write here $\overline{P}$ for $P^\bot$ and $\notP{\overline{N}}$ for $!N$. Girard's translation encodes call-by-name implication as follows:
$$(A\rightarrow B)^*=!(A^*)\multimap B^*=(?(A^*)^\bot)\parr B^*$$
and then every
$\lambda$-term $\Gamma\vdash M:A$ into a proof of $!(\Gamma)^*\vdash A^*$.
Up to the inclusion of $\mathsf{LL}_{\textrm{pol}}$ into $\mathsf{LLP}$, up to the definition of the $\lambda$-calculus inside $\mathsf{LKT}$ given above, up to the change of notation, and up to the duality between $\mathsf{LKT}$ and $\mathsf{LKQ}$, Girard's translation coincides with (the restriction of) our translation above from $\mathsf{LKT}$ to $\mathsf{LLP}$.
On the other hand, the restriction to the $\lambda\mu$-calculus of our translation from $\mathsf{LKT}$ to $\mathsf{NJ}$ is the CPS translation of Lafont-Reus-Streicher. Thus, restricted to the $\lambda$-calculus, Proposition \ref{factor-cps} reads as follows: \begin{center} {\em Lafont-Reus-Streicher's CPS factors through Girard's translation}. \end{center} Explicitly, on types, we have that $A^*$ coincides with $A$ as expanded in $\mathsf{LKT}$, and (cf. \cite{CH2000}), starting from the simply-typed $\lambda$-term $(\Gamma\vdash M:A)$, \begin{itemize} \item we view $M$ as an expression $(\lkv{\Gamma}{}{M:A}{})$ of $\mathsf{LKT}$ (using the CBN encoding of implication), \item and then as a context $(\lke{}{M:\overline{A}}{}{\overline{\Gamma}})$ of $\mathsf{LKQ}$, \item then by our above translation we find back the result of Girard's translation $(\lke{\notP{(\overline{\Gamma})}}{M_{\mathsf{LLP}}:\overline{A}}{}{})$, \item and we arrive finally at the Hofmann-Streicher CPS-transform $(\notP{(\overline{\Gamma})}\vdash \CPSP{M}:\notP{(\overline{A})})$ of $M$, through the translation ${}_{\mathsf{NJ}}$.\footnote{The LRS translation of implication goes as follows: $(A\rightarrow B)_{\textrm{LRS}}= R^{A_{\textrm{LRS}}}\times B_{\textrm{LRS}}$, and we have $\CPSP{(\overline{A})}=A_{\textrm{LRS}}$ .} \end{itemize} But the above reading is actually stronger, because it is not hard to describe a translation ${}_{\mathsf{LJ}}$ inverse to ${}_{\mathsf{NJ}}$, so that up to this further isomorphism, we have that: \begin{center} {\em The LRS translation of the CBN $\lambda$-calculus coincides with Girard's translation}. \end{center} This nice story does not extend immediately to the $\lambda\mu$-calculus, for which the simplest extension of Girard's translation, taking $\lkv{\Gamma}{}{M:A}{\Delta}$ to a proof of $!(\Gamma^*)\vdash A^*,?(\Delta)^*$ is not polarised. 
In fact, Laurent \cite{LaurentTh} has shown that the natural target for an extension of Girard's translation to CBN $\lambda\mu$-calculus is $\mathsf{LLP}$, in which we can spare the ?'s on the right, i.e., we can translate $\lkv{\Gamma}{}{M:A}{\Delta}$ into a proof of $!(\Gamma^*)\vdash A^*,\Delta^*$ (contractions on negative formulas are free in $\mathsf{LLP}$). So, the extension of the picture to call-by-name $\lambda\mu$-calculus is\footnote{See also \cite{LR2003} for further discussion.}: \begin{center} {\em The LRS translation of the CBN $\lambda\mu$-calculus coincides with Laurent-Girard's translation into $\mathsf{LLP}$}. \end{center}
\section{A synthetic system} \label{LKQS-sec}
In this section we pursue two related goals.
\begin{enumerate} \item We want to account for the full (or strong) focalisation (cf. \cite{QuaTor96}), which consists in removing the use of contractions in the negative phases and carrying
these phases maximally, up to having only atoms on the left of the sequent. The positive phases are made also ``more maximal'' by allowing the use of the axiom only on positive atoms $X$. This is of interest in a proof search perspective, since the stronger discipline
further reduces the search space. \item We would like our syntax to quotient proofs over the order of decomposition of negative formulas. The use of structured pattern-matching
(cf. Examples \ref{LKQ-proofs-ex}, \ref{LKQ-exp-ex}) is relevant, as we can describe the construction of a proof of $(\Gamma, x:(P_1\otimes P_2)\otimes(P_3\otimes P_4)\vdash \Delta)$ out of a proof of $c:(\Gamma, x_1:P_1,x_2:P_2,x_3:P_3,x_4:P_4\vdash\Delta)$ ``synthetically'', by writing $\coupe{\Vtov{x}}{\tilde\mu((x_1,x_2),(x_3,x_4)).c}$, where $\tilde\mu((x_1,x_2),(x_3,x_4)).c$ stands for an abbreviation of either of the following two commands: $$\begin{array}{lll} \coupe{\Vtov{x}}{\tilde\mu(y,z).\coupe{\Vtov{y}}{\tilde\mu(x_1,x_2).\coupe{\Vtov{z}}{\tilde\mu(x_3,x_4).c}}} & \quad\quad & \coupe{\Vtov{x}}{\tilde\mu(y,z).\coupe{\Vtov{z}}{\tilde\mu(x_3,x_4).\coupe{\Vtov{y}}{\tilde\mu(x_1,x_2).c}}} \end{array}$$ \end{enumerate} The two goals are connected, since applying strong focalisation will forbid the formation of these two terms (because $y,z$ are values appearing with non atomic types), keeping the synthetic form only... provided we make it first class.
We shall proceed in {\em two steps}. The first, intermediate one consists in introducing first-class {\em counterpatterns} and will serve goal 1 but not quite goal 2: $$\begin{array}{lllllllll} \mbox{Simple commands} && c::=\coupe{v}{e} &\quad\quad\quad\quad& \mbox{Commands} && C::= c \,\mbox{\large\boldmath$\mid$}\, \copaireC{C}{q}{q}{C}\\ \mbox{Expressions} && v::= \Vtov{V} \,\mbox{\large\boldmath$\mid$}\, \mu\alpha.C &\quad\quad\quad\quad& \mbox{Values} && V::= x \,\mbox{\large\boldmath$\mid$}\, (V,V) \,\mbox{\large\boldmath$\mid$}\, \inl{V} \,\mbox{\large\boldmath$\mid$}\, \inr{V} \,\mbox{\large\boldmath$\mid$}\, e^\bullet \\ \mbox{Contexts} && \fbox{$e::=\alpha \,\mbox{\large\boldmath$\mid$}\, \tilde\mu q.C$} &\quad\quad\quad\quad&
\mbox{Counterpatterns} && \fbox{$q::= x \,\mbox{\large\boldmath$\mid$}\, \alpha^\bullet \,\mbox{\large\boldmath$\mid$}\, (q,q) \,\mbox{\large\boldmath$\mid$}\, \copaireP{q,q}$} \end{array}$$ The counterpatterns are to be thought of as constructs that match patterns (see below).
In this syntax, we have gained a unique $\tilde\mu$ binder, but the price to pay (provisionally) is that now commands are trees of copairings $\copaireC{\_}{q_1}{q_2}{\_}$ whose leaves are simple commands.
The typing discipline is restricted with respect to that of Figure 1 (and adapted to the setting with explicit counterpatterns). Let $\Xi=x_1:X_1,\ldots,x_n:X_n$ denote a left context consisting of {\it atomic formulas only}. The rules are as follows: {\small $$\begin{array}{c}\seq{}{\lkV{\Xi\:,\:x:X}{}{x:X}{\Delta}}\quad\quad \seq{}{\lke{\Xi}{\alpha:P}{}{\alpha:P\:,\:\Delta}}\quad\quad \seq{\lkv{\Xi}{}{v:P}{\Delta}\quad\quad \lke{\Xi}{e:P}{}{\Delta}} {\coupe{v}{e}:(\lkc{\Xi}{}{}{\Delta})} \\\\ \seq{C:(\lkc{\Xi\:,\:q:P}{}{}{\Delta})}{\lke{\Xi}{\tilde\mu q.C:P}{}{\Delta}} \quad\quad \seq{C:(\lkc{\Xi}{}{}{\alpha:P\:,\:\Delta})}{\lkv{\Xi}{}{\mu\alpha.C:P}{\Delta}}\quad\quad \seq{\lkV{\Xi}{}{V:P}{\Delta}}{\lkv{\Xi}{}{\Vtov{V}:P}{\Delta}} \\\\ \seq{\lke{\Xi}{e:P}{}{\Delta}}{\lkV{\Xi}{}{e^\bullet:\notP{P}}{\Delta}}\quad\quad\seq{\lkV{\Xi}{}{V_1:P_1}{\Delta} \quad\quad \lkV{\Xi}{}{V_2:P_2}{\Delta}} {\lkV{\Xi}{}{(V_1,V_2):P_1\otimes P_2}{\Delta}}\quad\quad \seq{\lkV{\Xi}{}{V_1:P_1}{\Delta}}{\lkV{\Xi}{}{\inl{V_1}:P_1\oplus P_2}{\Delta}}\quad\quad \seq{\lkV{\Xi}{}{V_2:P_2}{\Delta}}{\lkV{\Xi}{}{\inr{V_2}:P_1\oplus P_2}{\Delta}} \\\\\seq{C:(\lkc{\Gamma}{}{}{\alpha:P\:,\:\Delta})}{C:(\lkc{\Gamma\:,\:\alpha^\bullet:\notP{P}}{}{}{\Delta})}\quad\quad \seq{C:(\lkc{\Gamma\:,\:q_1:P_1\:,\:q_2:P_2}{}{}{\Delta})}{C:(\lkc{\Gamma\:,\:(q_1,q_2):P_1\otimes P_2}{}{}{\Delta})}\quad\quad \seq{C_1:(\lkc{\Gamma\:,\:q_1:P_1}{}{}{\Delta})\quad\quad C_2:(\lkc{\Gamma\:,\:q_2:P_2}{}{}{\Delta})} {\copaireC{C_1}{q_1}{q_2}{C_2}:(\lkc{\Gamma\:,\:\copaireP{q_1,q_2}:P_1\oplus P_2}{}{}{\Delta})} \end{array}$$}
Our aim now ({\em second step}) is to get rid of the tree structure of a command. Indeed, towards our second goal, if $c_{ij}:(\Gamma,x_i:P_i,x_j:P_j\vdash_S\Delta)$ ($i=1,2, j=3,4$), we want to identify $\copaireC{\copaireC{c_{13}}{x_3}{x_4}{c_{14}}}{x_1}{x_2}{\copaireC{c_{23}}{x_3}{x_4}{c_{24}}}$ and $\copaireC{\copaireC{c_{13}}{x_1}{x_2}{c_{23}}}{x_3}{x_4}{\copaireC{c_{14}}{x_1}{x_2}{c_{24}}} $. To this effect, we need a last ingredient. We introduce a syntax of {\it patterns}, and we redefine the syntax of values, as follows: \begin{center} \fbox{${\cal V}::=x \,\mbox{\large\boldmath$\mid$}\, e^\bullet\quad\quad\quad V::=p\:\patc{\Subi{i}{{\cal V}_i}}{i\in p} \quad\quad\quad\quad p::= x \,\mbox{\large\boldmath$\mid$}\, \alpha^\bullet \,\mbox{\large\boldmath$\mid$}\, (p,p) \,\mbox{\large\boldmath$\mid$}\, \inl{p} \,\mbox{\large\boldmath$\mid$}\, \inr{p}$} \end{center} where $i\in p$ is defined by: $$\seq{}{x\in x}\quad\seq{}{\alpha^\bullet\in\alpha^\bullet}\quad \seq{i\in p_1}{i\in (p_1,p_2)}\quad\seq{i\in p_2}{i\in(p_1,p_2)}\quad \seq{i\in p_1}{i\in\inl{p_1}}\quad\seq{i\in p_2}{i\in \inr{p_2}}$$ Moreover, ${\cal V}_i$ must be of the form $y$ (resp. $e^\bullet$) if $i=x$ (resp. $i=\alpha^\bullet$).
Patterns are required to be linear, as well as the counterpatterns, for which the definition of ``linear'' is adjusted in the case $\copaireP{q_1,q_2}$, in which a variable can occur (but recursively linearly so) in both $q_1$ and $q_2$.
Note also that the reformulation of values is up to $\alpha$-conversion: for example, it is understood that $\alpha^\bullet\langle\Subi{\alpha^\bullet}{e^\bullet}\rangle = \beta^\bullet\langle\Subi{\beta^\bullet}{e^\bullet}\rangle$.
We can now rephrase the logical reduction rules in terms of pattern/counterpattern interaction (whence the terminology), resulting in the following packaging of rules: \begin{center} $\seq{V=p\:\langle\ldots \Subi{x}{y},\ldots,\Subi{\alpha^\bullet}{e^\bullet},\ldots\rangle\quad\quad\Sub{C}{q}{p}\longrightarrow^* c}{ \coupe{\Vtov{V}}{\tilde\mu q.C} \longrightarrow \Submimp{c}{\ldots, \Subi{x}{y},\ldots,\Subi{\alpha}{e},\ldots} }$ \end{center} where $c\{\sigma\}$ is the usual, implicit substitution, and where $c$ (see the next proposition) is the normal form of $\Sub{C}{q}{p}$ with respect to the following set of rules:
$$\begin{array}{l} \Subm{C}{\Subi{(q_1,q_2)}{(p_1,p_2)},\sigma}\longrightarrow \Subm{C}{\Subi{q_1}{p_1},\Subi{q_2}{p_2},\sigma}\\ \Subm{\copaireC{C_1}{q_1}{q_2}{C_2}}{\Subi{\copaireP{q_1,q_2}}{\inl{p_1}},\sigma}\longrightarrow \Subm{C_1}{\Subi{q_1}{p_1},\sigma}\quad\quad \Subm{\copaireC{C_1}{q_1}{q_2}{C_2}}{\Subi{\copaireP{q_1,q_2}}{\inr{p_2}},\sigma}\longrightarrow \Subm{C_2}{\Subi{q_2}{p_2},\sigma}\\ \Subm{C}{\Subi{\alpha^\bullet}{\alpha^\bullet},\sigma}\longrightarrow \Subm{C}{\sigma}\quad\quad\quad \Subm{C}{\Subi{x}{x},\sigma}\longrightarrow \Subm{C}{\sigma}\\ \end{array}$$ Logically, this means that we now consider each formula as made of blocks of {\em synthetic} connectives. \begin{example} \begin{itemize} \item Patterns for $P=X\otimes(Y\oplus\notP{Q})$. Focusing on the right yields two possible proof searches: $$\seq{\lkV{\Gamma}{}{x'\set{{\cal V}_{x'}}:X}{\Delta} \quad\lkV{\Gamma}{}{y'\set{{\cal V}_{y'}}:Y}{\Delta}}{\lkV{\Gamma}{}{{ (x',\inl{y'})}\set{{\cal V}_{x'},{\cal V}_{y'}}:X\otimes(Y\oplus\notP{Q})}{\Delta}} \quad\quad \seq{\lkV{\Gamma}{}{x'\set{{\cal V}_{x'}}:X}{\Delta}\quad\lkV{\Gamma}{}{{\alpha'}^\bullet\set{{\cal V}_{{\alpha'}^\bullet}}:\notP{Q}}{\Delta}}{\lkV{\Gamma}{}{{ (x',\inr{{\alpha'}^\bullet})}\set{{\cal V}_{x'},{\cal V}_{{\alpha'}^\bullet}}:X\otimes(Y\oplus\notP{Q})}{\Delta}}$$ \item Counterpattern for $P=X\otimes(Y\oplus\notP{Q})$. The counterpattern describes the tree structure of $P$:
$$\seq{{ c_1:(\lkc{\Gamma\:,\:x:X\:,\:y:Y}{}{}{\Delta})}\quad\quad { c_2:(\lkc{\Gamma\:,\:x:X\:,\:\alpha^\bullet:\notP{Q}}{}{}{\Delta})}} {\copaireC{c_1}{y}{\alpha^\bullet}{c_2}:(\lkc{\Gamma\:,\:{(x,\copaireP{y,\alpha^\bullet})}:X\otimes(Y\oplus\notP{Q})}{}{}{\Delta})}$$ \end{itemize} We observe that the { leaves} of the decomposition are in one-to-one correspondence with the patterns $p$ for the (irreversible) decomposition of $P$ on the right: $$\begin{array}{l}\Sub{\copaireC{c_1}{y}{\alpha^\bullet}{c_2}}{{ q}}{{ p_1}} \longrightarrow^* c_1\quad\quad \Sub{\copaireC{c_1}{y}{\alpha^\bullet}{c_2}}{{ q}}{{ p_2}}\longrightarrow^* c_2 \end{array}$$ where ${ q=(x,\copaireP{y,\alpha^\bullet})}\;, \;{ p_1=(x,\inl{y})}\;, \;{ p_2=(x,\inr{\alpha^\bullet})}$. \end{example} This correspondence is general. We define two predicates $c\in C$ and $\suits{q}{p}$ (``$q$ is orthogonal to $p$'') as follows: $$\seq{}{c\in c}\quad\quad\seq{c\in C_1}{c\in\copaireC{C_1}{q_1}{q_2}{C_2}}\quad\quad \seq{c\in C_2}{c\in\copaireC{C_1}{q_1}{q_2}{C_2}}$$ \begin{center} \fbox{$\seq{}{\suits{x}{x}} \quad\quad \seq{}{\suits{\alpha^\bullet}{\alpha^\bullet}}\quad\quad \seq{\suits{q_1}{p_1}\;\;\suits{q_2}{p_2}}{\suits{(q_1,q_2)}{(p_1,p_2)}}\quad\quad \seq{\suits{q_1}{p_1}}{\suits{\copaireP{q_1,q_2}}{\inl{p_1}}}\quad\quad \seq{\suits{q_2}{p_2}}{\suits{\copaireP{q_1,q_2}}{\inr{p_2}}} \quad\quad\quad$} \end{center}
We can now state the correspondence result. \begin{proposition} \label{pattern-counterpatten-corr} Let { $C:(\lkc{\Xi\,,\,q:P}{}{}{\Delta})$} (as in the assumption of the typing rule for $\tilde\mu q.C$), and let $p$ be such that $q$ is orthogonal to $p$. Then the normal form $c$ of $\Sub{C}{q}{p}$ is a simple command, and the mapping $p\mapsto c$ ($q,C$ fixed) from $\setc{p}{\suits{q}{p}}$ to $\setc{c}{c\in C}$ is one-to-one and onto. \end{proposition} \noindent {\sc Proof}. The typing and the definition of orthogonality entail that in all intermediate $\Subm{C}{\sigma}$'s the substitution $\sigma$ has an item for each counterpattern in the sequent, and that reduction progresses. The rest is easy. (Note that a more general statement is needed for the induction to go through, replacing $\Xi\,,\,q:P$, ``$q$ orthogonal to $p$'', and $\Sub{C}{q}{p}$ with $\Xi\,,\,q_1:P_1\,,\,\ldots\,,\,q_n:P_n$, ``$q_i$ orthogonal to $p_i$ for $i=1,\ldots,n$'', and $\Subm{C}{\Subi{q_1}{p_1},\ldots\Subi{q_n}{p_n}}$, respectively.) \qed
\begin{figure}\label{big-step-mu-tilde}
\end{figure}
Thanks to this correspondence, we can quotient over the ``bureaucracy'' of commands, and we arrive at the calculus
described in Figure 4, together with its typing rules,
which we call {\em synthetic system }$\mathsf{L}$, or $\mathsf{L}_{\textrm{synth}}$.
The $\tilde\mu$ construct of $\mathsf{L}_{\textrm{synth}}$ is closely related to Zeilberger's higher-order abstract approach to focalisation in \cite{ZeilbergerCU}: indeed we can view $\setc{p\mapsto c}{\suits{q}{}{p}}$ as a function from patterns to commands. We actually prefer to see here a {\em finite} record whose fields are the $p$'s orthogonal to $q$. There are only two reduction rules in $\mathsf{L}_{\textrm{synth}}$: the $\mu$-rule, now expressed with implicit substitution, and the $\tilde\mu^+$-rule, which
combines two familiar operations: select a field $p$ (like in object-oriented programming), and substitute (like in functional programming). The next proposition relates $\mathsf{L}_{\textrm{synth}}$ to $\mathsf{L}_{\textrm{foc}}$. \begin{proposition} \label{synth-foc-comp} The typing system of $\:\mathsf{L}_{\textrm{synth}}$ is complete\footnote{It is also easy to see that the underlying translation from $\mathsf{L}_{\textrm{foc}}$ to $\mathsf{L}_{\textrm{synth}}$ is reduction-reflecting (cf. Remark \ref{LK-LKQ-not-red-refl}).} with respect to $\mathsf{LKQ}$. \end{proposition}
\noindent {\sc Proof}. The completeness of $\mathsf{L}_{\textrm{synth}}$ with respect to the intermediate system above is an easy consequence of Proposition \ref{pattern-counterpatten-corr}. We are thus left with proving the completeness of the intermediate system. We define a rewriting relation between sets of sequents as follows: \begin{center} $\begin{array}{l} (\lkc{\Gamma,x:\notP{P}}{}{}{\Delta}),{\bf S} \rightsquigarrow (\lkc{\Gamma}{}{}{\alpha: P,\Delta}),{\bf S} \\ (\lkc{\Gamma,x:P_1\otimes P_2}{}{}{\Delta}),{\bf S} \rightsquigarrow (\lkc{\Gamma,x_1:P_1,x_2:P_2}{}{}{\Delta}),{\bf S} \\ (\lkc{\Gamma,x:P_1\oplus P_2}{}{}{\Delta}),{\bf S} \rightsquigarrow (\lkc{\Gamma,x_1:P_1}{}{}{\Delta}),(\lkc{\Gamma,x_2:P_2}{}{}{\Delta}),{\bf S} \end{array}$ \end{center}
(where $\alpha,x_1,x_2$ are fresh). A normal form for this notion of reduction is clearly a set of sequents of the form $\lkc{\Xi}{}{}{\Delta}$. It is also easy to see that
$\rightsquigarrow$ is confluent and (strongly) normalising.
In what follows, $\vdash_S$ (resp. $\vdash$) will signal the intermediate proof system (resp. $\mathsf{L}_{\textrm{foc}}$). The following property is easy to check.
If $(\Xi_1\vdash \Delta_1),\ldots,(\Xi_n\vdash\Delta_n)$ is the normal form of $(x_1:P_1,\ldots,x_m:P_m\vdash\Delta)$ for $\rightsquigarrow$ and if $c_i:(\Xi_i\vdash_S\Delta_i)$, then there exist $q_1,\ldots,q_m$ and a command $C:(q_1:P_1,\ldots,q_m:P_m\vdash_S\Delta)$ whose leaves are the $c_i$'s.
We prove the following properties together:
\noindent 1) If $c:(x_1:P_1,\ldots,x_m:P_m\vdash\Delta)$, then there exist $q_1,\ldots,q_m$ and $C$ such that $C:(q_1:P_1,\ldots,q_m:P_m\vdash_S\Delta)$.
\noindent 2) If $\lke{\Xi}{e:P}{}{\Delta}$, then there exists $e'$ such that $\lkes{\Xi}{e':P}{}{\Delta}$ (and similarly for expressions $v$).
The proof is by induction on a notion of size which is the usual one except that the size of a variable $x$ is not 1 but the size of its type. It is easy to check that the substitutions involved in Lemma \ref{e-subst-lemma} do not increase the size. The interesting case is $c=\coupe{v}{e}$. Let $(\Xi_1\vdash \Delta_1),\ldots,(\Xi_n\vdash\Delta_n)$ be the normal form of $(x_1:P_1,\ldots,x_m:P_m\vdash\Delta)$. Then, by repeated application of Lemma \ref{e-subst-lemma}, we get $v_1,\ldots,v_n$ and $e_1,\ldots,e_n$, and then by induction $v'_1,\ldots,v'_n$ and $e'_1,\ldots,e'_n$ which we assemble pairwise to form $\coupe{v'_1}{e'_1},\ldots, \coupe{v'_n}{e'_n}$, which in turn, as noted above, can be assembled in a tree $C:(q_1:P_1,\ldots,q_m:P_m\vdash_S\Delta)$. \qed
Putting together Propositions \ref{LKQ-complete} and \ref{synth-foc-comp}, we have proved that $\mathsf{L}_{\textrm{synth}}$ is complete with respect to $\mathsf{LK}$ for provability.
\begin{remark} \label{Boehm-ludique}
\begin{itemize} \item In the multiplicative case (no $C$, $\inl{V}$, $\inr{V}$, $\copaireP{q_1,q_2}$), there is a unique $p$ such that $\suits{q}{}{p}$, namely $q$, and the syntax boils down to \begin{center} $ {\cal V}::=x \,\mbox{\large\boldmath$\mid$}\, e^\bullet\quad
V::=p\:\patc{\Subi{i}{{\cal V}_i}}{i\in p}\quad { v::=x \,\mbox{\large\boldmath$\mid$}\, \tilde\mu q.\set{c}}\quad { c::=\coupe{\Vtov{V}}{\alpha}} $ \end{center} Compare with {\it B\"ohm trees}: $\begin{array}{lll} M::=\overbrace{\lambda\vec{x}.\underbrace{P}_{{ c}}}^{{ e}} &\quad\quad& P::=y\underbrace{\overbrace{M_1}^{{ {\cal V}}}\ldots \overbrace{M_n}^{{ {\cal V}}}}_{{ V}} \end{array}$. For example (cf. the CBN translation in Section \ref{encodings-sec}),
if $M_j$ translates to $e_j$, then $\lambda x_1x_2.xM_1M_2M_3$ translates to $\tilde\mu(\tilde{x_1}^\bullet,\tilde{x_2}^\bullet,y).\coupe{\Vtov{p\patc{\Subi{i}{{\cal V}_i}}{i\in p}}}{\tilde{x}}$, where $p=(\alpha_1^\bullet,\alpha_2^\bullet,\alpha_3^\bullet,z)$, ${\cal V}_{z_j}=(e_j)^\bullet$ , and ${\cal V}_z=y$. \item (for readers familiar with \cite{GirardLS})
Compare with the syntax for ludics presented in \cite{CuLLintroII}:
$\begin{array}{l} \overbrace{M}^{{ e}}::= \setc{\overbrace{J}^p \mapsto \lambda\setc{x_j}{j\in J}.P_J}{\overbrace{J\in{\cal N}}^{\suits{q}{}{p}}}\quad\quad \underbrace{P}_{ c}::=(x\cdot \underbrace{\overbrace{I}^p)\setc{M_i}{i\in I}}_{ V} \,\mbox{\large\boldmath$\mid$}\, \Omega \,\mbox{\large\boldmath$\mid$}\, \maltese \end{array}$
\end{itemize} \end{remark}
\section{Conclusion} \label{conclusion-sec}
We believe that Curien-Herbelin's syntactic kit, which we could call {\em system $\mathsf{L}$} for short, provides us with a robust infrastructure for proof-theoretical investigations, and for applications in formal studies in operational semantics. Thus, the work presented here is faithful to the spirit of Herbelin's Habilitation Thesis \cite{HerbelinHabil}, where he advocated an incremental approach to connectives, starting from a pure control kernel.
On the proof-theoretical side, we note, with respect to the original setting of ludics \cite{GirardLS}, that a pattern $p$ is more precise than a ramification $I$ (finite tree of subaddresses vs a set of immediate subaddresses). We might use this additional precision to design a version of ludics where axioms are first-class rather than treated as infinite expansions.
On the side of applications to programming language semantics, the good fit between abstract machines and our syntax $\mathsf{L}_{\textrm{foc}}$ makes it a good candidate for being used as an intermediate language appropriate to reason about the correctness of abstract machines (see also \cite{Levy2004}). In this spirit, in order to account for languages with mixed call-by-value / call-by-name features, one may give a truly bilateral presentation of $\mathsf{L}_{\textrm{foc}}$ that freely mixes positive and negative formulas like in Girard's $\mathsf{LC}$ \cite{GirardLC}.\footnote{See also \cite{MurthyLC} for an early analysis of the computational meaning of $\mathsf{LC}$ from a programming language perspective.} Such a system is presented in the long version of \cite{Munch2009}.
Finally, we wish to thank Bob Harper, Hugo Herbelin, and Olivier Laurent for helpful discussions.
\end{document} |
\begin{document}
\begin{abstract}
{\em Ropelength} and {\em embedding thickness} are related measures of geometric complexity of classical knots and links in Euclidean space. In their recent work, Freedman and Krushkal posed a question regarding lower bounds for embedding thickness of $n$-component links in terms of the Milnor linking numbers. The main goal of the current paper is to provide such estimates, thus generalizing the known linking number bound. In the process, we collect several facts about finite type invariants and ropelength/crossing number of knots. We give examples of families of knots where such estimates behave better than the well-known knot--genus estimate.
\end{abstract}
\maketitle
\section{Introduction}\label{S:intro}
Given an $n$--component link (we assume class $C^1$ embeddings) in $3$--space
\begin{equation}\label{eq:n-link}
L:S^1\sqcup\ldots \sqcup S^1\longrightarrow \mathbb{R}^3,\qquad L=(L_1, L_2,\ldots, L_n),\quad L_i=L|_{\text{the $i$'th circle}},
\end{equation} its {\em ropelength} $\operatorname{\mathsf{rop}}(L)$ is the ratio $\operatorname{\mathsf{rop}}(L)=\frac{\ell(L)}{r(L)}$ of length $\ell(L)$, which is a sum of lengths of individual components of $L$, to {\em reach} or {\em thickness}: $r(L)$, i.e. the largest radius of the tube embedded as a normal neighborhood of $L$. The {\em ropelength within the isotopy class} $[L]$ of $L$ is defined as
\begin{equation}\label{eq:Rop-def}
\operatorname{\mathsf{Rop}}(L)=\inf_{L'\in [L]} \operatorname{\mathsf{rop}}(L'),\qquad \operatorname{\mathsf{rop}}(L')=\frac{\ell(L')}{r(L')},
\end{equation}
(in \cite{Cantarella-Kusner-Sullivan:2002} it is shown that the infimum is achieved within $[L]$ and the minimizer is of class $C^{1,1}$).
A related measure of complexity, called {\em embedding thickness} was introduced recently in \cite{Freedman-Krushkal:2014}, in the general context of embeddings' complexity. For links, the embedding thickness $\tau(L)$ of $L$ is given by a value of its reach $r(L)$ assuming that $L$ is a subset of the unit ball $B_1$ in $\mathbb{R}^3$ (note that any embedding can be scaled and translated to fit in $B_1$). Again, the embedding thickness of the isotopy class $[L]$ is given by
\begin{equation}\label{eq:thck-def}
\mathcal{T}(L)=\sup_{L'\in [L]} \tau(L').
\end{equation}
For a link $L\subset B_1$, the volume of the embedded tube of radius $\tau(L)$ is $\pi \ell(L)\tau(L)^2$, \cite{Gray:2004} and the tube is contained in the ball of radius $r=2$, yielding
\begin{equation}\label{eq:t(L)-rop(L)}
\operatorname{\mathsf{rop}}(L)=\frac{\pi\ell(L)\tau(L)^2}{\pi\tau(L)^3}\leq \frac{\frac 43 \pi 2^3}{\pi\tau(L)^3},\quad \Rightarrow\quad \tau(L)\leq \Bigl( \frac{11}{\operatorname{\mathsf{rop}}(L)}\Bigr)^{\frac 13}.
\end{equation}
In turn a lower bound for $\operatorname{\mathsf{rop}}(L)$ gives an upper bound for $\tau(L)$ and vice versa. For other measures of complexity of embeddings such as distortion or Gromov-Guth thickness see e.g. \cite{Gromov:1983}, \cite{Gromov-Guth:2012}.
Bounds for the ropelength of knots, and in particular the lower bounds, have been studied by many researchers, we only list a small fraction of these works here \cite{Buck-Simon:1999,Buck-Simon:2007,Cantarella-Kusner-Sullivan:2002,Diao-Ernst-Janse-van-Rensburg:1999,Diao-Ernst:2007,Diao:2003,Ernst-Por:2012,Litherland-Simon-Durumeric-Rawdon:1999,Rawdon:1998, Ricca-Maggioni:2014, Maggioni-Ricca:2009, Ricca-Moffatt:1992}. Many of the results are applicable directly to links, but the case of links is treated in more detail by Cantarella, Kusner and Sullivan \cite{Cantarella-Kusner-Sullivan:2002} and in the earlier work of Diao, Ernst, and Janse Van Rensburg \cite{Diao-Ernst-Janse-Van-Rensburg:2002} concerning the estimates in terms of the pairwise linking number. In \cite{Cantarella-Kusner-Sullivan:2002}, the authors introduce a cone surface technique and show the following estimate, for a link $L$ (defined as in \eqref{eq:n-link}) and a given component $L_i$ \cite[Theorem 11]{Cantarella-Kusner-Sullivan:2002}:
\begin{equation}\label{eq:len(L_i)-lk} \operatorname{\mathsf{rop}}(L_i)\geq 2\pi+2\pi\sqrt{\operatorname{\mathsf{Lk}}(L_i,L)}, \end{equation} where $\operatorname{\mathsf{Lk}}(L_i,L)$ is the maximal total linking number between $L_i$ and the other components of $L$. A stronger estimate was obtained in \cite{Cantarella-Kusner-Sullivan:2002} by combining the Freedman and He \cite{Freedman-He:1991} asymptotic crossing number bound for energy of divergence free fields and the cone surface technique as follows
\begin{equation}\label{eq:len(L_i)-Ac-gen} \operatorname{\mathsf{rop}}(L_i)\geq 2\pi+2\pi\sqrt{\operatorname{\mathsf{Ac}}(L_i,L)},\qquad \operatorname{\mathsf{rop}}(L_i)\geq 2\pi+2\pi\sqrt{2g(L_i,L)-1}, \end{equation} where $\operatorname{\mathsf{Ac}}(L_i,L)$ is the {\em asymptotic crossing number} (c.f. \cite{Freedman-He:1991}) and the second inequality is a consequence of the estimate $\operatorname{\mathsf{Ac}}(L_i,L)\geq 2g(L_i,L)-1$, where $g(L_i,L)$ is a minimal genus among surfaces embedded in $\mathbb{R}^3\setminus\{L_1\cup\ldots\cup\widehat{L_i}\cup\ldots\cup L_n\}$, \cite[p. 220]{Freedman-He:1991} (in fact, Estimate \eqref{eq:len(L_i)-Ac-gen} subsumes Estimate \eqref{eq:len(L_i)-lk} since $\operatorname{\mathsf{Ac}}(L_i,L)\geq \operatorname{\mathsf{Lk}}(L_i,L)$).
A relation between $\operatorname{\mathsf{Ac}}(L_i,L)$ and the higher linking numbers of Milnor, \cite{Milnor:1954, Milnor:1957} is unknown and appears difficult. The following question, concerning the embedding thickness, is stated in \cite[p. 1424]{Freedman-Krushkal:2014}:
\begin{prob}\label{q:F-K}
Let $L$ be an $n$-component link which is Brunnian (i.e. almost trivial in the sense of Milnor \cite{Milnor:1954}). Let $M$ be the maximum value among Milnor's $\bar{\mu}$-invariants with distinct indices i.e. among $|\bar{\mu}_{\mathtt{I};j}(L)|$. Is there a bound
\begin{equation}\label{eq:thk-FK}
\tau(L) \leq c_n M^{-\frac{1}{n}},
\end{equation}
for some constant $c_n > 0$, independent of the link $L$? Is there a bound on the crossing number $\operatorname{\mathsf{Cr}}(L)$ in terms of $M$? \end{prob} \noindent Recall that the Milnor $\bar{\mu}$--invariants $\{\bar{\mu}_{\mathtt{I};j}(L)\}$ of $L$ are indexed by an ordered tuple $(\mathtt{I};j)=(i_1,i_2,\ldots,i_k;$ $j)$ from the index set $\{1,\ldots,n\}$, where the last index $j$ has a special role (see below). If all the indices in $(\mathtt{I};j)$ are distinct, $\{\bar{\mu}_{\mathtt{I};j}\}$ are link homotopy invariants of $L$ and are often referred to simply as {\em Milnor linking numbers} or {\em higher linking numbers}, \cite{Milnor:1954, Milnor:1957}. The definition of $\{\bar{\mu}_{\mathtt{I};j}\}$ begins with coefficients $\mu_{\mathtt{I};j}$ of the Magnus expansion of the $j$th longitude of $L$ in $\pi_1(\mathbb{R}^3-L)$. Then
\begin{equation}\label{eq:bar-mu-Delta(I)} \overline{\mu}_{\mathtt{I};j}(L) \equiv \mu_{\mathtt{I};j}(L) \mod\ \Delta_\mu(\mathtt{I};j),\qquad\quad \Delta_\mu(\mathtt{I};j)=\gcd(\Gamma_\mu(\mathtt{I};j)), \end{equation} where $\Gamma_\mu(\mathtt{I};j)$ is a certain subset of lower order Milnor invariants, cf. \cite{Milnor:1957}. Regarding $\bar{\mu}_{\mathtt{I};j}(L)$ as an element of $\mathbb{Z}_d=\{0,1,\ldots,d-1\}$, $d=\Delta_\mu(\mathtt{I};j)$ (or $\mathbb{Z}$, whenever $d=0$) let us set
\begin{equation}\label{eq:[mu]}
\Big[\bar{\mu}_{\mathtt{I};j}(L)\Big]:=\begin{cases}
\min\Bigl(\bar{\mu}_{\mathtt{I};j},d-\bar{\mu}_{\mathtt{I};j}\Bigr) &\ \text{for $d> 0$},\\
|\bar{\mu}_{\mathtt{I};j}| & \ \text{for $d= 0$}.
\end{cases} \end{equation} \noindent Our main result addresses Question \ref{q:F-K} for general $n$--component links (disposing of the Brunnian assumption) as follows.
\begin{thm}\label{thm:main}
Let $L$ be an $n$-component link $n\geq 2$ and $\bar{\mu}(L)$ one of its top Milnor linking numbers, then
\begin{equation}\label{eq:mthm-rop-cr}
\operatorname{\mathsf{rop}}(L)^{\frac 43}\geq \sqrt[3]{n} \Bigl(\Big[\bar{\mu}(L)\Big]\Bigr)^{\frac{1}{n-1}},\qquad \operatorname{\mathsf{Cr}}(L) \geq \tfrac{1}{3}(n-1) \Bigl(\Big[\bar{\mu}(L)\Big]\Bigr)^{\frac{1}{n-1}}.
\end{equation} \end{thm} \noindent In the context of Question \ref{q:F-K}, the estimate of Theorem \ref{thm:main} transforms, using \eqref{eq:t(L)-rop(L)}, as follows \[
\tau(L)\leq \Bigl(\frac{11}{\sqrt[4]{n}}\Bigr)^{\frac 13} M^{-\frac{1}{4(n-1)}} . \] Naturally, Question \ref{q:F-K} can be asked for knots and links and lower bounds in terms of finite type invariants in general. Such questions have been raised for instance in \cite{Cantarella:2009}, where the Bott-Taubes integrals \cite{Bott-Taubes:1994, Volic:2007} have been suggested as a tool for obtaining estimates. \begin{prob}\label{q:F-K-general}
Can we find estimates for ropelength of knots/links, in terms of their finite type invariants? \end{prob} In the remaining part of this introduction let us sketch the basic idea behind our approach to Question \ref{q:F-K-general}, which relies on the relation between the finite type invariants and the crossing number.
Note that since $\operatorname{\mathsf{rop}}(K)$ is scale invariant, it suffices to consider {\em unit thickness knots}, i.e. $K$ together with the unit radius tube neighborhood (i.e. $r(K)=1$). In this setting, $\operatorname{\mathsf{rop}}(K)$ just equals the {\em length} $\ell(K)$ of $K$. From now on we assume unit thickness, unless stated otherwise. In \cite{Buck-Simon:1999}, Buck and Simon gave the following estimates for $\ell(K)$, in terms of the crossing number $\operatorname{\mathsf{Cr}}(K)$ of $K$:
\begin{equation}\label{eq:L(K)-BS} \ell(K)\geq \Bigl(\frac{4\pi}{11} \operatorname{\mathsf{Cr}}(K)\Bigr)^{\frac 34},\qquad \ell(K)\geq 4\sqrt{\pi \operatorname{\mathsf{Cr}}(K)}. \end{equation} Clearly, the first estimate is better for knots with large crossing number, while the second one can be sharper for low crossing number knots (which manifests itself for instance in the case of the trefoil). Recall that $\operatorname{\mathsf{Cr}}(K)$ is a minimal crossing number over all possible knot diagrams of $K$ within the isotopy class of $K$. The estimates in \eqref{eq:L(K)-BS} are a direct consequence of the ropelength bound for the {\em average crossing number}\footnote{i.e. an average of the crossing numbers of diagrams of $K$ over all projections of $K$, see Equation \eqref{eq:aov-acr}.} $\operatorname{\mathsf{acr}}(K)$ of $K$ (proven in \cite[Corollary 2.1]{Buck-Simon:1999}) i.e.
\begin{equation}\label{eq:rop-BS-acr} \ell(K)^{\frac 43}\geq \frac{4\pi}{11} \operatorname{\mathsf{acr}}(K),\qquad \ell(K)^{2}\geq 16\pi \operatorname{\mathsf{acr}}(K). \end{equation}
In Section \ref{S:links_ropelength}, we obtain an analog of \eqref{eq:L(K)-BS} for $n$--component links ($n\geq 2$) in terms of the {\em pairwise crossing number}\footnote{see \eqref{eq:PCr(L)} and Corollary \ref{cor:rop-PCr}, generally $\operatorname{\mathsf{PCr}}(L)\leq \operatorname{\mathsf{Cr}}(L)$, as the individual components can be knotted.} $\operatorname{\mathsf{PCr}}(L)$, as follows \begin{equation}\label{eq:PCr-rop-3/4-2} \ell(L)\geq \frac{1}{\sqrt{n-1}}\Bigl(\tfrac 32 \operatorname{\mathsf{PCr}}(L)\Bigr)^{\frac 34},\qquad \ell(L)\geq \frac{n\sqrt{16\pi}}{\sqrt{n^2-1}}\Bigl(\operatorname{\mathsf{PCr}}(L)\Bigr)^{\frac 12}. \end{equation}
For low crossing number knots, the Buck and Simon bound \eqref{eq:L(K)-BS} was further improved by Diao\footnote{More precisely: $16\pi \operatorname{\mathsf{Cr}}(K) \leq \ell(K)(\ell(K)-17.334)$ \cite{Diao:2003}.} \cite{Diao:2003}, \begin{equation}\label{eq:L(K)-D} \ell(K)\geq \tfrac{1}{2} \bigl(d_0 +\sqrt{d^2_0+64\pi \operatorname{\mathsf{Cr}}(K)}\bigr),\qquad d_0=10-6(\pi+\sqrt{2})\approx 17.334. \end{equation}
On the other hand, there are well known estimates for $\operatorname{\mathsf{Cr}}(K)$ in terms of finite type invariants of knots. For instance, \begin{equation}\label{eq:c_2-cr-LW-PV}
\tfrac{1}{4}\operatorname{\mathsf{Cr}}(K)(\operatorname{\mathsf{Cr}}(K)-1)+\tfrac{1}{24}\geq |c_2(K)|, \qquad \tfrac{1}{8}(\operatorname{\mathsf{Cr}}(K))^2\geq |c_2(K)|. \end{equation} Lin and Wang \cite{Lin-Wang:1996} considered the second coefficient of the Conway polynomial $c_2(K)$ (i.e. the first nontrivial type $2$ invariant of knots) and proved the first bound in \eqref{eq:c_2-cr-LW-PV}. The second estimate of \eqref{eq:c_2-cr-LW-PV} can be found in Polyak--Viro's work \cite{Polyak-Viro:2001}. Further, Willerton, in his thesis \cite{Willerton:2002} obtained estimates for the ``second'', after $c_2(K)$, finite type invariant $V_3(K)$ of type $3$, as \begin{equation}\label{eq:V_3-cr}
\tfrac{1}{4}\operatorname{\mathsf{Cr}}(K)(\operatorname{\mathsf{Cr}}(K)-1)(\operatorname{\mathsf{Cr}}(K)-2)\geq |V_3(K)|. \end{equation}
In the general setting, Bar-Natan \cite{Bar-Natan:1995} shows that if $V(K)$ is a type $n$ invariant then $|V(K)| = O(\operatorname{\mathsf{Cr}}(K)^n)$. All these results rely on the arrow diagrammatic formulas for Vassiliev invariants developed in the work of Goussarov, Polyak and Viro \cite{Goussarov-Polyak-Viro:2000}.
Clearly, combining \eqref{eq:c_2-cr-LW-PV} and \eqref{eq:V_3-cr} with \eqref{eq:L(K)-BS} or \eqref{eq:L(K)-D}, immediately yields lower bounds for ropelength in terms of a given Vassiliev invariant. One may take these considerations one step further and extend the above estimates to the case of the $2n$\textsuperscript{th} coefficient of the Conway polynomial $c_{2n}(K)$, with the help of arrow diagram formulas for $c_{2n}(K)$, obtained recently in \cite{Chmutov-Duzhin-Mostovoy:2012, Chmutov-Khoury-Rossi:2009}. In Section \ref{S:knots}, we follow the Polyak--Viro's argument of \cite{Polyak-Viro:2001} to obtain
\begin{thm}\label{thm:conway}
Given a knot $K$, we have the following crossing number estimate
\begin{equation}\label{eq:crn_conway}
\operatorname{\mathsf{Cr}}(K) \geq \bigl((2n)! |c_{2n}(K)|\bigr)^{\frac{1}{2n}}\geq\frac{2n}{3} |c_{2n}(K)|^{\frac{1}{2n}}.
\end{equation}
\end{thm}
\noindent Combining \eqref{eq:crn_conway} with Diao's lower bound \eqref{eq:L(K)-D} one obtains
\begin{cor}
For a unit thickness knot $K$,
\begin{equation}\label{eq:rop_conway}
\ell(K) \geq \tfrac{1}{2} \Bigl(d_0 +\bigl(d^2_0+ \tfrac{128 n \pi}{3} |c_{2n}(K)|^{\frac{1}{2n}}\bigr)^{\frac 12}\Bigr).
\end{equation}
\end{cor}
Recall that a somewhat different approach to ropelength estimates is presented in \cite{Cantarella-Kusner-Sullivan:2002}, where the authors introduce a cone surface technique, which combined with the asymptotic crossing number, $\operatorname{\mathsf{Ac}}(K)$, bound of Freedman and He, \cite{Freedman-He:1991} gives
\begin{equation}\label{eq:L(K)-AC-g(K)}
\ell(K)\geq 2\pi+2\pi\sqrt{\operatorname{\mathsf{Ac}}(K)},\qquad \ell(K)\geq 2\pi+2\pi\sqrt{2 g(K)-1},
\end{equation}
where the second bound follows from the knot genus estimate of \cite{Freedman-He:1991}: $\operatorname{\mathsf{Ac}}(K)\geq 2 g(K)-1$.
When comparing Estimate \eqref{eq:L(K)-AC-g(K)} and \eqref{eq:rop_conway}, in favor of Estimate \eqref{eq:rop_conway}, we may consider a family of {\em pretzel knots}: $P(a_1,\ldots,a_n)$ where $a_i$ is the number of signed crossings in the $i$th tangle of the diagram, see Figure \ref{fig:pretzel_knots}. Additionally, for a diagram $P(a_1,\ldots,a_n)$ to represent a knot one needs to assume either both $n$ and all $a_i$ are odd or one of the $a_i$ is even, \cite{Kawauchi:1996}.
\begin{wrapfigure}{l}{7cm}
\centering
\includegraphics[width=.35\textwidth]{pretzel_knot.pdf}
\caption{$P(a_1,\ldots,a_n)$ pretzel knots.}\label{fig:pretzel_knots}
\end{wrapfigure}
Genera of selected subfamilies of pretzel knots are known; for instance, \cite[Theorem 13]{Manchon:2012} implies
\begin{align*}
\qquad & \qquad & \qquad & \qquad & g(P(a,b,c)) =1,\\
\qquad & \qquad & \qquad & \qquad & c_2(P(a,b,c))=\frac 14 (ab+ac+bc+1),
\end{align*}
where $a$, $b$, $c$ are odd integers with the same sign
(for the value of $c_2(P(a,b,c))$ see a table in \cite[p. 390]{Manchon:2012}). Concluding, the lower bound in
\eqref{eq:rop_conway} can be made arbitrarily large by letting $a,b,c\to+\infty$, while the lower bound
in \eqref{eq:L(K)-AC-g(K)} stays constant for any values of $a$, $b$, $c$ under consideration. Yet another\footnote{out of a few such examples given in \cite{Manchon:2012}.} example of a family of pretzel knots with constant genus one and arbitrarily large $c_2$--coefficient is $D(m,k)=P(m,\varepsilon,\stackrel{|k|-\text{times}}{\ldots},\varepsilon)$, $m>0$, $k$, where $\varepsilon=\frac{k}{|k|}$ is the sign of $k$ (e.g. $D(3,-2)=P(3,-1,-1)$). For any such $D(m,k)$, we have $c_2(D(m,k))=\frac{mk}{4}$. \begin{rem} {\rm A natural question can be raised about the reverse situation: Can we find a family of knots with constant $c_{2n}$-coefficient (or any finite type invariant, see Remark \ref{rem:method-extended}), but arbitrarily large genus? For instance, there exist knots with $c_2=0$ and nonzero genus (such as $8_2$); in these cases \eqref{eq:L(K)-AC-g(K)} still provides a nontrivial lower bound.} \end{rem}
The paper is structured as follows: Section \ref{S:knots} is devoted to a review of arrow polynomials for finite type invariants and Kravchenko-Polyak tree invariants in particular; it also contains the proof of Theorem \ref{thm:conway}. Section \ref{S:links_ropelength} contains information on the average overcrossing number for links and link ropelength estimates analogous to the ones obtained by Buck and Simon \cite{Buck-Simon:1999} (see Equation \eqref{eq:rop-BS-acr}). The proof of Theorem \ref{thm:main} is presented in Section \ref{S:proof-main}, together with final comments and remarks.
\begin{figure}
\caption{$5_2$ knot and its Gauss diagram (all crossings are positive).}
\label{fig:5_2-gauss}
\end{figure}
\section{Arrow polynomials and finite type invariants}\label{S:knots}
Recall from \cite{Chmutov-Duzhin-Mostovoy:2012} that the {\em Gauss diagram} of a knot $K$ is a way of representing signed overcrossings in a knot diagram, by arrows based on a circle ({\em Wilson loops}, \cite{Bar-Natan:1991}) with signs encoding the sign of the crossings (see Figure \ref{fig:5_2-gauss} showing the $5_2$ knot and its Gauss diagram). More precisely, the Gauss diagram $G_K$ of a knot $K:S^1\longrightarrow \mathbb{R}^3$ is constructed by marking pairs of points in the domain $S^1$, endpoints of a corresponding arrow in $G_K$, which are mapped to crossings in a generic planar projection of $K$. The arrow always points from the under-- to the over--crossing and the orientation of the circle $S^1$ in $G_K$ agrees with the orientation of the knot.
Given a Gauss diagram $G$, the {\em arrow polynomials} of \cite{Goussarov-Polyak-Viro:2000, Polyak-Viro:1994} are defined simply as a signed count of selected subdiagrams in $G$. For instance the second coefficient of the Conway polynomial $c_2(K)$ is given by the signed count of $\vvcenteredinclude{.2}{c2-arrow.pdf}$ in $G$, denoted as \begin{equation}\label{eq:c_2-arrow}
c_2(K)=\langle\vvcenteredinclude{.2}{c2-arrow.pdf}, G\rangle=\sum_{\phi:\vvcenteredinclude{.1}{c2-arrow.pdf}\longrightarrow G} \text{\rm sign}(\phi),\qquad \text{\rm sign}(\phi)=\prod_{\alpha\in \vvcenteredinclude{.1}{c2-arrow.pdf}} \text{\rm sign}(\phi(\alpha)), \end{equation} where the sum is over all basepoint preserving graph embeddings $\{\phi\}$ of $\vvcenteredinclude{.2}{c2-arrow.pdf}$ into $G$, and the sign is a product of signs of corresponding arrows in $\phi(\vvcenteredinclude{.2}{c2-arrow.pdf})\subset G$. For example in the Gauss diagram of $5_2$ knot in Figure \ref{fig:5_2-gauss}, there are two possible embeddings of $\vvcenteredinclude{.2}{c2-arrow.pdf}$ into the diagram. One matches the pair of arrows $\{a,d\}$ and another pair $\{c,d\}$, since all crossings are positive we obtain $c_2(5_2)=2$. \begin{figure}
\caption{Turning a one-component chord diagram with a base point into an arrow diagram}
\label{fig:chord_to_arrow}
\end{figure}
For other even coefficients of the Conway polynomial, the work in \cite{Chmutov-Khoury-Rossi:2009} provides the following recipe for their arrow polynomials. Given $n>0$, consider any chord diagram $D$, on a single circle component with $2n$ chords, such as $\vvcenteredinclude{.25}{chord-1.pdf}$, $\vvcenteredinclude{.25}{chord-2.pdf}$, $\vvcenteredinclude{.25}{chord-3.pdf}$. A chord diagram $D$ is said to be a {\em $k$-component diagram}, if after parallel doubling of each chord according to \vcenteredinclude{k-component_chord.png}, the resulting curve will have $k$--components. For instance $\vvcenteredinclude{.25}{1-component.pdf}$ is a $1$--component diagram and $\vvcenteredinclude{.25}{3-component.pdf}$ is a $3$--component diagram. For the coefficients $c_{2n}$, only one component diagrams will be of interest and we turn a one-component chord diagram with a base point into an arrow diagram according to the following rule \cite{Chmutov-Khoury-Rossi:2009}: \begin{quote}
{\em Starting from the base point we move along the diagram with doubled chords. During this journey we pass both copies of each chord in opposite directions. Choose an arrow on each chord which corresponds to the direction of the first passage of the copies of the chord (see Figure \ref{fig:chord_to_arrow} for the illustration).} \end{quote}
We call, the arrow diagram obtained according to this method, the {\em ascending arrow diagram} and denote by $C_{2n}$ the sum of all based one-component ascending arrow diagrams with $2n$ arrows. For example $\vvcenteredinclude{.3}{C_2.pdf}$ and $C_{4}$ is shown below (c.f. \cite[p. 777]{Chmutov-Khoury-Rossi:2009}). \[ \vvcenteredinclude{.31}{C_4.pdf} \] In \cite{Chmutov-Khoury-Rossi:2009}, the authors show for $n \geq 1$, that the $c_{2n}(K)$ coefficient of the Conway polynomial of $K$ equals
\begin{equation}\label{eq:conway_coefficients}
c_{2n}(K) = \langle C_{2n}, G_K \rangle.
\end{equation}
\begin{repthm}{thm:conway}
Given a knot $K$, we have the following crossing number estimate
\begin{equation}\label{eq2:crn_conway}
\operatorname{\mathsf{Cr}}(K) \geq \bigl((2n)! |c_{2n}(K)|\bigr)^{\frac{1}{2n}}\geq\tfrac{2n}{3} |c_{2n}(K)|^{\frac{1}{2n}}.
\end{equation}
\end{repthm}
\begin{proof}
\noindent Given $K$ and its Gauss diagram $G_K$, let $X=\{1,2,\ldots, \operatorname{\mathsf{cr}}(K)\}$ index arrows of $G_K$ (i.e. crossings of a diagram of $K$ used to obtain $G_K$). For diagram term $A_i$ in the sum $C_{2n}=\sum_i A_i$, an embedding $\phi:A_i\longmapsto G_K$ covers a certain $2n$ element subset of crossings in $X$ that we denote by $X_{\phi}(i)$. Let $\mathcal{E}(i;G_K)$ be the set of all possible embeddings $\phi:A_i\longmapsto G_K$, and
\[
\mathcal{E}(G_K)=\bigsqcup_i \mathcal{E}(i;G_K).
\]
Note that $X_\phi(i)\neq X_\xi(j)$ for $i\neq j$ and $X_\phi(i)\neq X_\xi(i)$ for $\phi\neq \xi$, thus for each $i$ we have an injective map
\[
F_i:\mathcal{E}(i;G_K)\longmapsto \mathcal{P}_{2n}(X),\qquad F_i(\phi)=X_\phi(i),
\]
where $\mathcal{P}_{2n}(X)=\{\text{$2n$--element subsets of $X$}\}$. $F_i$ extends in an obvious way to the whole disjoint union $\mathcal{E}(G_K)$, as $F:\mathcal{E}(G_K)\longrightarrow \mathcal{P}_{2n}(X)$, $F=\sqcup_i F_i$ and remains injective. In turn, for every $i$ we have
\[
|\langle A_i,G_K\rangle|\leq \# \mathcal{E}(i;G_K)
\]
and therefore
\[
|\langle C_{2n},G_K\rangle|\leq \# \mathcal{E}(G_K)< \#\mathcal{P}_{2n}(X)={\operatorname{\mathsf{cr}}(K) \choose 2n}.
\]
If $\operatorname{\mathsf{cr}}(K)<2n$ then the left hand side vanishes.
Since ${\operatorname{\mathsf{cr}}(K) \choose 2n}\leq \frac{\operatorname{\mathsf{cr}}(K)^{2n}}{(2n)!}$, we obtain
\[
|c_{2n}(K)|\leq \frac{\operatorname{\mathsf{cr}}(K)^{2n}}{(2n)!}\quad \Rightarrow\quad \bigl((2n)! |c_{2n}(K)|\bigr)^{\frac{1}{2n}}\leq \operatorname{\mathsf{cr}}(K),
\] which gives the first part of \eqref{eq2:crn_conway}. Using the following lower bound for $m!$ (Stirling's approximation, \cite{Abramowitz:1964aa}) \[ m!\geq \sqrt{2\pi} m^{m+\frac 12} e^{-m}, \] applying $e^{-1}\geq \frac{1}{3}$, $\bigl(\sqrt{2\pi}\bigr)^{\frac{1}{m}}\geq 1$, $\bigl(m^{m+\frac 12}\bigr)^{\frac{1}{m}}\geq m \bigl(\sqrt{m}\bigr)^{\frac{1}{m}}\geq m$, yields \begin{equation}\label{eq:stirling2} \bigl(m!\bigr)^{\frac{1}{m}}\geq \bigl(\sqrt{2\pi} (m)^{m+\frac 12} e^{-m}\bigr)^{\frac{1}{m}} \geq \frac{m}{3}, \end{equation} for $m=2n$ one obtains the second part of \eqref{eq2:crn_conway}. \end{proof}
\begin{figure}
\caption{Elementary trees $e$ and $\bar{e}$ and the $Z_{2;1}$ arrow polynomial. }
\label{fig:elem-tree}
\end{figure}
Next, we turn to arrow polynomials for Milnor linking numbers. In \cite{Kravchenko-Polyak:2011}, Kravchenko and Polyak introduced tree invariants of string links and established their relation to Milnor linking numbers via the skein relation of \cite{Polyak:2000}. In the recent paper \cite{Komendarczyk-Michaelides:2016}, the authors\footnote{consult \cite{Kotorii:2013} for a related result.} showed that the arrow polynomials of Kravchenko and Polyak, applied to Gauss diagrams of closed based links, yield certain $\bar{\mu}$--invariants (as defined in \eqref{eq:bar-mu-Delta(I)}). For the purpose of the proof of Theorem \ref{thm:main}, it suffices to give a recursive definition, provided in \cite{Komendarczyk-Michaelides:2016}, for the arrow polynomial of $\bar{\mu}_{23\ldots n;1}(L)$ denoted by $Z_{n;1}$. Changing the convention adopted for knots, we follow \cite{Kravchenko-Polyak:2011}, \cite{Komendarczyk-Michaelides:2016} and use vertical segments (strings) oriented downwards in place of circles (Wilson loops) as components. \begin{figure}
\caption{Obtaining a term in $Z_{3;1}$ via stacking $e$ on the second component of $Z_{2;1}$, i.e. $Z_{2;1}\prec_2 e$. }
\label{fig:Z_2;1-to-term-Z_3;1}
\end{figure} The polynomial $Z_{n;1}$ is obtained inductively from~$Z_{n-1;1}=\sum_k \pm A_k$ by expanding each term $A_k$ of $Z_{n;1}$ through stacking elementary tree diagrams $e$ and $\bar{e}$, shown in Figure \ref{fig:elem-tree}, the sign of a resulting term is determined accordingly. The stacking operation is denoted by $\prec_i$, where $i=1,\ldots, n$ tells which component is used for stacking. Figure \ref{fig:Z_2;1-to-term-Z_3;1} shows $Z_{2;1}\prec_2 e$. The inductive procedure is defined as follows: \begin{itemize}
\item[$(i)$] $Z_{2;1}$ is shown in Figure \ref{fig:elem-tree}(right).
\item[$(ii)$] For each term $A_k$ in $Z_{n-1;1}$ produce terms in $Z_{n;1}$ by stacking\footnote{Note that $\bar{e}$ is not allowed to be stacked on the first component.} $e$ and $\bar{e}$ on each component i.e. $A_k\prec_i e$ for $i=1,\ldots, n$ and $A_k\prec_i \bar{e}$ for $i=2,\ldots, n$, see Figure \ref{fig:Z_2;1-to-term-Z_3;1}. Eliminate isomorphic (duplicate) diagrams.
\item[$(iii)$] The sign of each term in $Z_{n;1}$ equals $(-1)^q$, where $q$ is the number of arrows pointing to the right. \end{itemize}
As an example consider $Z_{3;1}$; we begin with the initial tree $Z_{2;1}$, and expand by stacking $e$ and $\bar{e}$ on the strings of $Z_{2;1}$, this is shown in Figure \ref{fig:Z_2;1-to-Z_3;1}, we avoid stacking $\bar{e}$ on the first component (called the {\em trunk}, \cite{Komendarczyk-Michaelides:2016}). Thus $Z_{3;1}$ is obtained as $A+B-C$, where $A=Z_{2;1}\prec_2 e$, $B=Z_{2;1}\prec_1 e$, and $C=Z_{2;1}\prec_2 \bar{e}$.
\begin{figure}
\caption{$Z_{3;1}=A+B-C$ obtained from $Z_{2;1}$ via $(i)$--$(iii)$. }
\label{fig:Z_2;1-to-Z_3;1}
\end{figure}
\noindent Given $Z_{n;1}$, the main result of \cite{Komendarczyk-Michaelides:2016} (see also \cite{Kotorii:2013} for a related result) yields the following formula \begin{equation}\label{eq:bar-mu-Z_n;1} \overline{\mu}_{n;1}(L) \equiv \langle Z_{n;1}, G_L\rangle \mod\ \Delta_\mu(n;1), \end{equation} where $\overline{\mu}_{n;1}(L):=\overline{\mu}_{2\ldots n;1}(L)$, $G_L$ is a Gauss diagram of an $n$--component link $L$, and the indeterminacy $\Delta_\mu(n;1)$ is defined in \eqref{eq:bar-mu-Delta(I)}. Recall that $\langle Z_{n;1}, G_L\rangle=\sum_k \pm \langle A_k, G_L\rangle$ ($Z_{n;1}=\sum_k \pm A_k$) where $\langle A_k, G_L\rangle=\sum_{\phi:A_k\longrightarrow G_L} \text{\rm sign}(\phi)$ is a signed count of subdiagrams isomorphic to $A_k$ in $G_L$.
For $n = 2$, we obtain the usual linking number \begin{equation}\label{eq:lk-gauss} \bar{\mu}_{2;1}(L)=\langle Z_{2;1}, G_L\rangle = \Bigl\langle\vvcenteredinclude{0.23}{Z_2_1.pdf}, G_L\Bigr\rangle. \end{equation} For $n=3$ and $n=4$ the arrow polynomials can be obtained following the stacking procedure \[ \begin{split}
\bar{\mu}_{3;1}(L) & =\langle Z_{3;1}, G_L\rangle \mod \gcd\{\bar{\mu}_{2;1}(L), \bar{\mu}_{3;1}(L), \bar{\mu}_{3;2}(L)\},\\
&\quad Z_{3;1} = \vvcenteredinclude{0.3}{Z_23_1.pdf}, \end{split} \] and \[ \begin{split} \bar{\mu}_{4;1}(L) & =\langle Z_{4;1}, G_L\rangle \mod \Delta_\mu(4;1),\\
&\quad Z_{4;1} = \vvcenteredinclude{0.3}{Z_234_1-a.pdf} \\ &\qquad\qquad\vvcenteredinclude{0.3}{Z_234_1-b.pdf}\ . \end{split} \]
Given a formula for $\bar{\mu}_{n;1}(L)=\bar{\mu}_{23\ldots n;1}(L)$ all remaining $\bar{\mu}$--invariants with distinct indices can be obtained from the following permutation identity (for $\sigma\in \Sigma(1,\ldots,n)$) \begin{equation}\label{eq:mu-symmetry} \bar{\mu}_{\sigma(2)\sigma(3)\ldots\sigma(n);\sigma(1)}(L)=\bar{\mu}_{23\ldots n;1}(\sigma(L)),\qquad \sigma(L)=(L_{\sigma(1)},L_{\sigma(2)},\ldots, L_{\sigma(n)}). \end{equation} By \eqref{eq:bar-mu-Z_n;1}, \eqref{eq:mu-symmetry} and \eqref{eq:bar-mu-Delta(I)} we have \begin{equation}\label{eq:bar-mu-sigma-Z}
\bar{\mu}_{\sigma(2)\sigma(3)\ldots\sigma(n);\sigma(1)}(L)= \langle \sigma(Z_{n;1}), G_L\rangle \mod\ \Delta_\mu(\sigma(2)\sigma(3)\ldots\sigma(n);\sigma(1)), \end{equation} where $\sigma(Z_{n;1})$ is the arrow polynomial obtained from $Z_{n;1}$ by permuting the strings according to $\sigma$. \begin{rem}\label{rem:mu-cyclic}
{\em
One of the properties of $\bar{\mu}$--invariants is their cyclic symmetry, \cite[Equation (21)]{Milnor:1957}, i.e. given a cyclic permutation $\rho$, we have
\[
\bar{\mu}_{\rho(2)\rho(3)\ldots\rho(n);\rho(1)}(L)=\bar{\mu}_{23\ldots n;1}(L).
\]
} \end{rem}
\section{Overcrossing number of links}\label{S:links_ropelength}
We will denote by $D_L$ a regular diagram of a link $L$, and by $D_L(v)$, the diagram obtained by the projection of $L$ onto the plane normal to a vector\footnote{unless otherwise stated we assume that $v$ is generic and thus $D_L(v)$ to be a regular diagram.} $v\in S^2$. For a pair of components $L_i$ and $L_j$ in $L$, define the {\em overcrossing number} in the diagram and the {\em pairwise crossing number} of components $L_i$ and $L_j$ in $D_L$ i.e.
\begin{equation}\label{eq:ov-cr} \begin{split} \mathsf{ov}_{i,j}(D_L) & =\{\text{number of times $L_i$ overpasses $L_j$ in $D_L$}\}.\\ \mathsf{cr}_{i,j}(D_L) & =\{\text{number of times $L_i$ overpasses and underpasses $L_j$ in $D_L$}\}\\ & =\mathsf{ov}_{i,j}(D_L)+\mathsf{ov}_{j,i}(D_L)=\mathsf{cr}_{j,i}(D_L). \end{split} \end{equation}
In the following, we also use the {\em average overcrossing number} and {\em average pairwise crossing number} of components $L_i$ and $L_j$ in $L$, defined as an average over all $D_L(v)$, $v\in S^2$, i.e.
\begin{equation}\label{eq:aov-acr} \mathsf{aov}_{i,j}(L)=\frac{1}{4\pi}\int_{S^2} \mathsf{ov}_{i,j}(v)\, dv,\qquad \mathsf{acr}_{i,j}(L)=\frac{1}{4\pi}\int_{S^2} \mathsf{cr}_{i,j}(v)\, dv=2\, \mathsf{aov}_{i,j}(L) \end{equation}
The following result is based on the work in \cite{Cantarella:2009} and \cite{Buck-Simon:1999}; the idea of using the rearrangement inequality comes from \cite{Cantarella:2009}. \begin{lem}\label{lem:aov-bound}
Given a unit thickness link $L$, and any $2$--component sublink $(L_i,L_j)$:
\begin{equation}\label{eq:aov-bound}
\min\bigl(\ell_i\ell^{\frac 13}_j,\ell_j\ell^{\frac 13}_i\bigr)\geq 3\,\mathsf{aov}_{i,j}(L),\qquad \ell_i\ell_j\geq 16\pi\,\mathsf{aov}_{i,j}(L),
\end{equation}
for $\ell_i=\ell(L_i)$, $\ell_j=\ell(L_j)$, the
length of $L_i$ and $L_j$ respectively. \end{lem}
\begin{proof}
Consider the Gauss map of $L_i=L_i(s)$ and $L_j=L_j(t)$:
\begin{equation*}
F_{i,j}:S^1\times S^1\longmapsto \text{\rm Conf}_2(\mathbb{R}^3)\longmapsto S^2,\quad F_{i,j}(s,t)=\frac{L_i(s)-L_j(t)}{||L_i(s)-L_j(t)||}.
\end{equation*}
\noindent If $v\in S^2$ is a regular value of $F_{i,j}$ (which happens for a set of full measure on $S^2$) then
\begin{equation*}
\mathsf{ov}_{i,j}(v)=\#\{\text{points in $F^{-1}_{i,j}(v)$} \}.
\end{equation*}
i.e. $\mathsf{ov}_{i,j}(v)$ stands for the number of times the $i$--component of $L$ passes over the $j$--component, in the projection of $L$ onto the plane in $\mathbb{R}^3$ normal to $v$. As a direct consequence of Federer's coarea formula \cite{Federer:1969} (see e.g. \cite{Michaelides:2015} for a proof)
\begin{align} \label{eq:gauss-linking-int}
\int_{L_i\times L_j} |F^\ast_{i,j} \omega| & =\frac{1}{4\pi}\int_{S^1\times S^1} \frac{|\langle L_i(s)-L_j(t),L'_i(s), L'_j(t) \rangle|}{||L_i(s)-L_j(t)||^3} ds \, dt\\
& =\frac{1}{4\pi}\int_{S^2} \mathsf{ov}_{i,j}(v)\, dv, \label{eq:avg-overcrossing}
\end{align}
where $\omega = \frac{1}{4\pi}\bigl(x \,dy \wedge dz - y \, dx \wedge dz + z\, dx \wedge dy\bigr)$ is the normalized area form on the unit sphere in $\mathbb{R}^3$ and
\[
\langle v, w, z \rangle:=\det\bigl(v,w,z\bigr),\qquad\text{for}\quad v,w,z\in \mathbb{R}^3.
\]
Assuming the arc--length parametrization by $s \in [0, \ell_i]$ and $t \in [0, \ell_j]$ of the components we have $\|L'_i (s)\|=\|L'_j(t)
\|=1$ and therefore:
\begin{equation}\label{eq:quarter}
\Bigl|\frac{\langle L_i(s)-L_j(t), L'_i(s),L'_j(t) \rangle}{||L_i(s)-L_j(t)||^3}\Bigr|\leq \frac{1}{||L_i(s)-L_j(t)||^2}
\end{equation}
\noindent Combining Equations \eqref{eq:avg-overcrossing} and \eqref{eq:quarter} yields
\begin{equation}\label{eq:overcrossing}
\int^{\ell_j}_0\int^{\ell_i}_0 \frac{1}{||L_i(s) - L_j(t)||^2}ds\,dt=\int^{\ell_j}_0 I_i(L_j(t))dt,
\end{equation}
where
\[
I_i(p)=\int^{\ell_i}_0 \frac{1}{||L_i(s) - p||^2}ds=\int^{\ell_i}_0 \frac{1}{r(s)^2}ds,\quad r(s)=\|L_i(s) - p\|,
\]
is often called {\em illumination} of $L_i$ from the point $p\in \mathbb{R}^3$, c.f. \cite{Buck-Simon:1999}. Following the approach of \cite{Buck-Simon:1999}, and \cite{Cantarella:2009} we estimate $I_i(t)=I_i(p)$ for $p=L_j(t)$. Denote by $B_a(p)$ the ball at $p=L_j(t)$ of radius $a$, and $s(z)$ the length of a portion of $L_i$ within the spherical shell: $Sh(z)=B_z(p)\setminus B_1(p)$, $z\geq 1$. Note that, because the distance between $L_i$ and $L_j$ is at least $2$, the unit thickness tube about $L_i$ is contained entirely in $Sh(z)$ for big enough $z$. Clearly, $s(z)$ is nondecreasing. Since the volume of a unit thickness tube of length $a$ is $\pi a$, comparing the volumes we obtain
\begin{equation}\label{eq:vol-tube-shell}
\begin{split}
\pi s(z) & \leq \operatorname{Vol}(Sh(z))=\tfrac{4}{3}\pi\bigl(z^3-1^3\bigr), \ \text{and}\\
s(z) & \leq \tfrac{4}{3}\, z^3, \qquad \text{for}\quad z\geq 1.
\end{split}
\end{equation}
Next, using the monotone rearrangement $\bigl(\frac{1}{r^2}\bigr)^\ast$ of $\frac{1}{r^2}$, (Remark \ref{rem:rearrangement}):
\begin{equation}\label{eq:rng-ineq}
\Bigl(\frac{1}{r^2}\Bigr)^\ast(s)\leq (\tfrac{4}{3})^{\frac 23}\,s^{-\frac 23},
\end{equation}
and the monotone rearrangement inequality \cite{Lieb-Loss:2001}:
\begin{equation}\label{eq:I_i-monotone}
I_i(p)= \int^{\ell_i}_0 \frac{1}{r^2(s)}ds \leq \int^{\ell_i}_0 \Bigl(\frac{1}{r^2}\Bigr)^\ast(s)ds \leq \int^{\ell_i}_0 (\tfrac{4}{3})^{\frac 23}\,s^{-\frac 23} ds= 3(\tfrac{4}{3})^{\frac 23}\,\ell_i^{\frac 13}.
\end{equation}
\noindent Integrating \eqref{eq:I_i-monotone} with respect to the $t$--parameter, we obtain
\[
\mathsf{aov}_{i,j}(L)\leq \frac{1}{4\pi}\int^{\ell_j}_0\int^{\ell_i}_0 \frac{1}{||L_i(s) - L_j(t)||^2}ds\,dt\leq 3(\tfrac{4}{3})^{\frac 23}\tfrac{1}{4\pi}\, \ell_j\,\ell_i^{\frac 13}< \tfrac{1}{3} \ell_j\,\ell_i^{\frac 13}.
\]
Since the argument works for any choice of $i$ and $j$, the first estimate in Equation \eqref{eq:aov-bound} is proven. The second estimate in \eqref{eq:aov-bound} follows immediately from the fact that $\frac{1}{\|L_i(s)-L_j(t)\|^2}\leq \frac 14$. \end{proof}
\begin{rem}\label{rem:rearrangement}
{\rm Recall, that for a nonnegative real valued function $f$, (on $\mathbb{R}^n$) vanishing at infinity, the {\em rearrangement, $f^\ast$ of $f$} is given by
\[
f^\ast(x)=\int^\infty_0 \chi^\ast_{\{f > u\}}(x)\, du,
\]
where $\chi^\ast_{\{f > u\}}(x)=\chi_{B_\rho}(x)$ is the characteristic function of the ball $B_\rho$ centered at the origin, determined by the volume condition: $\operatorname{Vol}(B_\rho)=\operatorname{Vol}(\{x\ |\ f(x)>u\})$, see \cite[p. 80]{Lieb-Loss:2001} for further properties of the rearrangements. In particular, the rearrangement inequality states \cite[p. 82]{Lieb-Loss:2001}:
$\int_{\mathbb{R}^n} f(x)\,dx\leq \int_{\mathbb{R}^n} f^\ast(x)\,dx$. For one variable functions, we may use the interval $[0,\rho]$ in place of the ball $B_\rho$, then $f^\ast$ is a decreasing function on $[0,+\infty)$. Specifically, for $f(s)=\frac{1}{r^2(s)}=\frac{1}{\|L_i(s)-p\|^2}$, we have \[
\Bigl(\frac{1}{r^2}\Bigr)^\ast(s)=u,\qquad\text{for}\quad \operatorname{length}\bigl(\{x\ |\ u<\frac{1}{r^2(x)}\leq 1\}\bigr)=s, \]
where $\operatorname{length}\bigl(\{x\ |\ u<\frac{1}{r^2(x)}\leq 1\}\bigr)$ stands for the length of the portion of $L_i$ satisfying the given condition. Further, by the definition of $s(z)$, from the previous paragraph, and \eqref{eq:vol-tube-shell}, we obtain \[
s=\operatorname{length}\bigl(\{x\ |\ \frac{1}{r^2(x)}>u\}\bigr)=\operatorname{length}\bigl(\{x\ |\ 1\leq r(x)<\frac{1}{\sqrt{u}}\}\bigr)=s\bigl(\frac{1}{\sqrt{u}}\bigr)\leq \tfrac{4}{3}\, \bigl(\frac{1}{\sqrt{u}}\bigr)^3, \] and \eqref{eq:rng-ineq} as a result.
}
\end{rem}
\noindent From the Gauss linking integral \eqref{eq:gauss-linking-int}: \[
|\operatorname{\mathsf{Lk}}(L_i,L_j)|\leq \operatorname{\mathsf{aov}}_{i,j}(L), \]
thus we immediately recover the result of \cite{Diao-Ernst-Janse-Van-Rensburg:2002} (but with a specific constant): \begin{equation}\label{eq:aov-lk-bound}
3 |\operatorname{\mathsf{Lk}}(L_i,L_j)|\leq \min\bigl(\ell_i\ell^{\frac 13}_j,\ell_j\ell^{\frac 13}_i\bigr),\qquad 16\pi|\operatorname{\mathsf{Lk}}(L_i,L_j)|\leq \ell_i\ell_j. \end{equation} Summing up over all possible pairs: $i$, $j$ and using the symmetry of the linking number we have \[
6\sum_{i< j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|=3\sum_{i\neq j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|\leq \sum_{i\neq j} \ell_i\ell^{\frac 13}_j= (\sum_i \ell_i)(\sum_j \ell^{\frac 13}_j)-\sum_i \ell^{\frac 43}_i. \] From Jensen's Inequality \cite{Lieb-Loss:2001}, we know that $\frac{1}{n}(\sum_i \ell^{\frac 13}_i)\leq \bigl(\frac{1}{n}\sum_i \ell_i\bigr)^{\frac 13}$ and $\frac{1}{n}(\sum_i \ell^{\frac 43}_i)\geq \bigl(\frac{1}{n}\sum_i \ell_i\bigr)^{\frac 43}$, therefore \begin{equation}\label{eq:lk-jensen} (\sum_i \ell_i)(\sum_j \ell^{\frac 13}_j)-\sum_i \ell^{\frac 43}_i\leq n^{\frac 23} \operatorname{\mathsf{rop}}(L)\operatorname{\mathsf{rop}}(L)^{\frac 13}-n^{-\frac 13} \operatorname{\mathsf{rop}}(L)^{\frac 43}=\frac{n-1}{n^\frac 13}\operatorname{\mathsf{rop}}(L)^{\frac 43}. \end{equation} Analogously, using the second estimate in \eqref{eq:aov-lk-bound} and Jensen's Inequality, yields \[
32\pi \sum_{i< j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|=16\pi \sum_{i\neq j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|\leq \sum_{i\neq j} \ell_i\ell_j\leq (1-\frac{1}{n^2})\bigl(\sum_i \ell_i\bigr)^2. \] \begin{cor}\label{cor:rop-lk}
Let $L$ be an $n$-component link ($n\geq 2$), then \begin{equation}\label{eq:lk-rop-3/4-2}
\operatorname{\mathsf{rop}}(L)^{\frac 43}\geq \frac{6 n^{\frac{1}{3}}}{(n-1)}\sum_{i<j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|,\qquad \operatorname{\mathsf{rop}}(L)^{2}\geq \frac{32\pi n^2 }{n^2-1}\sum_{i<j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|. \end{equation} \end{cor}
In terms of growth of the pairwise linking numbers $|\operatorname{\mathsf{Lk}}(L_i,L_j)|$, for a fixed $n$, the above estimate performs better than the one in \eqref{eq:len(L_i)-lk}. One may also replace $\sum_{i<j} |\operatorname{\mathsf{Lk}}(L_i,L_j)|$ with the isotopy invariant \begin{equation}\label{eq:PCr(L)} \operatorname{\mathsf{PCr}}(L)=\min_{D_L} \Bigl(\sum_{i\neq j} \operatorname{\mathsf{cr}}_{i,j}(D_L)\Bigr), \end{equation} (satisfying $\operatorname{\mathsf{PCr}}(L)\leq \operatorname{\mathsf{Cr}}(L)$), which we call the {\em pairwise crossing number} of $L$. This conclusion can be considered as an analog of the Buck and Simon estimate \eqref{eq:L(K)-BS} for knots.
\begin{cor}\label{cor:rop-PCr}
Let $L$ be an $n$-component link ($n\geq 2$), and $\operatorname{\mathsf{PCr}}(L)$ its pairwise crossing number, then
\begin{equation}\label{eq2:PCr-rop-3/4-2}
\operatorname{\mathsf{rop}}(L)^{\frac{4}{3}}\geq \frac{3 n^{\frac{1}{3}}}{(n-1)}\operatorname{\mathsf{PCr}}(L),\qquad \operatorname{\mathsf{rop}}(L)^2\geq \frac{16\pi n^2}{n^2-1}\operatorname{\mathsf{PCr}}(L).
\end{equation} \end{cor}
\section{Proof of Theorem \ref{thm:main}}\label{S:proof-main}
\noindent The following auxiliary lemma will be useful. \begin{lem}\label{lem:a_i-ineq}
Given nonnegative numbers: $a_1$, \ldots, $a_{N}$ we have for $k\geq 2$:
\begin{equation}\label{eq:prod-est}
\sum_{1\leq i_1<i_2<\ldots<i_{k}\leq N} a_{i_1}a_{i_2}\ldots a_{i_{k}}\leq \frac{1}{N^{k}} {N \choose k}\Bigl(\sum^{N}_{i=1} a_i\Bigr)^{k}.
\end{equation} \end{lem} \begin{proof}
It suffices to observe that for $a_i\geq 0$ the ratio $\Bigl(\sum_{1\leq i_1<i_2<\ldots<i_{k}\leq N} a_{i_1}a_{i_2}\ldots a_{i_{k}}\Bigr)/\Bigl(\sum^{N}_{i=1} a_i\Bigr)^{k}$ achieves its maximum for $a_1=a_2=\ldots=a_N$. \end{proof} \noindent Recall from \eqref{eq:[mu]} that $\bar{\mu}_{n;1}:=\bar{\mu}_{23\ldots n;1}$, and
\begin{equation}\label{eq2:[mu]} \Big[\bar{\mu}_{n;1}(L)\Big]:=\begin{cases} \min\Bigl(\bar{\mu}_{n;1}(L),d-\bar{\mu}_{n;1}(L)\Bigr) &\ \text{for $d> 0$},\\
|\bar{\mu}_{n;1}(L)| & \ \text{for $d= 0$} \end{cases}\qquad d=\Delta_\mu(n;1). \end{equation} For convenience, recall the statement of Theorem \ref{thm:main}.
\begin{repthm}{thm:main}
Let $L$ be an $n$-component link of unit thickness, and $\bar{\mu}(L)$ one of its top Milnor linking numbers, then
\begin{equation}\label{eq2:mthm-rop-cr}
\ell(L)\geq \sqrt[4]{n} \Bigl(\sqrt[n-1]{\Big[\bar{\mu}(L)\Big]}\Bigr)^{\frac 34},\qquad \operatorname{\mathsf{Cr}}(L) \geq \frac{1}{3}(n-1) \sqrt[n-1]{\Big[\bar{\mu}(L)\Big]}.
\end{equation} \end{repthm} \begin{proof} Let $G_L$ be a Gauss diagram of $L$ obtained from a regular link diagram $D_L$. Consider, any term $A$ of the arrow polynomial: $Z_{n;1}$ and index the arrows of $A$ by $(i_k,j_k)$, $k=1,\ldots,n-1$ in such a way that $i_k$ is the arrowhead and $j_k$ is the arrowtail, we have the following obvious estimate: \begin{equation}\label{eq:estimate_term-n}
|\bigl\langle A, G_L\bigr\rangle|\leq \prod^{n-1}_{k=1}\mathsf{ov}_{i_k,j_k}(D_L)\leq \prod^{n-1}_{k=1}\mathsf{cr}_{i_k,j_k}(D_L). \end{equation} Let $N={n \choose 2}$, since every term (a tree diagram) of $Z_{n;1}$ is uniquely determined by its arrows indexed by string components, ${N\choose n-1}$ gives an upper bound for the number of terms in $Z_{n;1}$. Using Lemma \ref{lem:a_i-ineq}, with $k=n-1$, $N$ as above and $a_k=\mathsf{cr}_{i_k,j_k}(D_L)$, $k=1,\ldots, N$, one obtains from \eqref{eq:estimate_term-n} \begin{equation}\label{eq:Z_n;1-cr}
|\langle Z_{n;1},G_L\rangle|\leq \frac{1}{N^{n-1}} {N \choose n-1}\Bigl(\sum_{i<j} \mathsf{cr}_{i,j}(D_L) \Bigr)^{n-1}. \end{equation} \begin{rem}\label{rem:general-arrow-poly}
{\em The estimate \eqref{eq:Z_n;1-cr} is valid for any arrow polynomial, in place of $Z_{n;1}$, which has arrows based on different components and no parallel arrows on a given component.} \end{rem} By \eqref{eq:bar-mu-Z_n;1}, we can find $k\in \mathbb{Z}$ such that $\langle Z_{n;1},G_L\rangle=\bar{\mu}_{n;1}+k\,d$. Since \[
\Big[\bar{\mu}_{n;1}(D_L)\Big]\leq |\bar{\mu}_{n;1}(D_L)+k\,d|=|\langle Z_{n;1},G_L\rangle|,\qquad\text{for all}\ k\in\mathbb{Z}, \] replacing $D_L$ with a diagram obtained by projection of $L$ in a generic direction $v\in S^2$, we rewrite the estimate \eqref{eq:Z_n;1-cr} as follows \begin{equation}\label{eq:alpha_n-cr} \alpha_n\sqrt[n-1]{\Big[\bar{\mu}_{n;1}(D_L(v))\Big]}\leq \sum_{i<j} \mathsf{cr}_{i,j}(v),\qquad \alpha_n=\Bigl(\frac{1}{ N^{n-1}} {N \choose n-1}\Bigr)^{\frac{-1}{n-1}}. \end{equation} Integrating over the sphere of directions and using invariance\footnote{both $\bar{\mu}_{n;1}$ and $d$ are isotopy invariants.} of $\Big[\bar{\mu}_{n;1}\Big]$ yields \[ 4\pi \alpha_n \sqrt[n-1]{\Big[\bar{\mu}_{n;1}(L)\Big]}\leq \sum_{i<j} \int_{S^2}\mathsf{cr}_{i,j}(v) dv. \] By Lemma \ref{lem:aov-bound}, we obtain \[ \alpha_n \sqrt[n-1]{\Big[\bar{\mu}_{n;1}(L)\Big]}\leq \sum_{i<j} \mathsf{acr}_{i,j}(L)= 2\sum_{i<j} \mathsf{aov}_{i,j}(L)\leq 2\sum_{i< j} \tfrac{1}{3}\min\bigl(\ell_i\ell^{\frac 13}_j,\ell_j\ell^{\frac 13}_i\bigr)\leq \tfrac{1}{3}\sum_{i \neq j} \ell_i\ell^{\frac 13}_j, \] since $\sum_{i< j} 2\min\bigl(\ell_i\ell^{\frac 13}_j,\ell_j\ell^{\frac 13}_i\bigr)\leq \sum_{i \neq j} \ell_i\ell^{\frac 13}_j$. As in derivation of \eqref{eq:lk-jensen} (see Corollary \ref{cor:rop-lk}), by Jensen Inequality: \begin{equation}\label{eq:rop-end} \operatorname{\mathsf{rop}}(L)^{\frac 43}\geq \frac{3\, n^{\frac 13}\, \alpha_n}{(n-1)} \sqrt[n-1]{\Big[\bar{\mu}_{n;1}(L)\Big]}. \end{equation} Now, let us estimate the constant $\alpha_n$. Note that \[ \frac{N^{n-1}}{{N \choose n-1}}=\frac{N^{n-1}}{N(N-1)\ldots (N-(n-1)+1)} (n-1)!\geq (n-1)!\ . 
\] Again, by Stirling's approximation (letting $m=n-1$ in \eqref{eq:stirling2}) we obtain for $n\geq 2$: \begin{equation}\label{eq:alpha_n-stirling} \alpha_n\geq \bigl((n-1)!\bigr)^{\frac{1}{n-1}}\geq \frac{n-1}{3}, \end{equation} thus \eqref{eq:rop-end} can be simplified to \begin{equation}\label{eq:rop-end2} \operatorname{\mathsf{rop}}(L)^{\frac 43}\geq \sqrt[3]{n} \sqrt[n-1]{\Big[\bar{\mu}_{n;1}(L)\Big]}, \end{equation} as claimed in the first inequality of Equation \eqref{eq2:mthm-rop-cr}. For a minimal diagram $D^{\min}_L$ of $L$: \[ \operatorname{\mathsf{Cr}}(L)\geq \sum_{i<j} \mathsf{cr}_{i,j}(D^{\min}_L), \]
thus the second inequality of \eqref{eq2:mthm-rop-cr} is an immediate consequence of \eqref{eq:alpha_n-cr}(with $D_L(v)$ replaced by $D^{\min}_L$) and \eqref{eq:alpha_n-stirling}. Using the permutation identity \eqref{eq:mu-symmetry} and the fact that $\operatorname{\mathsf{rop}}(\sigma(L))=\operatorname{\mathsf{rop}}(L)$ for any $\sigma\in\Sigma(1,\ldots,n)$, we may replace $\bar{\mu}_{n;1}(L)$ with any other\footnote{there are $(n-2)!$ different top Milnor linking numbers \cite{Milnor:1954}.} top $\bar{\mu}$--invariant of $L$. \end{proof}
In the case of almost trivial (Borromean) links, $d=0$, and we may slightly improve the estimate in \eqref{eq:Z_n;1-cr} of the above proof, by using the cyclic symmetry of $\bar{\mu}$--invariants pointed out in Remark \ref{rem:mu-cyclic}. We have in particular
\begin{equation}\label{eq:mu-sum-cyclic}
n\,\bar{\mu}_{23\ldots n;1}(L)=\sum_{\rho, \text{$\rho$ is cyclic}} \bar{\mu}_{\rho(2)\rho(3)\ldots\rho(n);\rho(1)}(L)=\sum_{\rho, \text{$\rho$ is cyclic}} \langle \rho(Z_{n;1}),G_L\rangle.
\end{equation}
Since cyclic permutations applied to the terms of $Z_{n;1}$ produce distinct arrow diagrams\footnote{since the trunk of a tree diagram is unique, c.f. \cite{Kravchenko-Polyak:2011}, \cite{Komendarczyk-Michaelides:2016}.}, by Remark \ref{rem:general-arrow-poly}, we obtain the following bound
\begin{equation}\label{eq:rho-Z_n;1-cr}
n\,|\bar{\mu}_{n;1}(L)|\leq \sum_{\rho, \text{$\rho$ is cyclic}} |\langle \rho(Z_{n;1}),G_L\rangle|\leq \frac{1}{N^{n-1}} {N \choose n-1}\Bigl(\sum_{i<j} \mathsf{cr}_{i,j}(D_L) \Bigr)^{n-1}.
\end{equation}
Disregarding Stirling's approximation, we have
\begin{equation}
\operatorname{\mathsf{rop}}(L)^{\frac 43}\geq \frac{3\sqrt[3]{n}\, \tilde{\alpha}_n}{(n-1)} \sqrt[n-1]{|\bar{\mu}_{n;1}(L)|},\quad \tilde{\alpha}_n=\Bigl(\frac{1}{n N^{n-1}} {N \choose n-1}\Bigr)^{\frac{-1}{n-1}},
\end{equation}
or using the second bound in \eqref{eq:aov-bound}
\[
\operatorname{\mathsf{rop}}(L)^2\geq 4^3\pi \tilde{\alpha}_n\Bigl(\frac{n^2}{n^2-1}\Bigr) \sqrt[n-1]{|\bar{\mu}_{n;1}(L)|}.
\]
In particular, for $n=3$, we have $N=3$ and $\tilde{\alpha}_3=3$ and the estimates read \begin{equation}\label{eq:rop-mu123}
\operatorname{\mathsf{rop}}(L)\geq \Bigl(5\sqrt[3]{3}\sqrt{|\bar{\mu}_{23;1}(L)|}\Bigr)^{\frac 34},\quad \operatorname{\mathsf{rop}}(L)\geq 6\sqrt{6\pi}\sqrt[4]{|\bar{\mu}_{23;1}(L)|}. \end{equation}
\noindent Since $6\sqrt{6\pi}\approx 26.049$, the second estimate is better for Borromean rings ($\mu_{23;1}=1$) and improves the linking number bound of \eqref{eq:len(L_i)-lk}: $6\pi\approx 18.85$, but falls short of the genus bound \eqref{eq:len(L_i)-Ac-gen}: $12\pi\approx 37.7$. Numerical simulations suggest that the ropelength of Borromean rings is $\approx 58.05$, \cite{Cantarella-Kusner-Sullivan:2002, Buniy-Cantarella-Kephart-Rawdon:2014}.
\begin{rem}\label{rem:method-extended} {\em
This methodology can be easily extended to other families of finite type invariants of knots and links. For illustration, let us consider the third coefficient of the Conway polynomial i.e. $c_3(L)$ of a two component link $L$. The arrow polynomial $C_3$ of $c_3(L)$ is given as follows \cite[p. 779]{Chmutov-Khoury-Rossi:2009}
\[
\vvcenteredinclude{.31}{C_3.pdf}
\]
Let $G_L$ be the Gauss diagram obtained from a regular link diagram $D_L$, and $D_{L_k}$ the subdiagram of the $k$th--component of $L$, $k=1,2$. The absolute value of the first term $\langle \vvcenteredinclude{.25}{C_3-term-1.pdf}, G_L\rangle$ of $\langle C_3, G_L\rangle$ does not exceed ${\operatorname{\mathsf{cr}}_{1,2}(D_L) \choose 3}$, the absolute value of the sum $\langle \vvcenteredinclude{.25}{C_3-term-2.pdf}, G_L\rangle$ does not exceed $\operatorname{\mathsf{cr}}(D_{L_1}){\operatorname{\mathsf{cr}}_{1,2}(D_{L}) \choose 2}$ and for the remaining terms a bound is ${\operatorname{\mathsf{cr}}(D_{L_1}) \choose 2}\operatorname{\mathsf{cr}}_{1,2}(D_{L})$. Therefore, a rough upper bound for $|\langle C_3, G_L\rangle|$ can be written as
\[
|\langle C_3, G_L\rangle|\leq (\operatorname{\mathsf{cr}}_{1,2}(D_{L})+\operatorname{\mathsf{cr}}(D_{L_1}))^3
\]
\noindent Similarly, as in \eqref{eq:alpha_n-cr}, replacing $D_L$ with $D_L(v)$ and integrating over the sphere of directions we obtain \[
|c_3(L)|^{\frac 13}\leq \mathsf{acr}_{1,2}(L)+\mathsf{acr}(L_1). \] For a unit thickness link $L$, \eqref{eq:rop-BS-acr} and \eqref{eq:aov-bound} give $\mathsf{acr}_{1,2}(L)+\mathsf{acr}(L_1)\leq \frac{1}{3}\ell^{\frac 13}_1\ell_2+\frac{1}{3}\ell^{\frac 13}_2\ell_1+\frac{4}{11}\ell^{\frac 13}_1\ell_1$, and $\mathsf{acr}_{1,2}(L)+\mathsf{acr}(L_1)\leq \frac{\ell^2_1}{16\pi}+\frac{\ell_1\ell_2}{8\pi}$. Thus, for some constants $A$, $B>0$, we have \[
\ell(L)^2\geq A\, |c_3(L)|^{\frac 13},\qquad \ell(L)^{\frac{4}{3}}\geq B\, |c_3(L)|^{\frac 13}. \] \noindent In general, given a finite type $n$ invariant $V_n(L)$ and a unit thickness $m$--link $L$, we may expect constants $\alpha_{m,n}$, $\beta_{m,n}$; such that \[
\ell(L)^2\geq \alpha_{m,n}\, |V_n(L)|^{\frac{1}{n}},\qquad \ell(L)^{\frac{4}{3}}\geq \beta_{m,n}\, |V_n(L)|^{\frac{1}{n}}. \] } \end{rem}
\end{document} |
\begin{document}
\begin{center}
\Large\bf{A covariant Stinespring type theorem for $\tau$-maps} \end{center}
\begin{center} HARSH TRIVEDI \end{center}
\begin{abstract} Let $\tau$ be a linear map from a unital $C^*$-algebra $\CMcal A$ to a von Neumann algebra $\CMcal B$ and let $\CMcal C$ be a unital $C^*$-algebra. A map $T$ from a Hilbert $\CMcal A$-module $E$ to a von Neumann $\CMcal C$-$\CMcal B$ module $F$ is called a $\tau$-map if $$\langle T(x),T(y)\rangle=\tau(\langle x, y\rangle)~\mbox{for all}~x,y\in E.$$ A Stinespring type theorem for $\tau$-maps and its covariant version are obtained when $\tau$ is completely positive. We show that there is a bijective correspondence between the set of all $\tau$-maps from $E$ to $F$ which are $(u',u)$-covariant with respect to a dynamical system $(G,\eta,E)$ and the set of all $(u',u)$-covariant $\widetilde{\tau}$-maps from the crossed product $E\times_{\eta} G$ to $F$, where $\tau$ and $\widetilde{\tau}$ are completely positive.
\noindent {\bf AMS 2010 Subject Classification:} Primary: 46L08,~46L55; Secondary: 46L07,~46L53.\\ \noindent {\bf Key words:} Stinespring representation; completely positive maps; von Neumann modules; dynamical systems.
\end{abstract}
\section{Introduction}
A linear mapping $\tau$ from a (pre-)$C^*$-algebra $\CMcal A$ to a (pre-)$C^*$-algebra $\CMcal B$ is called {\it completely positive} if $$\sum_{i,j=1}^n b_j^{*} \tau(a_j^{*}a_i)b_i\geq 0$$ for each $n\in \mathbb{N}$, $b_1,b_2,\ldots,b_n\in\CMcal B$ and $a_1,a_2,\ldots,a_n\in\CMcal A$. The completely positive maps are used significantly in the theory of measurements, quantum mechanics, operator algebras etc. Paschke's Gelfand-Naimark-Segal (GNS) construction (cf. Theorem 5.2, \cite{Pas73}) characterizes completely positive maps between unital $C^*$-algebras, which is an abstraction of Stinespring's theorem for operator valued completely positive maps (cf. Theorem 1, \cite{St55}). Now we define Hilbert $C^*$-modules, which are a generalization of Hilbert spaces and $C^*$-algebras; they were introduced by Paschke in the paper mentioned above and were also studied independently by Rieffel in \cite{Ri74}. \begin{definition} Let $\CMcal B$ be a (pre-)$C^*$-algebra and ${E}$ be a vector space which is a right $\CMcal B$-module satisfying $\alpha(xb)=(\alpha x)b=x(\alpha b)$ for $x\in {E},b\in \CMcal B,\alpha\in\mathbb{C}$. The space ${E}$ is called an {\rm inner-product $\CMcal B$-module} or a {\rm pre-Hilbert $\CMcal B$-module} if there exists a mapping $\langle \cdot,\cdot \rangle : E \times E \to \CMcal{B}$ such that \begin{itemize}
\item [(i)] $\langle x,x \rangle \geq 0 ~\mbox{for}~ x \in {E} $ and $\langle x,x \rangle = 0$ only if $x = 0 ,$ \item [(ii)] $\langle x,yb \rangle = \langle x,y \rangle b ~\mbox{for}~ x,y \in {E}$ and $~\mbox{for}~ b\in \CMcal B, $
\item [(iii)]$\langle x,y \rangle=\langle y,x \rangle ^*~\mbox{for}~ x ,y\in {E} ,$ \item [(iv)]$\langle x,\mu y+\nu z \rangle = \mu \langle x,y \rangle +\nu \langle x,z \rangle ~\mbox{for}~ x,y,z \in {E} $ and for $\mu,\nu \in \mathbb{C}$. \end{itemize}
An inner-product $\CMcal B$-module ${E}$
which is complete with respect to the norm $$\| x\| :=\|\langle x,x\rangle\|^{1/2} ~\mbox{for}~ x \in {E}$$ is called a {\rm Hilbert $\CMcal B$-module} or {\rm Hilbert $C^{*}$-module over $\CMcal B$}. It is said to be {\rm full} if the closure of the linear span of $\{\langle x,y\rangle:x,y\in{E}\}$ equals $\CMcal B$. \end{definition}
Hilbert $C^*$-modules are important objects to study the classification theory of $C^*$-algebras, the dilation theory of semigroups of completely positive maps, and so on. If a completely positive map takes values in any von Neumann algebra, then it gives us a von Neumann module by Paschke's GNS construction (cf. \cite{Sk01}). The von Neumann modules were recently utilized in \cite{BSu13} to explore Bures distance between two completely positive maps. Using the following definition of adjointable maps we define von Neumann modules: Let $E$ and $ F$ be (pre-)Hilbert $\CMcal A$-modules, where $\CMcal A$ is a (pre-)$C^*$-algebra. A map $S:E\to F$ is called {\it adjointable} if there exists a map $S': F\to E$ such that
\[
\langle S (x),y\rangle =\langle x,S'(y) \rangle~\mbox{for all}~x\in E, y\in F.
\] $S'$ is unique for each $S$, henceforth we denote it by $S^{*}$. We denote the set of all adjointable maps from $E$ to $ F$ by $\mathcal B^a (E,F)$ and we use $\mathcal B^a (E)$ for $\mathcal B^a (E,E)$. Symbols $\mathcal B(E, F)$ and $\mathcal B^r (E,F)$ represent the set of all bounded linear maps from $E$ to $ F$ and the set of all bounded right linear maps from $E$ to $F$, respectively.
\begin{definition}(cf. \cite{Sk00}) Let $\CMcal B$ be a von Neumann algebra acting on a Hilbert space $\CMcal H$, i.e., strongly closed $C^*$-subalgebra of $\mathcal B(\CMcal H)$ containing the identity operator. Let $E$ be a (pre-)Hilbert $\CMcal B$-module. The Hilbert space $E\bigodot \CMcal H$ is the interior tensor product of $E$ and $\CMcal H$. For each $x\in E$ we get a bounded linear map from $\CMcal H$ to $E\bigodot \CMcal H$ defined as $$L_x (h):=x\odot h~\mbox{for all}~ h\in \CMcal H.$$ Note that $L^*_{x_1} L_{x_2} =\langle x_1,x_2\rangle~\mbox{for}~ x_1,x_2\in E.$ So we identify each $x\in E$ with $L_x$ and consider $E$ as a concrete submodule of $\mathcal B(\CMcal H,E\bigodot \CMcal H)$. The module $E$ is called a {\rm von Neumann $\CMcal B$-module} or a {\rm von Neumann module over $\CMcal B$} if $E$ is strongly closed in $\mathcal B(\CMcal H,E\bigodot \CMcal H)$. Let $\CMcal A$ be a unital (pre-)$C^*$-algebra. A von Neumann $\CMcal B$-module $E$ is called a {\rm von Neumann $\CMcal A$-$\CMcal B$ module} if there exists an adjointable left action of $\CMcal A$ on $E$. \end{definition}
An alternate approach to the theory of von Neumann modules was recently introduced in \cite{BMSS12}, where an analogue of Stinespring's theorem for von Neumann bimodules is discussed. The comparison of results coming from these two approaches is provided by \cite{Ske12}.
Let $G$ be a locally compact group and let $M(\CMcal A)$ denote the multiplier algebra of any $C^*$-algebra $\CMcal A$. An {\it action of $G$ on $\CMcal A$} is defined as a group homomorphism $\alpha:G\to Aut(\CMcal A)$. If $t\mapsto \alpha_{t}(a)$ is continuous for all $a\in\CMcal A$, then we call $(G,\alpha,\CMcal A)$ a {\it $C^*$-dynamical system}. \begin{definition}\label{def3}(cf. \cite{Kap93}) Let $\CMcal A$, $\CMcal B$ be unital (pre-)$C^*$-algebras and $G$ be a locally compact group. Let $(G,\alpha,\CMcal A)$ be a $C^*$-dynamical system and $u:G\to \CMcal U\CMcal B$ be a unitary representation where $\CMcal U \CMcal B$ is the group of all unitary elements of $\CMcal B$. A completely positive map $\tau:\CMcal A\to\CMcal B$ is called {\rm $u$-covariant} with respect to $(G,\alpha,\CMcal A)$ if \[ \tau(\alpha_{t}(a))=u_{t}\tau(a)u^{*}_t~\mbox{for all}~a\in\CMcal A~\mbox{and}~t\in G. \]
\end{definition}
The existence of covariant completely positive liftings (cf. \cite{CE76}) and a covariant version of Stinespring's theorem for operator-valued $u$-covariant completely positive maps were obtained by Paulsen in \cite{Pau82}, and they were used to provide three groups out of equivalence classes of covariant extensions. Later Kaplan (cf. \cite{Kap93}) extended this covariant version and as an application analyzed the completely positive lifting problem for homomorphisms of the reduced group $C^*$-algebras.
A map ${T}$ from a (pre-)Hilbert $\CMcal A$-module ${E}$ to a (pre-)Hilbert $\CMcal B$-module ${F}$ is called a {\it $\tau$-map} (cf. \cite{SSu14}) if $$\langle T(x),T(y)\rangle=\tau(\langle x,y\rangle)~\mbox{for all} ~x,y\in{E}.$$ Recently a Stinespring type theorem for $\tau$-maps was obtained by Bhat, Ramesh and Sumesh (cf. \cite{BRS12}) for any operator valued completely positive map $\tau$ defined on a unital $C^*$-algebra. There are two covariant versions of this Stinespring type theorem; see Theorem 3.4 of \cite{Jo11} and Theorem 3.2 of \cite{HJ11}. In Section \ref{sec1.2}, we give a Stinespring type theorem for $\tau$-maps, when $\CMcal B$ is any von Neumann algebra and $F$ is any von Neumann $\CMcal B$-module.
In \cite{DH14} the notion of $\mathfrak K$-families is introduced, which is a generalization of the $\tau$-maps, and several results are derived for covariant $\mathfrak K$-families. In \cite{SSu14} different characterizations of the $\tau$-maps were obtained and as an application the dilation theory of semigroups of the completely positive maps was discussed. Extending some of these results for $\mathfrak K$-families, application to the dilation theory of semigroups of completely positive definite kernels is explored in \cite{DH14}.
In this article we get a covariant version of our Stinespring type theorem which requires the following notions: Let $\CMcal A$ and $\CMcal B$ be $C^*$-algebras, $E$ be a Hilbert $\CMcal A$-module, and let $ F$ be a Hilbert $\CMcal B$-module. A map $\Psi:E\to F$ is said to be a {\it morphism of Hilbert $C^*$-modules} if there exists a $C^*$-algebra homomorphism $\psi:\CMcal A\to\CMcal B$ such that $$\langle \Psi(x),\Psi(y)\rangle=\psi(\langle x,y\rangle)~\mbox{for all}~ x,y\in E.$$ If $E$ is full, then $\psi$ is unique for $\Psi$. A bijective map $\Psi:E\to F$ is called an {\it isomorphism of Hilbert $C^*$-modules} if $\Psi$ and $\Psi^{-1}$ are morphisms of Hilbert $C^*$-modules. We denote the group of all isomorphisms of Hilbert $C^*$-modules from $E$ to itself by $Aut(E)$.
\begin{definition}\label{def1.8} Let $G$ be a locally compact group and let $\CMcal A$ be a $C^*$-algebra. Let $E$ be a full Hilbert $\CMcal A$-module. A group homomorphism $t\mapsto\eta_{t}$ from $G$ to $Aut({E})$ is called a {\em continuous action of $G$ on ${E}$} if $t\mapsto \eta_{t}(x)$ from $G$ to ${E}$ is continuous for each $x\in E$. In this case we call the triple $(G,\eta,E)$ a {\rm dynamical system on the Hilbert $\CMcal A$-module $E$}. Any $C^*$-dynamical system $(G,\alpha,\CMcal A)$ can be regarded as a dynamical system on the Hilbert $\CMcal A$-module $\CMcal A$. \end{definition}
Let $E$ be a full Hilbert $C^*$-module over a unital $C^*$-algebra $\CMcal A$. Let $ F$ be a von Neumann $\CMcal C$-$\CMcal B$ module, where $\CMcal C$ is a unital $C^*$-algebra and $\CMcal B$ is a von Neumann algebra. We define covariant $\tau$-maps with respect to $(G,\eta,E)$ in Section \ref{sec1.2}, and develop a covariant version of our Stinespring type theorem. If $(G,\eta,E)$ is a dynamical system on $E$, then there exists a crossed product Hilbert $C^*$-module $E\times_{\eta} G$ (cf. \cite{EKQR00}). In Section \ref{sec1.3}, we prove that any
$\tau$-map from $E$ to $F$ which is $(u',u)$-covariant with respect to the dynamical system $(G,\eta,E)$ extends to a $(u',u)$-covariant $\widetilde{\tau}$-map from $E\times_{\eta} G$ to $F$, where $\tau$ and $\widetilde{\tau}$ are completely positive. As an application we describe how covariant $\tau$-maps on $(G,\eta,E)$ and covariant $\widetilde{\tau}$-maps on $E\times_{\eta} G$ are related, where $\tau$ and $\widetilde{\tau}$ are completely positive maps. The approach in this article is similar to \cite{BRS12} and \cite{Jo11}. \section{A Stinespring type theorem and its covariant version}\label{sec1.2} \begin{definition}\label{def1.1} Let $\CMcal A$ and $\CMcal B$ be (pre-)$C^*$-algebras. Let $E$ be a Hilbert $\CMcal A$-module and let $ F$, $ F'$ be inner product $\CMcal B$-modules. A map $\Psi:E\to \mathcal B^r( F, F')$ is called {\rm quasi-representation} if there exists a $*$-homomorphism $\pi:\CMcal A\to \mathcal B^a ( F)$ satisfying $$ \langle\Psi(y) f_1,\Psi(x) f_2\rangle=\langle\pi(\langle x,y\rangle)f_1,f_2\rangle~\mbox{for all}~x,y\in E~\mbox{and}~f_1,f_2\in F.$$ In this case we say that $\Psi$ is a quasi-representation of $E$ on $ F$ and $ F'$, and $\pi$ is associated to $\Psi$. \end{definition}
It is clear that Definition \ref{def1.1} generalizes the notion of representations of Hilbert $C^*$-modules on Hilbert spaces (cf. p.804 of \cite{Jo11}). The following theorem provides a decomposition of $\tau$-maps in terms of quasi-representations. We use the symbol sot-$\lim$ for the limit with respect to the strong operator topology. Notation $[S]$ will be used for the norm closure of the linear span of any set $S.$
\begin{theorem}\label{prop1.3} Let $\CMcal A$ be a unital $C^*$-algebra and let $\CMcal B$ be a von Neumann algebra acting on a Hilbert space $\CMcal H$. Let $E$ be a Hilbert $\CMcal A$-module, $ E'$ be a von Neumann $\CMcal B$-module and let $\tau:\CMcal A\to\CMcal B$ be a completely positive map. If $T:E\to E'$ is a $\tau$-map, then there exist
\begin{itemize} \item [(i)] \begin{enumerate} \item [(a)] a von Neumann $\CMcal B$-module $F$ and a representation $\pi$ of $\CMcal A$ to $\mathcal B^a (F)$, \item [(b)] a map $V\in \mathcal B^a (\CMcal B,F)$ such that $\tau(a)b=V^{*}\pi(a)Vb~\mbox{for all}~a\in\CMcal A~\mbox{and}~b\in \CMcal B,~$ \end{enumerate} \item [(ii)] \begin{enumerate} \item [(a)] a von Neumann $\CMcal B$-module $ F'$ and a quasi-representation $\Psi:E\to \mathcal B^a (F, F')$ such that $\pi$ is associated to $\Psi$, \item [(b)] a coisometry $S$ from $ E'$ onto $ F'$ satisfying
$$T(x)b=S^{*}\Psi(x)Vb~\mbox{for all}~x\in E~\mbox{and}~b\in \CMcal B.$$ \end{enumerate} \end{itemize} \end{theorem} \begin{proof}
Let $\langle~,~\rangle$ be a $\CMcal B$-valued positive definite semi-inner product on $\CMcal A\bigotimes_{alg} \CMcal B$ defined by $$\langle a\otimes b, c\otimes d\rangle:=b^*\tau(a^* c)d~\mbox{for}~a,c\in \CMcal A~\mbox{and}~b,d\in\CMcal B.$$ Using Cauchy-Schwarz inequality we deduce that $K=\{x\in {\CMcal A\bigotimes_{alg} \CMcal B}:\langle x, x\rangle=0\}$ is a submodule of $\CMcal A\bigotimes_{alg} \CMcal B$. Therefore $\langle~,~\rangle$ extends naturally on the quotient module $\left({\CMcal A\bigotimes_{alg} \CMcal B}\right)/ K$ as a $\CMcal B$-valued inner product. We get a Stinespring triple $(\pi_0, V, F_0)$ associated to $\tau$, construction is similar to Proposition 1 of \cite{Kap93}, where $F_0$ is the completion of the inner-product $\CMcal B$-module $\left({\CMcal A\bigotimes_{alg} \CMcal B}\right)/ K$, $\pi_0:\CMcal A\to \mathcal B^a (F_0)$ is a $*$-homomorphism defined by $$\pi_0(a')(a\otimes b+ K):= a' a\otimes b+ K~\mbox{for all}~a,a'\in\CMcal A~\mbox{and}~b\in \CMcal B,$$ and a mapping $V\in \mathcal B^a(\CMcal B, F_0)$ is defined by \[ V(b)=1\otimes b+ K~\mbox{for all}~b\in \CMcal B.\] Indeed, $[\pi_0(\CMcal A)V \CMcal B]=F_0$. Let $F$ be the strong operator topology closure of $F_0$ in $\mathcal B(\CMcal H,F_0\bigodot\CMcal H)$. Without loss of generality we can consider $V\in \mathcal B^a(\CMcal B, F)$. Adjointable left action of $\CMcal A$ on $F_0$ extends to an adjointable left action of $\CMcal A$ on $F$ as follows: \[
\pi(a)(f):=\mbox{sot-}\displaystyle\lim_{\alpha} \pi_0(a)(f^0_{\alpha})~\mbox{where $a\in \CMcal A$,
$f$=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}\in F$ with $f^0_{\alpha}\in F_0$.} \] For all $a\in \CMcal A$; $f$=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}$, $g$=sot-$\displaystyle\lim_{\beta} g^0_{\beta}\in F$ with $f^0_{\alpha},g^0_{\beta}\in F_0$ we have
\begin{align*}
\langle \pi(a) f,g\rangle &=\mbox{sot-}\displaystyle\lim_{\beta}\langle \pi(a) f,g^0_{\beta}\rangle
=\mbox{sot-}\displaystyle\lim_{\beta}(\mbox{sot-}\displaystyle\lim_{\alpha}\langle g^0_{\beta},\pi_0(a) f^0_{\alpha}\rangle)^*
\\ &=\mbox{sot-}\displaystyle\lim_{\beta}(\mbox{sot-}\displaystyle\lim_{\alpha}\langle \pi_0(a)^*g^0_{\beta}, f^0_{\alpha}\rangle)^*
= \langle f,\pi(a^*)g\rangle. \end{align*} The triple $(\pi, V, F)$ satisfies all the conditions of the statement (i).
Let $ F''$ be the Hilbert $\CMcal B$-module $[T(E)\CMcal B]$. For $x\in E$, define $\Psi_0(x):F_0\to F''$ by \begin{align*}\Psi_0(x)(\displaystyle\sum_{j=1}^{n}\pi_0(a_j)Vb_j):=\displaystyle\sum_{j=1}^{n}T(xa_j)b_j~\mbox{ for all}~a_j\in \CMcal A, b_j\in\CMcal B. \end{align*} It follows that \begin{align*}&\langle\Psi_0(y) (\displaystyle\sum_{j=1}^n\pi_0(a_j)Vb_j),\Psi_0(x)(\displaystyle\sum_{i=1}^m\pi_0(a'_i)Vb'_i)\rangle
= \displaystyle\sum_{j=1}^n \displaystyle\sum_{i=1}^m b_j^{*}\langle T(ya_j),T(xa'_i)\rangle b'_i\\ &=\displaystyle\sum_{i=1}^m \displaystyle\sum_{j=1}^n b_j^{*}\tau(\langle ya_j,xa'_i\rangle) b'_i = \displaystyle\sum_{j=1}^n \displaystyle\sum_{i=1}^m\langle \pi_0(a'_i)^*\pi_0(\langle x,y\rangle)\pi_0(a_j)Vb_j,Vb'_i\rangle \\& = \langle \pi_0(\langle x,y\rangle)(\displaystyle\sum_{j=1}^n\pi_0(a_j)Vb_j), \displaystyle\sum_{i=1}^m\pi_0(a'_i)Vb'_i\rangle \end{align*} for all $x,y\in E, a'_i,a_j\in\CMcal A, b'_i,b_j\in\CMcal B~\mbox{where}~ 1\leq j\leq n~\mbox{and}~1\leq i\leq m$. This computation proves that $\Psi_0(x) \in \mathcal B^r(F_0, F'')$ for each $x\in E$ and also that $\Psi_0:E\to \mathcal B^r(F_0, F'')$ is a quasi-representation. We denote by $ F'$ the strong operator topology closure of $F''$ in $\mathcal B(\CMcal H, E'\bigodot \CMcal H)$. Let $x\in E$, and let $\Psi(x):F\to F'$ be a mapping defined by \[ \Psi(x)(f):=\mbox{sot-}\lim_{\alpha} \Psi_0 (x) f^0_{\alpha}~\mbox{where $f$=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}\in F$ for $f^0_{\alpha}\in F_0$.}~ \]
For all $f$=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}\in F$ with $f^0_{\alpha} \in F_0$ and for all $x,y\in E$ we have \begin{align*} \langle \Psi(x) f,\Psi(y) f\rangle= \mbox{sot-}\lim_{\alpha}\{\mbox{sot-}\lim_{\beta}\langle \Psi_0(y)f^0_{\alpha}, \Psi_0(x)f^0_{\beta}\rangle\}^*=\langle f,\pi(\langle x,y\rangle)f\rangle. \end{align*} Since $F$ is a von Neumann $\CMcal B$-module, this proves that $\Psi:E\to \mathcal B^a (F, F')$ is a quasi-representation. Since, $ F'$ is a von Neumann $\CMcal B$-submodule of $ E'$, there exists an orthogonal projection
from $ E'$ onto $ F'$ (cf. Theorem 5.2 of \cite{Sk00}) which we denote by $S$. Eventually \begin{eqnarray*} S^*\Psi(x) Vb=\Psi(x)Vb=\Psi(x)(\pi(1)Vb)=T(x)b~\mbox{for all}~x\in E, b\in\CMcal B. \end{eqnarray*}
\end{proof}
Let $E$ be a (pre-)Hilbert $\CMcal A$-module, where $\CMcal A$ is a (pre-)$C^*$-algebra. A map $u\in \mathcal B^a (E)$ is said to be {\it unitary} if $u^{*} u=u u^{*}=1_{E}$ where $1_{E}$ is the identity operator on $E$. We denote the set of all unitaries in $\mathcal B^a (E)$ by $\CMcal U \mathcal B^a (E)$.
\begin{definition}\label{def1.4} Let $\CMcal B$ be a (pre-)$C^*$-algebra, $(G,\alpha,\CMcal A)$ be a $C^*$-dynamical system of a locally compact group $G$, and let $ F$ be a (pre-)Hilbert $\CMcal B$-module. A representation $\pi:\CMcal A\to \mathcal B^a ( F)$ is called {\rm $v$-covariant} with respect to $(G,\alpha,\CMcal A)$ and with respect to a unitary representation $v:G\to \CMcal U \mathcal B^a ( F)$ if \[ \pi(\alpha_{t}(a))=v_{t}\pi(a)v^{*}_{t}~\mbox{for all}~a\in\CMcal A,t\in G. \] In this case we write $(\pi,v)$ is a covariant representation of $(G,\alpha, \CMcal A)$. \end{definition}
Let $E$ be a full Hilbert $\CMcal A$-module and let $G$ be a locally compact group. If $(G,\eta,E)$ is a dynamical system on $E$, then there exists a unique $C^*$-dynamical system $(G,\alpha^{\eta},\CMcal A)$ (cf. p.806 of \cite{Jo11}) such that $$\alpha^{\eta}_t(\langle x,y \rangle)=\langle {\eta}_{t}(x), {\eta}_{t}(y)\rangle~\mbox{for all}~x,y\in E~\mbox{and}~t\in G.$$ We denote by $(G,\alpha^{\eta},\CMcal A)$ the $C^*$-dynamical system coming from the dynamical system $(G,\eta,E)$. For all $x\in E$ and $a\in \CMcal A$ we infer that $\eta_t (xa)=\eta_t(x) \alpha^{\eta}_t (a)$, for \begin{align*}
\|\eta_t (xa)-\eta_t(x)\alpha^{\eta}_t (a)\|^2 =&\| \langle \eta_t (xa), \eta_t (xa)\rangle
- \langle\eta_t (xa), \eta_t (x)\alpha^{\eta}_t (a)\rangle\\ &-\langle \eta_t (x)\alpha^{\eta}_t (a),\eta_t (xa)\rangle
+\langle\eta_t (x)\alpha^{\eta}_t (a),\eta_t (x)\alpha^{\eta}_t (a)\rangle\|\\ =&\| \alpha^{\eta}_t(\langle xa, xa\rangle)
- \langle\eta_t (xa), \eta_t (x)\rangle\alpha^{\eta}_t (a)\\ &-\alpha^{\eta}_t (a^*)\langle \eta_t (x),\eta_t (xa)\rangle
+\alpha^{\eta}_t (a^*)\langle\eta_t (x),\eta_t (x)\rangle\alpha^{\eta}_t (a)\|\\ =& 0. \end{align*}
\begin{definition} Let $\CMcal B$ and $\CMcal C$ be unital (pre-)$C^*$-algebras. A {\rm (pre-)$C^*$-correspondence from $\CMcal C$ to $\CMcal B$} is defined as a (pre-)Hilbert $\CMcal B$-module $ F$ together with a $*$-homomorphism $\pi':\CMcal C\to \mathcal B^a ( F)$. The adjointable left action of $\CMcal C$ on $ F$ induced by $\pi'$ is defined as \[ cy:=\pi'(c)y~\mbox{for all}~c\in \CMcal C,y\in F. \] \end{definition}
In the remaining part of this section a covariant version of Theorem \ref{prop1.3} is derived, which finds applications in the next section. For that we first define covariant $\tau$-maps using the notion of (pre-)$C^*$-correspondence. Every von Neumann $\CMcal B$-module $E$ can be considered as a (pre-)$C^*$-correspondence from $\mathcal B^a (E)$ to $\CMcal B$.
\begin{definition} (cf. \cite{Jo11}) Let $\CMcal A$ be a unital $C^*$-algebra and let $\CMcal B$, $\CMcal C$ be unital (pre-)$C^*$-algebras. Let $E$ be a Hilbert $\CMcal A$-module and let $ F$ be a (pre-)$C^*$-correspondence from $\CMcal C$ to $\CMcal B$. Let $u:G\to \CMcal U \CMcal B$ and $u':G\to \CMcal U \CMcal C$ be unitary representations on a locally compact group $G$. A $\tau$-map, $T:E\to F$, is called {\rm $(u',u)$-covariant} with respect to the dynamical system $(G,\eta,E)$ if \[ T(\eta_{t}(x))=u'_tT(x)u^{*}_t~\mbox{for all}~x\in E~\mbox{and}~t\in G. \] \end{definition}
If $E$ is full and $T:E\to F$ is a $\tau$-map which is $(u',u)$-covariant with respect to $(G,\eta,E)$, then the map $\tau$ is $u$-covariant with respect to the induced $C^*$-dynamical system $(G,\alpha^{\eta},\CMcal A)$, because \begin{align*} \tau(\alpha^{\eta}_t(\langle x,y \rangle))&=\tau(\langle {\eta}_{t}(x), {\eta}_{t}(y)\rangle)=\langle T({\eta}_{t}(x)), T({\eta}_{t}(y))\rangle=\langle u'_tT(x)u^{*}_t,u'_tT(y)u^{*}_t\rangle \\&=\langle T(x)u^{*}_t,T(y)u^{*}_t\rangle = u_t \langle T(x),T(y)\rangle u^{*}_t=u_t\tau(\langle x,y\rangle)u^{*}_t \end{align*} for all $x,y\in E$ and $t\in G$.
\begin{definition} Let $(G,\eta,E)$ be a dynamical system on a Hilbert $\CMcal A$-module $E$, where $\CMcal A$ is a $C^*$-algebra. Let $ F$ and $ F'$ be Hilbert $\CMcal B$-modules over a (pre-)$C^*$-algebra $\CMcal B$. $w:G\to \CMcal U\mathcal B^a ( F')$ and $v:G\to \CMcal U\mathcal B^a ( F)$ are unitary representations on a locally compact group $G$. A quasi-representation of $E$ on $ F$ and $ F'$ is called {\rm $(w,v)$-covariant with respect to $(G,\eta,E)$} if \[ \Psi(\eta_{t}(x))=w_t\Psi(x)v^{*}_t~\mbox{for all}~x\in E~\mbox{and}~t\in G. \] In this case we say that $(\Psi,v,w, F, F')$ is a covariant quasi-representation of $(G,\eta,E)$. Any $v$-covariant representation of a $C^*$-dynamical system $(G,\alpha,\CMcal A)$ can be regarded as a $(v,v)$-covariant representation of a dynamical system on the Hilbert $\CMcal A$-module $\CMcal A$. \end{definition}
Let $\CMcal A$ be a $C^*$-algebra and let $G$ be a locally compact group. Let $E$ be a full Hilbert $\CMcal A$-module, and let $ F$ and $ F'$ be Hilbert $\CMcal B$-modules over a (pre-)$C^*$-algebra $\CMcal B$. If $(\Psi,v,w, F, F')$ is a covariant quasi-representation with respect to $(G,\eta,E)$, then the representation of $\CMcal A$ associated to $\Psi$ is $v$-covariant with respect to $(G,\alpha^{\eta},\CMcal A)$. Moreover, if $\pi$ is the representation associated to $\Psi$, then \begin{align*} \langle\pi(\alpha^{\eta}_t(\langle x,y \rangle))f,f'\rangle &=\langle\pi(\langle {\eta}_{t}(x), {\eta}_{t}(y)\rangle)f,f'\rangle=\langle \Psi({\eta}_{t}(y))f,\Psi({\eta}_{t}(x)) f'\rangle\\&= \langle w_t\Psi(y)v^{*}_t f, w_t\Psi(x)v^{*}_t f'\rangle=\langle v_t\pi(\langle x,y\rangle)v^{*}_t f,f'\rangle \end{align*} for all $x,y\in E$, $t\in G$ and $f,f'\in F$.
\begin{theorem}\label{prop1.6} Let $\CMcal A$, $\CMcal C$ be unital $C^*$-algebras and let $\CMcal B$ be a von Neumann algebra acting on $\CMcal H$. Let $u:G\to \CMcal U \CMcal B$, $u':G\to \CMcal U \CMcal C$ be unitary representations of a locally compact group $G$. Let $E$ be a full Hilbert $\CMcal A$-module and $E'$ be a von Neumann $\CMcal C$-$\CMcal B$ module. If $T:E\to E'$ is a $\tau$-map which is $(u',u)$-covariant with respect to $(G,\eta,E)$ and if $\tau:\CMcal A\to \CMcal B$ is completely positive, then there exist
\begin{itemize} \item [(i)] \begin{enumerate} \item [(a)] a von Neumann $\CMcal B$-module $F$ with a covariant representation $(\pi, v)$ of $(G,\alpha^{\eta},\CMcal A)$ to $\mathcal B^a (F)$, \item [(b)] a map $V\in \mathcal B^a (\CMcal B,F)$ such that \begin{enumerate} \item [(1)] $\tau(a)b=V^{*}\pi(a)Vb~\mbox{for all}~a\in\CMcal A,~b\in \CMcal B,~$ \item [(2)] $v_{t}Vb=Vu_{t}b$ for all $t\in G$, $b\in \CMcal B$, \end{enumerate} \end{enumerate} \item [(ii)] \begin{enumerate} \item [(a)] a von Neumann $\CMcal B$-module $ F'$ and a covariant quasi-representation \newline $(\Psi,v, w,F, F')$ of $(G,\eta,E)$ such that $\pi$ is associated to $\Psi$, \item [(b)] a coisometry $S$ from $ E'$ onto $ F'$ such that \begin{enumerate} \item [(1)] $T(x)b=S^{*}\Psi(x)Vb~\mbox{for all}~x\in E,~b\in \CMcal B,$ \item [(2)] $w_{t}Sy=Su'_{t}y$ for all $t\in G$, $y\in E'$. \end{enumerate} \end{enumerate} \end{itemize} \end{theorem} \begin{proof} By part (i) of Theorem \ref{prop1.3} we obtain the triple $(\pi,V, F)$ associated to $\tau$. Here $F$ is a von Neumann $\CMcal B$-module,
$V\in \mathcal B^a (\CMcal B,F)$, and $\pi$ is a representation of $\CMcal A$ to $\mathcal B^a (F)$ such that $$\tau(a)b=V^{*}\pi(a)Vb~\mbox{for all}~a\in\CMcal A,~b\in \CMcal B.~$$ Recall the proof, using the submodule $K$ we have constructed the triple $(\pi_0, V, F_0)$ with $[\pi_0(\CMcal A)V\CMcal B]=F_0$. Define $v^0: G\to \mathcal B^a (F_0)$ (cf. Theorem 3.1, \cite{He99}) by $$v^0_t(a\otimes b+ K):=\alpha_t(a)\otimes u_{t}(b) + K~\mbox{for all $a\in\CMcal A$, $b\in \CMcal B$ and $t\in G.$}~$$
Since $\tau$ is $u$-covariant with respect to $(G,\alpha^{\eta},\CMcal A)$, for $a,a'\in\CMcal A$, $b,b'\in \CMcal B$ and $t\in G$ it follows that
\begin{align*}
\langle v^0_t a\otimes b+ K, v^0_t a'\otimes b'+ K\rangle
&=\langle \alpha_t (a)\otimes u_t b,\alpha_t (a')\otimes u_t b'\rangle=(u_t b)^* \tau(\alpha_t(a^*a'))u_t b'
\\ &=b^*\tau(a^*a')b'=\langle a\otimes b+ K, a'\otimes b'+ K\rangle.
\end{align*} This map $v^0_t$ extends as a unitary on $F_0$ for each $t\in G$ and further we get a group homomorphism $v^0:G\to \CMcal U \mathcal B^a (F_0)$. The continuity of $t\mapsto\alpha^{\eta}_{t}(a)$ for each $a\in\CMcal A$, the continuity of $u$ and the fact that $v^0_t$ is a unitary for each $t\in G$ together imply the continuity of $v^0$. Thus $v^0:G\to \CMcal U \mathcal B^a (F_0)$ becomes a unitary representation. For each $t\in G$ define $v_t :F\to F$ by \[
v_t (\mbox{sot-}\lim_{\alpha} f^0_{\alpha}):=\mbox{sot-}\lim_{\alpha} v^0_t (f^0_{\alpha})~\mbox{where $f$
=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}\in F$ for $f^0_{\alpha}\in F_0$.}~ \] It is clear that $v:G\to \mathcal B^a (F)$ is a unitary representation of $G$ on $F$ and moreover it satisfies the condition (i)(b)(2) of the statement.
Notation $F''$ will be used for $[T(E)\CMcal B]$ which is a Hilbert $\CMcal B$-module. Let $ F'$ be the strong operator topology closure of $ F''$ in $\mathcal B(\CMcal H, E'\bigodot \CMcal H)$. For each $x\in E$, define $\Psi_0(x):F_0\to F''$ by \[\Psi_0(x)(\displaystyle\sum_{j=1}^{n}\pi(a_j)Vb_j):=\displaystyle\sum_{j=1}^{n}T(xa_j)b_j~\mbox{ for all}~a_j\in \CMcal A, b_j\in\CMcal B \] and define $\Psi(x):F\to F'$ by \[ \Psi(x)(f):=\mbox{sot-}\lim_{\alpha} \Psi_0 (x) f^0_{\alpha}~\mbox{where $f$=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}\in F$ for $f^0_{\alpha}\in F_0$.}~ \] $\Psi_0:E\to \mathcal B^r(F_0, F'')$ and $\Psi:E\to \mathcal B^a (F, F')$ are quasi-representations (see part (ii) of Theorem \ref{prop1.3}). Indeed, there exists an orthogonal projection $S$ from $ E'$ onto $ F'$ such that $$T(x)b=S^{*}\Psi(x)Vb~\mbox{for all}~x\in E~\mbox{and}~b\in \CMcal B.$$ Since $T$ is $(u',u)$-covariant, we have
\[u'_t(\displaystyle\sum_{i=1}^{n}T(x_i)b_i)=\displaystyle\sum_{i=1}^{n}T(\eta_{t}(x_i))u_{t}b_i ~\mbox{for all}~t\in G,x_i\in E,b_i\in \CMcal B, i=1,2,\ldots,n.\] From this computation it is clear that $ F''$ is invariant under $u'$. For each $t\in G$ define $w^0_t:=u'_t|_{ F''}$, the restriction of $u'_t$ to $ F''$. In fact, $t\mapsto w^0_t$ is a unitary representation of $G$ on $ F''$. Further \begin{align*} &\Psi_0(\eta_{t}(x))(\displaystyle\sum_{i=1}^{n}\pi_0(a_i)Vb_i)= \displaystyle\sum_{i=1}^{n}T(\eta_{t}(x)\alpha^{\eta}_{t}\alpha^{\eta}_{t^{-1}}(a_i))b_i = \displaystyle\sum_{i=1}^{n}T(\eta_{t}(x\alpha^{\eta}_{t^{-1}}(a_i)))b_i \\ &=\displaystyle\sum_{i=1}^{n}u^{'}_tT(x\alpha^{\eta}_{t^{-1}}(a_i))u_{t^{-1}}b_i =w^0_{t}\Psi_0(x)(\displaystyle\sum_{i=1}^{n}\pi_0(\alpha^{\eta}_{t^{-1}}(a_i))Vu_{t^{-1}}b_i)\\ &= w^0_{t}\Psi_0(x)v_{t^{-1}}(\displaystyle\sum_{i=1}^{n}\pi_0(a_i)Vb_i) \end{align*} for all $a_1,a_2,\ldots,a_n\in \CMcal A$, $b_1,b_2,\ldots,b_n\in \CMcal B, x\in E,t\in G$. Therefore $(\Psi_0,v^0, w^0,F_0, F'')$ is a covariant quasi-representation of $(G,\eta,E)$ and $\pi_0$ is associated to $\Psi_0$. For each $t\in G$ define $w_t: F'\to F'$ by \[
w_t (\mbox{sot-}\displaystyle\lim_{\alpha} f''_{\alpha}):=\mbox{sot-}\displaystyle\lim_{\alpha} u'_t f''_{\alpha}
~\mbox{where all $f''_{\alpha}\in F''$.}~ \]
It is evident that the map $t\mapsto w_t$ is a unitary representation of $G$ on $ F'$. $S$ is the orthogonal projection of $ E'$ onto $ F'$, so we obtain $w_tS=Su'_t$ on $ E'$ for all $t\in G$. Finally \begin{align*} &\Psi(\eta_{t}(x))f=\mbox{sot-}\lim_{\alpha} \Psi_0 (\eta_{t}(x)) f^0_{\alpha} =\mbox{sot-}\lim_{\alpha} w^0_{t}\Psi_0(x)v^0_{t^{-1}} f^0_{\alpha}=w_{t}\Psi(x)v_{t^{-1}}f \end{align*} for all $x\in E$, $t\in G$ and $f$=sot-$\displaystyle\lim_{\alpha} f^0_{\alpha}\in F$ for $f^0_{\alpha}\in F_0$. Hence $(\Psi,v, w,F, F')$ is a covariant quasi-representation of $(G,\eta,E)$, and $\pi$ is associated to $\Psi$.
\section{$\tau$-maps from the crossed product of Hilbert $C^*$-modules}\label{sec1.3}
Let $(G,\eta,E)$ be a dynamical system on $E$, which is a full Hilbert $C^*$-module over $\CMcal A$, where $G$ is a locally compact group. The {\it crossed product} Hilbert $C^*$-module $E\times_{\eta}G$ (cf. Proposition 3.5, \cite{EKQR00}) is the completion of an inner-product $\CMcal A\times_{\alpha^{\eta}}G$-module $C_c(G,E)$ such that the module action and the $\CMcal A\times_{\alpha^{\eta}}G$-valued inner product are given by \begin{align*}\label{eqn1} lg(s)&=\int_{G} l(t)\alpha^{\eta}_{t}(g(t^{-1}s))dt,\\ \langle l,m\rangle_{\CMcal A\times_{\alpha^{\eta}}G }(s)&=\int_{G} \alpha^{\eta}_{t^{-1}}(\langle l(t),m(ts)\rangle)dt \end{align*} respectively for $s\in G$, $g\in C_c (G,\CMcal A)$ and $l,m\in C_c (G,E)$. The following lemma shows that any covariant quasi-representation $(\Psi_0,v^0,w^0,F_0, F')$ with respect to $(G,\eta,E)$ provides a quasi-representation $\Psi_0\times v^0$ of $E\times_{\eta} G$ on $F_0$ and $F'$ satisfying \[ (\Psi_0\times v^0)(l)=\int_{G} \Psi_0(l(t))v^0_t dt~\mbox{for all $l\in C_c(G,E)$.} \] Moreover, it says that if $\pi_0$ is associated to $\Psi_0$, then the integrated form of the covariant representation $(\pi_0,v^0,F_0)$ with respect to $(G,\alpha^{\eta},\CMcal A)$ is associated to $\Psi_0\times v^0$.
\begin{lemma}\label{lem1.3} Let $(G,\eta,E)$ be a dynamical system on a full Hilbert $\CMcal A$-module $E$, where $\CMcal A$ is a unital $C^*$-algebra and $G$ is a locally compact group. Let $F_0$ and $ F'$ be Hilbert $\CMcal B$-modules, where $\CMcal B$ is a von Neumann algebra acting on a Hilbert space $\CMcal H$. If $(\Psi_0,v^0,w^0,F_0, F')$ is a covariant quasi-representation with respect to $(G,\eta,E)$, then $\Psi_0\times v^0$ is a quasi-representation of $E\times_{\eta} G$ on $F_0$ and $ F'$. \end{lemma} \begin{proof} For $l\in C_c(G,E)$ and $g\in C_c(G,\CMcal A)$, we get \begin{align*} (\Psi_0\times v^0)(lg)&=\int_{G}\int_{G}\Psi_0(l(t)\alpha^{\eta}_{t} (g(t^{-1}s))v^0_{s}ds dt \\&=\int_{G}\int_{G}\Psi_0(l(t))\pi_0(\alpha^{\eta}_{t} (g(t^{-1}s))v^0_{s}ds dt\\ &=\int_{G}\int_{G}\Psi_0(l(t))v^0_{t}\pi_0 (g(t^{-1}s)v^{0*}_{t}v^0_{s}ds dt \\ &=(\Psi_0\times v^0)(l)(\pi_0\times v^0)(g). \end{align*} For $l,m\in C_c(G,E)$ and $f_0,f'_0\in F_0$ we have \begin{align*} \langle(\pi_0\times v^0)(\langle l,m\rangle)f_0,f'_0\rangle &=\left< \int_{G}\pi_0(\langle l,m\rangle(s))v^{0}_{s}f_0 ds,f'_0 \right> \\ &= \left< \int_{G}\int_{G}v^{0*}_t\pi_0 (\langle l(t),m(ts)\rangle)v^{0}_{ts} f_0 dt ds, f'_0 \right> \\&= \int_{G}\int_{G} \langle \Psi_0(m(ts))v^0_{ts}f_0,\Psi_0(l(t))v^0_t f'_0\rangle dt ds \\ &= \left< \int_{G}\Psi_0(m(s))v^0_{s}f_0 ds,\int_{G}\Psi_0(l(t))v^0_t f'_0dt\right> \end{align*} \begin{align*} &= \langle(\Psi_0\times v^0)(m)f_0,(\Psi_0\times v^0)(l)f'_0\rangle.\qedhere \end{align*} \end{proof}
\begin{definition}(cf. \cite{Jo11})
Let $G$ be a locally compact group with the modular function $\bigtriangleup$. Let $u:G\to \CMcal U \CMcal B$ and $u':G\to \CMcal U \CMcal C$ be unitary representations of $G$ on unital
(pre-)$C^*$-algebras $\CMcal B$ and $\CMcal C$, respectively. Let $ F$ be a (pre-)$C^*$-correspondence from $\CMcal C$ to $\CMcal B$ and let $(G,\eta,E)$ be a dynamical system on a Hilbert $\CMcal A$-module $E$, where $\CMcal A$ is a unital $C^*$-algebra. A $\tau$-map, $T:E\times_{\eta} G\to F$, is called {\rm $(u',u)$-covariant} if \begin{enumerate}
\item [(a)] $T(\eta_t \circ m^{l}_t)=u'_t
T(m)$ where
$m^{l}_t(s)=m(t^{-1} s)$ for all $s,t\in G$, $m\in C_c(G,E)$;
\item [(b)] $T(m^{r}_t)=T(m)u_t$ where
$m^{r}_t (s)=\bigtriangleup(t)^{-1}m(s t^{-1})$ for all $s,t\in G$, $m\in C_c(G,E)$. \end{enumerate}
\end{definition}
\begin{proposition}\label{prop3.3} Let $\CMcal B$ be a von Neumann algebra acting on a Hilbert space $\CMcal H$, $\CMcal C$ be a unital $C^*$-algebra, and let $ F$ be a von Neumann $\CMcal C$-$\CMcal B$ module. Let $(G,\eta,E)$ be a dynamical system on a full Hilbert $\CMcal A$-module $E$, where $\CMcal A$ is a unital $C^*$-algebra and $G$ is a locally compact group. Let $u:G\to \CMcal U \CMcal B$, $u':G\to \CMcal U \CMcal C$ be unitary representations and let $\tau:\CMcal A\to \CMcal B$ be a completely positive map. If $T:E\to F$ is a $\tau$-map which is $(u',u)$-covariant
with respect to $(G,\eta,E)$, then there exist a completely positive map $\widetilde{\tau}:\CMcal A\times_{\alpha^{\eta}}G\to \CMcal B$ and a $(u',u)$-covariant map $\widetilde{T}:E\times_{\eta} G\to F$ which is a $\widetilde{\tau}$-map. Indeed, $\widetilde{T}$ satisfies $$\widetilde{T}(l)=\int_G T(l(s))u_s ds~\mbox{for all}~l\in C_c (G,E).$$ \end{proposition} \begin{proof} By Theorem \ref{prop1.6} there exists the Stinespring type construction $(\Psi,\pi,v,w,V,$ $S,F,F')$, associated to $T$, based on the construction $(\Psi_0,\pi_0,v^0,T,F_0, F'')$. Define a map $\widetilde{T}:E\times_{\eta} G\to
F$ by \[ \widetilde{T}(l):=S^{*}(\Psi_0\times v^0)(l)V,~\mbox{for all}~l\in C_c(G,E). \] Indeed, for all $l\in C_c (G,E)$ we obtain \begin{align*}\widetilde{T}(l)&=S^{*}(\Psi_0\times v^0)(l)V=S^{*}\int_{G}\Psi_0(l(s))v^0_s dsV=\int_{G}S^{*}\Psi_0(l(s))V u_s ds \\ &=\int_G T(l(s))u_s ds. \end{align*} It is clear that $(\pi_0\times v^0,V,F_0)$ is the Stinespring triple (cf. Theorem \ref{prop1.3}) associated to the completely positive map $\widetilde{\tau}:\CMcal A\times_{\alpha^{\eta}}G\to \CMcal B$ defined by \[\widetilde{\tau}(f):=\int_{G} \tau(f(t))v^0_{t}dt~\mbox{for all}~ f\in C_c (G,\CMcal A). \] We have \begin{eqnarray*} \langle \widetilde{T}(l),\widetilde{T}(m)\rangle b &=&\langle S^{*}(\Psi_0\times v^0)(m)V,S^{*}(\Psi_0\times v^0)(l)V\rangle b =\widetilde{\tau}(\langle l,m\rangle)b \end{eqnarray*} for all $l,m\in E\times_{\eta} G,~b\in\CMcal B.$ Hence $\widetilde{T}$ is a $\widetilde{\tau}$-map. Further, \begin{align*} \widetilde{T}(\eta_t \circ m^{l}_t)&=S^*\int_G \Psi_0(\eta_t(m(t^{-1} s)))v^0_s ds V=S^*\int_G w^0_t \Psi_0(m(t^{-1} s))v^0_{t^{-1} s} ds V \\ &= u'_t
\widetilde{T}(m);\\
\widetilde{T}(m^{r}_t)&=S^*\int_G \bigtriangleup (t)^{-1}\Psi_0(m(s t^{-1}))v^0_s ds V= S^*\int_G \Psi_0(m(g))v^0_g v^0_t dg V\\ &=\widetilde{T}(m)u_t~\mbox{where $t\in G$, $m\in C_c(G,E)$.}~ \end{align*}
\end{proof} Proposition \ref{prop3.3} gives us a map $T\mapsto \widetilde{T}$ where $T:E\to F$ is a $\tau$-map which is $(u',u)$-covariant
with respect to $(G,\eta,E)$ and $\widetilde{T}:E\times_{\eta} G\to F$ is a $(u',u)$-covariant $\widetilde{\tau}$-map such that $\tau$ and $\widetilde{\tau}$ are completely positive. This map is actually a one-to-one correspondence. To prove this result we need the following terminology:
We identify $M(\CMcal A)$ with $\mathcal B^a (\CMcal A)$ (cf. Theorem 2.2 of \cite{La95}), here $\CMcal A$ is considered as a Hilbert $\CMcal A$-module in the natural way.
The {\it strict topology} on $\mathcal B^a ({E})$ is the topology given by the seminorms $a\mapsto \|ax\| $, $a\mapsto \|a^*y\| $ for each $x,y\in E$. For each $C^*$-dynamical system $(G,\alpha,\CMcal A)$ we get a non-degenerate faithful homomorphism $i_{\CMcal A}:\CMcal A\to M(\CMcal A\times_{\alpha} G)$ and an injective strictly continuous homomorphism $i_G : G\to \CMcal UM(\CMcal A \times_{\alpha} G)$ (cf. Proposition 2.34 of \cite{Wi07}) defined by \[
i_{\CMcal A}(a)(f)(s):=af(s)~\mbox{for}~a\in\CMcal A,~s\in G,~f\in C_c (G,\CMcal A); \] \[
i_{G} (r) f(s):=\alpha_r (f(r^{-1} s))~\mbox{for}~r,s\in G,~f\in C_c (G,\CMcal A). \] Let $E$ be a Hilbert $C^*$-module over a $C^*$-algebra $\CMcal A$.
Define the {\it multiplier module} $M(E):=\mathcal B^a (\CMcal A,E)$. $M(E)$ is a Hilbert $C^*$-module over $M(\CMcal A)$ (cf. Proposition 1.2 of \cite{RT03}). For a dynamical system $(G,\eta,E)$ on $E$ we get a non-degenerate morphism of modules $i_{E}$ from $E$ to $M(E\times_{\eta} G)$ (cf. Theorem 3.5 of \cite{Jo12b}) as follows: For each $x\in E$ define $i_{E}(x):C_c(G,\CMcal A)\to C_c(G,E)$ by $$ i_{E}(x)(f)(s):=xf(s)~\mbox{for all}~f\in C_c(G,\CMcal A),~s\in G.$$ Note that $i_{E}$ is an $i_{\CMcal A}$-map.
\begin{theorem}
Let $\CMcal A$, $\CMcal C$ be unital $C^*$-algebras, and let $\CMcal B$ be a von Neumann algebra acting on a Hilbert space $\CMcal H$. Let $u:G\to \CMcal U \CMcal B$, $u':G\to \CMcal U \CMcal C$ be unitary representations of a locally compact group $G$. If $(G,\eta,E)$ is a dynamical system on a full Hilbert $\CMcal A$-module $E$, and if $ F$ is a von Neumann $\CMcal C$-$\CMcal B$ module, then there exists a bijective correspondence $\mathfrak{I}$ from the set of all $\tau$-maps, $T:E\to F$, which are $(u',u)$-covariant with respect to $(G,\eta,E)$ onto the set of all maps $\widetilde{T}:E\times_{\eta} G\to F$ which are $(u',u)$-covariant $\widetilde{\tau}$-maps such that $\tau:\CMcal A\to \CMcal B$ and $\widetilde{\tau}:\CMcal A\times_{\alpha^{\eta}}G\to \CMcal B$ are completely positive maps. \end{theorem} \begin{proof} Proposition \ref{prop3.3} ensures that the map $\mathfrak{I}$ exists and is well-defined. Let $T:E\times_\eta G\to F$ be a $(u',u)$-covariant $\tau$-map, where $\tau:\CMcal A\times_{\alpha^{\eta}}G\to \CMcal B$ is a completely positive map. Suppose $(\Psi_0,\pi_0,V,F_0, F'')$ and $(\Psi,\pi,V,S,F, F')$ are the Stinespring type constructions associated to $T$ as in the proof of Theorem \ref{prop1.3}. Let $\{e_i\}_{i\in \CMcal I}$ be an approximate identity for $\CMcal A\times_{\alpha^{\eta}} G$. Then there exists a representation $\overline{\pi_0}:M(\CMcal A\times_{\alpha^{\eta}} G)\to \mathcal B^a (F_0)$ (cf. Proposition 2.39 of \cite{Wi07}) defined by \[
\overline{\pi_0}(a)x:=\displaystyle\lim_i \pi_0(a e_i)x~\mbox{for all $a\in M(\CMcal A\times_{\alpha^{\eta}} G)$ and $x\in F_0$.}~ \] A mapping $\overline{\Psi_0}:M(E \times_{\eta} G)\to \mathcal B^r(F_0, F'')$ defined by \[ \overline{\Psi_0}(h)x:=\lim_i \Psi_0(h e_i)x~\mbox{for all $h\in M(E \times_{\eta} G)$ and $x\in F_0$,} \] is a quasi representation and $\overline{\pi_0}$ is associated to $\overline{\Psi_0}$. If $\widetilde{\pi_0}:=\overline{\pi_0}\circ i_{\CMcal A}$, then we further get a quasi-representation $\widetilde{\Psi_0}:E \to \mathcal B^r(F_0, F'')$ defined as $\widetilde{\Psi_0}:=\overline{\Psi_0}\circ i_{E}$ such that $\widetilde{\pi_0}$ is associated to $\widetilde{\Psi_0}$. Define maps $T_0:E\to F$ and $\tau_0:\CMcal A\to \CMcal B$ by \[
T_0(x)b:=S^*\widetilde{\Psi_0}(x)Vb~\mbox{for}~b\in \CMcal B,~x\in E~\mbox{and}
\tau_0(a):=V^*\widetilde{\pi_0}(a)V~\mbox{for all}~a\in\CMcal A. \] It follows that $\tau_0$ is a completely positive map and $T_0$ is a $\tau_0$-map.
Let $v^0:G\to \CMcal U\mathcal B^a (F_0)$ be a unitary representation defined by $v^0:=\overline{\pi_0}\circ i_G$ where $$i_G (t)(f)(s):=\alpha_t (f(t^{-1} s))~\mbox{for all}~t,s\in G,~f\in C_c (G,\CMcal A).$$ Observe that $\widetilde{\pi_0}:\CMcal A\to \mathcal B^a (F_0)$ is $v^0$-covariant and $\widetilde{\pi_0} \times v^0=\pi_0$ (cf. Proposition 2.39, \cite{Wi07}). We extend $v^0$ to a unitary representation $v:G\to \CMcal U\mathcal B^a (F)$ as in the proof of Theorem \ref{prop1.6}. It is easy to verify that $$\alpha^\eta_t\circ \langle m,m'\rangle^l_t=\langle m^r_{t^{-1}},m'\rangle~\mbox{for all}~m,m'\in C_c (G,E).$$ Using the fact that $T$ is $(u',u)$-covariant we get \[ \tau(\alpha^\eta_t\circ \langle m,m'\rangle^l_t)=\tau(\langle m^r_{t^{-1}},m'\rangle)=\langle T(m) u_{t^{-1}} ,T(m')\rangle=u_t \tau (\langle m,m'\rangle), \] $\mbox{for all $m,m'\in C_c (G,E)$.}$ Therefore we have
\langle v_t(\pi_0 (f)V b), Vb'\rangle &=& \langle v^0_t (\widetilde{\pi_0} \times v^0)(f)V b, Vb'\rangle
=\left< \int_G \widetilde{\pi_0} (\alpha^\eta_t (f(s))) v^0_{ts} V b ds,Vb'\right>
\\ &=&\langle (\widetilde{\pi_0}\times v^0) (\alpha^\eta_t \circ f^l_t)V b, Vb'\rangle
=\langle\tau(\alpha^\eta_t \circ f^l_t)b, b'\rangle\\ &=& \langle(\pi_0(f)V b),Vu_{t^{-1}}b'\rangle \end{eqnarray*} for all $t\in G$, $b,b'\in \CMcal B$ and $f\in C_c(G, \CMcal A)$. This implies that $v_t V=Vu_t$ for each $t\in G$. For each $t\in G$ define $w^0_t :[\widetilde{\Psi_0}(E)V\CMcal B]\to [\widetilde{\Psi_0}(E)V\CMcal B]$ by \[
w^0_t (\widetilde{\Psi_0}(x)Vb):=\widetilde{\Psi_0}(\eta_t(x))Vu_t b~\mbox{for all $x\in E$, $b\in \CMcal B$.}~ \] Let $t\in G$, $x,y\in E$ and $b,b'\in \CMcal B$. Then \begin{align*}
&\langle \widetilde{\Psi_0}(\eta_t (x)) Vu_t b, \widetilde{\Psi_0}(\eta_t (y)) Vu_t b'\rangle
\\=&\langle \widetilde{\pi_0}(\langle \eta_t (y),\eta_t(x)\rangle)Vu_t b, Vu_t b'\rangle
=\langle \widetilde{\pi_0}(\alpha^{\eta}_t(\langle y,x\rangle))Vu_t b, Vu_t b'\rangle \\ =&\langle v^0_t\widetilde{\pi_0}(\langle y,x\rangle)v^0_{t^{-1}}Vu_t b, Vu_t b'\rangle=\langle \widetilde{\pi_0}(\langle y,x\rangle)V b, V b'\rangle \\ =& \langle \widetilde{\Psi_0}(x) V b, \widetilde{\Psi_0} (y) V b'\rangle. \end{align*} Indeed, for fixed $t\in G$, the continuity of the maps $t\mapsto \eta_t (x)$ and $t\mapsto u_t b$ for $b\in \CMcal B,~x\in E$ provides the fact that the map $t\mapsto w^0_t(z)$ is continuous for each $z\in \widetilde{\Psi_0}(E)V\CMcal B$. Therefore $w^0$ is a unitary representation of $G$ on $[\widetilde{\Psi_0}(E)V\CMcal B]$ and hence it naturally extends to a unitary representation of $G$ on the strong operator topology closure of $[\widetilde{\Psi_0}(E)V\CMcal B]$ in $\mathcal B(\CMcal H,F\bigodot \CMcal H)$, which we denote by $w$.
Note that $E\otimes C_c (G)$ is dense in $E\times_{\eta} G$ (cf. Theorem 3.5 of \cite{Jo12b}). For $x\in E$ and $f\in C_c(G)$ we have \begin{eqnarray*} (\widetilde{\Psi_0}\times v^0)(x\otimes f)&=&\int_G \widetilde{\Psi_0} (x f(t))v^0_t dt =\int_G \overline{\Psi_0}(i_{E}(x f(t))) \overline{\pi_0}(i_G (t))dt\\ &=& \overline{\Psi_0}(i_{E}(x)\int_G f(t) i_G (t)dt )=\overline{\Psi_0}(i_{E}(y)i_{\CMcal A}(\langle y,y \rangle)\int_G f(t) i_G (t)dt)\\ &=& \overline{\Psi_0} (i_{E}(y)(\langle y,y\rangle\otimes f))=\overline{\Psi_0}(y\langle y,y\rangle\otimes f)={\Psi_0}(x\otimes f) \end{eqnarray*} where $x=y\langle y,y\rangle~\mbox{for some}~y\in E~(\mbox{cf. Proposition 2.31 \cite{RW98}})$. Also the 3rd last equality follows from Corollary 2.36 of \cite{Wi07}. This proves $\widetilde{\Psi_0}\times v^0=\Psi_0$ on $E\times_{\eta} G$. Also for all $m\in C_c(G,E)$ and $b\in \CMcal B$ we get \begin{eqnarray*}
S u'_t (T(m)b) &=& S T(\eta_t \circ m^l_t)b=SS^*\Psi_0 (\eta_t\circ m^l_t)Vb=\Psi_0 (\eta_t\circ m^l_t)Vb
\\ &=&\int_G \widetilde{\Psi_0}(\eta_t (m(t^{-1}s))) v^0_s V b ds = w_t\int_G \widetilde{\Psi_0}(m(t^{-1}s)) v^0_{t^{-1}s}V b ds \\ &=& w_t \Psi_0 (m) Vb=w_t ST(m)b. \end{eqnarray*} As $T$ is $(u',u)$-covariant, it satisfies $T(\eta_t \circ m^{l}_t)=u'_t
T(m)$, where
$m^{l}_t(s)=m(t^{-1} s)$ for all $s,t\in G$, $m\in C_c(G,E)$. Thus the strong operator topology closure of $[T(E\times_{\eta} G)\CMcal B]$ in $\mathcal B(\CMcal H, F \bigodot \CMcal H)$, say $F_T$, is invariant under $u'$. This together with the fact that $S$ is an orthogonal projection onto $F_T$ provides $Su'_tz=w_tSz$ for all $z\in F^{\perp}_T$. So we obtain the equality $S u'_t y=w_t S y~\mbox{for all}~y\in F.$ Hence \begin{eqnarray*}
T_0(\eta_t(x))b=S^*\widetilde{\Psi_0}(\eta_t(x))V b=S^* w_t \widetilde{\Psi_0}(x)V u_{t^{-1}}b=u'_t T_0(x)u^*_t b \end{eqnarray*} for all $t\in G$, $x\in E$ and $b\in \CMcal B$. Moreover, \begin{eqnarray*} \widetilde{T_0}(m)b&=&S^*\int_G \widetilde{\Psi_0}(m(t))Vu_t bdt=S^*\Psi_0(m)Vb=T(m)b \end{eqnarray*} for all $m\in C_c (G,E)$, $b\in \CMcal B$. This gives $\widetilde{T_0}=T$ and proves that the map $\mathfrak{I}$ is onto.
Let $\tau_1:\CMcal A\to\CMcal B$ be a completely positive map and let $T_1:E\to F$ be a $(u',u)$-covariant $\tau_1$-map satisfying $\widetilde{T}_1=T$. If $(\Psi_1,\pi_1,V_1,S_1,F_1, F'_1)$ is the $(w_1,v_1)$-covariant Stinespring type construction associated to $T_1$ coming from Theorem \ref{prop1.6}, then we show that $(\Psi_1\times v_1, V_1, S_1,F_1, F'_1)$ is unitarily equivalent to the Stinespring type construction associated to $T$. Indeed, from Proposition \ref{prop3.3}, there exists a decomposition $$\widetilde{T_1}(m)=S^*_1 (\Psi_1\times v_1)(m)V_1~\mbox{for all}~m\in C_c(G,E).$$ This implies that for all $m,m'\in C_c(G,E)$ we get \begin{align*}
\tau(\langle m,m'\rangle)&=\langle T(m),T(m')\rangle=\langle \widetilde{T_1}(m),\widetilde{T_1}(m')\rangle \\ & =\langle S^*_1 (\Psi\times v_1)(m)V_1,S^*_1 (\Psi\times v_1)(m')V_1\rangle \\ & =\langle (\pi_1\times v_1)(\langle m,m'\rangle)V_1,V_1\rangle.
\end{align*}
Since $E$ is full, $E\times_{\eta} G$ is full (cf. the proof of Proposition 3.5, \cite{EKQR00}), and hence $\tau(f)=\langle (\pi_1\times v_1)(f)V_1,V_1\rangle$ for all $f\in C_c (G,\CMcal A)$. Using this fact we deduce that
\begin{align*}
\langle \pi(f)Vb,\pi(f')Vb'\rangle &=\langle \pi(f^{\prime*}f)Vb,Vb'\rangle=b^*\tau(f^{\prime*}f) b'\\ &=\langle \pi_1\times v_1(f)V_1b,
\pi_1\times v_1(f')V_1b'\rangle
\end{align*} for all $f,f'\in C_c(G,\CMcal A)$ and $b,b'\in \CMcal B$. Thus we get a unitary $U_1:F\to F_1$ defined by \[ U_1(\pi(f)V b):=\pi_1\times v_1 (f)V_1b ~\mbox{for}~f \in C_c(G,\CMcal A),~b\in\CMcal B \] and which satisfies $V_1=U_1 V$, $\pi_1\times v_1 (f)=U_1\pi(f)U^*_1$ for all $f \in C_c(G,\CMcal A)$. Another computation
\begin{align*}
\| \Psi(m)Vb\|^2 &=\|\langle\Psi(m)Vb,\Psi(m)Vb\rangle\|=\|\langle\pi(\langle m,m\rangle)Vb,Vb\rangle\|=\|b^*\tau(\langle m,m\rangle) b\|
\\ &= \|b^*\langle \pi_1\times v_1(\langle m,m\rangle)V_1,V_1\rangle b\|=\|\langle\Psi_1\times v_1(m)V_1b,\Psi_1\times v_1(m)V_1b\rangle\|
\\ &=\| \Psi_1\times v_1(m)V_1b\|^2 \end{align*} for all $m\in C_c (G,E)$, $b\in \CMcal B$ provides a unitary $U_2: F'\to F'_1$ defined as \[
U_2 (\Psi(m)Vb):=\Psi_1\times v_1 (m)V_1 b~\mbox{for}~m \in C_c(G,E),~b\in\CMcal B. \] Further, it satisfies conditions $S_1=U_2S$ and $U_2\Psi(m)=\Psi_1\times v_1(m)U_1$ for all $m\in C_c(G,E)$. This implies $U_2\overline{\widetilde{\Psi} \times v}(z')=\overline{\Psi_1 \times v_1}(z')U_1$ for all $z' \in M(E\times_{\eta} G)$ and so $U_2\widetilde{\Psi}(x)=\Psi_1\times v_1 (x)U_1$ for all $x\in E$. Using it we have \begin{eqnarray*}
T_0(x)=S^*\widetilde{\Psi}(x)V=S^*_1 U_2\widetilde{\Psi}(x)U^*_1V_1 =S^*_1 U_2 U^*_2 (\Psi_1\times v_1)(x)U_1U^*_1V_1 =T_1(x) \end{eqnarray*} for all $x\in E$ and $b\in\CMcal B$. Hence $\mathfrak{I}$ is injective. \end{proof}
\noindent \textbf{Acknowledgement.} The author would like to express his gratitude to Santanu Dey for several discussions. This work was supported by CSIR, India.
{
}
{\footnotesize Department of Mathematics, Indian Institute of Technology Bombay,}
{\footnotesize Powai, Mumbai-400076,}
{\footnotesize India.}
{\footnotesize e-mail: [email protected]}
\end{document} |
\begin{document}
\title{Complete convergence of message passing algorithms for
some satisfiability problems} \begin{abstract} In this paper we analyze the performance of \textsf{Warning Propagation}, a popular message passing algorithm. We show that for 3CNF formulas drawn from a certain distribution over random satisfiable 3CNF formulas, commonly referred to as the planted-assignment distribution, running \textsf{Warning Propagation} in the standard way (run message passing until convergence, simplify the formula according to the resulting assignment, and satisfy the remaining subformula, if necessary, using a simple ``off the shelf'' heuristic) results in a satisfying assignment when the clause-variable ratio is a sufficiently large constant.
\end{abstract}
\section{Introduction} A CNF formula over the variables $x_{1},x_{2},...,x_{n}$ is a conjunction of clauses $C_{1},C_{2},... ,C_{m}$ where each clause is a disjunction of one or more literals. Each literal is either a variable or its negation. A formula is said to be in $k$-CNF form if every clause contains exactly $k$ literals. A CNF formula is satisfiable if there is a boolean assignment to the variables such that every clause contains at least one literal which evaluates to true. 3SAT, the language of all satisfiable 3CNF formulas, is well known to be NP-complete \cite{Cook71}.
The plethora of worst-case NP-hardness results for many interesting optimization problems motivates the study of heuristics that give ``useful'' answers for a ``typical'' subset of the problem instances. In this paper we seek to evaluate those two measures rigorously and for that we shall use random models and average case analysis.
In this paper we study random satisfiable 3CNF formulas with an arbitrary density. For this we use the {\em planted distribution} \cite{flaxman,AlonKrivSudCliqe,OnTheGreedy,ExpectedPoly3SAT,AlonKahale97,ChenFrieze} denoted throughout by ${\cal{P}}^{{\rm plant}}_{n,p}$. A random 3CNF in this distribution is obtained by first picking an assignment $\varphi$ to the variables, and then including every clause satisfied by $\varphi$ with probability $p=p(n)$, thus guaranteeing that the resulting instance is satisfiable.
We briefly note that there exists another model of random 3CNF formulas which consists of $m$ clauses chosen uniformly at random from the set of all $8\binom{n}{3}$ possible ones ($m$ is a parameter of the distribution, and $m/n$ is referred to as the clause-variable ratio, or the density, of the random instance). This distribution shows a sharp threshold with respect to satisfiability \cite{Friedgut}. Specifically, a random 3CNF with clause-variable ratio below the threshold is satisfiable $whp$ (with high probability, meaning with probability tending to 1 as $n$ goes to infinity) and one with ratio above the threshold is unsatisfiable $whp$. Experimental results predict the threshold to be around 4.2 \cite{ExperimentalUpperBoundOnThresh}.
\\\\ To describe our main result, we formally define the \textsf{Warning Propagation (WP)} algorithm.
\subsection{Warning Propagation}\label{WPSubs} $\textsf{WP}$ is a simple iterative message passing algorithm similar to \textsf{Belief Propagation} \cite{Pearl82} and \textsf{Survey Propagation} \cite{SurveyPropagation}.
Messages in the $\textsf{WP}$ algorithm can be interpreted as ``warnings'', telling a clause the values that variables will have if the clause ``keeps quiet'' and does not announce its wishes, and telling a variable which clauses will not be satisfied if the variable does not commit to satisfying them. We now present the algorithm in a formal way.
Let ${\cal{F}}$ be a CNF formula. For a variable $x$, let $N^+(x)$ be the set of clauses in ${\cal{F}}$ in which $x$ appears positively (namely, as the literal $x$), and $N^{-}(x)$ be the set of clauses in which $x$ appears negatively. For a clause $C$, let $N^+(C)$ be the set of variables that appear positively in $C$, and respectively $N^-(C)$ for negative ones.
There are two types of messages involved in the \textsf{WP} algorithm. Messages sent from a variable $x_i$ to a clause $C_j$ in which it appears, and vice versa. Consider a clause $C_j$ that contains a variable $x_i$. A message from $x_i$ to $C_j$ is denoted by $x_i \rightarrow C_j$, and it has an integer value. A positive value indicates that $x_i$ is tentatively set to true, and a negative value indicates that $x_i$ is tentatively set to false. A message from $C_j$ to $x_i$ is denoted by $C_j \rightarrow x_i$, and it has a Boolean value. A value of 1 indicates that $C_j$ tentatively wants $x_i$ to satisfy it, and a 0 value indicates that $C_j$ is tentatively indifferent to the value of $x_i$. We now present the update rules for these messages. $$ x_i \rightarrow C_j = \left(\sum_{C_k \in N^+(x_i),k\neq j} C_k \rightarrow x_i \right) - \left(\sum_{C_k\in N^-(x_i),k\neq j}C_k \rightarrow x_i\right).$$ If $x_i$ appears only in $C_j$ then we set the message to 0. $$ C_j \rightarrow x_i = \left(\prod_{x_k \in N^+(C_j),k\neq i} I_{<0}(x_k \rightarrow C_j) \right) \cdot \left( \prod_{x_k \in N^-(C_j),k\neq i} I_{>0}(x_k \rightarrow C_j) \right),$$ where $I_{< 0}(b)$ is an indicator function which is `1' iff $b<0$ (respectively $I_{>0}$). If $C_j$ contains only $x_i$ (which cannot be the case in 3CNF formulas) then the message is set to 1.
Lastly, we define the current assignment of a variable $x_i$ to be $$ B_i = \left(\sum_{C_j \in N^+(x_i)} C_j \rightarrow x_i \right)- \left(\sum_{C_j\in N^-(x_i)}C_j \rightarrow x_i\right).$$ If $B_i>0$ then $x$ is assigned TRUE, if $B_i<0$ then $x_i$ is assigned FALSE, otherwise $x_i$ is UNASSIGNED. Assume some order on the clause-variable messages (e.g. the lexicographical order on pairs of the form $(j,i)$ representing the message $C_{j} \rightarrow x_i$). Given a vector $\alpha \in \{0,1\}^{3m}$ in which every entry is the value of the corresponding $C_{j}\rightarrow x_i$ message, a partial assignment $\psi\in \{TRUE,FALSE,UNASSIGNED\}^n$ can be generated according to the corresponding $B_i$ values (as previously explained).
Given a 3CNF formula ${\cal{F}}$ on $n$ variables and $m$ clauses, the \textbf{factor graph} of ${\cal{F}}$, denoted by $FG({\cal{F}})$, is the following graph representation of ${\cal{F}}$. The factor graph is a bipartite graph, $FG({\cal{F}})=(V_1\cup V_2,E)$ where $V_{1}=\{x_{1},x_2,...,x_n\}$ (the set of variables) and $V_2=\{C_1,C_2,...,C_m\}$ (the set of clauses). $(x_i,C_j)\in E$ iff
$x_i$ appears in $C_j$. For a 3CNF ${\cal{F}}$ with $m$ clauses it holds that $\#E=3m$, because every clause contains exactly 3 different variables. (Here and elsewhere, $\#A$ denotes the cardinality of a set $A$. The notation $|a|$ will denote the absolute value of a real number $a$.)
It would be convenient to think of the messages in terms of the corresponding factor graph. Every undirected edge $(x_i,C_j)$ of the factor graph is replaced with 2 anti-parallel directed edges, $(x_i\rightarrow C_j)$ associated with the message $x_i\rightarrow C_j$ and respectively the edge $(C_j\rightarrow x_i)$.
Let us now formally describe the algorithm. \\\\ $\verb"Warning Propagation(3CNF formula"$ ${\cal{F}}):$ \\ $\verb"1. construct the corresponding factor graph"$ $FG({\cal{F}}).$\\ $\verb"2. randomly initialize the clause-variable messages to 0 or 1."$\\ $\verb"3. repeat until no clause-variable message changed from the"$\\ $\verb" previous iteration:"$\\ $\verb" 3.a randomly order the edges of" $ $FG({\cal{F}}).$\\ $\verb" 3.b update all clause-variable messages"$ $C_j\rightarrow x_i$ $\verb"according"$\\ $\verb" to the random edge order."$\\ $\verb"4. compute a partial assignment"$ $\psi$ $\verb"according to the"$ $B_i$ $\verb"messages".$\\ $\verb"5. return "$ $\psi$.\\
The variable-clause message updates are implicit in line 3.b: when evaluating the clause-variable message along the edge $C \rightarrow x$, $C=(x\vee y\vee z)$, the variable-clause messages concerning this calculation ($z,y \rightarrow C$) are evaluated on-the-fly using the last updated values $C_i \rightarrow y$, $C_j \rightarrow z$ (allowing feedback from the same iteration). We allow the algorithm not to terminate (the clause-variable messages may keep changing every iteration). If the algorithm does return an assignment $\psi$ then we say that it converged. In practice it is common to limit in advance the number of iterations, and if the algorithm does not converge by then, return a failure.
\subsection{Main Results}\label{ResutlsSubs} Our contribution is analyzing the performance of \textsf{Warning Propagation} (\textsf{WP} for brevity), a popular message passing algorithm, when applied to satisfiable formulas drawn from a certain random distribution over satisfiable 3CNF formulas, commonly called the planted distribution. We show that the standard way of running message passing algorithms -- run message passing until convergence, simplify the formula according to the resulting assignment, and satisfy the remaining subformula, if possible, using a simple ``off the shelf" heuristic -- works for planted random satisfiable formulas with a sufficiently large constant clause-variable ratio. As such, our result is the first to rigorously prove the effectiveness of a message passing algorithm for the solution of a non-trivial random SAT distribution.
We note that a recent work \cite{AminAchi} demonstrated the usefulness of analytical tools developed for the planted distribution for ``hard" uniform instances.
To formally state our result we require a few additional definitions.
Given a 3CNF ${\cal{F}}$, \textbf{simplify} ${\cal{F}}$ according to $\psi$, when $\psi$ is a partial assignment, means: in every clause substitute every assigned variable with the value given to it by
$\psi$. If a clause contains a literal which evaluates to true, remove the clause. From the remaining clauses, remove all literals which evaluate to false. The resulting instance is not necessarily in 3CNF form, as clauses may have any number of literals between~0 and~3. Denote by ${\cal{F}}|_{\psi}$ the 3CNF ${\cal{F}}$ simplified according to $\psi$. Note that ${\cal{F}}|_{\psi}$ may contain empty clauses, in which case it is not satisfiable. For a set of variables $A\subseteq V$, denote by ${\cal{F}}[A]$ the set of clauses in which all variables belong to $A$.
We call a 3CNF formula \textbf{simple}, if it can be satisfied using simple well-known heuristics (examples include very sparse random 3CNF formulas which are solvable $whp$ using the pure-literal heuristic \cite{BorderFriezeUpfal}, formulas with small weight terminators -- to use the terminology of \cite{BenSassonAlke} -- solvable $whp$ using RWalkSat, etc). This is a somewhat informal notion, but it suffices for our needs.
\begin{thm}\label{ConvergenceThmMed} Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p \geq d/n^2$, $d$ a sufficiently large constant. Then the following holds $whp$ (the probability taken over the choice of ${\cal{F}}$, and the random choices in lines 2 and 4 of the \textsf{WP} algorithm). There exists a satisfying assignment $\varphi^*$ (not necessarily the planted one) such that:
\begin{enumerate}
\item $\textsf{WP}({\cal{F}})$ converges after at most $O(\log n)$
iterations.
\item Let $\psi$ be the partial assignment returned by $\textsf{WP}({\cal{F}})$, let
$V_A$ denote the variables assigned to either TRUE or FALSE in
$\psi$, and $V_U$ the variables left UNASSIGNED. Then for every
variable $x\in V_A$,
$\psi(x)=\varphi^*(x)$. Moreover, $\#{V_A} \geq (1-e^{-\Theta(d)})n$.
\item
${\cal{F}}|_{\psi}$ is a simple formula which can be satisfied in time $O(n)$. \end{enumerate} \end{thm}
\begin{rem}\rm Theorem~\ref{ConvergenceThmMed} relates to the planted 3SAT model, but \cite{UniformSAT} implies that it is also true for the uniform random 3SAT distribution, in which a \emph{satisfiable} formula with $m$ clauses is chosen uniformly at random among all such formulas. \end{rem}
\begin{prop}\label{ConvergencePropDense} Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p\geq c\log n /n^2$, with $c$ a sufficiently large constant, and let $\varphi$ be its planted assignment. Then $whp$ after at most 2 iterations, $\textsf{WP}({\cal{F}})$ converges, and the returned $\psi$ equals $\varphi$. \end{prop}
It is worth noting that formulas in ${\cal{P}}^{{\rm plant}}_{n,p}$, with $n^2p$ some large constant, are \emph{not} known to be simple (in the sense that we alluded to above). For example, it is shown in \cite{BenSassonAlke} that RWalkSat is very unlikely to hit a satisfying assignment in polynomial time when running on a random ${\cal{P}}^{{\rm plant}}_{n,p}$ instance in the setting of Theorem \ref{ConvergenceThmMed}. Nevertheless, planted 3CNF formulas with sufficiently large (constant) density were shown to be solvable $whp$ in $\cite{flaxman}$ using a spectral algorithm. Though in our analysis we use similar techniques to \cite{flaxman} (which relies on \cite{AlonKahale97}), our result is conceptually different in the following sense. In \cite{AlonKahale97,ChenFrieze,flaxman}, the starting point is the planted distribution, and then one designs an algorithm that works well under this distribution. The algorithm may be designed in such a way that makes its analysis easier. In contrast, our starting point is a given algorithm, $\textsf{WP}$, and then we ask for which input distributions it works well. We cannot change the algorithm in ways that would simplify the analysis. Another difference between our work and that of \cite{AlonKahale97,ChenFrieze,flaxman} is that unlike the algorithms analyzed in those other papers, \textsf{WP} is a randomized algorithm, a fact which makes its analysis more difficult. We could have simplified our analysis had we changed \textsf{WP} to be deterministic (for example, by initializing all clause-variable messages to~1 in step~2 of the algorithm), but there are good reasons why \textsf{WP} is randomized. For example, it can be shown that (the randomized version) \textsf{WP} converges with probability~1 on 2CNF formulas that form one cycle of implications, but might not converge if step~4 does not introduce fresh randomness in every iteration of the algorithm.
The planted 3SAT model is also similar to LDPC codes in many ways. Both constructions are based on random factor graphs. In codes, the received corrupted codeword provides noisy information on a single bit or on the parity of a small number of bits of the original codeword. In ${\cal{P}}^{{\rm plant}}_{n,p}$, $\varphi$ being the planted assignment, the clauses containing a variable $x_i$ provide noisy information on the polarity of $\varphi(x_i)$. Comparing our results with the coding setting, the effectiveness of message passing algorithms for amplifying local information in order to decode codes close to channel capacity was established in a number of papers, e.g.~\cite{LMSS98,RSU01}. Our results are similar in flavor, however the combinatorial analysis provided here allows to recover an assignment satisfying \emph{all} clauses, whereas in the random LDPC codes setting, message passing allows to recover only $1-o(1)$ fraction of the codeword correctly. In \cite{LMSS01} it is shown that for the erasure channel, all bits may be recovered correctly using a message passing algorithm, however in this case the LDPC code is designed so that message passing works for it.
It is natural to ask whether our analysis can be extended to show that \textsf{Belief Propagation} (\textsf{BP}) finds a satisfying assignment to ${\cal{P}}^{{\rm plant}}_{n,p}$ in the setting of Theorem \ref{ConvergenceThmMed}. Experimental results predict the answer to be positive. However our analysis of \textsf{WP} does not extend as is to \textsf{BP}. In \textsf{WP}, all warnings received by a variable (or by a clause) have equal weight, but in \textsf{BP} this need not be the case (there is a probability level associated with each warning). In particular, this may lead to the case where a small number of ``very'' wrongly assigned variables ``pollute'' the entire formula, a possibility that our analysis managed to exclude for the \textsf{WP} algorithm.
\subsection{Paper's organization} The remainder of the paper is structured as follows. Section \ref{sec:intuition} provides an overview that may help the reader follow the more technical parts of the proofs. In Section \ref{PropOfRandomInstSection} we discuss some properties that a typical instance in ${\cal{P}}^{{\rm plant}}_{n,p}$ possesses. Using these properties, we prove in Section \ref{ResultsProofSection} Theorem \ref{ConvergenceThmMed} and Proposition \ref{ConvergencePropDense}.
\section{Proof Outline} \label{sec:intuition}
Let us first consider some possible fixed points of the Warning Propagation (WP) algorithm. The {\em trivial} fixed point is the one in which all messages are \textbf{0}. One may verify that this is the unique fixed point in some cases when the underlying 3CNF formula is very easy to satisfy, such as when all variables appear only positively, or when every clause contains at least two variables that do not appear in any other clause. A {\em local maximum} fixed point is one that corresponds to a strict local maximum of the underlying MAX-3SAT instance, namely to an assignment $\tau$ to the variables in which flipping the truth assignment of any single variable causes the number of satisfied clauses to strictly decrease. The reader may verify that if every clause $C$ sends a \textbf{1}\ message to a variable if no other variable satisfies $C$ under $\tau$, and a \textbf{0}\ message otherwise, then this is indeed a fixed point of the WP algorithm. Needless to say, the WP algorithm may have other fixed points, and might not converge to a fixed point at all.
Recall the definition of ${\cal{P}}^{{\rm plant}}_{n,p}$. First a truth assignment $\varphi$ to the variables $V=\{x_{1},x_{2},...,x_{n}\}$ is picked uniformly at random. Next, every clause satisfied by $\varphi$ is included in the formula with probability $p$ (in our case $p\geq d/n^2$, $d$ a sufficiently large constant). There are $(2^3-1)\cdot\binom{n}{3}$ clauses satisfied by $\varphi$, hence the expected size of ${\cal{F}}$ is $p\cdot7\cdot\binom{n}{3} = 7 d n/6+o(n)$ (when $d$ is constant, then this is linear in $n$, and therefore such instances are sometimes referred to as \emph{sparse} 3CNF formulas). To simplify the presentation, we assume w.l.o.g. (due to symmetry) that the planted assignment $\varphi$ is the all-one vector.
To aid intuition, we list some (incorrect) assumptions and analyze the performance of WP on a ${\cal{P}}^{{\rm plant}}_{n,p}$ instance under these assumptions.
\begin{enumerate}
\item In expectation, a variable appears in $4\binom{n}{2}p=2d+o(1)$ clauses positively, and in $3d/2+o(1)$ clauses negatively. Our first assumption is that for every variable, its number of positive and negative appearances is equal to these expectations.
\item We say that a variable {\em supports} a clause with respect to the planted assignment (which was assumed without loss of generality to be the all \textbf{1}\ assignment) if it appears positively in the clause, and the other variables in the clause appear negatively. Hence the variable is the only one to satisfy the clause under the planted assignment. For every variable in expectation there are roughly $d/2$ clauses that it supports. Our second assumption is that for every variable, the number of clauses that it supports is equal to this expectation.
\item Recall that in the initialization of the \textsf{WP} algorithm, every clause-variable message $C\to x$ is 1 w.p. $\frac{1}{2}$, and 0 otherwise. Our third assumption is that with respect to every variable, half the messages that it receives from clauses in which it is positive are initialized to~1, and half the messages that it receives from clauses in which it is negative are initialized to~1.
\item Recall that in step 3b of \textsf{WP}, clause-variable messages are updated in a random order. Our fourth assumption is that in each iteration of step~3, the updates are based on the values of the other messages from the previous iteration, rather than on the last updated values of the messages (that may correspond either to the previous iteration or the current iteration, depending on the order in which clause-variable messages are visited). Put differently, we assume that in step 3b all clause-variable messages are evaluated in \emph{parallel}.
\end{enumerate}
Observe that under the first two assumptions, the planted assignment is a local maximum of the underlying MAX-3SAT instance. We show that under the third and fourth assumption, \textsf{WP} converges to the corresponding local maximum fixed point in two iterations: Based on the initial messages as in our third assumption, the messages that variables send to clauses are all roughly $(2d-3d/2)/2=d/4$. Following the initialization, in the first iteration of step~3 every clause $C$ that $x$ supports will send $x$ the message 1, and all other messages will be 0. Here we used our fourth assumption. (Without our fourth assumption, \textsf{WP} may run into trouble as follows. The random ordering of the edges in step 3 may place for some variable $x$ all messages from clauses in which it appears positively before those messages from clauses in which it appears negatively. During the iteration, some of the messages from the positive clauses may change from 1 to 0. Without our fourth assumption, this may at some point cause $x$ to signal to some clauses a negative rather than positive value.) The set of clause-variable messages as above will become a fixed point and repeat itself in the second iteration of step 3. (For the second iteration, the fourth assumption is no longer needed.) Hence the algorithm will terminate after the second iteration.
Unfortunately, none of the four assumptions that we made are correct. Let us first see to what extent they are violated in the context of Proposition~\ref{ConvergencePropDense}, namely, when $d$ is very large, significantly above $\log n$. Standard concentration results for independent random variables then imply that the first, second and third assumptions simultaneously hold for all variables, up to small error terms that do not affect the analysis. Our fourth assumption is of course never true, simply because we defined \textsf{WP} differently. This complicates the analysis to some extent and makes the outcome depend on the order chosen in the first iteration of step 3a of the algorithm. However, it can be shown that for most such orders, the algorithm indeed converges to the fixed point that corresponds to the planted assignment.
The more difficult part of our work is the case when $d$ is a large constant. In this case, already our first two assumptions are incorrect. Random fluctuations with respect to expected values will $whp$ cause a linear fraction of the variables to appear negatively more often than positively, or not to support any clause (with respect to the planted assignment). In particular, the planted assignment would no longer be a local maximum with respect to the underlying MAX-3SAT instance. Nevertheless, as is known from previous work~\cite{flaxman}, a large fraction of the variables will behave sufficiently close to expectation so that the planted assignment is a local maximum with respect to these variables. Slightly abusing notation, this set of variables is often called the {\em core} of the 3CNF formula. Our proof plan is to show that $\textsf{WP}$ does converge, and that the partial assignment in step 4 assigns all core variables their correct planted value. Moreover, for non-core variables, we wish to show that the partial assignment does not make any unrecoverable error -- whatever value it assigns to some of them, it is always possible to assign values to those variables that are left unassigned by the partial assignment so that the input formula is satisfied. The reason why we can expect such a proof plan to succeed is that it is known to work if one obtains an initial partial assignment by means other than $\textsf{WP}$, as was already done in~\cite{flaxman,TechReport}.
Let us turn now to our third assumption. It too is violated for a linear fraction of the variables, but is nearly satisfied for most variables. This fact marks one point of departure for our work compared to previous work~\cite{flaxman,TechReport}. Our definition of the core variables will no longer depend only on the input formula, but also on the random choice of initialization messages. This adds some technical complexity to our proofs.
The violation of the fourth assumption is perhaps the technical part in which our work is most interesting. It relates to the analysis of $\textsf{WP}$ on factor graphs that contain cycles, which is often a stumbling point when one analyzes message passing algorithms. Recall that when $d$ is very large (Proposition~\ref{ConvergencePropDense}), making the fourth assumption simplifies the proof of convergence of WP. Hence removing this assumption in that case becomes a nuisance. On the other hand, when $d$ is smaller (as in Theorem~\ref{ConvergenceThmMed}), removing this assumption becomes a necessity. This will become apparent when we analyze convergence of WP on what we call {\em free cycles}. If messages in step 3b of \textsf{WP} are updated based on the value of other messages in the {\em previous} iteration (as in our fourth assumption), then the random choice of order in step 3a of \textsf{WP} does not matter, and one can design examples in which the messages in a free cycle never converge. In contrast, if messages in step 3b of \textsf{WP} are updated based on the latest value of other messages (either from the previous iteration or from the current iteration, whichever one is applicable), free cycles converge with probability 1 (as we shall later show).
To complete the proof plan, we still need to show that simplifying the input formula according to the partial assignment returned by \textsf{WP} results in a formula that is satisfiable, and moreover, that a satisfying assignment for this sub-formula can easily be found. The existential part (the sub-formula being satisfiable) will follow from a careful analysis of the partial assignment returned by \textsf{WP}. The algorithmic part (easily finding an assignment that satisfies the sub-formula) is based on the same principles used in~\cite{AlonKahale97,flaxman}, showing that the sub-formula breaks into small connected components.
\section{Properties of a Random ${\cal{P}}^{{\rm plant}}_{n,p}$
Instance}\label{PropOfRandomInstSection}
In this section we discuss relevant properties of a random ${\cal{P}}^{{\rm plant}}_{n,p}$ instance. This section is rather technical in nature. The proofs are based on probabilistic arguments that are standard in our context. In the rest of the paper, for simplicity of presentation, we assume w.l.o.g. that the planted assignment is the all TRUE assignment.
\subsection{Preliminaries}
The following well-known concentration result (see, for example
\cite[p. 21]{JRL2000}) will be used several times in the proof. We denote by $B(n,p)$ the binomial random variable with parameters $n$ and $p$, and expectation $\mu=np$. \begin{thm}\label{Thm:Chernoff2}(Chernoff's inequality) If $X \sim B(n,p)$ and $t \geq 0$ is some number, then \[ Pr\big(X \geq \mu + t\big) \leq e^{-\mu \cdot f(t/\mu)}, \] \[ Pr\big(X \leq \mu - t\big) \leq e^{-\mu \cdot f(-t/\mu)},\]
where $f(x) = (1+x)\ln (1+x) - x$. \end{thm} \noindent We use the following Azuma-Hoeffding inequality for martingales:
\begin{thm}\label{Thm:RegMartingale} Let $\{X_i\}_{i=0}^{N}$ be a martingale with $|X_k - X_{k-1}| \leq c_k$, then
\[Pr \left[|X_N - X_0| \geq t\right] \leq 2 e^{-t^2 / (2\sum_{k=1}^N c_k^2)}.\] \end{thm} We shall also use the following version of the martingale argument, which can be found in \cite[Page 101]{TheProbMethod}. Assume the probability space is generated by a finite set of mutually independent Yes/No choices, indexed by $i\in I$. Given a random variable $Y$ on this space, let $p_i$ denote the probability that choice $i$ is Yes. Let $c_i$ be such that changing choice $i$ (keeping all else the same) can change $Y$ by at most $c_i \leq c$. We call $p_i (1 - p_i )c^2$ the variance of choice $i$. Now consider a solitaire game in which one finds the value of $Y$ by making queries of an always truthful oracle. The choice of query can depend on previous responses. All possible questioning lines can be naturally represented in a decision tree form. A ``line of questioning'' is a path from the root to a leaf of this tree, a sequence of questions and responses that determine $Y$. The total variance of a line of questioning is the sum of the variances of the queries in it.
\begin{thm}\label{thm_martingale} For every $\varepsilon > 0$ there exists a $\delta > 0$ so that if for every line of questioning, with parameters $p_1,p_2,\ldots,p_n$ and $c_1,c_2,\ldots,c_n$, that determines $Y$, the total variance is at most $\sigma^2$, then
$$Pr[|Y -E[Y ]| > \alpha\sigma] \leq 2e^{-\alpha^2/(2(1+\varepsilon))},$$ for all positive $\alpha$ with $\alpha c < \sigma(1 + \varepsilon)\delta$, where $c$ is such that $\forall i \phantom{i} c_i \leq c$. \end{thm}
\subsection{Stable Variables}\label{StableVarsSubs} \begin{defn} A variable $x$ \textbf{supports} a clause $C$ with respect to a partial assignment $\psi$, if it is the only variable to satisfy $C$ under $\psi$, and the other two variables are assigned by $\psi$. \end{defn}
\begin{prop}\label{SupportSuccRate} Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p = d/n^2$, $d \geq d_0$, $d_0$ a sufficiently large constant. Let $F_{SUPP}$ be a random variable counting the number of variables in ${\cal{F}}$ whose support w.r.t. $\varphi$ is less than $d/3$. Then $whp$ $F_{SUPP}\leq e^{-\Theta(d)}n$. \end{prop} \begin{Proof} Every variable is expected to support $\frac{d}{n^2}\cdot\binom{n}{2}=\frac{d}{2}+O(\frac{1}{n})$ clauses, thus using Chernoff's inequality the probability of a single variable supporting less than $d/3$ clauses is at most $e^{-\Theta(d)}$. Using linearity of expectation, $\mathbb{E}[F_{SUPP}]\leq e^{-\Theta(d)}n$. To prove concentration around the expected value we use Chernoff's bound once more as the support of one variable is independent of the others (since it concerns different clauses which are included independently of each other). The claim then follows. \end{Proof}
Following the definitions in Section \ref{WPSubs}, given a 3CNF ${\cal{F}}$ with a satisfying assignment $\varphi$, and a variable $x$, we let $N^{++}(x)$ be the set of clauses in ${\cal{F}}$ in which $x$ appears positively but doesn't support w.r.t. $\varphi$. Let $N^{s}(x)$ be the set of clauses in ${\cal{F}}$ which $x$ supports w.r.t. $\varphi$. Let $\pi=\pi({\cal{F}})$ be some ordering of the clause-variable message edges in the factor graph of ${\cal{F}}$. For an index $i$ and a literal $\ell_x$ (by $\ell_x$ we denote a literal over the variable $x$) let $\pi^{-i}(\ell_x)$ be the set of clause-variable edges $(C \rightarrow x)$ that appear before index $i$ in the order $\pi$ and in which $x$ appears in $C$ as $\ell_x$. For a set of clause-variable edges $\cal{E}$ and a set of clauses $\cal{C}$ we denote by ${\cal{E}}\cap{\cal{C}}$ the subset of edges containing a clause from $\cal{C}$ as one endpoint.
\begin{defn}\label{StableDefn} Let $\pi$ be an ordering of the clause-variable messages of a 3CNF formula ${\cal{F}}$. Let $\varphi$ be a satisfying assignment of ${\cal{F}}$. A variable $x$ is \textbf{$d$-stable} in ${\cal{F}}$ w.r.t. $\pi$ and $\varphi$ if for every location $i$ in $\pi$ that contains a message $C\rightarrow x$, $C=(\ell_x\vee \ell_y \vee \ell_z)$, the following holds:
\begin{enumerate} \item $|\#\pi^{-i}(y)\cap N^{++}(y)-\#\pi^{-i}(\bar{y})\cap N^{-}(y)|\leq d/30$.
\item $|\#N^{++}(y)-\#N^{-}(y)|\leq d/30$. \item $\#N^{s}(y)\geq d/3$ \end{enumerate} and the same holds for $z$. \end{defn} When $d$ is clear from context, which will usually be the case, we will suppress the $d$ in the ``$d$-stable". \begin{prop}\label{StableSuccRate} Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p = d/n^2$, $d \geq d_0$, $d_0$ a sufficiently large constant. Let $\pi$ be a random ordering of the clause-variable messages, and $F_{UNSTAB}$ be a random variable counting the number of variables in ${\cal{F}}$ which are \emph{not} stable. Then $whp$ $F_{UNSTAB}\leq e^{-\Theta(d)}n$. \end{prop} \begin{Proof} We start by bounding $\mathbb{E}[F_{UNSTAB}]$. Consider a clause-variable message edge $C \rightarrow x$ in location $i$ in $\pi$, $C=(\ell_x\vee \ell_y \vee \ell_z)$. Now consider location $j \leq i$. The probability of an edge $C' \rightarrow \bar{y}$ in location $j$ is $\left(3\binom{n}{2}\right)/\left(7\binom{n}{3}\right)=\frac{9}{7n}+O(\frac{1}{n^2})$ which is exactly the probability of an edge $C'' \rightarrow y$, $C'' \in N^{++}(y)$. This implies
$$E[|\#\pi^{-i}(y)\cap N^{++}(y)-\#\pi^{-i}(\bar{y})\cap N^{-}(y)|]=0.$$ If however
$$|\#\pi^{-i}(y)\cap N^{++}(y)-\#\pi^{-i}(\bar{y})\cap N^{-}(y)| > d/30$$ then at least one of the quantities deviates from its expectation by $d/60$.
Look at $\#\pi^{-i}(y)\cap N^{++}(y)$ -- this is the number of successes in draws without replacement. It is known that this quantity is more concentrated than the corresponding quantity if the draws were made with replacement~\cite{Hoeffding:63}. In particular, since the expectation of $\#\pi^{-i}(y)\cap N^{++}(y)$ is $O(d)$ it follows from Chernoff's bound that the probability that it deviates from its expectation by more than $d/60$ is $e^{-\Theta(d)}$. A similar statement holds for $\#\pi^{-i}(\bar{y})\cap N^{-}(y)$. Properties $(b)$ and $(c)$ are bounded similarly using concentration results.
\noindent The calculations above hold in particular for the first $5d$ appearances of messages involving $x$. As for message $5d+1$, the probability of this message causing $x$ to become unstable is bounded by the event that $x$ appears in more than $5d$ clauses. As $x$ is expected to appear in $3.5d$ clauses, the latter event happens w.p. $e^{-\Theta(d)}$ (again using Chernoff's bound). To sum up, $$Pr[x\text{ is unstable }]\leq 5d\cdot e^{-\Theta(d)}+e^{-\Theta(d)}=e^{-\Theta(d)}.$$ The bound on $E[F_{UNSTAB}]$ follows by linearity of expectation.
We are now left with proving that $F_{UNSTAB}$ is concentrated around its expectation; we do so using a martingale argument. Define two new random variables, $F_1$ counting the number of unstable variables $x$ s.t. there exists a clause $C$, containing $x$, and another variable $y$, s.t. $y$ appears in more than $\log n$ clauses, and $F_2$ to be the unstable variables s.t. in all clauses in which they appear, all the other variables appear in at most $\log n$ clauses. Observe that $F_{UNSTAB}=F_1+F_2$. To bound $F_1$, observe that if $F_1\geq 1$, then in particular this implies that there exists a variable which appears in more than $\log n$ clauses in ${\cal{F}}$. This happens with probability $o(1)$ since every variable is expected to appear only in $O(d)$ clauses (using Chernoff's bound). To bound $F_2$ we use a martingale argument in the setting of \cite{TheProbMethod}, page 101. We use the clause-exposure martingale (the clause-exposure martingale implicitly includes the random ordering $\pi$, since one can think of the following way to generate the random instance -- first randomly shuffle all possible clauses, and then toss the coins). The exposure of a new clause $C$ can change $F_2$ by at most $6\log n$ since every variable in $C$ appears in at most $\log n$ clauses, namely with at most $2\log n$ other variables that might become (un)stable due to the new clause. The martingale's total variance, in the terminology of Theorem \ref{thm_martingale}, is $\sigma^2=\Theta(dn\log ^2 n)$. Theorem \ref{thm_martingale}, with $\alpha=e^{-\Theta(d)}\sqrt{n}/\log n$, and the fact that $E[F_2]\leq E[F_{UNSTAB}]$, implies concentration around the expectation of $F_2$. \end{Proof}
Let $\alpha\in\{0,1\}^{3\#{\cal{F}}}$ be a clause-variable message vector. For a set of clause-variable message edges $\cal{E}$ let $\textbf{1}_\alpha(\cal{E})$ be the set of edges along which the value is 1 according to $\alpha$. For a set of clauses $\cal{C}$, $\textbf{1}_\alpha(\cal{C})$ denotes the set of clause-variable message edges in the factor graph of ${\cal{F}}$ containing a clause from $\cal{C}$ as one endpoint and along which the value is 1 in $\alpha$.
\begin{defn}\label{ViolatedDefn} Let $\pi$ be an ordering of the clause-variable messages of a 3CNF formula ${\cal{F}}$, and $\alpha$ a clause-variable message vector. Let $\varphi$ be a satisfying assignment of ${\cal{F}}$. We say that a variable $x$ is \textbf{$d$-violated} by $\alpha$ in $\pi$ if there exists a message $C \rightarrow x$, $C=(\ell_x\vee \ell_y\vee \ell_z)$, in place $i$ in $\pi$ s.t. one of the following holds: \begin{enumerate}
\item $|\#\textbf{1}_\alpha(\pi^{-i}(y)\cap N^{++}(y))-\#\textbf{1}_\alpha(\pi^{-i}(\bar{y})\cap N^{-}(y))|> d/30$
\item $|\#\textbf{1}_\alpha(N^{++}(y))-\#\textbf{1}_\alpha(N^{-}(y))|> d/30$ \item $\#\textbf{1}_\alpha(N^s(y))< d/7$. \end{enumerate} Or one of the above holds for $z$. \end{defn}
We say that a variable is \emph{$r$-bounded} in ${\cal{F}}$ if it appears in at most $r$ clauses. We say that a variable $x$ has an \emph{$r$-bounded neighborhood} in ${\cal{F}}$ if every clause $C=(\ell_x\vee \ell_y \vee \ell_z)$ in ${\cal{F}}$ that contains $x$ is such that $y$ and $z$ are $r$-bounded ($x$ itself need not be $r$-bounded).
\begin{lem}\label{GeneralViolatedSuccRate} Let F be an arbitrary satisfiable 3CNF formula, let $\psi$ be an arbitrary satisfying assignment for $F$, and let $\pi$ be an arbitrary ordering of clause-variable messages. For a given value $d$, let $X$ be the set of variables that have a $20d$-bounded neighborhood in $F$ and are $d$-stable with respect to $\psi$ and $\pi$.
Let $\alpha$ be a random clause-variable message vector, and $F_{VIO}$ a random variable counting the number of $d$-violated variables in $X$. Then $whp$ $F_{VIO}\leq e^{-\Theta(d)}n$. \end{lem} \begin{Proof} First observe that the probability in the statement is taken only over the coin tosses in the choice of $\alpha$, as all other parameters are fixed. As in the proof of Proposition \ref{StableSuccRate}, we first bound $\mathbb{E}[F_{VIO}]$, and then prove concentration using a martingale argument. Fix $x \in X$ and a clause-variable message $C \rightarrow x$ at location $i$ in $\pi$, $C=(\ell_x\vee \ell_y \vee \ell_z)$. Let $A^+=\pi^{-i}(y)\cap N^{++}(y)$ (the set of messages preceding location $i$ where $y$ appears positively but doesn't support the clause) and $A^-=\pi^{-i}(y)\cap N^{-}(y)$ (the set of messages preceding location $i$ where $y$ appears negatively). Let us assume w.l.o.g that $\#A^+ \geq \#A^-$. Stability implies $$\#A^+-\#A^-\leq d/30.$$ Since $\alpha$ is a random assignment to the clause-variable messages, $$\mathbb{E}[\#\textbf{1}_\alpha(A^+)]-\mathbb{E}[\#\textbf{1}_\alpha(A^-)]\leq d/60.$$ If however \begin{equation}\label{equa}
|\#\textbf{1}_\alpha(A^+)-\#\textbf{1}_\alpha(A^-)|> d/30, \end{equation} then at least one of $\#\textbf{1}_\alpha(A^+),\#\textbf{1}_\alpha(A^-)$ deviated from its expectation by at least $(d/30-d/60)/2=d/120$. Both quantities are binomially distributed with expectation $O(d)$ ($x$ has a $20d$-bounded neighborhood, and therefore $y$ appears in at most $20d$ clauses), and therefore the probability of the latter happening is $e^{-\Theta(d)}$ (using standard concentration results). Properties $(b)$ and $(c)$ are bounded similarly, and the same argument takes care of $z$. Using the union bound and the linearity of expectation, one obtains that $\mathbb{E}[F_{VIO}] \leq e^{-\Theta(d)}\#X \leq e^{-\Theta(d)}n$. Let us use Theorem \ref{Thm:RegMartingale} to obtain concentration around $\mathbb{E}[F_{VIO}]$. Let $Y$ be the set of variables that share some clause with a variable in $X$. Expose the value of messages $C \to y$ for $y \in Y$. Since all $y \in Y$ are $20d$-bounded, the length of the martingale, $N = O(dn)$. The boundedness condition also gives that the martingale difference is $O(d)$. Taking $t=e^{-\Theta(d)}n$, and plugging all the parameters in Theorem \ref{Thm:RegMartingale}, the result follows.
\end{Proof}
\begin{prop}\label{ViolatedSuccRate} Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p = d/n^2$, $d \geq d_0$, $d_0$ is a sufficiently large constant. Let $\pi$ be a random ordering of the clause-variable messages and $\alpha$ a random clause-variable message vector. Then $whp$ the number of stable variables which are violated in $\alpha$ is at most $e^{-\Theta(d)}n$. \end{prop} \begin{Proof} Let $X'$ be the set of stable variables in ${\cal{F}}$ w.r.t.~$\pi$, and $X \subseteq X'$ be the set of variables with a $20d$-bounded neighborhood in ${\cal{F}}$. Lemma \ref{GeneralViolatedSuccRate} guarantees that the number of violated variables in $X$ is at most $e^{-\Theta(d)}n$. It suffices to show that $\#(X' \setminus X) \leq e^{-\Theta(d)}n$. Let $Z_t$ be the set of variables that appear in $t$ clauses in ${\cal{F}}$. The number of clauses in which a variable appears is binomially distributed with expected value $\mu = 7\binom{n}{2}d/n^2 \leq 7d/2$. Using Theorem \ref{Thm:Chernoff2}, it holds that $$Pr[x \in Z_t] \leq e^{-\mu \cdot f((t-\mu)/\mu)},$$ where $f(x) = (1+x)\ln (1+x) - x$. For $t \geq 20d$, $f((t-\mu)/\mu) \geq t/(2\mu)$, and therefore $$Pr[x \in Z_t \, , t \geq 20d] \leq e^{-t/2}.$$ Using the linearity of expectation, and standard concentration results, it holds that $whp$ for every $t \geq 20d$, $\# Z_t \leq e^{-t/3}n$. The number of variables in $X' \setminus X$ is then $whp$ at most $$\sum_{t \geq 20d} 2t \cdot \#Z_t \leq \sum_{t \geq 20d} 2t \cdot e^{-t/3}n \leq e^{-\Theta(d)}n.$$ (Every variable in $Z_t$ ``spoils" at most two other variables in every clause in which it appears, which possibly end up in $X' \setminus X$). \end{Proof}
\subsection{Dense Subformulas}\label{DesnseSubSubs} The next property we discuss is analogous to a property proved in \cite{AlonKahale97} for random graphs. Loosely speaking, a random graph typically doesn't contain a small induced subgraph with a large average degree. A similar proposition for 3SAT can also be found in \cite{flaxman}. \begin{prop}\label{NoDenseSubformulas} Let $c > 1$ be an arbitrary constant. Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p = d/n^2$, $d \geq d_0$, $d_0$ a sufficiently large constant. Then $whp$ there exists \emph{no} subset of variables $U$, s.t. $\#U \leq e^{-\Theta(d)} n$ and there are at least $c\#U$ clauses in ${\cal{F}}$ containing two variables from $U$. \end{prop} \begin{Proof} For a fixed set $U$ of variables, $\#U=k$, the number of clauses containing two variables from $U$ is $$\binom{k}{2}(n-2)2^3 \leq 4k^2n.$$ Each of these clauses is included independently w.p. $\frac{d}{n^2}$. Thus, the probability that $ck$ of them are included is at most $$\binom{4k^2n}{ck}\left(\frac{d}{n^2}\right)^{ck}\leq \left(\frac{4k^2ne}{ck}\cdot \frac{d}{n^2}\right)^{ck} \leq \left(\frac{12
kd }{cn}\right)^{ck}.$$ Using the union bound, the probability there exists a ``dense" set $U$ is at most $$\sum_{k=2}^{e^{-\Theta(d)} n}\binom{n}{k}\left(\frac{12
kd }{cn}\right)^{ck}=O(d^{2c}/n^{2c-2}).$$ The last equality is obtained using standard calculations, and the standard estimate on the binomial coefficient: $\binom{n}{k} \leq (en/k)^k$. \end{Proof}
\subsection{The Core Variables}\label{CoreSubsection} We describe a subset of the variables, denoted throughout by ${\cal{H}}$ and referred to as the \emph{core variables}, which plays a crucial role in the analysis. The notion of a stable variable is not enough to ensure that the algorithm will set a stable variable according to the planted assignment, as it may happen that a stable variable $x$ appears in many of its clauses with unstable variables. Thus, $x$ can be biased in the wrong direction (by wrong we mean disagreeing with the planted assignment). However, if most of the clauses in which $x$ appears contain only stable variables, then this is already a sufficient condition to ensure that $x$ will be set correctly by the algorithm. The set ${\cal{H}}$ captures the notion of such variables. There are several ways to define a set of variables with these desired properties, we present one of them, and give a constructive way of obtaining it (though it has no algorithmic implications, at least not in our context).
\noindent Formally, ${\cal{H}}={\cal{H}}({\cal{F}},\varphi,\alpha,\pi)$ is constructed using the following iterative procedure: \begin{figure*}\end{figure*} \begin{prop}\label{SizeOfHBarPr} If both $\alpha$ and $\pi$ are chosen uniformly at random then with probability $1-n^{-\Omega(d)}$, $\#{\cal{H}} = (1-e^{-\Omega(d)})n$. \end{prop} \begin{Proof} Let $\bar{{\cal{H}}}=V\setminus {\cal{H}}$. Set $\delta = e^{-\Theta(d)}$. Partition the variables in $\bar{{\cal{H}}}$ into variables that belong to $A_1 \cup A_2 \cup A_3$, and variables that were removed in the iterative step, $\bar{H}^{it}=H_{0}\setminus {\cal{H}}$. If $\#\bar{{\cal{H}}}\geq \delta n$, then at least one of $A_1 \cup A_2 \cup A_3$, $\bar{H}^{it}$ has cardinality at least $\delta n/2$. Consequently, \begin{align*} Pr[\#\bar{{\cal{H}}}\geq \delta n] \leq \underbrace{Pr[\#A_1 \cup A_2 \cup A_3\geq \delta n/2]}_{(a)}+\underbrace{Pr[\#\bar{H}^{it} \geq
\delta n/2\text{ }\big|\text{ }\#A_1 \cup A_2 \cup A_3 \leq\delta n/2]}_{(b)}. \end{align*} Propositions \ref{SupportSuccRate}, \ref{StableSuccRate}, and \ref{ViolatedSuccRate} are used to bound $(a)$. To bound $(b)$, observe that every variable that is removed in iteration $i$ of the iterative step (step 2), supports at least $(d/3-d/4)=d/12$ clauses in which at least another variable belongs to $\{a_{1},a_{2},...,a_{i-1}\}\cup A_1 \cup A_2 \cup A_3$, or appears in $d/30$ clauses each containing at least one of the latter variables. Consider iteration $\delta n/2$. Assuming $\#A_1 \cup A_2 \cup A_3 \leq\delta n/2$, by the end of this iteration there exists a set containing at most $\delta n$ variables, and there are at least $d/30\cdot\delta n/2\cdot 1/3$ clauses containing at least two variables from it (we divide by 3 as every clause might have been counted 3 times). Plugging $c=d/180$ in Proposition \ref{NoDenseSubformulas}, $(b)$ is bounded. Finally observe that $(a)$ occurs with exponentially small probability, and $(b)$ occurs with probability $n^{-\Omega(d)}$. \end{Proof} \subsection{The Factor Graph of the non-Core Variables}\label{NonCoreStructSec} Proposition \ref{SizeOfHBarPr} implies that for $p=c\log n/n^2$, $c$ a sufficiently large constant, $whp$ ${\cal{H}}$ contains already all variables. Therefore the following propositions are relevant for the setting of Theorem \ref{ConvergenceThmMed} (namely, $p=O(1/n^2)$). In what follows we establish the typical structure of the factor graph induced on the non-core variables.
\begin{prop}\label{prop_NonCoreFactorGraph} Let ${\cal{F}}$ be a 3CNF formula randomly sampled according to ${\cal{P}}^{{\rm plant}}_{n,p}$, where $p = d/n^2$, $d \geq d_0$, $d_0$ a sufficiently large constant, let $\pi$ be the initial random ordering of the clause-variable messages, and $\alpha$ the initial random clause-variable message vector. Let $T$ be the factor graph induced on the non-core variables. $T$ enjoys the following properties: \begin{enumerate}
\item Every connected component contains $whp$ $O(\log n)$ variables,
\item every connected component contains $whp$ at most one cycle,
\item the probability of a cycle of length at least $k$ in $T$ is at most $e^{-\Theta(dk)}$. \end{enumerate} \end{prop}
A proposition of similar flavor to $(a)$ was proven in \cite{flaxman}, though with respect to a different notion of core. This alone would not have sufficed to prove our result, and we need $(b)$ and $(c)$ as well.
\begin{cor}\label{cor:NoLongCycle} Let $f=f(n)$ be such that $f(n)\rightarrow \infty$ as $n\to \infty$. Then $whp$ there is no cycle of length $f(n)$ in the non-core factor graph. \end{cor}
The proof of Proposition \ref{prop_NonCoreFactorGraph} is quite long and technical. To avoid distraction, we go ahead and prove Theorem \ref{ConvergenceThmMed} and Proposition \ref{ConvergencePropDense}, and defer the proof of Proposition \ref{prop_NonCoreFactorGraph} to Section \ref{sec:NoTwoCycleProof}.
\section{Proof of Theorem \ref{ConvergenceThmMed} and Proposition \ref{ConvergencePropDense}}\label{ResultsProofSection} We start by giving an outline of the proof of Theorem \ref{ConvergenceThmMed}. Proposition \ref{ConvergencePropDense} is derived as an easy corollary of that proof. To prove Theorem \ref{ConvergenceThmMed} we need to establish three properties:
\begin{enumerate}
\item {\em Convergence}: the WP algorithm converges to a fixed point.
\item {\em Consistency}: the partial assignment implied by this fixed point is consistent with some satisfying assignment.
\item {\em Simplicity}: after simplifying the input formula by substituting in the values of the assigned variables, the remaining subformula is not only satisfiable (this is handled by consistency), but also simple.
\end{enumerate}
We assume that the formula ${\cal{F}}$ and the execution of $\textsf{WP}$ are {\em typical} in the sense that Propositions \ref{SizeOfHBarPr} and \ref{prop_NonCoreFactorGraph} hold. First we prove that after one iteration \textsf{WP} sets the core variables ${\cal{H}}$ correctly ($B_i$ agrees with $\varphi$ in sign) and this assignment does not change in later iterations. The proof of this property is rather straightforward from the definition of a core. This establishes convergence and consistency for the core variables. From iteration 2 onwards $\textsf{WP}$ is basically running on ${\cal{F}}$ in which variables belonging to ${\cal{H}}$ are substituted with their planted assignment. This subformula is satisfiable. Moreover, its factor graph contains small (logarithmic size) connected components, each containing at most one cycle. This last fact serves a dual purpose. It shows that if WP will eventually converge, the simplicity property will necessarily hold. Moreover, it will assist us in proving convergence and consistency for the subformula. Consider a connected component composed of a cycle and trees ``hanging" on the cycle. Proving convergence on the trees is done using a standard inductive argument. The more interesting part is proving convergence on the cycle. The difficulty there is that messages on a cycle may have more than one fixed point to which they may possibly converge, which makes it more difficult to prove that they converge at all. Our proof starts with a case analysis that identifies those cases that have multiple fixed points. For these cases we prove that almost surely random fluctuations caused by step~3.a of the WP algorithm will lead to convergence to some fixed point. This is similar in flavor to the fact that a random-walk on a line eventually reaches an endpoint of the line (even though one cannot tell a-priori which endpoint this will be). 
Hand-in-hand with establishing convergence for the trees and cycle, we shall also prove consistency.
The set $V_A$ of Theorem~\ref{ConvergenceThmMed} is composed of all variables from ${\cal{H}}$ and those variables from the non-core factor graph that get assigned. The set $V_U$ is composed of the UNASSIGNED variables from the non-core factor graph. We now proceed with the formal proof.
\subsection{Analysis of \textsf{WP} on the core factor graph} We start by proving that the messages concerning the factor graph induced by the core-variables converge to the correct value, and remain the same until the end of the execution.
We say that a message $C \rightarrow x$, $C=(\ell_x \vee \ell_y \vee \ell_z)$, is \emph{correct} if its value is the same as it is when $y \rightarrow C$ and $z\rightarrow C$ are 1 (that is agree in sign with their planted assignment). In other words, $C \rightarrow x$ is 1 iff $C=(x\vee \bar{y}\vee\bar{z})$ ($x$ supports $C$).
\begin{prop} \label{prop:invariant} If $x_i\in {\cal{H}}$ and all messages $C \rightarrow x_i$, $C \in {\cal{F}}[{\cal{H}}]$ are correct at the beginning of an iteration (line 3 in the \textsf{WP} algorithm), then this invariant is kept by the end of that iteration. \end{prop} \begin{Proof} By contradiction, let $C_0 \rightarrow x$ be the first wrongly evaluated message in the iteration. W.l.o.g. assume $C_0=(\ell_x\vee \ell_y \vee \ell_z)$. Then at least one of $y,z$ sent a wrong message to $C_0$. $$y \rightarrow C_0 = \sum_{C \in N^+(y),C\neq C_0} C \rightarrow y - \sum_{C'\in N^-(y),C'\neq C_0}C' \rightarrow y.$$ Every message $C'' \rightarrow y$, $C''\in F[{\cal{H}}]\cap \{N^{++}(y)\cup N^{-}(y)\}$ is 0 (since it was correct at the beginning of the iteration and that didn't change until evaluating $C_0 \rightarrow x$). On the other hand, $y \in {\cal{H}}$ and therefore it supports at least $d/4$ clauses in ${\cal{F}}[{\cal{H}}]$. Thus at least $(d/4-1)$ messages in the left hand sum are `1' (we subtract 1 as $y$ might support $C_0$). $y$ appears in at most $d/30$ clauses with non-core variables (all of which may contribute a wrong '1' message to the right hand sum). All in all, $y \rightarrow C_0 \geq (d/4-d/30-1) > d/5$, which is correct (recall, we assume $\varphi = \textbf{1}^n$). The same applies for $z$, contradicting our assumption. \end{Proof}
\begin{prop}\label{PropNoProof} If $x_i\in {\cal{H}}$ and all messages $C \rightarrow x_i$, $C \in {\cal{F}}[{\cal{H}}]$ are correct by the end of a \textsf{WP} iteration, then $B_i$ agrees in sign with $\varphi(x_i)$ by the end of that iteration. \end{prop} \noindent Proposition \ref{PropNoProof} follows immediately from the definition of ${\cal{H}}$ and the message $B_i$. It then suffices to show that after the first iteration all messages $C \rightarrow x_i$, $C \in {\cal{F}}[{\cal{H}}]$ are correct. \begin{prop}\label{CoreSetCorrectProp} If ${\cal{F}}$ is a typical instance in the setting of Theorem \ref{ConvergenceThmMed}, then after one iteration of $\textsf{WP}({\cal{F}})$, for every variable $x_i\in {\cal{H}}$, every message $C \rightarrow x_i$, $C \in {\cal{F}}[{\cal{H}}]$ is correct. \end{prop} \begin{Proof} The proof is by induction on the order of the execution in the first iteration. Consider the first message $C \rightarrow x$, $C=(\ell_x\vee \ell_y \vee \ell_z)$, $C \in {\cal{F}}[{\cal{H}}]$, to be evaluated in the first iteration. Now consider the message $y \rightarrow C$ at the time $C \rightarrow x$ is evaluated. All messages $C' \rightarrow y$, $C'\in{\cal{F}}[{\cal{H}}]$ have their initial random value (as $C \rightarrow x$ is the first core message to be evaluated). Furthermore, $y \in {\cal{H}}$, and therefore there are at most $d/30$ messages of the form $C'' \rightarrow y$, $C'' \notin {\cal{F}}[{\cal{H}}]$. $x \in {\cal{H}}$ hence it is stable w.r.t. $\pi$ and not violated by the initial clause-variable random messages. Therefore \[y \rightarrow C \geq \underbrace{d/7}_{\text{property $(c)$ in defn. \ref{ViolatedDefn}}} -\underbrace{d/30}_{\text{property $(b)$ in defn. \ref{ViolatedDefn}}}-\underbrace{d/30}_{\text{non-core messages}}>d/14.\] The same applies to $z$, to show that $C \rightarrow x$ is correct. Now consider a message $C \rightarrow x$ at position $i$, and assume all core messages up to this point were evaluated correctly. 
Observe that every core message $C' \rightarrow y$ that was evaluated already, if $C'\in \{N^{++}(y)\cup N^{-}(y)\}\cap {\cal{F}}[{\cal{H}}]$ then its value is '0' by the induction hypothesis. Since $x$ is not violated by $\alpha$, property $(b)$ in definition \ref{ViolatedDefn} ensures that to begin with
$|\#\textbf{1}_\alpha(N^{++}(y))-\#\textbf{1}_\alpha(N^{-}(y))|\leq d/30$. $y \in {\cal{H}}$, therefore it appears in at most $d/30$ non-core messages, all of which could have been already wrongly evaluated, changing the above difference by additional $d/30$. As for the core messages of $y$ which were already evaluated, since they were evaluated correctly, property $(a)$ in definition \ref{ViolatedDefn} ensures that the above difference changes by at most additional $d/30$. All in all, by the time we evaluate $C \rightarrow x$, $$\sum_{C' \in N^{++}(y),C'\neq C} C' \rightarrow y - \sum_{C''\in N^-(y),C''\neq C}C'' \rightarrow y \geq -3\cdot d/30.$$ As for messages that $y$ supports, property $(c)$ in definition \ref{ViolatedDefn} ensures that their contribution is at least $d/7$ to begin with. Every core message in $N^{s}(y)$ that was evaluated turned to '1', every non-core message was already counted in the above difference. Therefore $y \rightarrow C \geq d/7-3\cdot d/30>d/25$. The same applies to $z$ showing that $C \rightarrow x$ is correct. \end{Proof}
To prove Proposition \ref{ConvergencePropDense}, observe that when $p=c\log n/n^2$, with $c$ a sufficiently large constant, Proposition \ref{SizeOfHBarPr} implies ${\cal{H}} = V$. Combining this with Proposition \ref{CoreSetCorrectProp}, Proposition \ref{ConvergencePropDense} readily follows.
\subsection{The effect of messages that already converged}
It now remains to analyze the behavior of \textsf{WP} on the non-core factor graph, given that the messages involving the core factor graph have converged correctly. A key observation is that once the messages in the factor graph induced by the core variables converged, we can think of $\textsf{WP}$ as if running on the formula resulting from replacing every core variable with its planted assignment and simplifying (which may result in a 1-2-3CNF). The observation is made formal by the following proposition:
\begin{prop}\label{prop:AsIfSimplified} Consider a run of $\textsf{WP}$ that has converged on the core. Starting at some iteration after $\textsf{WP}$ has converged on the core, consider two alternative continuations of the warning propagation algorithm. $\textsf{WP}_1$ denotes continuing with $\textsf{WP}$ on the original input formula. $\textsf{WP}_2$ denotes continuing with $\textsf{WP}$ on the formula obtained by replacing each core variable with its planted assignment and simplifying. Then for every iteration $t$, the sequence of messages in the $t$'th iteration of $\textsf{WP}_2$ is identical to the respective subsequence in $\textsf{WP}_1$. (This subsequence includes those messages not involving the core variables, and includes messages of type $x \to C$ and of the type $C \to x$.) \end{prop}
\begin{Proof} First note that all messages $x \to C$, $x\in {\cal{H}}$, do not change (sign) from the second iteration onwards (by the analysis in the proof of Proposition \ref{CoreSetCorrectProp}). Furthermore, if $\ell_x$ satisfies $C$ in $\varphi$, then $x\to C$ is positive (if $\ell_x=x$, and negative if $\ell_x=\bar{x}$), and therefore all messages $C\to y$, $y\neq x$ are constantly 0. Namely, they don't affect any calculation, and this is as if we replaced $\ell_x$ with TRUE, and in the simplification process $C$ disappeared. If $\ell_x$ is false in $C$ under $\varphi$, then $x \to C$ is constantly negative (if $\ell_x=x$, or constantly positive if $\ell_x=\bar{x}$), and this is exactly like having $\ell_x$ removed from $C$ (which is the result of the simplification process).
\end{Proof}
\subsection{Analysis of \textsf{WP} on the \emph{non}-core factor graph} \label{sec:noncoreoutline}
Note that to prove the convergence of the algorithm we need also to prove that messages of the sort $C \to x$ where $C$ is not in the core and $x$ is in the core converge. However, if we prove that all messages in the factor graph induced by the non-core variables converge, then this (with the fact that the core factor graph messages converge) immediately implies the convergence of messages of this type. Therefore, our {\em goal reduces to proving convergence of \textsf{WP} on the factor graph induced by
${\cal{F}}|_\psi$, where $\psi$ assigns the core variables their planted assignment, and the rest are UNASSIGNED.}
We say that $\textsf{WP}$ converged correctly in a connected component $\cal{C}$ of the non-core factor graph if there exists a satisfying assignment $\psi$ of the entire formula which is consistent with the planted assignment on the core, and with the assignment of $\textsf{WP}$ to $\cal{C}$.
Consider a connected component in the non-core factor graph consisting of a cycle with trees hanging from it. Our analysis proceeds in three steps:
\begin{enumerate} \item We first prove that clause-variable and variable-clause messages of the form $\alpha \to \beta$ where $\alpha \to \beta$ lead from the trees to the cycle, converge weakly correctly w.r.t. the planted assignment. In the case that the component has no cycles, this concludes the proof. \item Then, using a refined case analysis, we show that the
messages along the cycle also converge $whp$, this time not necessarily to the planted assignment, but to some satisfying assignment which agrees with the already converged messages. \item We conclude by showing that messages in the direction from the cycle to the trees converge. Finally we show that together, all messages (including parts $(a)$ and $(b)$) in the connected component converge correctly according to some satisfying assignment. \end{enumerate}
Consider the factor graph $F$ induced by the simplified formula. A {\em cycle} in $F$ is a collection $x_1,C_2,x_3,C_4,\ldots,x_r = x_1$ where $x_i$ and $x_{i+2}$ belong to $C_{i+1}$ for all $i$ (in our description we consider only odd values of $i$) and $x_i \neq x_{i+2}$, $C_{i+1} \neq C_{i+3}$ for all $i$. A factor graph $F$ is a {\em tree} if it contains no cycles. It is {\em unicyclic} if it contains exactly one cycle. Let $x \to C$ be a directed edge of $F$. We say that $x \to C$ {\em belongs} to the cycle, if both $x$ and $C$ belong to the cycle. For an edge $x \to C$ that does not belong to the cycle, we say that $x \to C$ {\em is directed towards} the cycle if $x$ doesn't belong to the cycle and $C$ lies on the simple path from $x$ to the cycle. We say that the edge $x \to C$ is {\em directed away} from the cycle if $C$ doesn't belong to the cycle and $x$ lies on the simple path from the cycle to $C$. Similarly we define what it means for an edges $C \to x$ to belong to the cycle, to be directed towards the cycle and to be directed away from the cycle. \begin{prop} Let $F$ be a unicyclic factor graph. Then every directed edge of the form $x \to C$ or $C \to x$ either belongs to the cycle, or is directed towards it or directed away from it. \end{prop} \begin{Proof} Recall that the factor graph is an undirected graph, and the direction is associated with the messages. Take an edge $x\to C$ (similarly for $C\to x$), if it lies on the cycle, then we are done. Otherwise, since the factor graph is connected, consider the path in the tree leading from some element of the cycle to $C$. This path is either contained in the path to $x$ or contains it (otherwise there is another cycle). In the first case $x\to C$ is directed towards the cycle, and in the latter $x\to C$ is directed away from the cycle. \end{Proof}
Our analysis proceeds in two parts: first we shall analyze $\textsf{WP}$ on the trees, then $\textsf{WP}$ on the cycle and connect the two (which is relevant for the uni-cyclic components).
\subsection{$\textsf{WP}$ on the trees} As we already mentioned before, there are two directions to consider: messages directed towards the cycle and away from the cycle. In this section we shall consider a rooted tree, and partition the messages according to messages which are oriented away from the root (they will correspond in the sequel to messages going away from the cycle) and messages towards the root (messages from the leaves towards the root -- later to be identified with messages going into the cycle). The first lemma concerns messages going towards the root.
\begin{rem}\rm\label{rem:ConvergenceOnTrees} Lemma \ref{MessageIntoCycleConvergeLem} is a special case of the known fact (see~\cite{SurveyPropagation} for example) that for every tree induced by a satisfiable formula, $\textsf{WP}$ converges and there exists a satisfying assignment $\psi$ such that every $B_i$ is either 0 or agrees with $\psi$. In Lemma \ref{MessageIntoCycleConvergeLem} we assume that the formula is satisfiable by the all~1 assignment (the planted assignment), and consider only messages towards the root. \end{rem}
\begin{lem}\label{MessageIntoCycleConvergeLem} Let $C \rightarrow x$ be an edge in the non-core factor graph belonging to a connected component of size $s$, and in particular to a rooted tree $T$. If $C\rightarrow x$ is directed towards the root then the message $C \rightarrow x$ converges after at most $O(s)$ iterations. Furthermore, if $C \rightarrow x = 1$ then $x$ appears positively in $C$. \end{lem} \begin{Proof} We consider the case $C=(\ell_x \vee \ell_y)$ -- the case $C=(\ell_x \vee \ell_y \vee \ell_z)$ where all three literals belong to non-core variables is proved similarly. For an edge $(C,x)$ in the factor graph, we define $\verb"level"(C,x)$ to be the number of edges in a path between $C$ and the leaf most distant from $C$ in the factor graph from which the edge $(C,x)$ is removed. The lemma is now proved using induction on the level $i$. Namely, after the $i^{th}$ iteration, all messages $C \rightarrow x$ associated with an edge $(C,x)$ at level $i$ converge, and if $C \rightarrow x = 1$ then $x$ appears positively in $C$.
The base case is an edge $(C,x)$ at level 0. If $\verb"level"(C,x)=0$ then $C$ is a unit clause containing only the variable $x$. By the definition of the messages, in this case $C\rightarrow x = 1$ and indeed it must be the case that $x$ is positive in $C$ (as the other two variables evaluate to FALSE under the planted assignment). Now consider an edge $(C,x)$ at level $i$, and consider iteration $i$. Since $i>0$, it must be that there is another non-core variable $y$ in $C$ (or two more variables $y,z$). Consider an edge $(C',y)$, $y\in C'$ (if no such $C'$ exists then we are done, as $C \to x$ will be constantly 0 in this case).
$\verb"level"(C',y)$ is strictly smaller than $i$ since every path from $C$ to a leaf (when deleting the edge $(C,x)$) passes through some edge $(C',y)$. By the induction hypothesis, all messages $C' \rightarrow y$ already converged, and therefore also $y\to C$ and in turn $C\to x$. It is only left to take care of the case $C\to x=1$. In this case, there must be a clause $C'$ s.t. $C'
\rightarrow y = 1$ and $y$ appears positively in $C'$ (by the induction hypothesis). If $C \to x=1$ it must be that $y$ appears negatively in $C$ and therefore $x$ must appear positively (otherwise $C$ is not satisfied by the planted assignment). \end{Proof}
Next we consider several scenarios that correspond to messages going from the root towards the leaves. Those scenarios correspond to step~(c) of our analysis, referred to in Section~\ref{sec:noncoreoutline}.
\begin{clm}\label{1MsgIffUnsatClm} Let $F$ be a unicyclic formula, and assume that WP has converged on $F$. Let $x \to C$ be directed away from the cycle, and define $F_C$ to be the subformula inducing the tree rooted at $C$ ($x$ itself is removed from $C$). Then $C \to x = 0$ in the fixed point if and only if $F_C$ is satisfiable. \end{clm} \begin{Proof} The structure of the proof is similar to that of Lemma \ref{MessageIntoCycleConvergeLem}. For convenience we extend the definition of level above as to include edges on the cycle. We say that an edge $(C,x)$ in the factor graph has $\verb"level"(C,x)$ equal $\infty$ if $(C,x)$ lies on a cycle and $\verb"level"(C,x) = t < \infty$ if $t$ is the maximal length of a path between $C$ and a leaf in the factor graph from which the edge $(C,x)$ is removed. The lemma is now proved using induction on $t$.
The base case is an edge $(C,x)$ at level 0. If $\verb"level"(C,x)=0$ then $C$ is a unit clause containing only the variable $x$, and then $F_C$ consists of the empty clause (recall that $x$ itself is removed from $C$). Indeed $C \to x =1$ by definition and $F_C$ is unsatisfiable (by definition, a formula containing the empty clause is not satisfiable).
Now consider an edge $(C,x)$ at level $t>0$ ($t$ is still finite because we are not interested in cycle edges). Assume w.l.o.g. that $C=(x\vee y \vee z)$ (or $C=(x\vee y)$ in the two-literal case). Let $C_1,C_2,\ldots,C_r$ be the clauses in $F_C$ that contain the variable $y$, and let $D_1,D_2,\ldots,D_s$ be the clauses that contain the variable $z$. Similarly to $F_C$ we can define $F_{C_i}$ and $F_{D_j}$. Observe that every edge $(C_i,y)$ and $(D_j,z)$ in $F_C$ has a $\verb"level"$ value which is strictly smaller than $t$ -- since every path from $C$ to a leaf (when deleting the edge $(C,x)$) passes through some such edge.
First consider the case where $C \to x = 1$. For that to happen, it must be that there are two clauses $C_i,D_j$ in $F_C$ so that $C_i \to y = 1$ and $D_j \to z = 1$ and $C_i$ contains $\bar{y}$, $D_j$ contains $\bar{z}$. By the induction hypothesis, $F_{C_i}$ and $F_{D_j}$ are not satisfiable. The only way that $F_C$ can now be possibly satisfied is by assigning $z$ and $y$ to FALSE, but then $F_C$ is not satisfied as the clause $C$ (without $x$) evaluates to FALSE.
Next we prove that if $F_C$ is not satisfiable then it must be that $C\to x=1$. If $F_C$ is not satisfiable then it must be that $C=(\ell_x\vee \bar{y} \vee \bar{z})$ (otherwise $\varphi$ satisfies $F_C$). Further, observe that there must exist at least one clause $C_i$ containing $y$ positively, and at least one $D_j$ containing $z$ positively s.t. $F_{C_{i}}$ and $F_{D_{j}}$ are unsatisfiable. Otherwise, we can define $\varphi'$ to be $\varphi$ except that $y$ or $z$ are assigned FALSE (depending which of $C_i$ or $D_j$ doesn't exist). It is easy to see that $\varphi'$ satisfies $F_C$, contradicting our assumption. By the induction hypothesis $C_i\to y=1$ and $D_j\to z=1$. Further, by Lemma \ref{MessageIntoCycleConvergeLem}, there cannot be a message $C' \to y = 1$ where $y$ is negative, or $D' \to z =1$ where $z$ is negative (because all of these messages are directed towards the cycle). This in turn implies that $C\to x=1$. \end{Proof}\\ Recall our definition for $\textsf{WP}$ converges correctly on a subformula $F'$ of $F$ if there exists a satisfying assignment $\psi$ to $F$ which is consistent with the planted assignment $\varphi$ on the core, and with the assignment of $\textsf{WP}$ to $F'$. \begin{clm}\label{spinalDecomp} Assume that $F$ is a unicyclic formula. Assume further that WP has converged on $F$. Let $C \to y$ be directed away from the cycle. Consider a subformula $F_C$ which induces a tree rooted at a clause $C$. This formula contains the clause $C$ and all other clauses whose path to the cycle goes via $y$. If in the fixed point for $F$ it holds that \begin{itemize}
\item $C\to y=1$,
\item $y$ appears negatively in $C$,
\item $y\to C \geq 1$, \end{itemize} then $\textsf{WP}$ converges correctly on $F_C$. \end{clm} \begin{Proof} We split the variables of $F_C$ into two sets. The first is the set of spinal variables (to be defined shortly), and the remaining variables. The \emph{spine} (rooted at $y$) of a tree-like formula is defined using the following procedure: Let us track the origin of the message $y\to C$ in the tree (which is directed towards the root). For $y\to C \geq 1$ to occur, there must be a clause $D_1$ in the tree that contains $y$ positively and a message $D_1 \to y = 1$ in the direction of the root (as messages in the direction of the root are only effected by other messages in that direction). Let us backtrack one more step.
$D_1=(y \vee \bar{k} \vee \bar{w})$ for some variables $w,k$; $k$ and $w$ must appear negatively in $D_1$ by Lemma \ref{MessageIntoCycleConvergeLem},
and the fact that $D_1 \to y = 1$, that is, both $k$ and $w$ were issued warnings having them not satisfy $D_1$. Let us consider the clauses $D_2$ and $D_3$ that issue warnings to $k$ and $w$, respectively. $D_2=(k\vee ...),D_3=(w\vee ...)$, $D_2 \to k =1$ and $D_3 \to w = 1$, and both messages are directed towards the root. Obviously, one can inductively continue this backtracking procedure, which terminates at the leaves of the tree (since there are no cycles the procedure is well defined and always terminates). Let us call the clauses and variables that emerge in this backtracking the {\em spine} of the tree. The figure below illustrates this procedure.
\begin{figure*}
\caption{The spine of a tree}
\label{fig:a}
\end{figure*}
Let us start with the $B_i$-messages for the non-spinal variables (the $B_i$ message was defined in Section \ref{WPSubs}). The spine splits $F_C$ into sub-trees hanging from the spinal variables (the root of each such tree is a clause, e.g. $C_1,C_2,C_3$ in our example). Every non-spinal variable belongs to one of those trees. It suffices to prove that for every subtree rooted at $C_i$, hanging from a spinal variable $s$, it holds that $s \to C_i \geq 0$ in the fixed point (that is, $s$ transmits its assignment according to the planted assignment). This ensures, by Remarks \ref{rem:ConvergenceOnTrees}, convergence for that tree to a satisfying assignment (consistent with the planted). Let us prove that $s \to C_i \geq 0$. Let $F_s$ be the subformula corresponding to the tree hanging from $s$ rooted at some clause $C_i$. We prove that $s \to C_i \geq 0$ by induction on the distance in $F_C$ between $y$ and $s$. The base case is distance 0, which is $y$ itself. The messages that we need to verify are of the form $y \to C_i$, $C_i \neq D_1$, which are pointing away from the cycle. For every message $y \to C_i$, the wrong message $C \to y$ is evened by the correct warning $D_1 \to y$. Since $y\to C_i$ depends only on one message which is directed away from the cycle (or else
there is a second cycle), and all the other messages that contribute to the calculation of $y\to C_i$ are correct (Lemma \ref{MessageIntoCycleConvergeLem}), we conclude that $y \to C_i \geq 0$. The induction step follows very similarly.
Let us now consider the spinal variables. For every such variable there is always at most one message in the direction away from the cycle (otherwise there is more than one cycle); this message may be wrong. Using a very similar inductive argument as in the previous paragraph, one can show that there is always at least one correct warning (in the direction of the cycle) for each spinal variable. Therefore $B_s \geq 0$ for every spinal variable $s$. \end{Proof}
\subsection{$\textsf{WP}$ on cycles} \label{subsec:cycle}
We will denote a cycle by $x_1, C_2 , x_3, C_4 ...x_{2r-1},C_{2r},x_1$ where by this we mean that $x_i$ appears in the clauses before/after it and that $C_i$ contains the two variables before/after it. We consider two different types of cycles. \begin{itemize} \item \emph{Biased} cycles: cycles that have at least one warning message $C \to x_i = 1$ coming into the cycle, where $C \to x_i$ directs into the cycle and the value of $C \to x_i$ is the value after the edge has converged. \item \emph{Free} cycles: cycles that do not have such messages coming in, or all messages coming in are 0 messages. \end{itemize}
\subsubsection{Convergence of \textsf{WP} when the cycle is biased}
\noindent First we observe that we may assume w.l.o.g. that edges that enter the cycle enter it at a variable rather than at a clause (hence that every clause on the cycle contains exactly two non-core variables). This is because of a simple argument similar to Proposition \ref{prop:AsIfSimplified}: consider an edge going into the cycle, $z\to C$, and w.l.o.g. assume that $z$ appears positively in $C$. After all the edges going into the cycle have converged, if $z\to C\geq 0$ it follows that $C\to x=0$ for cycle edges $(C,x)$, and thus execution on the cycle is the same as if $C$ was removed from the formula, only now we are left with a tree, for which convergence to a correct assignment is guaranteed (Remark \ref{rem:ConvergenceOnTrees}). If $z\to C<0$, then the execution is exactly as if $z$ was removed from $C$ (and $C$ is in 2-CNF form).
\begin{prop}\label{prop:ConnvergenceInBiasedCaseClm} Let $\cal{C}$ be a connected component of the factor graph of size $s$ containing one cycle s.t. there exists an edge directed into the cycle $C \to x_i$ where $x_i$ belongs to the cycle and such that the message converges to $C \to x_i = 1$. Then $\textsf{WP}$ converges on $\cal{C}$ after at most $O(s)$ rounds. Moreover for the fixed point, if the message $C' \to x = 1$ then $x$ appears positively in $C'$. \end{prop}
\begin{Proof} A message of the cycle $C_j \to x_{j+1}$ depends only on cycle messages of the type $C_{j'} \to x_{j'+1}, x_{j'+1} \to C_{j'+2}$ and on messages coming into the cycle. In other words, during the execution of $\textsf{WP}$ the values of all messages $C_{j'} \to x_{j'-1},x_{j'-1} \to C_{j'-2}$ do not affect the value of the message $C_j \to x_{j+1}$. Recall that we are in the case where there exists a message $C \to x_i = 1$ going into the cycle (after the convergence of these messages). Also $x_i$ must appear positively in $C$ (Lemma \ref{MessageIntoCycleConvergeLem}). We consider the following cases:
\begin{itemize} \item There exists a variable $x_j$ that appears positively in both $C_{j-1}$ and $C_{j+1}$ (the case $j=i$ is allowed here). We note that in this case the message $x_j \to C_{j+1}$ must be non-negative which implies that the message $C_{j+1} \to x_{j+2}$ converges to the value $0$. This in turn implies that the value of all messages $x_r \to C_{r+1}$ and $C_{r+1} \to x_{r+2}$ for $r \neq j$ will remain the same if the clause $C_{j+1}$ is removed from the formula. However, this case reduces to the case of tree formula. \item $x_i$ appears negatively in $C_{i+1}$ and positively in $C_{i-1}$. We note that in this case the value of the message $x_i \to C_{i+1}$ is always at least 1, which implies that the message $C_{i+1} \to x_{i+2}$ always take the value $1$. Thus in this case we may remove the clause $C_{i+1}$ from the formula and replace it by the unit clause $\ell_y$ where $C_{i+1} = \ell_y \vee \bar{x_i}$. Again, this reduces to the case of a tree formula. \item The remaining case is the case where $x_i$ appears negatively in both $C_{i-1}$ and $C_{i+1}$ and there is no $j$ such that $x_j$ appears positively in both $C_{j-1}$ and $C_{j+1}$. We claim that this leads to contradiction. An easy corollary of Lemma \ref{MessageIntoCycleConvergeLem} is that all the messages that go into the cycle have converged according to the planted assignment. Therefore w.l.o.g one can ignore messages of the form $y \to C_i$, $y$ is a non-cycle variable, and $C_i$ is a cycle clause (we can assume that every such message says that the literal of $y$ is going to be false in $C_i$, otherwise the cycle structure is again broken and we reduce to the tree case). Therefore the cycle reduces to a 2SAT formula which has to be satisfiable by the planted assignment, in which in particular $x_i = 1$. Write $C_{i+1} = \bar{x_i} \vee \ell_{i+2}$. 
Then for the satisfying assignment we must have $\ell_{i+2} = 1$, similarly $\ell_{i+4} = 1$, etc, until we reach $\bar{x}_i = 1$, a contradiction. \end{itemize} To summarize, by Lemma \ref{MessageIntoCycleConvergeLem} the messages going into the rooted tree at $x_i$ converge after $O(s)$ steps, and at least one warning is issued. By the above discussion, for every clause $D$ in the connected component it holds that $x_i \to D\geq 0$ (as $x_i$ appears in at most one message which may be wrong -- a cycle message). Since there is always a satisfying assignment consistent with $x_i$ assigned TRUE, then after reducing the cycle to a tree we are left with a satisfiable tree. Remark \ref{rem:ConvergenceOnTrees} guarantees convergence in additional $O(s)$ iterations. \end{Proof}
\subsubsection {Convergence of \textsf{WP} when the cycle is free} The main result of this subsection is summarized in the following claim: \begin{clm}\label{ConnvergenceInNonBiasedCaseMainClm} Let $\cal{C}$ be a connected component of the factor graph of size $s$ containing one cycle of size $r$ s.t. the fixed point contains no messages $C \to x = 1$ going into the cycle (the cycle is free). Then $whp$ \textsf{WP} converges on $\cal{C}$ after at most $O(r^2 \cdot \log n+s)$ rounds. Moreover for the fixed point, if we simplify the formula which induces $\cal{C}$ according to the resulting $B_i$'s, then the resulting subformula is satisfiable. \end{clm}
\begin{rem}\label{DiffRem}\rm Observe that the free case is the only one where convergence according to the planted assignment is not guaranteed. Furthermore, the free cycle case is the one that may not converge ``quickly'' (or not at all), though this is extremely unlikely. The proof of Claim \ref{ConnvergenceInNonBiasedCaseMainClm} is the only place in the analysis where we use the fact that in line $3.a$ of \textsf{WP} we use fresh randomness in every iteration. \end{rem}
We consider two cases: the easy one is the case in which the cycle contains a pure variable w.r.t the cycle (though this variable may not be pure w.r.t to the entire formula).
\begin{clm}\label{ConnvergenceOfNonBiasedCyclePure} If the cycle contains a variable $x_i$ appearing in the same polarity in both $C_{i+1},C_{i-1}$, then the messages $C \to x$ along cycle edges converge. Moreover for the fixed point, if $C \to x = 1$ then $x$ satisfies $C$ according to $\varphi$. \end{clm} The proof is essentially the first case in the proof of Proposition \ref{prop:ConnvergenceInBiasedCaseClm}. We omit the details.
\noindent We now move to the harder case, in which the cycle contains no pure variables (which is the case referred to in Remark \ref{DiffRem}). \begin{prop}\label{FreeCycleNoPureConvergenceClm} Consider a free cycle of size $r$ with no pure literal, and one of the two directed cycles of messages. Then the messages along the cycle converge $whp$ to either all $0$ or all $1$ in $O(r^2\log n)$ rounds. \end{prop} Convergence in $O(r^2\log n)$ rounds suffices due to Corollary \ref{cor:NoLongCycle} (which asserts that $whp$ the length of every cycle is constant). The proof of Proposition \ref{FreeCycleNoPureConvergenceClm} is given at the end of this section. We proceed by analyzing \textsf{WP} assuming that Proposition \ref{FreeCycleNoPureConvergenceClm} holds, which is the case $whp$.
\begin{prop}\label{prop:free_cycle_comp_convergence} Suppose that the cycle messages have converged (in the setting of Proposition \ref{FreeCycleNoPureConvergenceClm}), then the formula resulting from substituting every $x_i$ with the value assigned to it by $B_i$ (according to the fixed point of $\textsf{WP}$), and simplifying, is satisfiable. \end{prop} \begin{Proof} Let $F$ be the subformula that induces the connected component $\cal{C}$, and decompose it according to the trees that hang on the cycle's variables and the trees that hang on the cycle's clauses. Observe that the formulas that induce these trees are variable and clause disjoint (since there is only one cycle in $\cal{C}$).
Let us start with the cycle clauses. The key observation is that setting the cycle variables according to one arbitrary orientation (say, set $x_i$ to satisfy $C_{i+1}$) satisfies the cycle and doesn't conflict with any satisfying assignment of the hanging trees: if the tree hangs on a variable $x_i$, then since the cycle is free, the tree is satisfiable regardless of the assignment of $x_i$ (Proposition \ref{1MsgIffUnsatClm}). In the case that the tree hangs on a cycle-clause $C$, then the cycle variables and the tree variables are disjoint, and $C$ is satisfied already by a cycle-variable regardless of the assignment of the tree-variables. Now how does this coincide with the result of $\textsf{WP}$. Recall that we are in the case where the cycle is free. Therefore only messages $C \to x_i$ where both $C$ and $x_i$ belong to the cycle effect $B_i$. If in the fixed point one cycle orientation is 0 and one orientation is 1, then the $B_i$ messages of the cycle variables implement exactly this policy. If both cycle orientations converged to 1 or to 0, then the corresponding $B_i$ messages of all cycle variables are UNASSIGNED (since the cycle is free), but then the same policy can be used to satisfy the clauses of the cycle in a manner consistent with the rest of the formula.
It remains to show that $\textsf{WP}$ converges on every tree in a manner that is consistent with some satisfying assignment of the tree. We consider several cases.
Consider a tree hanging on a cycle variable $x_i$. Let $C$ be some non-cycle clause that contains $x_i$, and $F_C$ the subformula that induces the tree rooted at $C$. Observe that once the cycle has converged, then the message $x_i\to C$ does not change anymore. If $x_i \to C$ agrees with $\varphi$ there are two possibilities. Either $x_i$ satisfies $C$ under $\varphi$, in which case $C$ always sends 0 to $F_C$, and then $\textsf{WP}$ executes on $F_C$ as if $C$ is removed. Remark \ref{rem:ConvergenceOnTrees} guarantees correct convergence (as $F_C\setminus C$ is satisfiable), and as for $C$, $B_i \geq 0$ and we can set $x_i$ to TRUE so that it satisfies $C$ and is consistent with the assignment of the cycle ($B_i \geq 0$ since $x_i \geq 0$ and $C \to x_i = 0$ as we are in the free cycle case). If $x_i$ appears negatively in $C$, then $\textsf{WP}$ executes as if $x_i$ was deleted from $C$. Still $F_C$ is satisfiable and correct convergence is guaranteed.
Now consider the case where $x_i \to C$ disagrees with $\varphi$. Recall that we assume $\varphi(x_i)=TRUE$, and therefore $x_i \to C$ is negative in the fixed point. If $x_i$ appears negatively in $C$ then $C\rightarrow y=0$ for every $y\in C$ (since $x_i$ signals $C$ that it satisfies it), and therefore $C$ doesn't effect any calculation from this point onwards, and the correct convergence of $F_C$ is again guaranteed by Remark \ref{rem:ConvergenceOnTrees} on the convergence for satisfiable trees. The more intricate case is if $C$ contains $x_i$ positively. Since we are in the free case, it must hold that $C \to x=0$. Therefore using Proposition \ref{1MsgIffUnsatClm} one obtains that $F_C$ is satisfiable (regardless of the assignment of $x_i$), and $\textsf{WP}$ will converge as required (again Remark \ref{rem:ConvergenceOnTrees}).
Now consider a tree hanging on a cycle clause. Namely, $C_{i+1}=(x_i\vee x_{i+2} \vee y)$, where $x_i,x_{i+2}$ are cycle variables, and $(C_{i+1},y)$ is a tree edge. If one of the cycle orientations converged to 0, then $C_{i+1}\to y$ converges to 0, and then Remark \ref{rem:ConvergenceOnTrees} guarantees correct convergence. The same applies to the case where $C_{i+1} \to y$ converges to 1 and $y$ is positive in $C_{i+1}$ (since then we can remove $y$ from the tree, use Remark \ref{rem:ConvergenceOnTrees} for the remaining part, then add back $y$, and set it to TRUE without causing any conflict with the tree assignment, but satisfying $C_{i+1}$ according to the planted assignment).
The delicate case remains when $C_{i+1} \to y$ converges to 1 but $y$'s polarity in $C_{i+1}$ disagrees with $\varphi$, that is, $y$ is negative in $C_{i+1}$. The key observation is that the message $y\to C_{i+1}$ (which is directed towards the cycle) must have converged to a positive value (otherwise, $C_{i+1}\to x_i$ and $C_{i+1}\to x_{i+2}$ would have converged to 0). However this complies with the scenario of Proposition \ref{spinalDecomp}, and again correct convergence is guaranteed. \end{Proof}
In Theorem \ref{ConvergenceThmMed} the unassigned variables are required to induce a ``simple" formula, which is satisfiable in linear time. Observe that the factor graph induced by the UNASSIGNED variables consists of connected components whose structure is a cycle with trees hanging on it, or just a tree. A formula whose factor graph is a tree can be satisfied in linear time by starting with the leaves (which are determined uniquely in case that the leaf is a clause -- namely, a unit clause, or if the leaf is a variable then it appears only in one clause, and can be immediately assigned) and proceeding recursively. Regarding the cycle, consider an arbitrary variable $x$ on the cycle. By assigning $x$ and simplifying accordingly, we remain with a tree. Since there are only two ways to assign $x$, the whole procedures is linear in the size of the connected component. This completes the proof of Theorem \ref{ConvergenceThmMed}.
\subsubsection{Proof of Proposition \ref{FreeCycleNoPureConvergenceClm}} Since the cycle has no pure literal it must be of the following form: $C_1 = (\ell_{x_1} \vee \overline{\ell}_{x_2}), C_2 = (\ell_{x_2} \vee \overline{\ell}_{x_3}), \ldots, C_L = (\ell_{x_L} \vee \overline{\ell}_{x_1})$ (recall the definition of cycles at the beginning of subsection \ref{subsec:cycle}).
Consider one of the directed cycles, say: $x_1 \to C_1 \to x_2 \to \cdots$ and note that when the message $x_i \to C_i$ is updated it obtains the current value of $C_{i-1} \to x_i$ and when the message $C_i \to x_{i+1}$ is updated, it obtains the current value of $x_i \to C_i$.
It thus suffices to show that the process above converges to all $0$ or all $1$ in time polynomial in the cycle length. This we prove in the claim below.
\begin{clm} \label{clm:perm_process} Consider the process $(\Gamma^i : i \geq 0)$ taking values in $\{0,1\}^{L}$. The process is a Markov process started at $\Gamma^0 \in \{0,1\}^{L}$.
Given the state of the process $\Gamma^i$ at step $i$, the distribution of $\Gamma^{i+1}$ at round $i+1$ is defined by picking a permutation $\sigma \in S_L$ uniformly at random and independently. Then $\Gamma^{i+1}$ is defined via the following process: let $\Delta_0 = \Gamma^i$ and for $1 \leq j \leq L$: \begin{itemize} \item Let $\Delta_j$ be obtained from $\Delta_{j-1}$ by setting $\Delta_j(\sigma(j-1)) = \Delta_{j-1}((\sigma(j-1)+1) \mod L)$ and $\Delta_j(r) = \Delta_{j-1}(r)$ for all $r \neq \sigma(j-1)$. \end{itemize} Then set $\Gamma^{i+1} = \Delta_L$.
Let $T$ be the stopping time where the process hits the state all $0$ or all $1$. Then for all $\Gamma^0$: \begin{equation} \label{eq:tail_perm} Pr[T \geq 4 a L^2] \leq L 2^{-a} \end{equation} for every integer $a \geq 1$. \end{clm}
The proof of Claim~\ref{clm:perm_process} is based on the fact that the process defined in this claim is a martingale. This is established in the following two lemmas.
\begin{lem} \label{lem:perm_process_mart} Consider the following variant $\tilde{\Gamma}^i$ of the process $\Gamma^i$ defined in Claim~\ref{clm:perm_process}. In the variant, different intervals of $0/1$ are assigned different colors and the copying procedure is as above. Fix one color and let $X_i$ denote the number of elements of the cycle of that color in
$\tilde{\Gamma}^i$. Then $X_i$ is a martingale with respect to the filtration defined by $\tilde{\Gamma}^i$. \end{lem}
\begin{Proof}
From the definition of the copying process it is easy to see that \[
\mathbb{E}[X_{i+1} | \tilde{\Gamma}^i,\tilde{\Gamma}^{i-1},\ldots] =
\mathbb{E}[X_{i+1} | X_i]. \]
We will show below that $\mathbb{E}[X_{i+1} - X_i | X_i] = 0$ and thus that $X_{i}$ is a martingale with respect to the filtration $\tilde{\Gamma}^i$.
Assume that $X_i = k$. Then w.l.o.g. we may assume that
the configuration $\tilde{\Gamma}^i$ consists of an interval of $1$'s of length $k$ and an interval of $0$'s of length $L-k$. Indeed if the configuration consists of a number of intervals of length $Y_{i,1},\ldots,Y_{i,r}$ where $X_i = \sum_{t} Y_{i,t}$ then we may think of the different sub-intervals as having different colors. Then proving that each of the $Y_{i,t}$ is a martingale implies that $X_i$ is a martingale as needed.
We calculate separately the expected shifts in the locations of left end-points of the $0$ and $1$ interval respectively. We denote the two shift random variables by $L_0$ and $L_1$. Clearly $L_0 = I_{0,1} + I_{0,2} + \ldots + I_{0,k}$ where $I_{0,j}$ is the indicator of the event that the $0$ left-end point shifted by at least $j$ and similarly for $L_1$. Note that \[ \mathbb{E}[I_{0,j}] = \frac{1}{j!} - \frac{1}{(L-k+j)!} \] and that \[ \mathbb{E}[I_{1,j}] = \frac{1}{j!} - \frac{1}{(k+j)!}. \] The last equation follows from the fact that in order for the $1$ interval to extend by at least $j$, the $j$ copyings have to take place in the correct order and it is forbidden that they all took place in the right order and the interval has become a $0$ interval. The previous equation is derived similarly. Thus \[
\mathbb{E}[(X_{i+1} - X_i) | X_i] = \mathbb{E}[L_1] - \mathbb{E}[L_0] = \sum_{j=1}^{L-k} \left(\frac{1}{j!} - \frac{1}{(k+j)!}\right) - \sum_{j=1}^{k} \left(\frac{1}{j!} - \frac{1}{(L-k+j)!}\right) = 0 \] This concludes the proof that $X_i$ is a martingale. The proof follows. \end{Proof}\\\\ The proof of Proposition~\ref{clm:perm_process} follows by a union bound from the following lemma where the union is taken over all intervals.
\begin{lem} \label{lem:perm_process_tail} Consider the process $\tilde{\Gamma^i}$ defined in Lemma~\ref{lem:perm_process_mart}. Fix one interval and let $X_i$ denote its length. Let $T$ be the stopping time where $ X_i$ equals either $0$ or $L$. Then \[ Pr[T \geq 4 a L^2] \leq 2^{-a}. \] \end{lem}
\begin{Proof} In order to bound the hitting probability of $0$ and $L$, we need some bounds on the variance of the martingale differences. In particular, we claim that unless $k=0$ or $k=L$ it holds that \[
\mathbb{E}[(X_{t+1}-X_t)^2 | X_t = k] \geq 1/2. \] If $k=1$ or $k=L-1$ this follows since with probability at least $1/2$ the end of the interval will be hit. Otherwise, it is easy to see that the probability that $X_{t+1}-X_t$ is at least $1$ is at least $1/4$ and similarly the probability that it is at most $-1$ is at least $1/4$. This can be verified by considering the event that one end-point moves by at least $2$ and the other one by at most $1$.
Let $T$ be the stopping time when $X_T$ hits $0$ or $L$. Then by a Wald-type calculation we obtain: \begin{eqnarray*} L^2 &\geq& \mathbb{E}[(X_T-X_0)^2] = \mathbb{E}[ (\sum_{t=1}^{\infty} 1(T \geq t) (X_t-X_{t-1}))^2] \\ &=& \mathbb{E}[\sum_{t,s=1}^{\infty} (X_t-X_{t-1})(X_s-X_{s-1}) 1(T \geq \max\{t,s\})] \\ &=& \mathbb{E}[\sum_{t=1}^{\infty} (X_t-X_{t-1})^2 1(T \geq t)] \geq \frac{1}{2} \sum_{t=1}^{\infty} P[T \geq t] = \mathbb{E}[T]/2, \end{eqnarray*} where the first equality in the last line follows from the fact that if $s < t$ say then: \[ \mathbb{E}[(X_t-X_{t-1})(X_s-X_{s-1}) 1(T \geq \max\{t,s\})] \] \[ = \mathbb{E}[(X_t-X_{t-1})(X_s-X_{s-1}) (1 - 1(T < t))] \] \[
= \mathbb{E}[\mathbb{E}[(X_t - X_{t-1}) (X_s - X_{s-1}) (1 - 1(T < t)) | X_1,\ldots,X_{t-1}]] \] \[ =
\mathbb{E}[(X_s - X_{s-1}) (1 - 1(T < t)) E [X_t - X_{t-1} | X_1,\ldots,X_{t-1}]] = 0. \]
We thus obtain that $\mathbb{E}[T] \leq 2 L^2$. This implies in turn that $Pr[T \geq 4 L^2] \leq 1/2$ and that $P[T \geq 4 a L^2] \leq 2^{-a}$ for $a \geq 1$ since $X_t$ is a Markov chain. The lemma follows.
\end{Proof}
\section{Proof of Proposition \ref{prop_NonCoreFactorGraph}}\label{sec:NoTwoCycleProof} We shall start with the proof of Item $(b)$. Then we shall remark how to adjust this proof to prove $(c)$. Item $(a)$ was proven, though with respect to a slightly different definition of a core, in \cite{flaxman}. The proof of Item $(a)$ is identical to that in \cite{flaxman}, with the adjustments made along the proof of Item $(b)$. Details of the proof of $(a)$ are omitted.
In order to prove Proposition \ref{prop_NonCoreFactorGraph} $(b)$ it suffices to prove that $whp$ there are no two cycles with a simple path (maybe of length 0) connecting the two. To this end, we consider all possible constellations of such prohibited subgraphs and prove the proposition using a union bound over all of them.
Every simple $2k$-cycle in the factor graph consists of $k$ variables, w.l.o.g. say $x_{1},...,x_{k}$ (all different), and $k$ clauses $C_{1},...,C_{k}$, s.t. $x_{i},x_{i+1}\in C_{i}$. The cycle itself consists of $2k$ edges.
As for paths, we have 3 different types of paths: paths connecting a clause in one cycle with a variable in the other (type 1), paths connecting two clauses (type 2), and paths connecting two variables (type 3). Clause-variable paths are always of odd length, and clause-clause, variable-variable paths are always of even length. A $k$-path $P$ consists of $k$ edges. If it is a clause-variable path, it consists of $(k-1)/2$ clauses and the same number of variables. If it is a variable-variable path, it consists of $k/2-1$ variables and $k/2$ clauses and symmetrically for the clause-clause path (we don't take into account the clauses/variables that participate in the cycle, only the ones belonging exclusively to the path).
Our prohibited graphs consist of two cycles $C_1,C_2$ and a simple path $P$ connecting them. We call a graph containing exactly two simple cycles and a simple path connecting them a \emph{bi-cycle}. The path $P$ can be of either one of the three types described above. Similarly to the bi-cycle case, one can have a cycle $C$ and a chord $P$ in it. We call such a cycle a \emph{chord-cycle}. For parameters $i,j,k\in [1,n]$, and $t\in \{1,2,3\}$, we denote by $B_{2i,2j,k,t}$ a bi-cycle consisting of a $2i$-cycle connected by a $k$-path of type $t$ to a $2j$-cycle. Similarly, we denote by $B_{2i,k,t}$ a chord-cycle consisting of a $2i$-cycle with a $k$-path of type $t$ as a chord.
Our goal is then to prove that $whp$ the graph induced by the non-core variables contains no bi-cycles and no chord-cycles.
For a fixed factor graph $H$ we let $F_{H}\subseteq {\cal{F}}$ be a fixed minimal set of clauses inducing $H$, and $V(H)$ be the set of variables in $H$. In order for a fixed graph $H$ to belong to the factor graph induced by the non-core variables it must be that there exists some $F_H$ s.t. $F_{H} \subseteq {\cal{F}}$ and that $V(H)\subseteq \bar{{\cal{H}}}$ (put differently, $V(H)\cap {\cal{H}}=\emptyset$).
Let $B=B_{2i,2j,k,t}$ (or $B=B_{2i,k,t}$ if $B$ is a chord-cycle) be a fixed bi-cycle and $F_B$ a fixed minimal-set of clauses inducing $B$. We start by bounding $Pr[F_{B} \subseteq {\cal{F}} \text{ and } V(B)\cap {\cal{H}} = \emptyset]$ and then use the union bound over all possible bi-cycles (chord-cycles) and inducing minimal sets of clauses. As the two events -- $\{F_B \subseteq {\cal{F}}\}$ and $\{V(B)\cap {\cal{H}} = \emptyset\}$ -- are not independent, the calculations are more involved. Loosely speaking, to circumvent the dependency issue, one needs to defuse the effect that the event $\{F_B \subseteq {\cal{F}}\}$ might have on ${\cal{H}}$. To this end we introduce a set ${\cal{H}}^*$, defined very similarly to ${\cal{H}}$ only ``cushioned" in some sense to overcome the dependency issues (the ``cushioning" depends on $F_B$). This is done using similar techniques to \cite{AlonKahale97,flaxman}.
We start by defining the new set of core variables ${\cal{H}}^*$ (again w.r.t. an ordering $\pi$ of the clause-variable messages and an initial values vector $\alpha$). The changes compared to ${\cal{H}}$ are highlighted in bold. \begin{figure*}\end{figure*}\\ Propositions \ref{StableSuccRate} and \ref{ViolatedSuccRate} could be easily adjusted to accommodate the 6-gap in the new definition in $B_2$ and $B_3$. Therefore Proposition \ref{SizeOfHBarPr} can be safely restated in the context of ${\cal{H}}^*$: \begin{prop}\label{SizeOfHBarTagPr} If both $\alpha$ and $\pi$ are chosen uniformly at random then $whp$ $\#{\cal{H}}^*\geq (1-e^{-\Theta(d)})n$. \end{prop} \begin{prop} Let $b=\#V(B)$, then the set $J$ defined above satisfies $\#J\geq b/4$ \end{prop} \begin{Proof} Observe that if $F_B$ is minimal then $\#F_B\leq b+1$. This is because in every cycle the number of variables equals the number of clauses, and in the worst case, the path contains at most one more clause than the number of variables, and the same goes for the chord-cycle. Now suppose in contradiction that $\#J<b/4$, then there are more than $3b/4$ variables in $V(B)$, each appearing in at least 6 different clauses in $F_B$. Thus, $\#F_B>(6\cdot 3b/4)/3=1.5b\underbrace{>}_{b\geq 3}b+1$ (we divided by three as every clause might have been counted 3 times), contradicting $\#F_B\leq b+1$. \end{Proof}\\ The following proposition ``defuses" the dependency between the event that a bi-cycle (chord-cycle) was included in the graph and the fact that it doesn't intersect the core variables. In the following proposition we fix an arbitrary $\pi$ and $\alpha$ in the definition of ${\cal{H}}^*$, therefore the probability is taken only over the randomness in the choice of ${\cal{F}}$. 
\begin{prop}\label{CycleProbProp}$Pr[F_{B} \subseteq {\cal{F}} \text{ and } V(B) \cap {\cal{H}} = \emptyset]\leq Pr[F_{B} \subseteq {\cal{F}}]\cdot Pr[J \cap {\cal{H}}^* = \emptyset].$ \end{prop} \noindent Before proving Proposition \ref{CycleProbProp}, we establish the following fact. \begin{lem}\label{BasicIncLemma} For every bi-cycle (chord-cycle) $B$ and every minimal inducing set $F_{B}$, ${\cal{H}}^*({\cal{F}},\varphi,\alpha,\pi) \subseteq {\cal{H}}({\cal{F}}\cup F_{B},\varphi,\alpha,\pi)$. \end{lem} \begin{Proof} The lemma is proved using induction on $i$ ($i$ being the iteration counter in the construction of ${\cal{H}}$). For the base case $H'_{0}({\cal{F}}) \subseteq H_{0}({\cal{F}} \cup F_B)$, since every variable in $H'_{0}({\cal{F}})$ appears in at most 6 clauses in $F_B$ it holds that $A_i({\cal{F}}\cup F_B) \subseteq B_i ({\cal{F}})$, $i=2,3$. $A_1({\cal{F}}\cup F_B) \subseteq B_1({\cal{F}})$ holds at any rate as more clauses can only increase the support, and the set $J$ was not even considered for $H_0$. Suppose now that $H_{i}'({\cal{F}}) \subseteq H_{i}({\cal{F}} \cup F_{B})$, and prove the lemma holds for iteration $i+1$. If $x \in H'_{i+1}({\cal{F}})$ then $x$ supports at least $d/3$ clauses in which all variables are in $H'_{i}({\cal{F}})$. Since $H'_{i}({\cal{F}}) \subseteq H_{i}({\cal{F}} \cup F_B)$, then $x$ supports at least this number of clauses with only variables of $H_i({\cal{F}} \cup F_{B})$. Also, $x$ appears in at most $d/30-6$ clauses with some variable outside of $H'_{i}({\cal{F}})$, again since $H'_{i}({\cal{F}}) \subseteq H_{i}({\cal{F}} \cup F_B)$ and $F_{B}$ contains at most 6 clauses containing $x$, $x$ will appear in no more than $d/30$ clauses each containing some variable not in $H_{i}({\cal{F}} \cup F_B)$. We conclude then that $x \in H_{i+1}({\cal{F}} \cup F_B)$. \end{Proof}\\ This lemma clarifies the motivation for defining ${\cal{H}}^*$. 
It is not necessarily true that ${\cal{H}}({\cal{F}})\subseteq {\cal{H}}({\cal{F}} \cup F_{B})$. For example, a variable which appears in ${\cal{H}}({\cal{F}})$ could disappear from ${\cal{H}}({\cal{F}} \cup F_{B})$ since the clauses in $F_{B}$ make it unstable. Loosely speaking, ${\cal{H}}^*$ is cushioned enough to prevent such a thing from happening.\\\\
\begin{Proof}(Proposition \ref{CycleProbProp}) $$Pr[F_B \subseteq {\cal{F}} \text{ and } V(B) \cap {\cal{H}} = \emptyset] \leq Pr[F_B \subseteq {\cal{F}} \text{ and } J \cap {\cal{H}} = \emptyset]
= Pr[J \cap {\cal{H}} = \emptyset | F_B \subseteq {\cal{F}}] Pr[F_B \subseteq {\cal{F}}].$$ Therefore, it suffices to prove
$$Pr[J \cap {\cal{H}} = \emptyset | F_B \subseteq {\cal{F}}] \leq Pr[J \cap {\cal{H}}^* = \emptyset].$$ $$Pr[J \cap {\cal{H}}^* = \emptyset] = \sum_{F:J \cap {\cal{H}}^*(F) = \emptyset}Pr[{\cal{F}}=F]\underbrace{\geq }_{Lemma~\ref{BasicIncLemma}}\sum_{F:J \cap {\cal{H}}(F\cup F_B) = \emptyset}Pr[{\cal{F}}=F]$$ Break each set of clauses $F$ into $F'=F \setminus F_B$ and $F''=F \cap F_B$, and the latter equals $$\sum_{F':F' \cap F_B=\emptyset,J \cap {\cal{H}}(F'\cup F_B)=\emptyset} \sum_{F'':F''\subseteq F_B} Pr[{\cal{F}}\setminus F_B=F' \text{ and } {\cal{F}} \cap F_B = F'']$$ Since the two sets of clauses, ${\cal{F}}\setminus F_B$, and ${\cal{F}} \cap F_B$, are disjoint, and clauses are chosen independently, the last expression equals, \begin{align*} &\sum_{F':F' \cap F_B=\emptyset,J \cap {\cal{H}}(F'\cup F_B)=\emptyset}\sum_{F'':F''\subseteq F_B} Pr[{\cal{F}}\setminus F_B=F']Pr[{\cal{F}} \cap F_B = F'']= \\ & \sum_{F':F' \cap F_B=\emptyset,J \cap {\cal{H}}(F'\cup F_B)=\emptyset} Pr[{\cal{F}}\setminus F_B=F'] \underbrace{\sum_{F'':F''\subseteq F_B} Pr[{\cal{F}} \cap F_B = F'']}_{1} = \\ & \sum_{F':F' \cap F_B=\emptyset,J \cap {\cal{H}}(F'\cup F_B)=\emptyset}Pr[{\cal{F}}\setminus F_B = F'] \end{align*}
Since $({\cal{F}} \setminus F_B) \cap F_B =\emptyset$, and clauses are chosen independently, the event $\{F_B\subseteq {\cal{F}}\}$ is independent of the event $\{{\cal{F}}\setminus F_B = F'\}$. Therefore, the latter expression can be rewritten as $$\sum_{F':F' \cap F_B=\emptyset,J \cap {\cal{H}}(F'\cup F_B)=\emptyset}Pr[{\cal{F}}\setminus F_B = F'|F_B \subseteq {\cal{F}}]=
Pr[J \cap {\cal{H}} = \emptyset|F_B \subseteq {\cal{F}}].$$ \end{Proof}\\
\begin{lem}\label{lem_chordCycleProbCor} Let $B=B_{2i,k,t}$ be a chord-cycle, then
$Pr[V(B) \cap {\cal{H}}^* = \emptyset | \# {\cal{H}}^* = (1-\lambda) n]\leq p(i,k)$ where:\begin{enumerate}
\item $p(i,k)\leq \lambda^{(i+\frac{k}{2}-1)/4}$ if $B$
consists of a $2i$-cycle and a variable-variable $k$-path as a chord.
\item $p(i,k)\leq \lambda^{(i+\frac{k}{2})/4}$ if $B$
consists of $2i$-cycle and a clause-clause $k$-path as a chord.
\item $p(i,k)\leq \lambda^{(i+\frac{k-1}{2})/4}$ if $B$
consists of $2i$-cycle and a variable-clause $k$-path as a chord. \end{enumerate} \end{lem}
\begin{Proof} In $(a)$, we have $i+\frac{k}{2}-1$ variables and $i+\frac{k}{2}$ clauses. To bound the event $\{J \cap {\cal{H}}^* = \emptyset\}$, given the size of ${\cal{H}}^*$, observe that $F_B$ is fixed in the context of this event, and there is no pre-knowledge whether $F_B$ is included in ${\cal{F}}$ or not. Therefore, $J$ can be treated as a fixed set of variables, thus the choice of ${\cal{H}}^*$ is uniformly distributed over $J$. Recalling that $\#J \geq (i+\frac{k}{2}-1)/4$, it follows that \begin{align*}
Pr[J \cap {\cal{H}}^* = \emptyset | \# {\cal{H}}^* = (1-\lambda) n] \leq \frac{\binom{n-\#{\cal{H}}^*}{\#J}}{\binom{n}{\#J}}=\frac{\binom{\lambda n}{(i+\frac{k}{2}-1)/4}}{\binom{n}{(i+\frac{k}{2}-1)/4}} \leq \lambda^{(i+\frac{k}{2}-1)/4}. \end{align*} The last inequality follows from standard bounds on the binomial coefficients. This proves $(a)$. In the same manner items $b,c$ are proven (just counting how many variables and clauses $B$ contains, depending on the type of its path). \end{Proof}
\begin{lem}\label{lem_BiCycleProbCor} Let $B=B_{2i,2j,k,t}$ be a bi-cycle, then
$Pr[V(B) \cap {\cal{H}}^* = \emptyset | \# {\cal{H}}^* = (1-\lambda) n]\leq p(i,j,k)$ where:\begin{enumerate}
\item $p(i,j,k)\leq \lambda^{(i+j+\frac{k}{2}-1)/4}$ if $B$
consists of a $2i$,$2j$-cycles and a variable-variable $k$-path.
\item $p(i,j,k)\leq \lambda^{(i+j+\frac{k}{2})/4}$ if $B$
consists of $2i$,$2j$-cycles and a clause-clause $k$-path.
\item $p(i,j,k)\leq \lambda^{(i+j+\frac{k-1}{2})/4}$ if $B$
consists of $2i$,$2j$-cycles and a variable-clause $k$-path. \end{enumerate} \end{lem} \noindent Lemma \ref{lem_BiCycleProbCor} is proven in a similar way to Lemma \ref{lem_chordCycleProbCor}.
To complete the proof of Proposition \ref{prop_NonCoreFactorGraph}, we use the union bound over all possible bi/chord-cycles. We present the proof for the bi-cycle case with a variable-variable path; the proof for all other cases is identical. $s=s_{i,j,k}=i+j+\frac{k}{2}-1$ (namely, $\#V(B)=s$ and $\#F_B=s+1$). We also let $p_\lambda=Pr[\#{\cal{H}}^*=(1-\lambda) n]$. The probability of $B$ is then at most \begin{align*} &\sum_{\lambda=0}^{n} p_\lambda\cdot \sum_{i,j,\frac{k}{2}=1}^{n}\binom{n}{s_{i,j,k}}\cdot (s_{i,j,k})! \cdot (7n)^{s_{i,j,k}+1}\cdot \left(\frac{d}{n^2}\right)^{s_{i,j,k}+1} \cdot \lambda^{s_{i,j,k}/4} \leq\\& \sum_{\lambda=0}^{n} p_\lambda\cdot\sum_{i,j,\frac{k}{2}=1}^{n} 7d\cdot \left(\frac{7en}{s}\right)^{s} \cdot s^{s} \cdot n^{s+1} \cdot \left(\frac{d}{n^2}\right)^{s+1} \cdot \lambda^{s/4} \leq \sum_{\lambda=0}^{n} p_\lambda\cdot\sum_{i,j,\frac{k}{2}=1}^{n}(7e \cdot d \cdot \lambda^{1/4})^{s}\cdot \frac{7d}{n} . \end{align*} Let us now break the first sum according to the different values of $\lambda$. Proposition \ref{SizeOfHBarPr} implies that $p_\lambda < n^{-5}$ for all $\lambda > d^{-8}$. Therefore the contribution of this part to the (double) summation is $O(1/n)$. For $\lambda < d^{-8}$, the latter simplifies to
\begin{equation}\label{eq:lambda}\sum_{\lambda=0}^{n/d^8}p_\lambda\sum_{i,j,\frac{k}{2}=1}^{n}\left(\frac{1}{2}\right)^{s}\cdot \frac{7d}{n}. \end{equation} Finally observe that $$\sum_{i,j,\frac{k}{2}=1}^{n}\left(\frac{1}{2}\right)^{s}\cdot \frac{7d}{n} \leq \sum_{i+j+\frac{k}{2} \leq 4\log n}\frac{7d}{n}+\sum_{i+j+\frac{k}{2}\geq 4\log n} \left(\frac{1}{2}\right)^{s}\leq (4\log n)^3\cdot \frac{7d}{n}+n^3\cdot \frac{1}{n^4}=o(1). $$ Therefore, (\ref{eq:lambda}) sums up to $o(1)$.
\subsection{Proof of Proposition \ref{prop_NonCoreFactorGraph} $(c)$} The proof is basically the same as that of Proposition \ref{prop_NonCoreFactorGraph} $(b)$. One defines the same notion of ``cushioned" core ${\cal{H}}^*$, and proceeds similarly. We therefore reprove only the last part -- the union bound over all possible cycles.
First let us bound the number of cycles of length $k$. There are $\binom{n}{k}$ ways to choose the variables inducing the cycle, and $k!/2$ ways to order them on the cycle. As for the set of clauses that induces the cycle, once the cycle is fixed, we have at most $(7n)^k$ ways of choosing the third variable and setting the polarity in every clause. We also let $p_\lambda=Pr[\#{\cal{H}}^*=(1-\lambda) n]$.
Using the union bound, the probability of a cycle of length at least $k$ in the non-core factor graph is at most \begin{align*} &\sum_{\lambda=0}^{n} p_\lambda\cdot\sum_{t=k}^{n}\binom{n}{t}\cdot t! \cdot (7n)^t\cdot \left(\frac{d}{n^2}\right)^t \cdot \lambda^{t/2} \leq \sum_{\lambda=0}^{n} p_\lambda\cdot\sum_{t=k}^{n} \left(\frac{7en}{t}\right)^t \cdot t^t \cdot n^t \cdot \left(\frac{d}{n^2}\right)^t \cdot \lambda^{t/2} \\& = \sum_{\lambda=0}^{n}p_\lambda \cdot\sum_{t=k}^{ n}(7e \cdot d \cdot \sqrt{\lambda})^t. \end{align*} Let us now break the first sum according to the different values of $\lambda$. Proposition \ref{SizeOfHBarPr} implies that there exists a constant $c$ s.t. $p_\lambda < n^{-3}$ for $\lambda > e^{-cd}$. Therefore the contribution of this part to the (double) summation is $O(1/n)$. For $\lambda \leq e^{-cd}$, $7e \cdot d \cdot \sqrt{e^{-cd}}=e^{-\Theta(d)}$. In this case, the last summation is simply the sum of a decreasing geometric series with quotient $e^{-\Theta(d)}$, which sums up to at most twice the first term, namely $e^{-\Theta(dk)}$. The proposition then follows.
\end{document}
\begin{document}
\title{The Expressive Power of $\dlliterc$}
\begin{abstract} Description logics are knowledge representation formalisms that provide the formal underpinning of the semantic web and in particular of the OWL web ontology language. In this paper we investigate the expressive power of $\g{DL-Lite}_{R,\sqcap}\xspace$, and some of its computational properties. We rely on simulations to characterize the absolute expressive power of $\g{DL-Lite}_{R,\sqcap}\xspace$ as a {\em concept language}, and to show that disjunction is not expressible. We also show that no simulation-based closure property exists for $\g{DL-Lite}_{R,\sqcap}\xspace$ assertions. Finally, we show that query answering of unions of conjunctive queries is \textsc{NP}\xspace-complete. \end{abstract}
\section{Introduction}\label{intro}
Description logics (DLs) are knowledge representation formalisms that provide the formal underpinning of the semantic web and in particular of the OWL web ontology language\footnote{http://www.w3.org/TR/owl-features/}. In this paper we are interested in investigating the expressive power of the DL known as \g{DL-Lite}$_{R,\sqcap}$ \cite{Calvanese2006B}. The \g{DL-Lite} family of logics, of which $\g{DL-Lite}_{R,\sqcap}\xspace$ makes part, has been proposed by Calvanese et al. as a foundation of ontology-based data access systems. They are intended \cite{Calvanese2005A,Calvanese2007D} as the least expressive DLs capable of capturing the main features of conceptual modelling languages such as UML\footnote{http://www.omg.org/uml/}. By the expressive power of a DL we understand \textit{(i)} the computational complexity of its reasoning problems and \textit{(ii)} its model-theoretic properties. As most DLs, $\g{DL-Lite}_{R,\sqcap}\xspace$ is contained in $\logic{Fo}^2$, the 2-variable fragment of \logic{Fo} and is therefore decidable \cite{Borgida1996,Hudstadt2004,DLHandbook}. However, its expressive power is still not known completely.
DLs model domains in terms of concepts (representing classes of objects), and binary relations known as roles (representing relations and attributes of objects) \cite{DLHandbook}, all of which are structured into hierarchies by concept and role inclusion assertions. Extensional information (the data), by contrast, is conveyed by membership assertions. This information can be accessed by posing suitable \logic{Fo} formulas, viz., unions of conjunctive queries. This crucial reasoning problem is known as the knowledge base query answering problem.
The main contributions of this paper consist, on the one hand, in determining the (so-called) combined complexity of $\g{DL-Lite}_{R,\sqcap}\xspace$'s query answering problem and, on the other hand, to define what we call $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations. This relation stems from the notion of bisimulations (see e.g. \cite{vanBenthem2006}) for modal logics, known to hold for the DL $\f{ALC}$ \cite{DLHandbook}, that has been proposed \cite{deRijke1997} as a means of characterizing the (absolute) expressivity of arbitrary DLs as concept languages.
The structure of this paper is as follows. Section~\ref{one} recalls \textit{(i)} $\g{DL-Lite}_{R,\sqcap}\xspace$'s syntax and semantics and \textit{(ii)} those of unions of conjunctive queries. In section~\ref{two} we characterize the combined complexity of answering unions of conjunctive queries over $\g{DL-Lite}_{R,\sqcap}\xspace$ knowledge bases. In section~\ref{three} we introduce the notion of $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations and show that a \logic{Fo} formula is equivalent to a $\g{DL-Lite}_{R,\sqcap}\xspace$ concept when and only when it is closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations. In section~\ref{four} we show that no such closure property exists for assertions. Finally, in section~\ref{five} we sum up our conclusions.
\section{Preliminaries}\label{one}
The syntax of $\g{DL-Lite}_{R,\sqcap}\xspace$ is defined by the grammar: \begin{itemize} \item $R ::= P \mid P^-$, \item $D ::= A \mid \exists R \mid D \sqcap D'$ (left concepts), \item $E ::= D \mid \neg A \mid \neg \exists R \mid \exists R . E$ (right concepts), \end{itemize} where $A$ stands for an atomic concept symbol (a unary predicate), $P$ for an atomic role symbol (a binary predicate) and $P^-$ for its inverse.
Concepts combine into {\em concept inclusion assertions} of the form $D \sqsubseteq E$, where $D$ is a left concept, $E$ is a right concept and $\sqsubseteq$ is the {\em subsumption} relation. Roles combine into {\em role inclusion assertions} of the form $R \sqsubseteq R'$. A {\em terminology} $\f{T}$ (TBox) is a set of such assertions. A {\em membership assertion} is an assertion of the form $A(c)$ or $P(c,c')$, where $c,c'$ are object (or individual) constants. We denote by $\f{A}$ any set of membership assertions (ABox). The integer $\card{\f{A}}$ denotes the number of (distinct) tuples occurring among the atoms in $\f{A}$. The integer $\card{\f{T}}$ denotes the number of axioms in the terminology. A {\em knowledge base} is a pair $\tup{\f{T},\f{A}}$.
Let $\e{Dom}$ denote a countable infinite set of constants. The semantics of $\g{DL-Lite}_{R,\sqcap}\xspace$ is based on \logic{Fo}\xspace {\em interpretations} $\f{I} := \tup{\Delta^{\f{I}}, .^{\f{I}}}$, where $\Delta^{\f{I}} \subsetneq \e{Dom}$ is a non-empty {\em domain}. Interpretations map each constant $c$ to itself, each atomic concept $A$ to $A^{\f{I}} \subseteq \Delta^{\f{I}}$ and each atomic role $P$ to $P^{\f{I}} \subseteq \Delta^{\f{I}} \times \Delta^{\f{I}}$ such that the following conditions hold: \begin{itemize} \item $(P^-)^{\f{I}} := \set{\tup{d, e} \in \Delta^{\f{I}} \times \Delta^{\f{I}} \mid \tup{e, d} \in P^{\f{I}}}$, \item $(\exists R)^{\f{I}} := \set{d \in \Delta^{\f{I}} \mid \text{exists } e \in \Delta^{\f{I}} \text{ s.t. } \tup{d, e} \in R^{\f{I}}}$, \item $(D \sqcap D')^{\f{I}} := D^{\f{I}} \cap D'^{\f{I}}$, \item $(\neg A)^{\f{I}} := \Delta^{\f{I}} - A^{\f{I}}$, \item $(\neg \exists R)^{\f{I}} := \Delta^{\f{I}} - (\exists R)^{\f{I}}$, and \item $(\exists R . E)^{\f{I}} := \set{d \in \Delta^{\f{I}} \mid \text{exists } e \in \Delta^{\f{I}} \text{ s.t. } \tup{d, e} \in R^{\f{I}} \text{ and } e \in E^{\f{I}}}$. \end{itemize}
We say that $\f{I}$ {\em models} an assertion $D \sqsubseteq E$ (resp. $R \sqsubseteq R'$), and write $\f{I} \models D \sqsubseteq E$ (resp. $\f{I} \models R \sqsubseteq R'$), whenever $D^{\f{I}} \subseteq E^{\f{I}}$ (resp. $R^{\f{I}} \subseteq R'^{\f{I}}$) and a TBox $\f{T}$, and write $\f{I} \models \f{T}$, whenever it is a model of all of its assertions. We say that it {\em models} a membership assertion $A(c)$ (resp. $R(c,c')$), and write $\f{I} \models A(c)$ (resp. $\f{I} \models R(c,c')$), whenever $c^{\f{I}} \in A^{\f{I}}$ (resp. $\tup{c^{\f{I}},c'^{\f{I}}} \in R^{\f{I}}$) and an ABox $\f{A}$, and write $\f{I} \models \f{A}$, when it {\em models} all of its membership assertions. Finally, we say that it is a {\em model} of a KB $\tup{\f{T}, \f{A}}$, and write $\f{I} \models \tup{\f{T}, \f{A}}$, if it is a model of both $\f{T}$ and $\f{A}$.
The semantics of \logic{Fo} formulas is defined, we recall, in the usual terms of satisfaction w.r.t. interpretations $\f{I}$. Let $\phi$ be a \logic{Fo} formula and let $\Var{\phi}$ denote the set of its variables. An {\em assignment} for $\phi$ relative to $\f{I}$ is a function $v \colon \Var{\phi} \to \Delta^{\f{I}}$, that can be recursively extended in the standard way to complex formulas (see, e.g., \cite{CoriLascar}). It is said to {\em satisfy} an atom $R(x_1,...,x_n)$ w.r.t. $\f{I}$ iff $\tup{v(x_1),...,v(x_n)} \in R^{\f{I}}$. This definition is recursively extended to complex formulas \cite{CoriLascar}. If $v$ satisfies $\phi$ w.r.t. $\f{I}$, we write $\f{I} \models_v \phi$. An interpretation $\f{I}$ is said to be a {\em model} of $\phi$, written $\f{I} \models \phi$, if there exists an assignment $v$ s.t. $\f{I} \models_v \phi$.
A {\em union of conjunctive queries} (UCQ) of arity $n$ is a (positive existential) \logic{Fo} formula of the form $\phi := \psi_{1}(\vec{x},\vec{y}_1) \lor ... \lor \psi_{k}(\vec{x},\vec{y}_k)$ where $\vec{x}$ is a sequence of $n \geq 0$ {\em distinguished variables} and the $\psi_i$s, for $i \in [1,k]$, are conjunctions of atoms. A UCQ is said to be {\em boolean} if $\vec{x}$ is an empty sequence. The integer $\textit{size}(\phi)$ denotes the number of symbols of $\phi$.
Let $\tup{\f{T},\f{A}}$ be a KB and $\phi$ a UCQ of arity $n$. KB $\tup{\f{T},\f{A}}$ is said to {\em entail} $\phi$, written $\tup{\f{T},\f{A}} \models \phi$, iff for all interpretations $\f{I}$, $\f{I} \models \tup{\f{T},\f{A}}$ implies that $\f{I} \models \phi$. The {\em certain answers} of a UCQ $\phi$ over KB $\tup{\f{T},\f{A}}$ are defined as the set $\textit{cert}(\phi,\f{T},\f{A}) := \set{\vec{c} \in \e{Dom}^n \mid \f{T}, \f{A} \models \phi(\vec{c})}$, where $\phi(\vec{c})$ denotes the instantiation of $\vec{x}$ in $\phi$ by a sequence of constants $\vec{c}$. The associated decision problem is known as the KB {\em query answering} problem (\g{QA}) and is defined as follows: \begin{itemize} \item given $\vec{c} \in \e{Dom}^n$, a UCQ $\phi$ of arity $n$ and a KB $\tup{\f{T}, \f{A}}$, \item does $\f{T}, \f{A} \models \phi(\vec{c})$? \end{itemize}
When $\card{T}$ and $\textit{size}(\phi)$ are fixed we speak about the {\em data complexity} of \g{QA}, when only $\textit{size}(\phi)$ about its {\em KB complexity}, when $\card{T}$ and $\card{A}$ are fixed about its {\em query complexity} and finally, when none is fixed, about its {\em combined complexity}. It is known \cite{Calvanese2007C} that $\g{DL-Lite}_{R,\sqcap}\xspace$ is in \textsc{LogSpace}\xspace in data complexity, \textsc{PTime}\xspace-complete in KB complexity and \textsc{NP}\xspace-complete in query complexity, but its combined complexity remains unknown.
\section{Combined Complexity of \g{QA}}\label{two}
A {\em perfect reformulation} is an algorithm that takes as input a DL TBox $\f{T}$ and a UCQ $\phi$ and rewrites $\phi$ w.r.t. $\f{T}$ into a UCQ $\phi_{\f{T}}$ s.t., for every DL ABox $\f{A}$ and every $\vec{c} \in \e{Dom}$ it holds that: $\f{T} , \f{A} \models \phi(\vec{c})$ iff $\f{I}(\f{A}) \models \phi_{\f{T}}(\vec{c})$, where $\f{I}(\f{A})$ denotes the interpretation built out of $\f{A}$ (i.e., $\f{A}$ seen as a \logic{Fo} interpretation).
\begin{proposition} {\bf (Calvanese et al. 2006)} A perfect reformulation exists for $\g{DL-Lite}_{R,\sqcap}\xspace$. \end{proposition}
\begin{theorem} QA for $\g{DL-Lite}_{R,\sqcap}\xspace$ is \textsc{NP}\xspace-complete in combined complexity. \end{theorem}
\proof (\e{Membership}) Let $\tup{\f{T},\f{A}}$ be a KB and let $\phi(\vec{c})$ be the grounding of a UCQ $\phi$. First, consider: $\f{T} , \f{A} \models \phi(\vec{c}).$ We know that $\f{T}$ can be ``compiled'' into $\phi$ by a perfect reformulation, yielding a UCQ $\phi_{\f{T}}(\vec{c}) := \psi^{\f{T}}_1(\vec{c},\vec{y}_1) \lor ... \lor \psi^{\f{T}}_k(\vec{c},\vec{y}_k)$. Guess, therefore, a disjunct $\psi^{\f{T}}_i(\vec{c},\vec{y}_i)$, for some $i \in [1,k]$. This can be done in time constant in $\card{\f{T}}$ and $\textit{size}(\phi)$. Clearly, $\f{T} , \f{A} \models \phi(\vec{c})$ iff $\f{I}(\f{A}) \models_v \psi^{\f{T}}_i(\vec{c},\vec{y_i})$, for some assignment $v$. Guess now an assignment $v \colon \Var{\psi_i} \to \Delta^{\f{I}(\f{A})}$. This can be done in time constant in, ultimately, $\textit{size}(\phi)$. Finally, check in time polynomial in $\card{\f{A}}$ and $\textit{size}(\phi)$ whether $\f{I}(\f{A})\models_{v} \psi_i(\vec{c},\vec{y}_i)$.
(\e{Hardness}) By reduction from the graph homomorphism problem, where, given two graphs $G_1 = \tup{V_1, E_1}$ and $G_2 = \tup{V_2, E_2}$ we ask whether there exists a homomorphism $h$ from $G_1$ to $G_2$. A graph homomorphism, we recall, is a function $h \colon V_1 \to V_2$ s.t. for all $\tup{u,v} \in E_1$, $\tup{h(u),h(v)} \in E_2$. This problem is known to be \textsc{NP}\xspace-complete. We will consider $\g{DL-Lite}_{R,\sqcap}\xspace$ KBs with empty TBoxes. Polynomially encode $G_1$ and $G_2$ as follows: \begin{itemize} \item for each $\langle u, v \rangle \in E_1$, add the fact $R(c_u,c_v)$ to the ABox $\f{A}_{G_1}$, \item for each $\langle u', v' \rangle \in E_2$, add the ground atom $R(c_{u'},c_{v'})$ to the boolean UCQ $\phi_{G_2}$, which is the conjunction of such atoms. \end{itemize}
We now claim that there exists an homomorphism $h$ from graph $G_2$ to graph $G_1$ iff $\f{A}_{G_1} \models \phi_{G_2}$.
Since there is a perfect reformulation for $\g{DL-Lite}_{R,\sqcap}\xspace$, then $\f{A}_{G_1} \models \phi_{G_2}$ iff $\f{I}(\f{A}_{G_1}) \models \phi_{G_2}$. Now, clearly, $\f{I}(\f{A}_{G_1}) = G_1$. Thus, the interpretation function $.^{\f{I}(\f{A}_{G_1})}$ can be seen as an homomorphism mapping $\phi_{G_2}$ to $G_1$. Finally, given that $\phi_{G_2}$ encodes $G_2$, the claim follows. \qed
\section{$\g{DL-Lite}_{R,\sqcap}\xspace$ Simulations}\label{three}
Given two interpretations $\f{I}$ and $\f{J}$, a {\em $\g{DL-Lite}_{R,\sqcap}\xspace$ left} $\f{B}_l$ or {\em right simulation} $\f{B}_r$ is a relation $\f{B}_l, \f{B}_r \subseteq \f{P}(\Delta^{\f{I}}) \times \Delta^{\f{J}}$ s.t., for every $X \subseteq \Delta^{\f{I}}$, every $d' \in \Delta^{\f{J}}$\footnote{Observe that the clause for $D \sqcap D'$ follows implicitly from the first two.}: \begin{itemize} \item if $\tup{X,d'} \in \f{B}_l$ and $X \subseteq A^{\f{I}}$, then $d' \in A^{\f{J}} \, (A)$. \item if $\tup{X,d'} \in \f{B}_l$ and for all $d \in X$ there is some $e \in Y \subseteq \Delta^{\f{I}}$ s.t. $\tup{d,e} \in R^{\f{I}}$, then there exists an $e' \in \Delta^{\f{J}}$ s.t. $\tup{d',e'} \in R^{\f{J}} \, (\exists R)$. \item if $\tup{X,d'} \in \f{B}_r$ and $X \subseteq \neg B^{\f{I}}$, then $d' \not\in B^{\f{J}} \, (\neg B)$. \item if $\tup{X,d'} \in \f{B}_r$ and for all $d \in X$ there exists no $e \in Y \subseteq \Delta^{\f{I}}$ s.t. $\tup{d,e} \in R^{\f{I}}$, then there is no $e' \in \Delta^{\f{J}}$ s.t. $\tup{d',e'} \in R^{\f{J}} \, (\neg \exists R)$. \item if $\tup{X,d'} \in \f{B}_r$ and for all $d \in X$ there exists an $e \in Y \subseteq \Delta^{\f{I}}$ s.t. $\tup{d,e} \in R^{\f{I}}$, then there is an $e' \in \Delta^{\f{J}}$ s.t. $\tup{d',e'} \in R^{\f{J}}$ and $\tup{Y,e'} \in \f{B} \, (\exists R . E)$. \end{itemize}
A {\em $\g{DL-Lite}_{R,\sqcap}\xspace$ simulation} $\f{B}$ is either a left, a right or a combination of both simulations (i.e., their union). If a $\g{DL-Lite}_{R,\sqcap}\xspace$ simulation $\f{B}$ exists among two interpretations ${\f{I}}$ and ${\f{J}}$ we say that they are {\em DL-similar} and write ${\f{I}} \sim_{DL} {\f{J}}$.
We say that a \logic{Fo} formula $\phi$ is {\em closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations} iff for every two interpretations $\f{I}$ and $\f{J}$, if $\f{I} \models \phi$ and $\f{I} \sim_{DL} \f{J}$, then $\f{J} \models \phi$.
We say that a \logic{Fo} formula $\phi$ {\em entails} a $\g{DL-Lite}_{R,\sqcap}\xspace$ concept $C$, written $\phi \models C$, iff for all $\f{I}$, $\f{I} \models \phi$ implies that $C^{\f{I}} \neq \emptyset$, and conversely, that $C$ {\em entails} $\phi$, written $C \models \phi$, whenever, for all $\f{I}$, $C^{\f{I}} \neq \emptyset$ implies $\f{I} \models \phi$. If both entailments hold, we say that they are {\em equivalent}.
\begin{lemma} If a \logic{Fo} formula $\phi$ is closed under \g{DL-Lite} simulations, then it is equivalent to a \g{DL-Lite} right hand or left hand side concept. \end{lemma}
\proof Let $\phi$ be a \logic{Fo} formula closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations. Let $\textit{Con}(\phi)$ denote the set of consequences in $\g{DL-Lite}_{R,\sqcap}\xspace$ of a \logic{Fo} formula $\phi$, i.e., $\textit{Con}(\phi) := \set{C \mid \phi \models C}$. By compactness for DLs \cite{DLHandbook} the set of concepts $\textit{Con}(\phi)$ has a model iff every finite $\Sigma \subseteq \textit{Con}(\phi)$ has a model, whence the concept $C_{\phi} := \bigsqcap \set{C \mid C \in \Sigma}$ should have a model too. We claim that $\phi$ is equivalent to $C_{\phi}$. Clearly, $\phi \models C_{\phi}$. We claim now that \begin{equation}\label{eq:e} C_{\phi} \models \phi. \end{equation} Assume that $C^{\f{I}}_{\phi} \neq \emptyset$, for an arbitrary interpretation $\f{I}$. Then, there exists a $d \in \Delta^{\f{I}}$ s.t. $d \in C^{\f{I}}_{\phi}$. Put now $\Gamma := \set{C \mid d \not\in C^{\f{I}}}$. Then, for every $C \in \Gamma, \phi \not\models C$. Hence for every $C \in \Gamma$ there exists an interpretation ${\f{I}}_C$ s.t. ${\f{I}}_C \models \phi$ and $C^{{\f{I}}_C}=\emptyset$. The idea now is to build an interpretation $\f{J} := \tup{\Delta^{\f{J}}, .^{\f{J}}}$ from the $\f{I}_C$s: \begin{itemize} \item $\Delta^{\f{J}} := \bigcup \set{\Delta^{\f{I}_C} \mid C \in \Gamma}$, \item $.^{\f{J}}$ extends each $.^{\f{I}_C}$, for $C \in \Gamma$. \end{itemize} Define now a \g{DL-Lite} simulation $\f{B} \subseteq \f{P}(\Delta^{\f{J}}) \times \Delta^{\f{I}}$ by putting: \begin{center} \begin{tabular}{c} $\tup{X,d'} \in \f{B}$ iff for every concept $C$, $X \subseteq C^{\f{J}}$ implies $d' \in C^{\f{I}}$. \end{tabular} \end{center} We now claim that $\f{B}$ is a $\g{DL-Lite}_{R,\sqcap}\xspace$ simulation between $\f{J}$ and $\f{I}$ and a fortiori that $\f{J} \sim_{DL} \f{I}$. We prove this by induction on $C$: \begin{itemize} \item Basis: \begin{itemize} \item The property trivially holds for basic concepts. \item $C := \neg A$. 
Let $X \subseteq \neg A^{\f{J}}$, $\tup{X,d'} \in \f{B}$. By definition of $\f{B}$, $d' \in (\neg A)^{\f{I}}$, that is, $d' \in \Delta^{\f{I}} - A^{\f{I}}$. \item $C := \exists R$. Let $\tup{X,d'} \in \f{B}$ and $d \in X$ such that there is some $e \in Y \subseteq \Delta^{\f{J}}$ such that $\tup{d,e} \in R^{\f{J}}$. Now, $X \subseteq (\exists R)^{\f{J}}$, so $d' \in (\exists R)^{\f{I}}$ and hence there is some $e' \in \Delta^{\f{I}}$ such that $\tup{d',e'} \in R^{\f{I}}$. \item $C := \neg \exists R$. This is proven by combining the two previous cases. \end{itemize} \item Inductive step: \begin{itemize} \item $C := \exists R . E$. Let $\tup{X,d'} \in \f{B}$ s.t. exists $e \in Y \subseteq \Delta^{\f{J}}$ and $\tup{d,e} \in R^{\f{J}}$. $X \subseteq (\exists R . E)^{\f{J}}$, therefore, $d' \in (\exists R . E)^{\f{I}}$ by definition and so there is an $e' \in \Delta^{\f{I}}$ such that $\tup{d',e'} \in R^{\f{I}}$ and $e' \in E^{\f{I}}$. Suppose that $Y \subseteq E^{\f{J}}$. By induction hypothesis, $e' \in E^{\f{I}}$. Thus, by definition of $\f{B}$, $\tup{Y,e'} \in \f{B}$. \item $C := D \sqcap D'$ (trivial). \end{itemize} \end{itemize}
Therefore, $\f{J} \sim_{DL} \f{I}$ and since by assumption $\phi$ is closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations, $\f{I} \models \phi$. This means that claim (\ref{eq:e}) holds. \qed
\begin{lemma} If a \logic{Fo} formula $\phi$ is equivalent to a \g{DL-Lite} right hand or left hand side concept, then it is closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations. \end{lemma}
\proof Let $\f{I}$ be s.t. $\f{I} \models \phi$. Let $\f{J}$ be an interpretation DL-similar to $\f{I}$. Let $X \subseteq \Delta^{\f{I}}, d \in X, d' \in \Delta^{\f{J}}, \f{B} \subseteq \f{P}(\Delta^{\f{I}}) \times \Delta^{\f{J}}$ and assume that $\tup{X,d'} \in \f{B}$. We prove now, by induction on $C$, that $C^{\f{J}} \neq \emptyset$: \begin{itemize} \item Basis: \begin{itemize} \item $C := A$. Let $d \in A^{\f{I}}$. Then, $X \subseteq A^{\f{I}}$, whence (by definition) $d' \in A^{\f{J}}$. \item $C := \neg A$ (analogous argument). \item $C := \exists R$. Let $d \in (\exists R)^{\f{I}}$. Then there exists $e \in \Delta^{\f{I}}$ s.t. $\tup{d,e} \in R^{\f{I}}$, whence, by definition of \g{DL-Lite} simulations $\f{B}$, there is an $e' \in \Delta^{\f{J}}$ s.t. $\tup{d',e'} \in R^{\f{J}}$, that is, s.t. $d' \in (\exists R)^{\f{J}}$. \item $C := \neg \exists R$ (analogous argument). \end{itemize} \item Inductive step: \begin{itemize} \item $C := \exists R . E$. Suppose that $d \in (\exists R . E)^{\f{I}}$. Therefore there is some $e \in \Delta^{\f{I}}$ s.t. $e \in E^{\f{I}}$ and $\tup{d,e} \in R^{\f{I}}$. By induction hypothesis this implies that $e' \in E^{\f{J}}$, whence $d' \in (\exists R . E)^{\f{J}}$ as well. \item $C := D \sqcap D'$. By induction hypothesis the property holds for $D$ and $D'$. Now: \begin{equation*} \begin{array}{ccl} d \in (D \sqcap D')^{\f{I}} & \text{ iff } & d \in D^{\f{I}} \text{ and } d \in D'^{\f{I}}\\
& \text{ implies } & d' \in D^{\f{J}} \text{ and } d' \in D'^{\f{J}}\\
& \text{ iff } & d' \in (D \sqcap D')^{\f{J}}. \end{array} \end{equation*} \end{itemize} Therefore, since $\phi$ is equivalent to $C$, $\f{J} \models \phi$, as desired. \qed \end{itemize}
\begin{theorem} A \logic{Fo} formula $\phi$ is equivalent to a $\g{DL-Lite}_{R,\sqcap}\xspace$ right hand or left hand side concept iff it is closed under \g{DL-Lite} simulations. \end{theorem}
\begin{example} The \logic{Fo} formula $\phi := \forall y \, (P(x,y) \to A(y))$ is not equivalent to any $\g{DL-Lite}_{R,\sqcap}\xspace$ concept, because it is not closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations.
\begin{center} \begin{pspicture}(10,4.5) \psline{->}(2.8,2.5)(0.5,3.3) \psline{->}(2.8,2.5)(0.5,1.7) \psellipse(0.5,2.5)(0.4,1.3) \put(0,4.1){$\Delta^{\f{I}}$} \put(9.5,4.1){$\Delta^{\f{J}}$} \put(5,2.5){$\f{B}$} \put(1.3,2.5){$P^{\f{I}}$} \put(7.8,2.5){$P^{\f{J}}$} \put(0.8,3.6){$A^{\f{I}}$} \cput[doubleline=false](3,2.5){$d$} \put(2.8,1.8){$X$} \put(0.3,3.35){$e_1$} \put(0.3,1.5){$e_2$} \put(6.7,2.6){$e'$} \psline[linearc=.25]{<-}(6.8,2.5)(8,3)(9.5,2.5) \put(9.5,2.6){$d'$} \psframe(0,1)(3.5,4) \psframe(6.5,1)(10,4) \psline[linestyle=dashed,linearc=.5](0.8,3.5)(5,3.5)(6.7,2.5) \psline[linestyle=dashed,linearc=.5](3.4,2.5)(5,1.5)(9.5,2.5) \end{pspicture} \end{center} As the reader can see, $\f{B}$ is a $\g{DL-Lite}_{R,\sqcap}\xspace$ simulation where \textit{(i)} $\tup{\set{d},d'} \in \f{B}$, \textit{(ii)} $\tup{\set{e_1,e_2},e'} \in \f{B}$ and \textit{(iii)} $\f{I} \sim_{DL} \f{J}$. Now, clearly, $\f{I} \models_{v[x:=d]} \forall y \, (P(x,y) \to A(y))$, but $\f{J} \not\models_{v'[x:=d']} \forall y \, (P(x,y) \to A(y))$, since $A^{\f{J}} = \emptyset$. \hspace*{\fill}$\clubsuit$ \end{example}
\section{Some Negative Results}\label{four}
\begin{proposition} Disjunction is not expressible in $\g{DL-Lite}_{R,\sqcap}\xspace$. \end{proposition}
\proof $\g{DL-Lite}_{R,\sqcap}\xspace$ is contained in \g{HORN} (the set of \logic{Fo} horn clauses)\cite{Calvanese2007C,Calvanese2007E}, which cannot express disjunctions of the form $\phi := A(c) \lor A'(c')$. Otherwise, let $\f{H} := \set{A(c)}$ and $\f{H'} := \set{A'(c')}$ be two Herbrand models of $\phi$. Clearly, $\f{H}$ and $\f{H'}$ are minimal (w.r.t. set inclusion) models of $\phi$ s.t. $\f{H} \neq \f{H'}$. But this is impossible, since \g{HORN} verifies the least (w.r.t. set inclusion) Herbrand model property \cite{CoriLascar}. \qed
\begin{theorem} There is no relation $\sim$ over interpretations such that, for every \logic{Fo} sentence $\phi$, $\phi$ is equivalent to a $\g{DL-Lite}_{R,\sqcap}\xspace$ assertion iff it is closed under the relation $\sim$. \end{theorem}
\proof Recall that a \logic{Fo} sentence is a \logic{Fo} formula with no free variables. Suppose the contrary and consider the sentence $A(c)$. Let $\f{I}$ and $\f{J}$ be two structures s.t. $\f{I} \sim \f{J}$ and suppose that $\f{I} \models A(c)$. Then, obviously, $\f{J} \models A(c)$ too. But then: \begin{center} \begin{tabular}{l} $\f{I} \models A(c)$ implies $\f{I} \models A(c) \lor A'(c)$, and\\ $\f{J} \models A(c)$ implies $\f{J} \models A(c) \lor A'(c)$. \end{tabular} \end{center} That is, $A(c) \lor A'(c)$ is closed under $\sim$ and is a fortiori equivalent to some $\g{DL-Lite}_{R,\sqcap}\xspace$ assertion. But this is impossible, because disjunction is not expressible in $\g{DL-Lite}_{R,\sqcap}\xspace$. \qed
\section{Conclusions}\label{five}
In this paper we have shown four things: \textit{(i)} Answering UCQs over $\g{DL-Lite}_{R,\sqcap}\xspace$ KBs is \textsc{NP}\xspace-complete in combined complexity. \textit{(ii)} A simulation relation among interpretations, viz., a $\g{DL-Lite}_{R,\sqcap}\xspace$ simulation, can be used to characterize the expressive power of $\g{DL-Lite}_{R,\sqcap}\xspace$ as a concept language. \textit{(iii)} \logic{Fo} formulas that are closed under $\g{DL-Lite}_{R,\sqcap}\xspace$ simulations are equivalent to a (left or right) $\g{DL-Lite}_{R,\sqcap}\xspace$ concept. \textit{(iv)} This closure property holds only w.r.t. concepts, but not w.r.t. assertions. Simulations, in particular, can be generalized, with minor adjustments, to the whole \g{DL-Lite} family of DLs, although, since all of them are in \g{HORN}, no such closure property exists for their assertions.
\end{document} |
\begin{document}
\thispagestyle{empty} \baselineskip=28pt \vskip 5mm \begin{center} {\Large{\bf Partial Tail-Correlation Coefficient \\Applied to Extremal-Network Learning}} \end{center} \baselineskip=12pt \vskip 5mm
\begin{center} \large Yan Gong$^1$, Peng Zhong$^1$, Thomas Opitz$^2$, Rapha\"el Huser$^1$ \end{center}
\footnotetext[1]{ \baselineskip=10pt Statistics Program, Computer, Electrical and Mathematical Sciences and Engineering (CEMSE) Division, King Abdullah University of Science and Technology (KAUST), Thuwal 23955, Saudi Arabia. E-mails: [email protected]; [email protected]; [email protected].} \footnotetext[2]{ \baselineskip=10pt Biostatistics and Spatial Processes, INRAE, Avignon 84914, France. E-mail: [email protected].}
\baselineskip=17pt \vskip 4mm \centerline{\today} \vskip 6mm
\begin{center} {\large{\bf Abstract}} \end{center} We propose a novel extremal dependence measure called the partial tail-correlation coefficient (PTCC), in analogy to the partial correlation coefficient in classical multivariate analysis. The construction of our new coefficient is based on the framework of multivariate regular variation and transformed-linear algebra operations. We show how this coefficient allows identifying pairs of variables that have partially uncorrelated tails given the other variables in a random vector. Unlike other recently introduced conditional independence frameworks for extremes, our approach requires minimal modeling assumptions and can thus be used in exploratory analyses to learn the structure of extremal graphical models. Similarly to traditional Gaussian graphical models where edges correspond to the non-zero entries of the precision matrix, we can exploit classical inference methods for high-dimensional data, such as the graphical LASSO with Laplacian spectral constraints, to efficiently learn the extremal network structure via the PTCC. We apply our new method to study extreme risk networks in two different datasets (extreme river discharges and historical global currency exchange data) and show that we can extract meaningful extremal structures with clear domain-specific interpretations.
\baselineskip=16pt
\par
\noindent {\bf Keywords:} Extreme event; Graphical Lasso; Multivariate regular variation; Network structure learning; Partial tail dependence.\\
\pagenumbering{arabic} \baselineskip=26pt
\section{Introduction}\label{sec:introduction}
Characterizing the extremal dependence of complex stochastic processes (e.g., in spatial, temporal, and spatio-temporal settings) is fundamental for both statistical modeling and applications, such as risk assessment in environmental and financial contexts. Important applications include the modeling of precipitation extremes \citep{Huser.Davison:2014a,opitz2018inla,bacro2020hierarchical,saunders2021regionalisation,richards2022modelling}, heatwaves \citep{winter2016modelling,zhong2022modeling}, and air pollution \citep{Vettori.etal:2019, Vettori.etal:2020}, as well as financial risk assessment \citep{bassamboo2008portfolio,ferro2011extremal,marcon2016bayesian,bekiros2017extreme,yan2019cross,gong2019asymmetric}. Often, applications illustrate the benefits of methodological innovations, such as the application of the extremal dependence measure in \citet{larsson2012extremal}, which is a key ingredient of the present work, in the analysis of financial data.
Models for extremal dependence traditionally rely on asymptotic frameworks, such as max-stable processes for block maxima or $r$-Pareto processes for threshold exceedances of a summary functional of the process over a high threshold. Recently, more advanced models have been proposed to further improve flexibility, especially towards modeling of asymptotically independent data with dependence vanishing at the most extreme levels, such as inverted max-stable processes \citep{wadsworth2012dependence}, max-mixture models \citep{ahmed2020spatial}, random scale mixtures of Gaussian processes \citep{Opitz2016,Huser2017} or of more general processes \citep{wadsworth2017modelling,engelke2019extremal,huser2019modeling}, max-infinitely divisible processes \citep{bopp2021hierarchical}, and conditional spatial extremes models \citep{wadsworth2019higher}; for a comprehensive review, see \citet{huser2022advances}. Specifics of serial extremal dependence have been studied by \citet{davis2013measures}, among others.
In the study of stochastic dependence structures, networks and graphs are natural tools to represent dependence relationships in multivariate data. Conditional independence, sparsity, and parsimonious representations are key concepts in graph-based approaches for random vectors. Recently, graph-based tools have also been developed for extremal dependence, where variants of conditional independence apply to variables not directly connected by the edges of the graph. For example, \citet{huang2019new} provide an exploratory tool, called the \emph{$\chi\text{-}$network,} for modeling extremal dependence, and they use it to analyze maximum precipitation during the hurricane season in the United States (US) Gulf Coast and in surrounding areas. In their approach, however, the $\chi$-network does not remove the effect of confounding variables, so it does not naturally lead to sparse extremal dependence representations. More recently, \citet{engelke2020graphical} introduce a notion of conditional independence adapted to multivariate Pareto distributions arising for limiting multivariate threshold exceedances, and they use it to develop {parametric graphical models for extremes based on the H\"usler--Reiss distribution}. Similarly, \citet{gissibl2018graphical} and \citet{kluppelberg2021estimating} propose max-linear constructions for modeling maxima on tree-like supports, and \citet{tran2021estimating} propose {QTree}, a simple and efficient algorithm to solve the ``Latent River Problem" for the important case of extremes on trees. In the same vein, \citet{engelke2020structure} develop a data-driven methodology for learning the graphical structure in the setting of \citet{engelke2020graphical}, whereas \citet{rottger2021total} further propose H\"usler--Reiss graphical models under the assumption of multivariate total positivity of order two (MTP$_2$), which allows estimating sparse graphical structures. 
Finally, \citet{engelke2021sparse} review the recent developments in sparse representations, dimension reduction approaches, and graphical models for extremes. Overall, existing graphical representations for extremes from the literature often rely on rather stringent asymptotically justified models, sometimes leading to issues when dealing with relatively high-dimensional problems or when specific graph structure assumptions (e.g., trees) are required. A recent exception is \citet{Engelke.etal:2022}, who develop theories and methods for learning extremal graphical structures in high dimensions based on $L_1$ regularized optimization, though their methodology still assumes a parametric extremal dependence structure of H\"usler--Reiss type.
By contrast, rather than restricting ourselves to a strict parametric modeling framework, we adopt a more pragmatic and empirical approach. Specifically, our goal is to extend and enrich existing approaches by defining the new concept of \emph{partial tail correlation} as an extreme-value analog of the notion of partial correlation widely used in classical multivariate analysis, and by introducing a new coefficient that enables estimation of general extremal networks under minimal modeling assumptions. In the same way that correlation does not imply independence in general, our concept of \emph{partial tail-uncorrelatedness} is a weaker assumption than \emph{conditional tail independence}. However, we shall show that it still provides relevant insights into various forms of extremal dependence structures and helps in guiding modeling choices at a data exploratory stage.
As a novel extremal dependence measure, we propose the \emph{partial tail correlation coefficient (PTCC)} as an equivalent of the partial correlation coefficient in the non-extreme setting. In the classical setting, the Pearson correlation coefficient between two random variables can give misleading interpretations when there are confounding variables that influence both variables, whereas the partial correlation coefficient measures the residual degree of association between two variables after the linear effects of a set of other variables have been removed. To compute the partial correlation between two variables of interest, we regress each of these variables onto the set of covariates given by all the other variables in the multivariate random vector, and then compute the correlation between the residuals from the two fitted linear regressions. In the Gaussian setting, a partial correlation of zero is equivalent to conditional independence between two variables \citep{lawrance1976conditional, baba2004partial}, and the elements of the inverse of the covariance matrix (i.e., the \emph{precision matrix}) of the full vector are known to characterize this conditional (in)dependence structure. In this paper, we adopt a similar strategy to define the PTCC, namely by computing a suitable measure of tail dependence between residuals obtained by regressing variables using transformed-linear operations that do not alter tail properties. While classical linear regression only makes sense for Gaussian-like data, such transformed-linear operations can be used for tail regression of multivariate regularly-varying random vectors, which is a fundamental assumption characterizing asymptotically dependent extremes.
To be more precise, we here define the PTCC by building upon the framework of \citet{cooley2019decompositions}, who developed a customized transformed-linear algebra on the positive orthant, preserving multivariate regular variation and thus being well adapted to ``linear'' methods for joint extremes. \citet{cooley2019decompositions} used this framework for principal component analysis of extremes based on decompositions of the so-called tail pairwise dependence matrix (TPDM), which conveniently summarizes information about extremal dependence in random vectors and possesses appealing properties for such decompositions. The TPDM can be thought of as an analogy of the classical covariance matrix but tailored for multivariate extremes. In some follow-up work, \citet{mhatre2020transformed} then developed non-negative regularly varying time series models with autoregressive moving average (ARMA) structure using the transformed-linear operations for time series extremes. For spatial extremes, \citet{fix2021simultaneous} extended the simultaneous autoregressive (SAR) model under the transformed-linear framework and developed an estimation method to minimize the discrepancy between the TPDM of the fitted model and an empirically estimated TPDM. Furthermore, \citet{lee2021transformed} recently introduced transformed-linear prediction methods for extremes. In the aforementioned papers, the TPDM always plays a central role. Similar to covariances, the entries of the TPDM are tail dependence measures giving insights into the direct extremal dependence structure without removing the influence of other confounding variables. However, just as the covariance matrix does not reflect partial correlations, the TPDM does not directly inform us about partial associations among extremes. In this work, we fill this gap with our new proposed PTCC. 
Thanks to its definition in terms of transformed-linear operations, we show that the PTCC inherits several appealing features of the classical partial correlation coefficient. In particular, the PTCC between two components $X_i$ and $X_k$ from a random vector $\bm X$ is such that the $(i,k)$th entry of the inverse TPDM matrix of $\bm X$ equals zero if and only if the corresponding PTCC for these two variables is also equal to zero. In other words, \emph{partial tail-uncorrelatedness} can be conveniently read off from the zero elements of the inverse TPDM, similar to classical Gaussian graphical models. We then exploit this property to define a new class of extremal graphical models based on the PTCC and then use efficient inference methods to learn the extremal network structure from high-dimensional data based on state-of-the-art techniques from graph theory (e.g., the graphical LASSO with or without Laplacian spectral constraints). We note that here, our focus is on studying undirected graph structures, which is different from causal inference, where causal relationships can be encoded using directed graph edges.
The remainder of this article is organized as follows. In Section~\ref{sec:ptcc}, we first review the necessary background on multivariate regular variation and transformed-linear algebra, as introduced in \citet{cooley2019decompositions}. Then, we define the new PTCC and the related notion of partial tail-uncorrelatedness.
In Section~\ref{sec:extnet}, we present methods for learning general extremal network structures from the PTCC in a high-dimensional data setting, and we discuss two particularly appealing approaches, namely the graphical LASSO and Laplacian spectral constraint-based methods. Section~\ref{sec:simulation} presents a simulation study for general structured undirected graphs using the above two inference methods. In Section~\ref{sec:applications}, we apply these new tools to explore the risk networks formed by river discharges observed at a collection of monitoring stations in the upper Danube basin, and by historical global currency exchange rate data from different historical periods, covering different economic cycles, the COVID-19 pandemic, and the 2022 military conflict in Ukraine.
\section{Transformed-linear algebra for multivariate extremes}\label{sec:ptcc}
Before introducing the partial tail correlation coefficient (PTCC) and the related notion of partial tail-uncorrelatedness, we first briefly review the multivariate regular variation framework, which is our main assumption for defining the PTCC, and we also summarize the foundations of transformed-linear algebra.
\subsection{Regular variation framework and transformed linear algebra}\label{sec:regvar}
A random vector is multivariate regularly varying \citep{resnick2007heavy} (i.e., jointly heavy-tailed) if its joint tail decays like a power function. Precisely, we say that a $p$-dimensional random vector $\boldsymbol X\in \mathbb{R}_+^p = [0,\infty)^p$ with $p\in\mathbb{N}$ is \emph{regularly varying} if there exists a sequence $b_n \to\infty$ such that \begin{equation}\label{eq:mrv} n\,\pr(b_n^{-1} \boldsymbol X\in\cdot) \xrightarrow{v} \nu_{\boldsymbol X}(\cdot),\quad n\to \infty, \end{equation}
where $\xrightarrow{v}$ denotes vague convergence to the non-null limit measure $\nu_{\boldsymbol X}$, a Radon measure defined on the space $[0,\infty]^p\setminus \{0\}$. This measure has the scaling property $r^\alpha\nu_{\boldsymbol X}(rB)=\nu_{\boldsymbol X}(B)$ for $r>0$ and Borel sets $B \subset [0,\infty]^p\setminus \{0\}$, where $\alpha>0$ controls the tail decay (with $1/\alpha$ commonly called the tail index). For this reason, the measure can be further decomposed into a radial measure and an angular measure $H_{\boldsymbol X}$ on the unit sphere $\mathbb{S}_{p-1}^+ = \{\boldsymbol w \in \mathbb{R}_+^p: ||\boldsymbol w||_2 = 1\}$, such that $\nu_{\boldsymbol X}(\{\boldsymbol x \in [0,\infty]^p\setminus \{0\}: \|\boldsymbol x\|_2 \geq r, \ \boldsymbol x/\|\boldsymbol x\|_2 \in B_H\})=r^{-\alpha}\times H_{\boldsymbol X}(B_H)$ for $r>0$ and Borel subsets $B_H$ of $\mathbb{S}_{p-1}^+$. The normalizing sequence $b_n$ is not uniquely determined but must satisfy $b_n = L(n)n^{1/\alpha}$, where $L(n)$ is a slowly varying function (at infinity), i.e., $L(n)>0$ and $L(rn)/L(n)\rightarrow 1$ for any $r>0$, as $n\rightarrow\infty$. We use the short-hand notation $\boldsymbol X \in$ RV$_+^{p}(\alpha)$ for a regularly varying vector $\boldsymbol X$ with tail index $1/\alpha$.
\citet{cooley2019decompositions} introduced the \emph{transformed-linear algebra} framework to construct an inner product space on an open set (the so-called \emph{target space}) via a suitable transformation, where the distribution of the random vector $\bm X$ has support within this set. Our use will mainly concern transformation towards the target space $\mathbb{R}_+^p$ from the space $\mathbb{R}^p$, but we first present the general approach. Let $t$ be a bijective transformation from $\mathbb{R}$ onto some open set $\mathbb{X}\subset\mathbb{R}$, and let $t^{-1}$ be its inverse. For a $p$-dimensional vector $\boldsymbol y \in \mathbb{R}^p$, we define $\boldsymbol x = t(\boldsymbol y)\in \mathbb{X}^p$ componentwise. Then, arithmetic operations among elements of the target space are carried out in the space $\mathbb{R}^p$ before transforming back to the target space. We define vector addition in $\mathbb{X}^p$ as $\boldsymbol x_1\oplus \boldsymbol x_2 = t\{t^{-1}(\boldsymbol x_1) + t^{-1}(\boldsymbol x_2)\}$, and scalar multiplication with a factor $a\in\mathbb{R}$ as $a\circ \boldsymbol x = t\{at^{-1}(\boldsymbol x)\}.$ The additive identity in $\mathbb{X}^p$ is set to $\boldsymbol 0_{\mathbb{X}^p}=t(\boldsymbol 0)$, and the additive inverse of $\boldsymbol x\in\mathbb{X}^p$ is given as $\ominus \boldsymbol x=t\{-t^{-1}(\boldsymbol x)\}$. A valid inner product between two elements $\boldsymbol x_1=(x_{1,1},\ldots,x_{1,p})^T,\boldsymbol x_2=(x_{2,1},\ldots,x_{2,p})^T\in\mathbb{X}^p$ from the target space is then obtained by applying the usual scalar product in $\mathbb{R}^p$, i.e., we set $\langle \boldsymbol x_1,\boldsymbol x_2\rangle =\sum_{j=1}^p t^{-1}(x_{1,j})t^{-1}(x_{2,j})$. 
To obtain an inner product space on the positive orthant for which arithmetic operations preserve multivariate regular variation, thus having a negligible effect on large values, we follow \citet{cooley2019decompositions} and define the specific transformation $t\colon \mathbb{R}\to (0, \infty)$ given by $$ t(y) = \log\{1+\exp(y)\}, $$ though there are other possibilities. We have $y/t(y)\rightarrow 1$ as $y\rightarrow \infty$, such that the upper tail behavior of a random vector $\boldsymbol X=t(\boldsymbol Y)$ is preserved through $t$. For lower tails, we have $\exp(y)/t(y)\rightarrow 1$ as $y\rightarrow -\infty$. The inverse transformation is $t^{-1}(x) = \log(\exp(x)-1)$, $x>0$. Algebraic operations done in the vector space induced by the above transformation $t$ are commonly called \emph{transformed-linear operations}, and we can exploit this framework to extend classical linear algebra methods (e.g., principal component analysis, etc.) to the multivariate extremes setting, where heavy-tailed vectors and models are often conveniently expressed on the positive orthant, $\mathbb R^p_+$. We note that our main assumption, multivariate regular variation, implies asymptotic tail dependence (as well as homogeneity of the limit measure in \eqref{eq:mrv}), but it does not impose further parametric structural assumptions such as with Pareto models of H\"usler--Reiss type.
\subsection{Inner product space of regularly varying random variables}
With transformed-linear operations, we can use a vector of independent and identically distributed (i.i.d.) random variables to construct new regularly varying random vectors on the positive orthant that possess tail dependence. Suppose that $\boldsymbol Z = (Z_1,\ldots,Z_q)^T \geq 0$ is a vector of $q\in\mathbb{N}$ i.i.d.\ regularly varying random variables with tail index $1/\alpha$, such that there exists a sequence $\{b_n\}$ that yields $$ n\,\pr(Z_j>b_n z)\to z^{-\alpha}, \quad n\,\pr\{Z_j\leq \exp(-k b_n)\}\to 0, \quad k>0, \quad j=1,\ldots,q, $$ where the first condition is equivalent to regular variation \eqref{eq:mrv} in dimension $p=1$. The random vector $\bm Z$ with independent components has a limit measure of multivariate regular variation characterized by $\nu_{\bm Z}\left\{[\bm 0, \bm z]^C\right\}=\sum_{j=1}^q z_j^{-\alpha}$ for $\bm z=(z_1,\ldots,z_q)^T > \bm 0$. Then, we can construct new regularly varying $p$-dimensional random vectors $\boldsymbol X = (X_1, \ldots,X_p)^T$ by exploiting transformed-linear operations, via a matrix product with a deterministic matrix $A = (\boldsymbol a_1, \ldots, \boldsymbol a_q) \in \mathbb{R}_+^{p\times q},$ with columns $\boldsymbol a_j\in \mathbb{R}_+^p$, as follows: \begin{equation}\label{eq:transformed-linear construction}
\boldsymbol X = \overset{q}{\underset{j=1}{\oplus}}\boldsymbol a_j\circ Z_j. \end{equation} We write $\boldsymbol X = A \circ \boldsymbol Z \in$ RV$_+^{p}(\alpha)$. This construction ensures that the multivariate regular variation property is preserved with the same index $\alpha$ \citep[Corollary 1,][]{cooley2019decompositions}. Furthermore, we require $A$ to have full row rank. Based on the construction \eqref{eq:transformed-linear construction}, it is possible to define a (different) inner product space spanned by the random variables obtained by transformed-linear operations on $\boldsymbol Z$, where some but not all of the components of $\boldsymbol a_j$ are further allowed to be non-positive.
Following \citet{lee2021transformed}, an inner product $\langle X_i,X_k\rangle$ on the space spanned by all possible transformed-linear combinations of the elements of the random vector $\boldsymbol X$ constructed as in \eqref{eq:transformed-linear construction} may be defined as follows:
\[\langle X_i, X_k\rangle = \sum_{j=1}^q a_{ij} a_{kj},\]
where $a_{ij}$ refers to the entry in row $i$ of the column $j$ of the matrix $A$ for $i\in\{1,\ldots,p\}$ and $j\in\{1,\ldots,q\}$, and the corresponding norm becomes $||X|| = \sqrt{\langle X, X\rangle }$. The metric induced by the inner product is $d(X_i, X_k) = ||X_i\ominus X_k||=[\sum_{j=1}^q(a_{ij}-a_{kj})^2]^{1/2},$ for $i,k = 1,\ldots, p.$
\subsection{Generality of the framework}
In practice, given a random vector $\bm X$ for which we assume $\boldsymbol X \in$ RV$_+^{p}(\alpha)$, we will further assume that it allows for a stochastic representation as in \eqref{eq:transformed-linear construction}. Since the constructions of type \eqref{eq:transformed-linear construction} form a dense subclass of the class of multivariate regularly varying vectors (if $q$ is not fixed but allowed to tend to infinity, i.e., $q\rightarrow\infty$), this assumption is not restrictive; see \citet{fougeres2013dense} and \citet{cooley2019decompositions}.
Thanks to their flexibility, representations akin to the transformed-linear random vectors in \eqref{eq:transformed-linear construction} have recently found widespread interest in statistical learning for extremes. The fundamental model structure used in the causal discovery framework for extremes developed by \citet{gnecco2021} is essentially based on a variant of \eqref{eq:transformed-linear construction}. In the setting of max-linear models, in particular the graphical models of \citet{gissibl2018graphical}, we can use \eqref{eq:transformed-linear construction} to construct random vectors $\boldsymbol X$ possessing the same limit measure $\nu_{\boldsymbol X}$ as the max-linear vectors. Finally, low-dimensional representations of extremal dependence in random vectors obtained through variants of the $k$-means algorithm can be shown to be equivalent to the extremal dependence induced by construction \eqref{eq:transformed-linear construction}; see \citet{janssen2020}.
\subsection{Tail pairwise dependence matrix}
The tail pairwise dependence matrix \citep[TPDM,][]{cooley2019decompositions} is defined to summarize the pairwise extremal dependence of a regularly varying random vector using the second-order properties of its angular measure. Let $\alpha = 2$, which ensures desirable properties; in practice, this condition can be ensured through appropriate marginal pre-transformation of data \citep{cooley2019decompositions}. Then, the TPDM $\Sigma$ of $\boldsymbol X\in$ RV$_+^{p}(2)$ is defined as follows: $$ \Sigma=(\sigma_{ik})_{i,k=1,\ldots,p},\qquad \text{with}\qquad \sigma_{{ik}} := \int_{\mathbb{S}_{p-1}^+}w_i w_k\text{d}H_{\boldsymbol X}(\boldsymbol w), $$
where $H_{\boldsymbol X}$ is the angular measure on $\mathbb{S}_{p-1}^+ = \{\boldsymbol w \in \mathbb{R}_+^p: ||\boldsymbol w||_2 = 1\}$ as introduced in Section~\ref{sec:regvar}. The matrix $\Sigma$ is an extreme-value analog of the covariance matrix, and it has similar useful properties. It is positive semi-definite and completely positive, i.e., there exists a finite $p\times q$ matrix $A$ with nonnegative entries such that the TPDM can be factorized as $\Sigma=AA^T$ \citep[][Proposition~5]{cooley2019decompositions}. The matrix $A$ is not unique, in particular if we do not impose nonnegative entries.
Specifically, for random vectors $\bm X$ obtained by the transformed-linear construction \eqref{eq:transformed-linear construction}, the entries of $\Sigma$ correspond to the values of the inner product $\sigma_{ik}=\langle X_i,X_k\rangle $. In the following, we further assume that $\Sigma$ is positive definite, which guarantees the existence of the inverse matrix of the TPDM.
We emphasize that the special case where $\sigma_{ik}=0$ is equivalent to asymptotic tail independence of the components $X_i$ and $X_k$ \citep[see][]{cooley2019decompositions}, meaning that the conditional exceedance probability $\mathrm{Pr}(F_{X_i}(X_i)>u \mid F_{X_k}(X_k)>u)$ tends to $0$ as $u\to1$ \citep{sibuya1960bivariate, ledford1996statistics}.
By exploiting the property that the TPDM is completely positive, we can construct new transformed-linear random vectors that have the same TPDM as a given random vector $\boldsymbol X$. Since we can always factorize the TPDM as $\Sigma=AA^T$ for some matrix $A$ of dimension $p\times q$, we can then use the construction \eqref{eq:transformed-linear construction} by multiplying $A$ with a (new) random vector $\boldsymbol Z\in \text{RV}_+^{q}(2)$ of independent regularly varying random variables, and the resulting vector will still have TPDM $\Sigma$. It is also worth noting that as $q\to\infty$, the angular measure of the new random vector can be arbitrarily close to that of $\boldsymbol X$ thanks to the denseness property of discrete angular measures. While $A$ is not unique, the inner product depends only on the entries of the matrix $\Sigma = AA^T$, so the specific choice of $A$ does not matter.
An estimator of the TPDM was proposed by \citet[][Section~7.1]{cooley2019decompositions}. For an i.i.d.\ sequence of vectors $\boldsymbol x_t$, $t=1,\ldots,n_{\text{samp}}$, i.e., independent realizations from a random vector $\boldsymbol X\in$ RV$_+^{p}(2)$, define
\begin{equation}\label{eq:estimator} \hat{\sigma}_{{ik}} = \hat{m}\int_{\mathbb{S}_{p-1}^+}w_i w_k\text{d}\hat{N}_{\boldsymbol X}(\boldsymbol w)=\hat{m}\, n^{-1}_{\text{ext}}\sum^{n_{\text{samp}}}_{t=1}w_{ti}w_{tk}\mathbbm{1}(r_t>r_0),
\end{equation}
where $r_t = ||\boldsymbol x_t||_2$, $\boldsymbol w_t=(w_{t1},\ldots,w_{tp})^T=\boldsymbol x_t/r_t$, $r_0$ is a high threshold for the radial component, $n_{\text{ext}}=\sum_{t=1}^{n_{\text{samp}}}\mathbbm{1}(r_t>r_0)$ refers to the number of threshold exceedances, and the probability measure $N_{\boldsymbol X}(\cdot)=m^{-1}H_{\boldsymbol X}(\cdot)$ (with $m = H_{\boldsymbol X}(\mathbb{S}_{p-1}^+)$) is obtained by normalizing $H_{\boldsymbol X}$. Moreover, $\hat{m}$ denotes an estimate of $H_{\boldsymbol X}(\mathbb{S}_{p-1}^+)$, and $\hat{N}_{\boldsymbol X}$ is the empirical counterpart of $N_{\boldsymbol X}$. We note that when the data are preprocessed to have a common unit scale, we can set $m=p$ and there is no need to estimate the normalizing factor. The estimator \eqref{eq:estimator} was discussed by \citet{larsson2012extremal} in the bivariate case.
\subsection{Partial tail correlation coefficient}
In this section, we introduce our new measure, the partial tail correlation coefficient (PTCC), which is analogous to the partial correlation coefficient but tailored to heavy-tailed random vectors.
Let $\boldsymbol X = A \circ \boldsymbol Z \in$ RV$_+^{p}(\alpha)$ be a $p$-dimensional vector constructed as in \eqref{eq:transformed-linear construction}, with TPDM $\Sigma$. We write $\boldsymbol X_{ik}=(X_i,X_k)^T$; $\boldsymbol X_{{\rm rest}}$ for the $(p-2)$-dimensional random vector obtained by removing the two components $X_i$ and $X_k$ from $\boldsymbol X$; $A_{ik}$ for the matrix comprising the $i$-th and $k$-th rows of $A$; and $A_{{\rm rest}}$ for the matrix $A$ without its $i$-th and $k$-th rows. Moreover, we define the $p$-dimensional random vector $\boldsymbol X'$ by re-ordering the components of $\boldsymbol X$ as $\boldsymbol X' = (\boldsymbol X_{ik}^T, \boldsymbol X_{{\rm rest}}^T)^T= (A_{ik}^T, A_{{\rm rest}}^T)^T\circ \boldsymbol Z.$ It is straightforward to show that the best transformed-linear predictor of $\boldsymbol X_{ik}$ given $\boldsymbol X_{{\rm rest}}$ can be obtained as $\hat{\boldsymbol X}_{ik}=\boldsymbol B\circ\boldsymbol X_{{\rm rest}} = (\hat{X}_{ik}^i, \hat{X}_{ik}^k)^T$, where $\boldsymbol B = (\boldsymbol b_1, \boldsymbol b_2)^T$ is a $2\times(p-2)$ matrix with $\boldsymbol b_1, \boldsymbol b_2\in\mathbb R_+^{p-2}$ chosen such that $d(X_{i}, \hat{X}_{ik}^i)$ and $d(X_{k}, \hat{X}_{ik}^k)$ attain their minimum, respectively. 
Suppose that the TPDM of $\boldsymbol X'$ is of the block-matrix form $$ \Sigma_{\boldsymbol X'} = \begin{bmatrix} \Sigma_{ik,ik} & \Sigma_{ik,{\rm rest}}\\ \Sigma_{{\rm rest},ik} & \Sigma_{{\rm rest},{\rm rest}} \end{bmatrix} $$ where $\Sigma_{ik,ik}\in\mathbb R_+^{2\times 2}$ is the TPDM restricted to $\boldsymbol X_{ik}$, i.e., $\Sigma_{ik,ik}=\text{TPDM}(\boldsymbol X_{ik})=\begin{bmatrix} \Sigma_{ii} & \Sigma_{ik} \\ \Sigma_{ki} & \Sigma_{kk} \end{bmatrix}$; $\Sigma_{{\rm rest},{\rm rest}}\in\mathbb R_+^{(p-2)\times (p-2)}$ is the TPDM restricted to $\boldsymbol X_{{\rm rest}}$, and $\Sigma_{ik,{\rm rest}} = \Sigma_{{\rm rest},ik}^T = (\Sigma_{i,{\rm rest}}^T, \Sigma_{k,{\rm rest}}^T)^T\in\mathbb R_+^{2\times (p-2)}$ is the cross-TPDM between $\boldsymbol X_{ik}$ and $\boldsymbol X_{{\rm rest}}$. Then, based on the {projection theorem} for vector spaces with inner products, we have that $$\hat{\boldsymbol B} = \Sigma_{ik,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}} ^{-1}.$$ Straightforward calculations show that the {prediction error} $\boldsymbol e = \boldsymbol X_{{ik}}\ominus\hat{\boldsymbol X}_{\text{}ik}$ has the following TPDM:
\begin{align}\label{eq:prediction_error} \text{TPDM}(\boldsymbol e) &= \Sigma_{ik,ik} - \Sigma_{ik,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},ik}\nonumber\\ &=\begin{bmatrix} \Sigma_{ii} - \Sigma_{i,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},i} & \Sigma_{ik} - \Sigma_{i,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},k} \\ \Sigma_{ki} - \Sigma_{k,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},i} & \Sigma_{kk} - \Sigma_{k,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},k} \end{bmatrix}.
\end{align}
\begin{defn}\label{def:ptcc} The \emph{partial tail correlation coefficient (PTCC)} of two random variables $X_i$ and $X_k$ is defined as the off-diagonal {TPDM coefficient of the bivariate residual vector} $\boldsymbol e$ in \eqref{eq:prediction_error}, such that transformed-linear dependence with respect to all other random variables is removed. \end{defn}
\begin{defn}
Let $\boldsymbol X_{ik} = (X_i, X_k)^T$ and $\boldsymbol X_{{\rm rest}}$ be defined as above. Given $\boldsymbol X_{{\rm rest}}$, we say that ${X_i}$ and $X_k$ are \emph{partially tail-uncorrelated} if the PTCC of $X_i$ and $X_k$ (given $\boldsymbol X_{{\rm rest}}$) is equal to {zero}, i.e., if $\Sigma_{ki}-\Sigma_{k,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},i}=0$ as defined in \eqref{eq:prediction_error}. \end{defn} \noindent \textbf{Remark 1}: {Thanks to the properties of TPDMs, the residuals of two partially tail-uncorrelated random variables are necessarily asymptotically tail independent.}
The following proposition links partial tail-uncorrelatedness to the entries of the inverse of the TPDM of $\bm X$.
\begin{prop}\label{prop:partial} Given the representation of the TPDM of $\boldsymbol X'$ as a $3\times3$ block matrix as follows,
\begin{equation}\label{eq:sigma}
\Sigma_{\boldsymbol X'}=\text{TPDM}\{(X_i, X_k,\bm X_{{\rm rest}}^T)^T\}=
\begin{bmatrix}
\Sigma_{ii} & \Sigma_{ik} & \Sigma_{i,{\rm rest}}\\
\Sigma_{ki} & \Sigma_{kk} & \Sigma_{k,{\rm rest}}\\
\Sigma_{{\rm rest},i} & \Sigma_{{\rm rest},k} & \Sigma_{{\rm rest},{\rm rest}}
\end{bmatrix},
\end{equation} where the dimensions of submatrices are as above, then the following two statements are equivalent: $$
(1) \text{ }\Sigma_{ki} - \Sigma_{k,{\rm rest}}\Sigma_{{\rm rest},{\rm rest}}^{-1}\Sigma_{{\rm rest},i} =0,\quad\quad
(2) \text{ }(\Sigma^{-1})_{ik}=0, $$ where $\Sigma$ is the TPDM of the original vector $\boldsymbol X$. \end{prop} Since $\Sigma_{\boldsymbol X'}$ is positive definite and therefore invertible, this result is a direct consequence of the equivalence of statements $(i')$ and $(ii)$ of Proposition~$1$ of \citet{speed1986gaussian}. In the notation of \citet{speed1986gaussian}, we have the following index sets: $a=\{k,{\rm rest}\}$, $b=\{i,{\rm rest}\}$, and $ab = a\cap b = {\rm rest}$.
The following corollary is a direct consequence of this result.
\begin{cor}\label{prop:q} Denote the inverse matrix of the TPDM of a random vector $\boldsymbol X$ by $Q = \Sigma^{-1}$. Then, $$ Q_{ik}=0 \quad \text{if and only if}\quad \text{PTCC}_{ik}=0, $$ where PTCC$_{ik}$ is the PTCC of components $X_i$ and $X_k$. {Recall that a PTCC equal to zero corresponds to partial tail-uncorrelatedness.}
\end{cor}
\section{Learning extremal networks for high-dimensional extremes}\label{sec:extnet}
Using the PTCC, we can now explore the partial tail correlation structure of multivariate random variables under the framework of multivariate regular variation. In this section, we define new graphical models to represent extremal dependence for extremes. Thanks to the transformed-linear framework exposed in the previous section, we can proceed as for classical graphical models by replacing the classical covariance matrix with the TPDM.
\subsection{Graphical models for extremes}
Let $G = (V, E)$ be a graph, where $V = \{1, \ldots,p\}$ represents the node set and $E \subseteq V\times V$ the edge set. We call $G$ an undirected graph if for two nodes $i,k\in V$, the edge $(i, k)$ is in $E$ if and only if the edge $(k,i)$ is also in $E$. We show how to estimate graphical structures for extremes for any type of undirected graph in which we have no edge $(i, k)$ if and only if the variables $X_i$ and $X_k$ are partially tail-uncorrelated given all the other variables in the graph, which we write as $$X_i \perp_p X_k \mid \boldsymbol{X}_{{\rm rest}}.$$
Our methods work for general undirected graph structures, including trees, decomposable graphs and non-decomposable graphs, see the example illustrations in Figure~\ref{fig:graphs}. Note, however, that our general method cannot restrict the estimated graph to be of a specific type, such as a tree.
\begin{figure}
\caption{Examples of undirected graph structures: a tree (left), a decomposable graph (middle), and a non-decomposable graph (right).}
\label{fig:graphs}
\end{figure}
\subsection{Sparse representation of high-dimensional extreme networks}
For high-dimensional extremes with a relatively large number of components $p$ (e.g., up to tens and hundreds of variables), a graphical representation of the extremal dependence structure is desirable for reasons of parsimony and interpretability. We now introduce two efficient inference methods to learn extremal networks from high-dimensional data via the PTCC based on two state-of-the-art graphical methods: the extremal graphical Lasso, and the structured graph learning method via Laplacian spectral constraints. These two methods both work efficiently in high-dimensional settings and return an estimate of the underlying extremal dependence with sparse structure, i.e., with the cardinality of $E$ being of the same order as that of $V$.
\subsubsection{Extremal graphical Lasso}\label{subsec:lasso}
Given an empirical estimator $\hat{\Sigma}$ of the TPDM and a tuning parameter $\lambda\geq 0$, the optimization carried out by the extremal graphical Lasso method is expressed as follows \citep{friedman2008sparse}: $$
\hat{Q}_{\lambda}= \arg \max_{\Theta\succeq 0} \big[ \log\det\Theta - \text{tr}(\hat{\Sigma}\Theta) - \lambda \sum_{i\neq k}|\Theta_{ik}|\big], $$ where $\succeq$ indicates positive-semidefiniteness and $\hat{Q}_\lambda$ is an $L_1$-regularized estimate of the precision matrix $Q=\Sigma^{-1}$. Note that thanks to the $L_1$ regularization, the estimate $\hat{Q}_{\lambda}$ will tend to be sparse (with exact zero entries) and thus contains information on the extremal graph structure. A larger $\lambda$ enforces a larger proportion of zeros in $\hat{Q}_{\lambda}$ and hence fewer edges in the graph. Choosing an appropriate value for $\lambda$ is thus critical. On the one hand, we want to enforce sparsity in the graph, where only significant connections are maintained in the network. On the other hand, $\hat{Q}_{\lambda}$ should be well-defined, with estimation being stable, and with meaningful dependence structures in the estimated model. In our river discharge application in Section~\ref{sec:appldanube} we use a voting procedure to select the best value for $\lambda$, while in our global currency application in Section~\ref{sec:applcurrency} we set the sparsity level to a pre-defined level for interpretation purposes.
\subsubsection{Structured graph learning via Laplacian spectral constraints}\label{SGL}
As an alternative to the graphical Lasso approach, we can seek to include more structural information into the graph by using the structured graph Laplacian (SGL) method of \citet{kumar2019structured}, which assumes that the signal residing on the graph changes ``smoothly'' between connected nodes. This method allows us to better balance the sparsity and connectedness of the estimated precision matrix, thanks to additional constraints on the eigenvalues of the graph Laplacian operator that encodes the graph structure. For instance, if exactly one eigenvalue is zero and all other eigenvalues are positive, then the graph is connected. Laplacian matrix estimation can be formulated as the estimation problem for a precision matrix $Q$, which is therefore linked to our framework that uses the TPDM and its inverse. For any vector of eigenvalues $\boldsymbol\lambda\in S_{\boldsymbol\lambda}$ with appropriate a priori constraints for the desired graph structure defined through the set of admissible eigenvalues $S_{\boldsymbol\lambda}$, we set {$Q = \mathcal{L} \boldsymbol w$} with $\mathcal{L}$ the linear operator that maps a non-negative set of edge weights $\boldsymbol w\in\mathbb R_+^{p(p-1)/2}$ to the matrix $Q$ with Laplacian constraints. The Laplacian matrix $Q$ can be factorized as {$Q = U{\rm Diag}(\boldsymbol \lambda)U^T$} (with an orthogonal matrix $U$) to enforce the constraints on $\boldsymbol \lambda$. Then the optimization problem can be formulated as follows: $$
(\hat{\boldsymbol\lambda},\hat{U}) = \arg \max_{\boldsymbol\lambda,U}\max_{\boldsymbol w} \left(
\log \text{gdet}(\text{Diag}(\boldsymbol\lambda)) - \text{tr}(\hat{\Sigma} \mathcal{L} \boldsymbol w) - \textcolor{black}{\alpha}||\mathcal{L}\boldsymbol w||_1 -\dfrac{\textcolor{black}{\beta}}{2}||\mathcal{L}\boldsymbol w - U\text{Diag}(\boldsymbol\lambda)U^T||_{F}^2 \right), $$ $$ \text{subject to } \boldsymbol w\geq 0, \boldsymbol\lambda \in S_{\boldsymbol\lambda}, \text{and } U^TU = I, $$
where $S_{\boldsymbol\lambda}$ denotes the set of constrained eigenvalues, $||\cdot||_F$ is the Frobenius norm, and $\text{gdet}$ is the generalized determinant defined as the product of all positive values in $\boldsymbol \lambda$. The optimization problem can be viewed as penalized likelihood if data have been generated from a Gaussian Markov random field; in more general cases such as ours, it still provides meaningful graphical structures since it can be viewed as a so-called penalized log-determinant Bregman divergence problem. Therefore, this method can be seen as an extension of the graphical Lasso that allows us to set useful spectral constraints with respect to the structure of the graph. A larger value of $\alpha$ increases the sparsity level of the graph. The hyperparameter $\beta \geq 0$ additionally controls the level of connectedness, and a larger value of $\beta$ enforces a higher level of connectedness of the estimated graph structure.
\section{Simulation study}\label{sec:simulation}
We present a simulation study with three examples where the corresponding true structures of the extremal dependence graphs are as in Figure~\ref{fig:graphs}, i.e., a tree (Case 1), a decomposable graph (Case 2), and a non-decomposable graph (Case 3). The simulated models are constructed as follows. We simulate a dataset of $n = 10^5$ i.i.d.\ random variables $R_1, R_2, R_3, R_4\sim $ Fr\'echet$(2)$. We then construct $n$ replicates of the random vector $\boldsymbol X = (X_1, X_2, X_3, X_4)^T$ according to the following three cases, for which we also specify the true TPDM $\Sigma$ and its inverse $Q=\Sigma^{-1}$:\\ {\bf Case 1:} \[ \begin{cases} \notag X_1 &= R_1\\ \notag X_2 &= R_1 \oplus R_2\\ \notag X_3 &= R_1 \oplus R_3\\ \notag X_4 &= R_1 \oplus R_4 \end{cases}, \quad \Sigma= \begin{bmatrix} 1 &1 &1 &1\\ 1 &2 &1 &1\\ 1 &1 &2 &1\\ 1 &1 &1 &2 \end{bmatrix}, \quad Q = \begin{bmatrix} 4 &-1 &-1 &-1\\ -1 &1 &0 &0\\ -1 &0 &1 &0\\ -1 &0 &0 &1 \end{bmatrix}. \] {\bf Case 2:} \[ \begin{cases} \notag X_1 = R_1\\ \notag X_2 =R_1 \oplus R_2\\ \notag X_3 = R_1 \oplus R_3\\ \notag X_4 = R_1 \oplus 2R_3 \oplus R_4 \end{cases}, \quad \Sigma= \begin{bmatrix} 1 &1 &1 &1\\ 1 &2 &1 &1\\ 1 &1 &2 &3\\ 1 &1 &3 &6 \end{bmatrix}, \quad Q = \begin{bmatrix} 4 &-1 &-3 &1\\ -1 &1 &0 &0\\ -3 &0 &5 &-2\\ 1 &0 &-2 &1 \end{bmatrix}. \] {\bf Case 3:} \[ \begin{cases} \notag X_1 = R_1 \\ \notag X_2 = R_1\oplus 3/{\sqrt{6}}R_2\\ \notag X_3 = R_1\oplus 1/\sqrt{6}R_2 \oplus 2/\sqrt{3}R_3\\ \notag X_4 = R_1 \oplus \sqrt{6}/3R_2 \oplus 1/\sqrt{3}R_3\oplus R_4 \end{cases}, \quad \Sigma= \begin{bmatrix} 1 &1 &1 &1\\ 1 &2.5 &1.5 &2\\ 1 &1.5 &2.5 &2\\ 1 &2 &2 &3 \end{bmatrix}, \quad Q = \begin{bmatrix} 2 &-0.5 &-0.5 &0\\ -0.5 &1 &0 &-0.5\\ -0.5 &0 &1 &-0.5\\ 0 &-0.5 &-0.5 &1 \end{bmatrix}. \]
We proceed as follows to infer the extremal graph structure in each case. First, we estimate the TPDM of $\boldsymbol X$, $\Sigma$, based on the estimator $\hat\Sigma$ specified through \eqref{eq:estimator} using the $99\%$ quantile for the threshold $r_0$ (i.e., there are $1000$ threshold exceedances to estimate the TPDM). Then, we apply the extremal graphical Lasso and SGL methods. In each setting, we test $m_1 = 300$ different values for the regularization parameter $\lambda$ when using extremal graphical Lasso and $m_2 = 400$ different settings for the combination of $\alpha$ and $\beta$ for the SGL method. The range of $\lambda$ and $\{\alpha,\beta\}$ values is chosen to span a wide range of graphical structures, from fully connected to fully sparse (no connection). In all experimental results, we have found that when the true number of edges is achieved, both methods can retrieve 100\% of the true extremal graph structure, i.e., all connections are correctly identified and the estimated graph has no wrong connections.
\begin{figure}
\caption{Estimated extremal graph structures using the extremal graphical Lasso method based on the PTCC for Case 3, as a function of the tuning parameter $\lambda$ (shown at the top of each display).}
\label{fig:simu3}
\end{figure}
This is illustrated in Figure~\ref{fig:simu3}, which displays the estimated extremal graph structure for Case 3 (general non-decomposable ``square graph'') when using the extremal graphical Lasso method. The heading of each display shows the range of $\lambda$ values that leads to the estimated graph shown below. The tuning parameter $\lambda$ controls the number of edges in the graph, i.e., the sparsity level: when $\lambda$ decreases, the number of edges increases, and vice versa. Interestingly, the proposed method always retrieves true connections (i.e., it never yields wrong connections) whenever the estimated graph is as sparse, or sparser than the true graph. This simple experiment shows that our method is able to retrieve the true extremal dependence graph structure, provided the tuning parameter $\lambda$ (or $\{\alpha,\beta\}$ for the SGL method) is well specified. While our numerical experiments were performed in dimension $p=4$, we expect similar results to hold in higher dimensions provided enough data replicates are available. Our higher-dimensional data applications in Section~\ref{sec:applications} demonstrate that the estimated graph structures indeed make sense and yield interpretable results.
We also note that with our distribution-free approach, there is no universally optimal way of setting the tuning parameters. However, we can use problem-specific criteria to achieve the desired outcome (and therefore to set the penalty parameters); see the data applications in Section~\ref{sec:appldanube} and \ref{sec:applcurrency}.
\section{Applications}\label{sec:applications}
Risk networks are useful in quantitative risk management to elucidate complex extremal dependence structures in collections of random variables. We show two examples of both environmental and financial risk analysis. First, we study river discharge data of the upper Danube basin \citep{asadi2015extremes}, which has become a benchmark dataset for learning extremal networks in the recent literature. The true underlying physical river flow network is available, which can be used as a benchmark to compare the performance of our method with other existing approaches. Second, we apply our method to historical global currency exchange rate data from different historical periods, including different economic cycles, the COVID-19 period, and the period of the 2022 military conflict opposing Russia and Ukraine (2022.02.24--2022.09.26).
\subsection{Extremal network estimation for a river network}\label{sec:appldanube} We apply our method to study the dependence structure of extreme discharges on the river network of the upper Danube basin (see the left panel of Figure~\ref{fig:danube} for the topographic map). This region has been regularly affected by severe flooding events in its history, which have caused losses of human lives and damage to material goods. The original daily discharge data from 1960 to 2009 were provided by the Bavarian Environmental Agency (http://www.gkd.bayern.de), and \citet{asadi2015extremes} preprocessed the data, which now include $n=428$ approximately independent events $\boldsymbol X_1,\ldots, \boldsymbol X_n \in \mathbb{R}^d$ recorded at $d=31$ gauging stations located on the river network from three summer months (June, July, and August), obtained using declustering methods. The data were later also studied by \citet{engelke2020graphical} among others, using graphical models for extremes based on a conditional independence notion adapted to multivariate Pareto distributions. The true physical river flow connections and directions are represented by a directed graph shown in the right panel of Figure~\ref{fig:danube}, where the arrows indicate the flow directions. This can serve as an accurate benchmark of the ``true'' conditional independence structure, against which we can compare the results from our proposed extremal graphical structure learning methods based on the PTCC.
\begin{figure}
\caption{Left: Topographic map of the upper Danube basin \citep[from][]{asadi2015extremes}, showing 31 sites of gauging stations (red circles) and the altitudes of the region. Right: The true physical river flow connections; the arrows show the flow directions.}
\label{fig:danube}
\end{figure}
\subsubsection{Graph structure learning using the extremal graphical Lasso} To learn the extremal dependence structure of the river network, we first perform a nonparametric empirical transformation of the data to satisfy Fr\'echet($\alpha = 2$) margins, for each station separately. Next, we estimate the TPDM, $\Sigma$, using the proposed estimator, $\hat\Sigma$, defined through \eqref{eq:estimator}. In particular, we choose $m=d = 31$ because the margins are preprocessed to have a common unit Fr\'echet scale and $r_0 = 11.4$, which corresponds to the empirical 90\% quantile. Therefore, $n_{\text{exc}} = 43$ is the number of extreme observations (i.e., threshold exceedances) which are used to estimate $\Sigma$. The left panel of Figure~\ref{fig:tpdm} displays the estimated TPDM of the river discharge data from the upper Danube basin, while the right panel displays the votes (in percentage) of the edges selected based on the extremal graphical Lasso method, obtained from multiple fits with a range of $\lambda$ values producing different dependence structures, from fully connected to fully sparse graphs.
\begin{figure}
\caption{Left: Estimated TPDM of the river discharge data from the upper Danube basin. Right: Votes ($\%$) of the edges selected based on the extremal graphical Lasso method. Darker red cells indicate that the corresponding edge has been selected more often by the graphical Lasso.}
\label{fig:tpdm}
\end{figure}
\subsubsection{Graph structure learning using the SGL method} To enhance connectedness, we further explore the SGL method, which learns sparse graph structures under additional spectral constraints. In particular, as described in Section~\ref{SGL}, we can control both the sparsity level and the graph connectedness by modulating the two tuning parameters $\alpha$ and $\beta$, respectively. As shown in the left panel of Figure~\ref{fig:SGL}, the overall graph sparsity varies for different combinations of $\alpha$ and $\beta$. The right panel of Figure~\ref{fig:SGL} displays the votes (in percentage) of the edges selected based on the SGL method, obtained by fitting a large number of models for each of the $\{\alpha,\beta\}$ combinations shown in the left panel.
\begin{figure}
\caption{Left: Number of edges selected by the SGL method as a function of the tuning parameters $\alpha$ and $\beta$. Lighter blue cells correspond to parameter combinations producing sparser graphs. Right: Votes ($\%$) of the edges selected based on the SGL method. Darker red cells indicate that the corresponding edge has been selected more often by the SGL method.}
\label{fig:SGL}
\end{figure}
\subsubsection{Results: estimated extremal river discharge networks}
For both the extremal graphical Lasso and the SGL method, it is important to carefully select the tuning parameters, $\lambda$ and $\{\alpha,\beta\}$, respectively. These tuning parameters impact both the sparsity level and the connectedness of the resulting graph structure. Ideally, we would like to obtain a sparse graph while keeping it connected. However, there is a tradeoff between these two requirements. Our approach is to control the overall sparsity level while imposing a soft connectedness condition: we start from the fully sparse graph (no edges) and sequentially add edges between nodes according to the ranking of the votes shown in Figures~\ref{fig:tpdm} and \ref{fig:SGL}, until no node is left alone (i.e., each node has at least one connection with another node). The estimated graph structure thus prioritizes edges that are most often selected and is obtained by blending the results from several model fits, which also makes it less sensitive to specific values of the tuning parameters.
Figure~\ref{fig:river_result} displays the estimated extremal river discharge network using our approach for both the extremal graphical Lasso and the SGL method, respectively. The edge thickness is proportional to the votes shown in Figures~\ref{fig:tpdm} and \ref{fig:SGL}. \begin{figure}
\caption{Extremal river discharge network estimated using the extremal graphical Lasso (left) and the SGL method (right). The edge thickness is proportional to the votes shown in Figures~\ref{fig:tpdm} and \ref{fig:SGL}.}
\label{fig:river_result}
\end{figure} Recall that when two nodes are not connected, it means that they are partially tail-uncorrelated in our framework. The estimation based on the extremal graphical Lasso method has a few more edges than the true physical river flow network. The extra links could be interpreted as being due to extremal dependence induced by regional weather events, though this is not very clear. By contrast, the estimation result based on the SGL method matches most of the true river flow connections, while the votes (shown in terms of the edge thickness) represent the strength of the extremal dependence connections. Interestingly, this dependence strength seems well-aligned with the actual physical strength of the river flow. Compared with existing methods \citep{engelke2020graphical, kluppelberg2021estimating}, our estimated extremal network based on the SGL method looks more realistic (thus more easily interpretable), as it is closer to the true flow structure of the river network, though the recent results from \citet{Engelke.etal:2022} are quite similar to ours (with a few extra connections).
\subsection{Extremal network estimation for global currency exchange rate network}\label{sec:applcurrency}
We now apply our method to explore historical global currency exchange rate data for 20 currencies from different historical periods, including two different global economic cycles (2009--2014 and 2015--2019, where the segmentation is determined from the world GDP cycles illustrated in Figure~\ref{fig:gdp} from Appendix~\ref{appd:currency}), COVID-19 (2020.01.01--2022.02.23), and the period from the beginning of the 2022 military conflict between Russia and Ukraine until a most recent date when we downloaded the data (2022.02.24--2022.09.26). Historical data were downloaded from \href{https://finance.yahoo.com/?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbS8&guce_referrer_sig=AQAAAArrJ51ii0LwZtFYwOr7XyQJJJefzGD3DqIN_OCi-CjH2j981I5L7tgeoZv4eTXCY0OSAxXw09alMxpKV7ygEdoDki02tMTic03tGlae7MXXVHzLszZQgvx5L9EFfqzlYo-S6UGPY7IHtZbO6fi1ZFbs_5hcGAkjzmw24f-EE8KR}{Yahoo Finance}. We chose the currencies from all G20 countries, and also added the Ukrainian and Kazakhstani currencies. The list of selected currencies and their corresponding symbols can be found in Table~\ref{table:currency} in Appendix~\ref{appd:currency}. Since the unit of the currencies is the US Dollar (USD), USD is not considered in our list of currencies under study.
First, we preprocess the historical daily closing prices of the currencies. An ARMA(1,1)-GARCH(1,1) time series model is fitted to the negative log return time series of each currency, and then standardized residuals are extracted and transformed marginally to Fr\'echet margins with shape parameter $\alpha=2$. The extremal dependence graph structure governing negative log returns therefore represents partial associations among extreme losses, shedding light on the integration and/or vulnerability of major economies in periods of stress. To estimate this risk network, we follow the same procedure as before, estimating first the TPDM using the estimator in \eqref{eq:estimator} with $r_0$ as the empirical $90\%$ quantile, and then applying the SGL method.
In this analysis, we use only the SGL method, because it includes the extremal graphical Lasso method as a special case when $\beta = 0$, and it has shown better performance in the Danube river application. Furthermore, for comparability among different historical periods, we here fix the sparsity level to 80\% (i.e., with only about 38 edges), rather than using the method based on votes. This approach yields a unique combination of $\{\alpha,\beta\}$ tuning parameters, which then yields the final estimated graph structure. To assess the estimation uncertainty of the graph structure, we have further conducted 300 bootstrap simulations, whereby standardized residuals are resampled with replacement and the TPDM is then re-estimated, as well as the graph structure fixing the same sparsity level (i.e., potentially with different selected $\{\alpha,\beta\}$ values for each bootstrap simulation). The bootstrap results are shown in Figures~\ref{fig:eco} and \ref{fig:events} for the first two, and last two periods, respectively, where different edge types represent the ``significance'' of the displayed connections: the thickest edges (in red) indicate a frequency of at least $90\%$ to be included among the 300 bootstrap fitted models; thick edges (in blue) indicate a frequency between $70\%$ and $90\%$ to be selected; thin edges (in grey) indicate a frequency between $50\%$ and $70\%$ to be selected; absent edges have been selected less than $50\%$ of the time. Moreover, the size of nodes in the displayed graphs is proportional to their degree, i.e., to their number of connections. The bigger a node, the more connected it is, giving an idea of the centrality of a currency in the risk network.
\begin{figure}
\caption{Extremal currency exchange rate risk network estimated for 2009--2014 (left) and 2015--2019 (right). Different edge types (respectively thickest, thick, and thin) with different colors (respectively red, blue, and grey) indicate a frequency of $>90\%$, $70\%$--$90\%$, and $50\%$--$70\%$, respectively, to be selected among the $300$ bootstrap fitted models, while absent edges indicate that this frequency is less than $50\%$.}
\label{fig:eco}
\end{figure}
We now provide some interpretation of the estimated risk networks, assuming that more strongly connected nodes tend to be more vulnerable/exposed to network risks, or are important currencies that strongly determine the behavior of other currencies in the monetary system. We can back up some of our findings using historical events and economic development regimes for different countries. From the left panel in Figure~\ref{fig:eco}, representing the economic cycle 2009--2014, the strongest-connected currencies within the estimated risk network are GBP (United Kingdom) and ARS (Argentina), whereas KZT (Kazakhstan) and UAH (Ukraine) are less connected from the major clusters. By contrast, in the right panel of Figure~\ref{fig:eco}, representing the economic cycle 2015--2019, the strongest-connected currencies from the estimated risk network are KRW (Republic of Korea), AUD (Australia) and EUR (EU), whereas ARS, KZT, and MXN (Mexico) are relatively isolated. ARS and GBP are two examples of currencies that were much more weakly connected during the second period. Historical economic data show that the Argentinian economy went through a major crisis around 2000, establishing strong economic links to other countries and international financial markets in the following years. This seems to clearly transpire from the estimated graph for the 2009--2014 period following the global financial crisis of 2008, but the monetary interconnections of ARS to other currencies have drastically been reduced in the later 2015--2019 period. As to GBP, the exit of Great Britain from the European Union (Brexit), and the political and economic decisions accompanying it, could be the origin of this change. \begin{figure}
\caption{Extremal currency exchange rate risk network estimated for the COVID-19 period (left) and the period since the 2022 military conflict between Russia and Ukraine (right). Different edge types (respectively thickest, thick, and thin) with different colors (respectively red, blue, and grey) indicate a frequency of $>90\%$, $70\%$--$90\%$, and $50\%$--$70\%$, respectively, to be selected among the $300$ bootstrap fitted models, while absent edges indicate that this frequency is less than $50\%$.}
\label{fig:events}
\end{figure} The left panel of Figure~\ref{fig:events} shows that during the COVID-19 pandemic, the ARS currency is again the strongest-connected one, by far, in the risk network, whereas TRY (Turkey) and CAD (Canada) are isolated and most of the other currencies are also less interconnected. As to ARS, Argentina was struck by COVID-19 during an already fragile economic situation, and its aforementioned strong international monetary ties might have led to strong extremal connectedness to other countries. Finally, the right panel of Figure~\ref{fig:events} shows the risk network for currency exchange rates from the beginning of the 2022 military conflict between Russia and Ukraine until 2022.09.26. Since the length of the time period is shorter, there is higher estimation uncertainty, and no edges have been selected more than $90\%$ of the time among the $300$ bootstrap model fits. Nevertheless, we can still make the interesting observation that RUB (Russia) is the only isolated currency from the rest of the network, which might be due to the antagonism and strong economic sanctions imposed by western countries, as well as the dramatic changes in monetary policies.
\section{Conclusions}\label{sec:conclusion}
We have proposed the partial tail correlation as a novel notion of extremal dependence measure that removes the effect of confounding variables through transformed-linear operations. Unlike other approaches from the recent literature, our new partial tail correlation coefficient (PTCC) assumes multivariate regular variation but does not rely on any further strict parametric assumptions. Furthermore, the PTCC has appealing theoretical properties and it can be used to define a new class of extremal graphical models, where the absence of edges indicates partial tail-uncorrelatedness between variables (i.e., when the PTCC of the corresponding edges equals zero). We have shown that the zero PTCC values between variable pairs can be retrieved by identifying the zero entries in the inverse tail pairwise dependence matrix (TPDM). This convenient property, which is akin to classical Gaussian graphical models, allows us to efficiently learn high-dimensional extremal networks defined in terms of the PTCC by exploiting state-of-the-art methods from graph theory, such as the graphical Lasso and structured graph learning via Laplacian spectral constraints.
Our graph-inference approach is flexible, can be applied to general undirected graphs, and easily scales to high dimensions. We demonstrate the effectiveness of our method as an exploratory tool for interpretable extremal network estimation. In our first application to river discharge data from the upper Danube basin, we show that the proposed method outperforms other existing methods by realistically capturing most physical flow connections, together with the strength of the connections, while largely avoiding spurious connections. In our second application to historical currency exchange rate data, we obtain interesting findings based on the estimated risk network for four recent periods, which can be backed up by real historical evidence. While our interpretations remain fairly basic, it would be interesting to get further insights from economists.
We have identified some theoretical and methodological challenges for future research. First, we have not obtained theoretical guarantees that our graph learning method can consistently estimate the unknown graph structure, and it would be interesting to see if this could be proven under general assumptions. However, our simulations and applications have provided convincing evidence that the method works well to extract useful structural information from extreme observations. Moreover, it is also worth noting that in some cases, there is no interest in recovering the exact true graph structure, but rather a sparser representation containing a fixed percentage with the ``most important'' edges, thus facilitating interpretations. In such cases, consistency is not a criterion that is well-adapted to the problem at hand. Second, we have focused in this paper on graph learning through the graphical Lasso, and the SGL method that imposes further Laplacian constraints. It would be interesting to extend these methods, in order to let the graphical structure depend on well-chosen covariates (e.g., temperature in our river network application, or the time period index in our currency network application), so that the estimated (non-stationary) risk network can then be interpreted through the glasses of these covariates. Moreover, unlike our currency application, where separate model fits were obtained for each time period under consideration, introducing covariates in the graph learning procedure would allow estimating the networks simultaneously from the combined dataset, thus gaining efficiency for higher accuracy of the estimated graph structure. A possibility could be to extend the Gaussian graphical regression approach proposed by \citet{Zhang.Li:2022} to our PTCC-based extremes framework. 
Third, by analogy with the widely used Gaussian Markov random field framework, one could imagine constructing Markov random fields for extremes where the distribution of connected variables may be fitted with an asymptotically justified model for extremes (e.g., of multivariate Pareto type), whereas the factorization of the likelihood could be pre-specified by the extremal network learned from the data in a preliminary step using our proposed method. Finally, another direction to investigate concerns the geometric representation of multivariate extremes \citep{nolde2020linking, simpson2021geometric}. It would be interesting to see if the new PTCC can be defined through this geometric approach and to explore its links with other popular measures of extremal dependence.
\section*{Acknowledgments} We point out that there has been independent and parallel work by Lee \& Cooley, who also investigate partial tail correlation for extremes. Our understanding is that inference in their work focuses rather on hypothesis testing (with the goal of checking if the partial tail correlations for given pairs of variables are significantly different from zero), and not on learning extremal networks with structural constraints. This publication is based upon work supported by the King Abdullah University of Science and Technology (KAUST) Office of Sponsored Research (OSR) under Award No. OSR-CRG2020-4394.
\section*{Disclosure statement} The authors report there are no competing interests to declare.
\section*{Appendix} \appendix
\section{Data details} \label{appd:currency} \begin{table}[h] \centering \begin{tabular}{ccc} \hline Countries & Currency symbol & Currency name\\ \hline EU & EUR & EURO \\ United Kingdom & GBP & Pound Sterling \\ India & INR & Indian Rupee \\ Australia & AUD & Australian Dollar \\ Canada & CAD & Canadian Dollar \\ South Africa & ZAR & Rand \\ Japan & JPY & Yen \\ Singapore & SGD & Singapore Dollar \\ China & CNY & Yuan \\ Switzerland & CHF & Swiss Franc \\ Republic of Korea & KRW & Won \\ Turkey & TRY & Turkish Lira \\ Mexico & MXN & Mexican Peso \\ Brazil & BRL & Real \\ Indonesia & IDR & Rupiah \\ Saudi Arabia & SAR & Saudi Riyal \\ Russia & RUB & Ruble \\ Argentina & ARS & Argentine Peso \\ Ukraine & UAH & Ukrainian Hryvnia \\ Kazakhstan & KZT & Kazakhstani Tenge\\ \hline \end{tabular} \caption{Currency symbol list.} \label{table:currency} \end{table}
\begin{figure}
\caption{World GDP (current trillion US\$) from 2000 to 2020. Data source: \href{https://data.worldbank.org/indicator/NY.GDP.MKTP.CD}{The World Bank}.}
\label{fig:gdp}
\end{figure}
\end{document} |
\begin{document}
\title{The Path Partition Conjecture is True and its Validity Yields
Upper Bounds for Detour Chromatic Number and Star Chromatic Number}
\author{G. Sethuraman\\ Department of Mathematics, Anna University\\ Chennai 600 025, INDIA\\ [email protected]}
\maketitle
\begin{abstract} The detour order of a graph $G$, denoted $\tau(G)$, is the order of a longest path in $G$. A partition $(A, B)$ of $V(G)$ such that $\tau(\langle A \rangle) \leq a$ and $\tau(\langle B \rangle) \leq b$ is called an $(a, b)$-partition of $G$. A graph $G$ is called $\tau$-partitionable if $G$ has an $(a, b)$-partition for every pair $(a, b)$ of positive integers such that $a + b = \tau(G)$. The well-known Path Partition Conjecture states that every graph is $\tau$-partitionable. In \cite{df07} Dunbar and Frick have shown that if every 2-connected graph is $\tau$-partitionable then every graph is $\tau$-partitionable. In this paper we show that every 2-connected graph is $\tau$-partitionable. Thus, our result settles the Path Partition Conjecture affirmatively. We prove the following two theorems as the implications of the validity of the Path Partition Conjecture.\\ {\bf Theorem 1:} For every graph $G$, $\chi_s(G) \leq \tau(G)$, where $\chi_s(G)$ is the star chromatic number of a graph $G$.
The $n^{th}$ detour chromatic number of a graph $G$, denoted $\chi_n(G)$, is the minimum number of colours required for colouring the vertices of $G$ such that no path of order greater than $n$ is monocoloured. These chromatic numbers were introduced by Chartrand, Geller and Hedetniemi~\cite{cg68} as a generalization of the vertex chromatic number $\chi(G)$.\\ {\bf Theorem 2:} For every graph $G$ and for every $n \geq 1$, $\chi_n(G) \leq \left\lceil \frac{\tau(G)}{n} \right\rceil$, where $\chi_n(G)$ denotes the $n^{th}$ detour chromatic number.\\ Theorem 2 settles the conjecture of Frick and Bullock \cite{fb01} that $\chi_n(G) \leq \left\lceil \frac{\tau(G)}{n} \right\rceil$, for every graph $G$, for every $n \geq 1$, affirmatively.
\end{abstract}
{\bf Keywords:} Path Partition; Path Partition Conjecture; Star Chromatic Number; Detour Chromatic Number; Upper bound of chromatic number; Upper bound of Star Chromatic Number; Upper bound of Detour Chromatic Number.\\
\section{Introduction}
All graphs considered here are simple, finite and undirected. Terms not defined here can be referred from the book \cite{we02}. A longest path in a graph $G$ is called a detour of $G$. The number of vertices in a detour of $G$ is called the detour order of $G$ and is denoted by $\tau(G)$. A partition $(A, B)$ of $V(G)$ such that $\tau(\langle A \rangle) \leq a$ and $\tau(\langle B \rangle) \leq b$ is called an $(a, b)$-partition of $G$. If $G$ has an $(a, b)$-partition for every pair $(a, b)$ of positive integers such that $a + b = \tau(G)$, then we say that $G$ is $\tau$-partitionable. The following conjecture is popularly known as the Path Partition Conjecture.
\noindent\textbf{Path Partition Conjecture:} {\it Every graph is
$\tau$-partitionable}.
The Path Partition Conjecture was discussed by Lov\'asz and Mih\'ok in 1981 in Szeged and treated in the theses \cite{ha84} and \cite{vr86}. The Path Partition Conjecture first appeared in the literature in 1983, in a paper by Laborde et al. \cite{lp82}. In 1995 Bondy \cite{bo95} posed the directed version of the Path Partition Conjecture. In 2004, Aldred and Thomassen \cite{at04} disproved two stronger versions of the Path Partition Conjecture, known as the Path Kernel Conjecture \cite{bh97,mi85} and the Maximum $P_n$-free Set Conjecture \cite{df04}. Similar partitions were studied for other graph parameters too. Lov\'asz proved in \cite{lo66} that every graph is $\Delta$-partitionable, where $\Delta$ denotes the maximum degree (A graph $G$ is $\Delta$-partitionable if, for every pair $(a, b)$ of positive integers satisfying $a + b = \Delta(G) - 1$, there exists a partition $(A, B)$ of $V(G)$ such that $\Delta(\langle A \rangle) \leq a$ and $\Delta(\langle B \rangle) \leq b$). For the results pertaining to the Path Partition Conjecture and related conjectures, refer to \cite{bd98,bh97,df99,df07,df04,fb01,fr13,ha84,lp82,mi85,se11,vr86,niel}. An $n$-detour colouring of a graph $G$ is a colouring of the vertices of $G$ such that no path of order greater than $n$ is monocoloured. The $n^{th}$ detour chromatic number of graph $G$, denoted by $\chi_n$, is the minimum number of colours required for an $n$-detour colouring of a graph $G$. It is interesting to note that for a graph $G$, when $n=1$, $\chi_1(G)=\chi(G)$. These chromatic numbers were introduced by Chartrand, Geller and Hedetniemi \cite{cg68} in 1968 as a generalization of the vertex chromatic number.
If the Path Partition Conjecture is true, then the following conjecture of Frick and Bullock \cite{fb01} is also true.
\noindent\textbf{Frick-Bullock Conjecture:} $\chi_n(G) \leq \left\lceil \frac{\tau(G)} {n} \right\rceil$ {\it for every graph $G$ and for
every $n \geq 1$.}\\ Recently, Dunbar and Frick \cite{df07} proved the following theorem.
\begin{theorem}[Dunbar and Frick \cite{df07}] \label{thm1.1} If every 2-connected graph is \break $\tau$-partitionable then every graph is $\tau$-partitionable. \end{theorem}
In this paper we show that the Path Partition Conjecture is true for every 2-connected graph. Thus, Theorem \ref{thm1.1} and our result imply that the Path Partition Conjecture is true. The validity of the Path Partition Conjecture would imply the following Path Partition Theorem.
\noindent{\bf Path Partition Theorem.} {\it For every graph $G$ and for
every $t$-tuple \break $(a_1, a_2, \dots, a_t)$ of positive
integers with $a_1 + a_2 + \cdots + a_t = \tau(G)$ and $t \geq 1$,
there exists a partition $(V_1, V_2, \dots, V_t)$ of $V(G)$
such that $\tau(\langle V_i \rangle) \leq a_i$, for every $i$, $1 \leq i
\leq t$.}\\ The Path Partition Theorem immediately implies that the Conjecture of Frick and Bullock is true. The validity of Frick and Bullock Conjecture naturally implies the classical upper bound for the chromatic number of a graph $G$ that $\chi(G)=\chi_1(G) \leq \tau(G)$ proved by Gallai\cite{gall}.
A star colouring of a graph $G$ is a proper vertex colouring in which every path on four vertices uses at least three distinct colours. The star chromatic number of $G$ denoted by $\chi_s(G)$ is the least number of colours needed to star color $G$. As a consequence of the Path Partition Theorem, we have obtained an upper bound for the star chromatic number. More precisely, we show that $\chi_s(G) \leq \tau(G)$ for every graph $G$.
\section{Main Result}
In this section we prove our main result that every 2-connected graph is $\tau$-partitionable.
We use Whitney's Theorem on the characterization of 2-connected graph in the proof of our main result given in Theorem \ref{thm2.2}.
An ear of a graph $G$ is a maximal path whose internal vertices have degree 2 in $G$. An ear decomposition of $G$ is a decomposition $P_0, P_1, \dots, P_k$ such that $P_0$ is a cycle and $P_i$ for $i \geq 1$ is an ear of $P_0 \cup P_1 \cup \dots \cup P_i$.
\begin{theorem}[Whitney \cite{wh}] A graph is 2-connected if and only if it has an ear decomposition. Furthermore, every cycle in a 2-connected graph is the initial cycle in some ear decomposition. \end{theorem}
\begin{theorem}\label{thm2.2} Every 2-connected graph is $\tau$-partitionable. \end{theorem}
\begin{proof} Let $G$ be a 2-connected graph. By Whitney's Theorem there exists an ear decomposition $S = \{P_0, P_1, \dots, P_n\}$, where $P_0$ is a cycle and $P_i$ for $i \geq 1$ is an ear of $P_0 \cup P_1 \cup \dots \cup P_i$. We prove that $G$ is $\tau$-partitionable by induction on
$|S|$.
When $|S| = 1$, $S = \{P_0\}$. Then $G = P_0$. Thus, $G$ is a cycle. As every cycle is $\tau$-partitionable, $G$ is
$\tau$-partitionable. By induction, we assume that if $G$ is any 2-connected graph having an ear decomposition $S = \{P_0, P_1, \dots, P_{k-1}\}$, that is, with $|S| = k$, then $G$ is $\tau$-partitionable.
Let $H$ be a 2-connected graph with an ear decomposition \break $S =
\{P_0, P_1, \dots, P_{k-1}, P_k\}$. That is, $|S| = k + 1$. We claim that $H$ is\\ $\tau$-partitionable. Let $(a,b)$ be a pair of positive integers with $a+b=\tau(H)$. Since $H$ is having the ear decomposition $S=\{P_0,P_1,\dots,P_{k-1},P_k\}$, $H$ can be considered as a 2-connected graph obtained from the 2-connected graph $G$ having the ear decomposition $S'=\{P_0,P_1,\dots,P_{k-1}\}$ by adding a new path (ear) $P_k:xv_1v_2\dots v_ry$ to $G$, where $x,y \in V(G)$ and $v_1,v_2,\dots,v_r$ are new vertices to $G$. As $G$ is a 2-connected graph having the ear decomposition $S'=\{P_0,P_1,\dots,P_{k-1}\}$ with $|S|=k$, by induction $G$ is $\tau$-partitionable. Let $(a_1,b_1)$ be a pair of positive integers such that $a_1 \leq a$, $b_1 \leq b$ with $\tau(G)=a_1+b_1$. Since $G$ is $\tau$-partitionable, there exists an $(a_1,b_1)$ partition $(A',B')$ of $V(G)$ such that $\tau(G(\langle A' \rangle)) \leq a_1$ and $\tau(G(\langle B' \rangle)) \leq b_1$. In order to prove our claim that $H$ is $\tau$-partitionable, we define an $(a,b)$-partition $(A,B)$ of $V(H)$ from the $(a_1,b_1)$ partition $(A',B')$ of $V(G)$ as well as using the path $P_k: xv_1v_2\dots v_ry$. The construction of an $(a,b)$-partition $(A,B)$ of $V(H)$ is given under three cases, depending on $r=0$, $r=1$ and $r \geq 2$, where $r$ is the number of new vertices in the path $P_k$.
\noindent\textbf{Case 1.} $r = 0$
Then $P_k : xy$, where $x$ and $y$ are the vertices of $G$.\\ Thus, $H = G + xy$. This implies, $V(H) = V(G)$.
\noindent\textbf{Case 1.1.} Suppose $x$ and $y$ are in different parts of the partition $(A^\prime, B^\prime)$ of
\hspace{1.2cm} $V(G)$.
Then, as $x$ and $y$ are in different parts of the partition $(A^\prime, B^\prime)$ of $V(G)$, the introduction of the new edge $xy$ between the vertices $x$ and $y$ does not increase the length of any path either in $G(\langle A^\prime \rangle)$ or in $G(\langle B^\prime \rangle)$. Further, as $V(H) = V(G)$, we have $\tau(H(\langle A^\prime \rangle)) = \tau(G(\langle A^\prime \rangle)) \leq a_1 \leq a \ \text{ and } \ \tau(H(\langle B^\prime \rangle)) = \tau(G(\langle B^\prime \rangle)) \leq b_1 \leq b.$ Thus, $(A^\prime, B^\prime)$ is a required $(a, b)$-partition of $V(H)$. \noindent\textbf{Case 1.2.} Suppose $x$ and $y$ are in the same part of the partition $(A^\prime, B^\prime)$ of
\hspace{1.2cm} $V(G)$.
Without loss of generality, we assume that $x$ and $y$ are in $A^\prime$. \\ Suppose $\tau(H(\langle A^\prime \rangle)) \leq a$. Then, as $\tau(H(\langle B^\prime \rangle)) \leq b_1 \leq b$, the $(A^\prime, B^\prime)$ is a required $(a, b)$-partition of $V(H)$.\\ Suppose $\tau(H(\langle A^\prime \rangle)) > a$, then observe that the addition of the edge $xy$ to $G$ has increased the order of some of the longest paths (at least one longest path) in $H(\langle A^\prime \rangle)$ from $a_1$ to $t = a_1 + k > a$, where $k \geq 1$. On the other hand, any path of order $t > a$ in $H(\langle A^\prime \rangle)$ must contain the edge $xy$ also.
Let $P : u_1u_2u_3 \dots u_iu_{i+1} \dots u_{a_1} u_{a_1+1} \dots u_t$ be any path of order $t > a$. Then, note that the edge $xy = u_j u_{j+1}$ for some $j$, $1 \leq j \leq t-1$ and $t \leq 2a_1$.
\begin{observation} If we remove the vertex $u_{a+1}$ from the path $P$, then we obtain two subpaths $u_1 u_2 \dots u_i u_{i+1} \dots u_{a-1} u_a$, say $P^\prime$ and $u_{a+2} u_{a+3} \dots u_{t-1} u_t$, say $P^{\prime\prime}$ of $P$. The number of vertices in $P^\prime$ is exactly $a$ and the number of vertices in $P^{\prime\prime}$ is $t-(a+1) \leq t-(a_1+1) \leq 2a_1-a_1-1 = a_1-1 < a_1 \leq a$. \end{observation}
\begin{observation} Consider the subpath $Q : u_1 u_2 \dots u_{a-1} u_a u_{a+1}$ of $P$. Then observe that the end vertex $u_{a+1}$ of $Q$ cannot be adjacent to any of the end vertices of any path of order $b$ in the induced subgraph $H(\langle B^\prime \rangle) = G(\langle B^\prime \rangle)$ in $H$. \end{observation}
For, suppose $u_{a+1}$ is adjacent to an end vertex of a path, say $Z$ of order $b$ in $H(\langle B^\prime \rangle) = G(\langle B^\prime \rangle)$. Let $Z = v_1 v_2 \dots v_b$. Without loss of generality, let $u_{a+1}$ be adjacent to $v_1$. Then, there exists a path $Q \cup Z : u_1 u_2 \dots u_{a-1} u_a u_{a+1} v_1 v_2 \dots v_b$ of order $a+b+1 > a+b = \tau(H)$, a contradiction (Similar contradiction hold good if $u_{a+1}$ is adjacent $v_b$).
Let $\{R_0, R_1, \dots, R_t\}$ be the set of all paths in $H(\langle A^\prime \rangle)$ of order at least $a+1$. For $1 \leq i \leq t$, let $u_{a+1}^i$ denote the terminus vertex of the subpath of $R_i$ of order $a+1$ and having its origin as the origin of $R_i$. Let $\{u_{a+1}^{\alpha_1}, u_{a+1}^{\alpha_2}, \dots, u_{a+1}^{\alpha_h}\}$ be the set of distinct vertices from the vertices $u_{a+1}^1, u_{a+1}^2, \dots, u_{a+1}^t$, where $h \leq t$. Suppose $\{u_{a+1}^{\alpha_1}, u_{a+1}^{\alpha_2}, \dots, u_{a+1}^{\alpha_h}\}$ induces any path in $H(\langle A^\prime \rangle)$. Consider any such path $X : u_{a+1}^{\beta_1} u_{a+1}^{\beta_2} \dots u_{a+1}^{\beta_c}$, where $\{\beta_1, \beta_2, \dots, \beta_c\} \subseteq \{\alpha_1, \alpha_2, \dots, \alpha_h\}$. Then, for $1 \leq i \leq c$, any vertex $u_{a+1}^{\beta_i}$ divides the path $X$ into three subpaths $u_{a+1}^{\beta_1} u_{a+1}^{\beta_2} \dots u_{a+1}^{\beta_{i-1}}$, $u_{a+1}^{\beta_i}$, and $u_{a+1}^{\beta_{i+1}} u_{a+1}^{\beta_{i+2}} \dots u_{a+1}^{\beta_c}$.
\begin{figure}
\caption{Structures of various paths of order $t \geq a+1$ in $A'$}
\label{fig1}
\end{figure}
\noindent\textbf{Claim 1.} For every $i$, $1 \leq i \leq h$, the vertex $u_{a+1}^{\beta_i}$ cannot be adjacent to any of the end vertices of any path of order greater than or equal to $b-q$ in $H(\langle B^\prime \rangle)$, where $q = i-1$ or $c-i$.\\ First we ascertain that $b \geq q+1$ in Observation 2.3, then we prove Claim 1.
\begin{observation} $b \geq q+1$ \end{observation} For, suppose $b<q+1$. If $q = c-i$, then consider the path \[K = u_{a+1}^{\beta_i} u_{a+1}^{\beta_{i+1}} \dots u_{a+1}^{\beta_c} u_a^{\beta_c} u_{a-1}^{\beta_c} \dots u_2^{\beta_c} u_1^{\beta_c}\] in $H(\langle A^\prime \rangle)$ having $1+c-i+a=1+q+a$ vertices. As $q+1 > b$, the path $K$ has at least $a+b+1$ vertices. This implies there exists a path of order at least $a+b+1$ in $H(\langle A^\prime \rangle)$, a contradiction to the fact that $\tau(H) = a+b$. Similarly, if $q = i-1$, then consider the path \[K^\prime = u_{a+1}^{\beta_i} u_{a+1}^{\beta_{i-1}} \dots u_{a+1}^{\beta_1} u_a^{\beta_1} u_{a-1}^{\beta_1} \dots u_2^{\beta_1} u_1^{\beta_1}\] in $H(\langle A^\prime \rangle)$ having $i+a = 1+q+a$ vertices. As $q+1 > b$, the path $K^\prime$ has at least $a+b+1$ vertices. This implies that there exists a path of order at least $a+b+1$ in $H(\langle A^\prime \rangle)$, a contradiction to the fact that $\tau(H) = a+b$. Hence, $b \geq q+1$.\\
To prove Claim 1, we suppose $u_{a+1}^{\beta_i}$, for some $i$, $1 \leq i \leq h$ is adjacent to an end vertex of a path of order $l \geq b-q$ in $H(\langle B^\prime \rangle)$. Let $Y = w_1 w_2 w_3 \dots w_{l}$ be a path of order $l \geq b-q$ in $H(\langle B^\prime \rangle)$ such that (without loss of generality) $w_{l}$ is adjacent to the vertex $u_{a+1}^{\beta_i}$.
\noindent\textbf{Case 1.2a.} $q = c-i$\\ Then consider the path $S = w_1 w_2 \dots w_{l} u_{a+1}^{\beta_i} u_{a+1}^{\beta_{i+1}} \dots u_{a+1}^{\beta_c} u_a^{\beta_c} u_{a-1}^{\beta_c} \dots u_2^{\beta_c} u_1^{\beta_c}$, where $u_1^{\beta_c} u_2^{\beta_c} \dots u_{a}^{\beta_c} u_{a+1}^{\beta_c}$ is a subpath of $R_{\beta_c}$ of order $a+1$ having the vertex $u_1^{\beta_c}$, the origin of $R_{\beta_c}$ as its origin. As $Y : w_1 w_2 w_3 \dots w_{l}$ is the path in $H(\langle B^\prime \rangle)$ such that $w_{l}$ is adjacent to $u_{a+1}^{\beta_i}$, it follows that $S$ is a path in $H$ having the order $l+1+c-i+a \geq b-q+1+q+a=b+a+1 > \tau(H)$, a contradiction.
\noindent\textbf{Case 1.2b.} $q = i-1$
Then consider the path $S^\prime = w_1 w_2 \dots w_{l} u_{a+1}^{\beta_i} u_{a+1}^{\beta_{i-1}} \dots u_{a+1}^{\beta_2} u_{a+1}^{\beta_1} u_a^{\beta_1} u_{a-1}^{\beta_1}$ $\dots$ $u_2^{\beta_1} u_1^{\beta_1}$, where $u_1^{\beta_1} u_2^{\beta_1} \dots u_a^{\beta_1} u_{a+1}^{\beta_1}$ is the subpath of $R_{\beta_1}$ of order $a+1$ having the vertex $u_1^{\beta_1}$, the origin of $R_{\beta_1}$ as its origin. As $Y : w_1 w_2 w_3 \dots w_{l}$ is the path in $H(\langle B^\prime \rangle)$ such that $w_{l}$ is adjacent to $u_{a+1}^{\beta_i}$, it follows that $S^\prime$ is a path in $H$ having the order $l+1+i-1+a \geq b-q+1+q+a=b+a+1 > \tau(H)$, a contradiction.\\ Hence the Claim 1.
Thus, it follows from Claim 1 that \begin{align}\label{eq1} \tau(H(\langle B^\prime \cup \{u_{a+1}^{\alpha_1}, u_{a+1}^{\alpha_2}, \dots, u_{a+1}^{\alpha_h}\} \rangle)) \leq b \end{align} From Observation 1, it follows that \begin{align}\label{eq2} \tau(H(\langle A^\prime \backslash \{u_{a+1}^{\alpha_1}, u_{a+1}^{\alpha_2}, \dots, u_{a+1}^{\alpha_h}\} \rangle)) \leq a \end{align} Let $A = A^\prime \backslash \{u_{a+1}^{\alpha_1}, u_{a+1}^{\alpha_2}, \dots, u_{a+1}^{\alpha_h}\}$ and $B = B^\prime \cup \{u_{a+1}^{\alpha_1}, u_{a+1}^{\alpha_2}, \dots, u_{a+1}^{\alpha_h}\}$. Then, from \eqref{eq1} and \eqref{eq2} it follows that $\tau(H(\langle A \rangle)) \leq a$ and $\tau(H(\langle B \rangle)) \leq b$.\\ Hence $(A, B)$ is a required $(a, b)$-partition of $H$.
\noindent\textbf{Case 2.} $r = 1$\\ Then $P_k : xv_1y$.
\noindent\textbf{Case 2.1.} Both $x$ and $y$ belong to the same partition $A^\prime$ or $B^\prime$. \\ Without loss of generality, we assume that $x, y \in B^\prime$. That is, $x, y \not\in A^\prime$. Then $(A^\prime \cup \{v_1\}, B^\prime)$ is a required $(a, b)$-partition of $V(H)$.\\ \noindent\textbf{Case 2.2.} The vertices $x$ and $y$ belong to different partitions $A^\prime$ and $B^\prime$.
Without loss of generality, we assume that $x \in A^\prime$ and $y \in B^\prime$. If $x$ is not an end vertex of a path of order $a$ in $H(\langle A^\prime \rangle)$, then $(A^\prime \cup \{v_1\}, B^\prime)$ is a required $(a, b)$-partition of $V(H)$. If $x$ is an end vertex of a path of order $a$ in $H(\langle A^\prime \rangle)$, then $y$ cannot be an end-vertex of a path of order $b$ in $H(\langle B^\prime \rangle)$ (otherwise, $H$ would have a path of order $a+b+1 > \tau(H)$). Therefore $(A^\prime, B^\prime \cup \{v_1\})$ is a required $(a, b)$-partition of $V(H)$.
\noindent\textbf{Case 3.} $r \geq 2$\\ Colour all vertices of $A^\prime$ with red colour and colour all the vertices of $B^\prime$ with blue colour. Since the vertices $x, y \in V(G)$, they are coloured with either blue or red colour. Without loss of generality, we assume that $x \in A^\prime$. Give $v_r$ the alternate colour to that of the vertex $y$. As $x$ is coloured with red colour, colour the vertex $v_1$ with blue colour. In general, for $2 \leq i \leq r-1$, sequentially colour the vertex $v_i$ with the alternate colour to the colour of the vertex $v_{i-1}$. Then observe that $P_k$ contains no induced monochromatic subgraph of order greater than 2 and no monochromatic path in $A^\prime$ or in $B^\prime$ can be extended to include any of the vertices $v_1, v_2, \dots, v_r$ of $P_k$.
Let $X_1$ be the set of all red coloured vertices of $P_k - \{x, y\}$ and let $X_2$ be the set of all blue coloured vertices of $P_k -\{x, y\}$. Then $\tau(H(\langle A^\prime \cup X_1 \rangle)) \leq a_1 \leq a$ and $\tau(H(\langle B^\prime \cup X_2 \rangle)) \leq b_1 \leq b$. Hence $(A^\prime \cup X_1, B^\prime \cup X_2)$ is a required $(a, b)$-partition of $H$.\\ Thus, $H$ is $\tau$-partitionable. This completes the induction. Hence every 2-connected graph is $\tau$-partitionable. \end{proof}
The following Corollary \ref{cor2.1} is an immediate consequence of Theorem \ref{thm1.1} and Theorem \ref{thm2.2}.
\begin{corollary}\label{cor2.1} Every graph is $\tau$-partitionable. \end{corollary}
It is clear that Corollary \ref{cor2.1} settles the Path Partition Conjecture affirmatively. Thus, ``{\bf the Path Partition Conjecture is true}''.
The following Theorem \ref{thm2.3} called ``Path Partition Theorem'' is a simple implication of Corollary \ref{cor2.1}.
\begin{theorem}[Path Partition Theorem] \label{thm2.3} For every graph $G$ and for every $t$-tuple $(a_1, a_2, \dots, a_t)$ of positive integers with $a_1 + a_2 + \cdots + a_t = \tau(G)$ and $t \geq 1$, there exists a partition $(V_1, V_2, \dots, V_t)$ of $V(G)$ such that $\tau(G(\langle V_i \rangle)) \leq a_i$, for every $i$, $1 \leq i \leq t$. \end{theorem} \begin{proof} Let $G$ be a graph. Consider any $t$-tuple $(a_1,a_2,\dots,a_t)$ of positive integers with $a_1+a_2+\dots+a_t=\tau(G)$, and $t \geq 1$. Then by Corollary 2.1, for the pair of positive integers $(a,b)$ with $a+b=\tau(G)$, where $a=a_1$ and $b=a_2+\cdots+a_t$, there exists a partition $(U_1,U_2)$ of $V(G)$ such that $\tau(G(\langle U_1\rangle)) \leq a = a_1$ and $\tau(G(\langle U_2 \rangle)) \leq b = a_2+a_3+\cdots+a_t$. Consider the graph $H=G(\langle U_2 \rangle)$. Then for the pair of positive integers $(c,d)$ with $c+d=\tau(H)=\tau(G(\langle U_2 \rangle))$, where $c=a_2$ and $d=a_3+a_4+\dots+a_t$, by Corollary 2.1, there exists a partition $(U_{21},U_{22})$ of $V(H)$ such that $\tau(H(\langle U_{21}\rangle )) \leq c =a_2$ and $\tau(H(\langle U_{22}\rangle )) \leq d=a_3+a_4+\dots+a_t$. As $H(\langle U_{21} \rangle)=G(\langle U_{21} \rangle)$ and $H(\langle U_{22} \rangle)=G(\langle U_{22} \rangle)$, we have $\tau(G(\langle U_{21} \rangle))\leq c = a_2$ and $\tau(G(\langle U_{22} \rangle))\leq d = a_3+a_4+\cdots+a_t$. Similarly, if we consider the pair of positive integers $(x,y)$ with $x+y=\tau(Q)$, where $Q=G(\langle U_{22} \rangle)$, $x=a_3$ and $y=a_4+a_5+\cdots+a_t$, by Corollary 2.1, we get a partition $(U_{31},U_{32})$ such that $\tau(G(\langle U_{31} \rangle)) \leq x =a_3$ and $\tau(G(\langle U_{32} \rangle)) \leq y =a_4+a_5+\cdots+a_t$. Continuing this process, finally we get a partition $(V_1,V_2,\cdots,V_t)$ of $V(G)$ such that $\tau(G(\langle V_i \rangle)) \leq a_i$, for every $i$, $1 \leq i \leq t$, where $V_1=U_1$, $V_2=U_{21}$, $V_3=U_{31}$ and so on. This completes the proof. 
\end{proof} \begin{corollary}\label{cor2.2} The $n$-th detour chromatic number satisfies $\chi_n(G) \leq \left\lceil \frac{\tau(G)} {n} \right\rceil$ for every graph $G$ and for every $n \geq 1$. \end{corollary}
\begin{proof} Let $G$ be any graph. For every $n \geq 1$, consider the $\frac{\tau(G)} {n}$-tuple $(n, n, \dots, n)$ if $\tau(G)$ is a multiple of $n$, while if $\tau(G)$ is not a multiple of $n$, then consider the $\left\lceil \frac{\tau(G)} {n} \right\rceil$-tuple $(n, n, \dots, n, \nu)$, where $\nu \equiv \tau(G) \pmod{n}$. Then, by the Path Partition Theorem, there exists a partition $(V_1, V_2, \dots, V_t)$, where \[t = \begin{cases} \frac{\tau(G)} {n} & \text{if } \tau(G) \text{ is
a multiple of } n \\ \left\lceil \frac{\tau(G)} {n} \right\rceil &
\text{if } \tau(G) \text{ is not a multiple of } n \end{cases}\] such that $\tau(G(\langle V_i \rangle)) \leq n$, for every $i$, $1 \leq i \leq t$. For each $i$, $1 \leq i \leq t$, assign the (distinct) colour $i$ to all the vertices in each $G(\langle V_i \rangle)$. Then every monochromatic path in $G$ has the order at most $n$. Thus, $\chi_n(G) \leq \left\lceil \frac{\tau(G)} {n} \right\rceil$. \end{proof}
Corollary \ref{cor2.2} essentially ascertains that ``{\bf Frick-Bullock Conjecture is true}''.\\ {\bf Remark 1:} It is clear from the definition of $\chi_n(G)$, when $n=1$, $\chi_1(G) = \chi(G)$. Thus, by Corollary \ref{cor2.2}, for a graph $G$, $\chi(G) = \chi_1(G) \leq \tau(G)$. This upper bound for the chromatic number of a graph $G$ that $\chi(G) \leq \tau(G)$ is the well known Gallai's Theorem \cite{gall}. \section{An Upper Bound for Star Chromatic Number}
In this section we obtain an upper bound for star chromatic number as a consequence of path partition theorem.
\begin{theorem}\label{star} Let $G$ be a graph. Then the star chromatic number of $G$, $\chi_s(G) \leq \tau(G)$. \end{theorem}
\begin{proof} First we prove the result for connected graphs, then the result follows naturally for the disconnected graphs. Let $G$ be a connected graph.
\noindent\textbf{Claim 1:} There exists a proper $\tau(G)$-vertex colouring for $G$.
\noindent Consider $\tau(G)$. If $\tau(G)$ is even, say $2k$, for some $k \geq 1$, then consider the $k$-tuple $(2, 2, \dots, 2)$ with $2 + 2 + 2 + \cdots + 2 = 2k = \tau(G)$. By Path Partition Theorem, there exists a partition $(V_1, V_2, \dots, V_k)$ such that $\tau(G(\langle V_i \rangle)) \leq 2$, for every $i$, $1 \leq i \leq k$. Therefore, every induced subgraph $G(\langle V_i \rangle)$, for $i$, $1 \leq i \leq k$ is the union of a set of independent vertices and/or a set of independent edges. Thus, it is clear that, for $i$, $1 \leq i \leq k$, each $G(\langle V_i \rangle)$ is proper 2-vertex colourable. Properly colour the vertices of each $G(\langle V_i \rangle)$ with a distinct pair of colours $c_{i_1}$ and $c_{i_2}$, for $i$, $1 \leq i \leq k$. Consequently, this proper 2-vertex colouring of $G(\langle V_i \rangle)$, for all $i$, $1 \leq i \leq k$ induces a proper $\tau(G)$-vertex colouring for the graph $G$. If $\tau(G)$ is odd, say $2k+1$, for some $k \geq 1$, then consider the $k+1$-tuple $(2, 2, \dots, 2, 1)$ with $2 + 2 + 2 + \cdots + 1 = 2k+1 = \tau(G)$. Then by Path Partition Theorem there exists a partition $(V_1, V_2, \dots, V_k, V_{k+1})$ such that $\tau(G(\langle V_i \rangle)) \leq 2$, for every $i$, $1 \leq i \leq k$ and $\tau(G(\langle V_{k+1} \rangle)) \leq 1$. Consequently, the vertices of each $G(\langle V_i \rangle)$ can be properly coloured with a distinct pair of colours $c_{i_1}$ and $c_{i_2}$, for $i$, $1 \leq i \leq k$ and the vertices of $G(\langle V_{k+1} \rangle)$ are colored properly with a distinct color $c_{(k+1)_1}$. Thus, this proper 2-vertex colouring of $G(\langle V_i \rangle)$, for all $i$, $1 \leq i \leq k$ and the proper 1 colouring of $G(\langle V_{k+1} \rangle)$ induce a proper $\tau(G)$-vertex colouring for the graph $G$. Hence the Claim 1.
\noindent\textbf{Claim 2:} $\chi_s(G) \leq \tau(G)$
\noindent To prove Claim 2, we show that the vertices of every path of order four are either coloured with three or four different colours by the above proper $\tau(G)$-vertex colouring of $G$, or, if there exists a bicoloured path of order four in $G$ under the above proper $\tau(G)$-vertex colouring of $G$, then the vertices of such a bicoloured path of order four are properly recoloured so that they are coloured with at least three different colours after the recolouring.
\begin{observation} As \[\tau(G(\langle V_i \rangle)) \leq \begin{cases} 2, & \text{for } 1
\leq i \leq k \\ 1, & \text{for } i = k+1 \text{ and } \tau(G)
\text{ is odd} \end{cases}\] any path of order four in $G$ must contain vertices from at least two of the induced subgraphs $G(\langle V_i \rangle)$'s, where $1 \leq i \leq \alpha$, and $\alpha = k$ when $\tau(G)$ is even, while when $\tau(G)$ is odd, $\alpha = k+1$ \mbox{[}Hereafter $\alpha$ is either $k$ or $k+1$ depending on whether $\tau(G)$ is even or odd, respectively\mbox{]}. If any path of order four of $G$ contains vertices from three or four of the induced subgraphs $G(\langle V_i \rangle)$'s then such a path has vertices coloured with three or four colours by the proper $\tau(G)$-vertex colouring of $G$. Thus, we consider only those paths of order four in $G$ having vertices from exactly two of the induced subgraphs $G(\langle V_i \rangle)$'s, where $1 \leq i \leq \alpha$, for recolouring if it is bicoloured. \end{observation}
Consider any path $P$ of order four in $G$ having at least one vertex (at most three vertices) in $G(\langle V_i \rangle)$ for each $i$, $1 \leq i \leq \alpha$ and at least one vertex (at most three vertices) in $G(\langle V_j \rangle)$, for every $j$, $1 \leq i < j \leq \alpha$.
\noindent\textbf{Case 1.} Suppose a path $P$ of order four in $G$ has one vertex in $G(\langle V_i \rangle)$
\hspace{0.8cm} and three vertices in $G(\langle V_j \rangle)$, for $i,j$, $1 \leq i < j \leq \alpha$
\noindent Then without loss of generality we assume that $u_{i_1}$ is one of the vertices of $P$ which is in $G(\langle V_i \rangle)$ and we assume $w_{j_1}, w_{j_2}$ and $w_{j_3}$ are the other three vertices of $P$ which are in $G(\langle V_j \rangle)$. Under this situation, in order that the path $P$ is to be a path of order four with the vertices $u_{i_1}, w_{j_1}, w_{j_2}, w_{j_3}$, two of the vertices from the three vertices $w_{j_1}, w_{j_2}$ and $w_{j_3}$ in $G(\langle V_j \rangle)$ must be adjacent in $G(\langle V_j \rangle)$. Since $V_{k+1}$ is an independent set of vertices, $j \leq k$. As the vertices of each induced subgraph $G(\langle V_j \rangle)$ are properly coloured with 2 colours $c_{j_1}, c_{j_2}$, for $j$, $1 \leq j \leq k$ by the proper $\tau(G)$-vertex colouring, those two adjacent vertices from the three vertices $w_{j_1}, w_{j_2}$ and $w_{j_3}$ in $G(\langle V_j \rangle)$ should have been coloured with two different colours $c_{j_1}, c_{j_2}$ by the $\tau(G)$-vertex colouring. In $G(\langle V_i \rangle)$ each vertex is coloured with either $c_{i_1}$ or $c_{i_2}$ by the proper $\tau(G)$ vertex colouring, the vertex $u_{i_1}$ is coloured with either $c_{i_1}$ or $c_{i_2}$ in $G(\langle V_i \rangle)$ by the proper $\tau(G)$-vertex colouring. This implies that the path $P$ of order four having the vertices $u_{i_1}, w_{j_1}, w_{j_2}$ and $w_{j_3}$ are coloured with at least three different colours by the proper $\tau(G)$-vertex colouring of $G$.
\noindent\textbf{Case 2} Suppose a path $P$ of order four in $G$ has exactly two vertices in
\hspace{0.8cm} $G(\langle V_i \rangle)$ and has exactly two vertices in $G(\langle V_j \rangle)$.
\noindent Let $u_{i_1}$ and $u_{i_2}$ be the two vertices of $P$ in $G(\langle V_i \rangle)$ and let $w_{j_1}$ and $w_{j_2}$ be the two vertices of $P$ in $G(\langle V_j \rangle)$.
\noindent\textbf{Case 2.1.} Suppose either $u_{i_1}, u_{i_2}$ are coloured with two different colours
\hspace{1.3cm} $c_{i_1}, c_{i_2}$ in $G(\langle V_i \rangle)$ or $w_{j_1}, w_{j_2}$ are coloured with two different colours
\hspace{1.3cm} $c_{j_1}, c_{j_2}$ in $G(\langle V_j \rangle)$ by the proper $\tau(G)$-vertex colouring.
\noindent Then the vertices of the path $P$ of order four having the vertices $u_{i_1}, u_{i_2}, w_{j_1}$ and $w_{j_2}$ are coloured with three or four different colours by the proper $\tau(G)$-vertex colouring of $G$.
\noindent\textbf{Case 2.2.} Suppose neither the vertices $u_{i_1}, u_{i_2}$ received different colours in
\hspace{1.3cm} $G(\langle V_i \rangle)$ nor the vertices $w_{j_1}, w_{j_2}$ received different colours in
\hspace{1.3cm} $G(\langle V_j \rangle)$ by the proper $\tau(G)$-vertex colouring.
\noindent Then without loss of generality, we assume that $u_{i_1}, u_{i_2}$ received the same colour $c_{i_1}$ in $G(\langle V_i \rangle)$ and without loss of generality, we assume that $w_{j_1}, w_{j_2}$ received the same colour $c_{j_1}$ in $G(\langle V_i \rangle)$ by the $\tau(G)$-vertex colouring. As the vertices of $G$ are properly coloured, the vertices $u_{i_1}$ and $u_{i_2}$ should be non-adjacent in $G(\langle V_i \rangle)$ as well as the vertices $w_{j_1}$ and $w_{j_2}$ should also be non-adjacent in $G(\langle V_j \rangle)$. Since for every $h$, $1 \leq h \leq \alpha$, $\tau(G(\langle V_h \rangle)) \leq 2$, $G(\langle V_h \rangle)$ is the union of independent vertices and / or independent edges, every vertex in each $G(\langle V_h \rangle)$ is of degree either 0 or 1. Suppose either $u_{i_1}$ or $u_{i_2}$ is of degree 0 in $G(\langle V_i \rangle)$. Then without loss of generality, we assume that $u_{i_1}$ is of degree 0 in $G(\langle V_i \rangle)$. Since $u_{i_1}$ is not adjacent to any vertex in $G(\langle V_i \rangle)$, recolour the vertex $u_{i_1}$ with the colour $c_{i_2}$ [Since vertices of $G(\langle V_i \rangle)$
are properly coloured with either $c_{i_1}$ or $c_{i_2}$ colours, this recolouring is possible]. Thus, after this recolouring, the vertices $u_{i_1}, u_{i_2}$, $w_{j_1}$ and $w_{j_2}$ of the path $P$ have received three different colours. Hence, we assume neither $u_{i_1}$ nor $u_{i_2}$ is of degree 0 in $G(\langle V_i \rangle)$. Therefore, the degree of each of the vertices $u_{i_1}$ and $u_{i_2}$ must be of degree 1 in $G(\langle V_i \rangle)$. As vertices of each $G(\langle V_i \rangle)$ are properly coloured for $i$, $1 \leq i \leq \alpha$ and as the vertices $u_{i_1}$ and $u_{i_2}$ are coloured with the same colour $c_{i_1}$ in $G(\langle V_i \rangle)$, the vertices $u_{i_1}$ and $u_{i_2}$ must be non-adjacent in $G(\langle V_i \rangle)$. Since the $deg(u_{i_1}) = 1$ in $G(\langle V_i \rangle)$, the vertex $u_{i_1}$ should have an adjacent vertex $u_{i_1}^\prime$ in $G(\langle V_i \rangle)$ and it should have been coloured with the colour $c_{i_2}$ in $G(\langle V_i \rangle)$ by the proper $\tau(G)$-vertex colouring. For each $h$, $1 \leq h \leq k$, $\tau(G(\langle V_h \rangle) \leq 2$, the edge $u_{i_1} u_{i_1}^\prime$ must be an independent edge in $G(\langle V_i \rangle)$. Exchange the colours of $u_{i_1}$ and $u_{i_1}^\prime$. Thus, after this recolouring (this exchange), the vertex $u_{i_1}$ is coloured with $c_{i_2}$. Therefore, after the recolouring the vertices $u_{i_1}$ and $u_{i_2}$ received two different colours $c_{i_2}$ and $c_{i_1}$ respectively in $G(\langle V_i \rangle)$. As a result, the vertices $u_{i_1}$, $u_{i_2}$, $w_{j_1}$ and $w_{j_2}$ have received three different colours in $G(\langle V_i \cup V_j \rangle)$. Hence the path $P$ of order four having the four vertices $u_{i_1}$, $u_{i_2}$, $w_{j_1}$, $w_{j_2}$ are coloured with three different colours in $G$ after the recolouring.
\noindent\textbf{Case 3} Suppose the path $P$ of order four in $G$ has three vertices in $G(\langle V_i \rangle)$
\hspace{0.8cm} and the remaining one vertex in $G(\langle V_j \rangle)$, for $i,j$, $1 \leq i < j \leq \alpha$.
\noindent Without loss of generality, we assume that $w_{j_1}$ is one of the vertices of $P$ which is in $G(\langle V_j \rangle)$ and we assume $u_{i_1}$, $u_{i_2}$ and $u_{i_3}$ are the other three vertices of $P$ which are in $G(\langle V_i \rangle)$. Then as seen in Case 1, two of the vertices from the three vertices $u_{i_1}$, $u_{i_2}$ and $u_{i_3}$ should have received two different colours $c_{i_1}$, $c_{i_2}$ by the proper $\tau(G)$-vertex colouring. Consequently, the vertices of the Path $P$ should have received three or four different colours in $G$.
Thus, every path $P$ of order four in $G$ is either coloured with at least three different colours by the proper $\tau(G)$-vertex colouring of $G$, or else, if it is bicoloured by the proper $\tau(G)$-vertex colouring, then the vertices of such a path $P$ can be recoloured as done in the above recolouring process so that the vertices of $P$ are coloured with at least three different colours.\\ Thus there exists a $\tau(G)$-star colouring for $G$. Hence, $\chi_s(G) \leq \tau(G)$. Hence Claim 2.
If $G$ is a disconnected graph with $t \geq 2$ components $G_1,G_2,\dots,G_t$. Then by Claim 2, $\chi_s(G_i) \leq \tau(G_i)$, for $i$, $1 \leq i \leq t$. Let $\smash{\displaystyle\max_{1 \leq i \leq t}}\,\, \chi_s(G_i) = \chi_s(G_k)$ for some $k$, $1 \leq k \leq t$. Since $\chi_s(G) = \smash{\displaystyle\max_{1 \leq i \leq t}}\,\,\chi_s(G_i)$, we have $\chi_s(G) = \chi_s(G_k) \leq \tau(G_k) \leq \smash{\displaystyle\max_{1 \leq i \leq t}}\,\, \tau(G_i) = \tau(G)$. Thus, $\chi_s(G) \leq \tau(G)$. This completes the proof. \end{proof}
An acyclic colouring of $G$ is a proper vertex colouring of $G$ such that no cycle of $G$ is bicoloured. Acyclic chromatic number of a graph $G$, denoted $a(G)$ is the minimum of colours which are necessary to acyclically colour $G$.
\begin{corollary} Let $G$ be any graph. Then the acyclic chromatic number of $G$, $a(G) \leq \tau(G)$. \end{corollary}
\begin{proof} For every graph $G$, $a(G) \leq \chi_s(G)$. By Theorem \ref{star}, we have $\chi_s(G) \leq \tau(G)$ for any graph $G$. Thus, $a(G) \leq \tau(G)$, for any graph $G$. \end{proof}
\section{Discussion} The Path Partition Theorem is a beautiful and natural theorem and it significantly helped to obtain the upper bounds for the chromatic number, the star chromatic number and the detour chromatic numbers. We believe that the Path Partition Theorem can be significantly used for obtaining upper bounds of other different colouring related parameters too. In a general approach, understanding the following question will be interesting and significant too. \begin{quotation} What are the other graph parameters for which such partitions (like the $\tau$-partition) can be obtained? \end{quotation}
\section*{References}
\end{document} |
\begin{document}
\title{The One-Phase Bifurcation For The $p$-Laplacian} \author{Alaa Akram Haj Ali \& Peiyong Wang\footnote{Peiyong Wang is partially supported by a Simons Collaboration Grant.}\\ \footnotesize Department of Mathematics\\ \footnotesize Wayne State University\\
\footnotesize Detroit, MI 48202\\
\normalsize} \date{} \maketitle \begin{abstract} A bifurcation about the uniqueness of a solution of a singularly perturbed free boundary problem of phase transition associated with the $p$-Laplacian, subject to given boundary condition is proved in this paper. We show this phenomenon by proving the existence of a third solution through the Mountain Pass Lemma when the boundary data decreases below a threshold. In the second part, we prove the convergence of an evolution to stable solutions, and show the Mountain Pass solution is unstable in this sense. \end{abstract}
\textbf{AMS Classifications:} 35J92, 35J25, 35J62, 35K92, 35K20, 35K59
\textbf{Keywords:} bifurcation, phase transition, $p$-Laplacian, Mountain Pass Theorem, Palais-Smale condition, critical point, critical boundary data, convergence of evolution.
\section{Introduction}\label{introduction} In this paper, one considers the phase transition problem of minimizing the $p$-functional \begin{equation}\label{p-functional}
J_{p,\varepsilon}(u) = \int_{\Omega}\frac{1}{p}|\nabla u(x)|^p + Q(x)\Gamma_{\varepsilon}(u(x))\,dx\ \ \ (1<p<\infty) \end{equation} which is a singular perturbation of the one-phase problem of minimizing the functional associated with the $p$-Laplacian \begin{equation}\label{p-functional_original}
J_p(u) = \int_{\Omega}\frac{1}{p}|\nabla u(x)|^p + Q(x)\chi_{\{u(x)>0\}}\,dx, \end{equation} where $\Gamma_{\varepsilon}(s) = \Gamma(\frac{s}{ \varepsilon})$ for $\varepsilon > 0$ and for a $C^{\infty}$ function $\Gamma$ defined by $$\Gamma(s) = \left\{\begin{array}{ll} 0 &\ \text{\ if\ }s\leq 0\\ 1 &\ \text{\ if\ }s\geq 1, \end{array}\right.$$ and $0\leq\Gamma(s)\leq 1$ for $0<s<1$, and $Q\in W^{2,2}(\Omega)$ is a positive continuous function on $\Omega$ such that $\inf_{\Omega}Q(x) > 0$. Let $\beta_{\varepsilon}(s) = \Gamma'_{\varepsilon}(s) = \frac{1}{ \varepsilon}\beta(\frac{s}{\varepsilon})$ with $\beta = \Gamma'$. The domain $\Omega$ is always assumed to be smooth in this paper for convenience. As in the following we will fix the value of $\varepsilon$ unless we specifically examine the influence of the value of $\varepsilon$ on the critical boundary data and will not use the notation $J_p$ for a different purpose, we are going to abuse the notation by using $J_p$ for the functional $J_{p,\varepsilon}$ from now on.
The Euler-Lagrange equation of (\ref{p-functional}) is \begin{equation}\label{eulereq} -\bigtriangleup_p u + Q(x)\beta_{\varepsilon}(u) = 0\ \ x\in\Omega \end{equation} One imposes the boundary condition \begin{equation}\label{bdrycondition} u(x) = \sigma(x),\ \ x\in\partial\Omega \end{equation} on $u$, for $\sigma\in C(\partial\Omega)$ with $\min_{\partial\Omega}\sigma > 0$, to form a boundary value problem.
In this paper, we take on the task of establishing in the general case when $p\ne 2$ the results proved in \cite{CW} for the Laplacian when $p=2$. The main difficulty in this generalization lies in the lack of sufficient regularity and the singular-degenerate nature of the $p$-Laplacian when $p\ne 2$. A well-known fact about $p$-harmonic functions is the optimal regularity generally possessed by them is $C^{1,\alpha}$ (e.\,g.\,\cite{E} and \cite{Le}). Thus we need to employ more techniques associated with the $p$-Laplacian, and in a case or two we have to make our conclusion slightly weaker. Nevertheless, we follow the overall scheme of approach used in \cite{CW}. In the second section, we prove the bifurcation phenomenon through the Mountain Pass Theorem. In the third section, we establish a parabolic comparison principle. In the last section, we show the convergence of an evolution to a stable steady state in accordance with respective initial data.
\section{A Third Solution}\label{thirdsolution} We first prove if the boundary data is small enough, then the minimizer is nontrivial. More precisely, let $u_0$ be the trivial solution of (\ref{eulereq}) and (\ref{bdrycondition}), being $p$-harmonic in the weak sense, and $u_2$ be a minimizer of the $p$-functional (\ref{p-functional}), and set $$\sigma_M = \max_{\partial\Omega}\sigma(x)\ \ \text{ and\ }\ \ \sigma_m = \min_{\partial\Omega}\sigma(x).$$ If $\sigma_M$ is small enough, then $u_0\neq u_2$.
In fact, we pick $u\in W^{1,p}(\Omega)$ so that \begin{equation} \left\{\begin{array}{ll} u = 0 &\ \ \text{ in $\Omega_{\delta}$}\\ u = \sigma &\ \ \text{ on $\partial\Omega$,\ \ \ \ and }\\ -\bigtriangleup_p u = 0 &\ \ \text{ in $\Omega\backslash\bar{\Omega}_{\delta}$,}\end{array}\right. \end{equation} where $\Omega_{\delta} = \{x\in\Omega\colon \mathrm{dist}(x,\partial\Omega) > \delta\}$ and $\delta > 0$ is a small constant independent of $\varepsilon$ and $\sigma$ so that $\int_{\Omega_{\delta}}Q(x)\,dx$ has a positive lower bound which is also independent of $\varepsilon$ and $\sigma$. Using an approximating domain if necessary, we may assume $\Omega_{\delta}$ possesses a smooth boundary. Clearly, \begin{equation*}
J_p(u_0) = \int_{\Omega}\frac{1}{p}|\nabla u_0|^p + Q(x)\,dx \geq \int_{\Omega}Q(x)\,dx. \end{equation*} It is well-known that \begin{equation*}
\int_{\Omega\backslash\Omega_{\delta}}|\nabla u|^p \leq C\sigma^{\,p}_M\delta^{1-p}\ \ \text{ for $C = C(n,p,\Omega)$}, \end{equation*} so that \begin{alignat*}{1}
&J_p(u) \leq \int_{\Omega\backslash \Omega_{\delta}}\frac{1}{p}|\nabla u|^p + \int_{\Omega\backslash\Omega_{\delta}}Q(x)\,dx\\ &\leq C\sigma^{\,p}_M\delta^{1-p} + \int_{\Omega\backslash\Omega_{\delta}}Q(x)\,dx. \end{alignat*} So, for all small $\varepsilon > 0$, \begin{equation*} J_p(u) - J_p(u_0) \leq C\sigma^{\,p}_M\delta^{1-p} - \int_{\Omega_{\delta}} Q(x)\,dx < 0 \end{equation*} if $\sigma_M\leq \sigma_0$ for some small enough $\sigma_0 = \sigma_0(\delta, \Omega, Q)$.
Let $\mathfrak{B}$ denote the Banach space $W^{1,p}_0(\Omega)$ we will work with. For every $v\in\mathfrak{B}$, we write $u = v + u_0$ and adopt the norm $\|v\|_{\mathfrak{B}} = \left(\int_{\Omega}|\nabla v|^p\right)^{\frac{1}{p}} = \left(\int_{\Omega}|\nabla u - \nabla u_0|^p\right)^{\frac{1}{p}}$. We define the functional \begin{equation}
I[v] = J_p(u) - J_p(u_0) = \int_{\Omega}\frac{1}{p}|\nabla u|^p - \int_{\{u < \varepsilon\}}Q(x)\left(1 - \Gamma_{\varepsilon}(u)\right) - \int_{\Omega}\frac{1}{p} |\nabla u_0|^p \end{equation} Set $v_2 = u_2 - u_0$. Clearly, $I[0] = 0$ and $I[v_2] \leq 0$ on account of the definition of $u_2$ as a minimizer of $J_p$. If $I[v_2] < 0$ which is the case if $\sigma_M$ is small, we will apply the Mountain Pass Lemma to prove there exists a critical point of the functional $I$ which is a weak solution of the problem (\ref{eulereq}) and (\ref{bdrycondition}).
The Fr\'{e}chet derivative of $I$ at $v\in \mathfrak{B}$ is given by \begin{equation}
I'[v]\varphi = \int_{\Omega}|\nabla u|^{p-2}\nabla u\cdot\nabla\varphi + Q(x)\beta_{\varepsilon}(u)\varphi\ \ \ \ \varphi\in\mathfrak{B} \end{equation} which is obviously in the dual space $\mathfrak{B}^*$ of $\mathfrak{B}$ in light of the H\"{o}lder's inequality. Equivalently \begin{equation} I'[v] = -\bigtriangleup_p (v+u_0) + Q(x)\beta_{\varepsilon}(v + u_0)\in\mathfrak{B}^*. \end{equation} We see that $I'$ is Lipschitz continuous on any bounded subset of $\mathfrak{B}$ with Lipschitz constant depending on $\varepsilon$, $p$, and $\sup Q$. In fact, for any $v$, $w$, and $\varphi\in \mathfrak{B}$, \begin{alignat*}{1}
&\ \ \left|I'[v]\varphi - I'[w]\varphi\right| = |\int_{\Omega}|\nabla v + \nabla u_0|^{p-2}(\nabla v + \nabla u_0)\cdot\nabla\varphi + Q(x)\beta_{\varepsilon}(v+u_0)\\
&- |\nabla w + \nabla u_0|^{p-2}(\nabla w + \nabla u_0)\cdot\nabla\varphi - Q(x)\beta_{\varepsilon}(w+u_0)| \\
&\leq \left|\int_{\Omega}|\nabla v + \nabla u_0|^{p-2}(\nabla v + \nabla u_0)\cdot\nabla\varphi -
|\nabla w + \nabla u_0|^{p-2}(\nabla w + \nabla u_0)\cdot\nabla\varphi\right| \\
&+ \left|\int_{\Omega}Q(x)\beta_{\varepsilon}(v+u_0) - Q(x)\beta_{\varepsilon}(w+u_0)\right| \end{alignat*} Furthermore, \begin{alignat*}{1}
&\ \ \ \ \left|\int_{\Omega}Q(x)\beta_{\varepsilon}(v+u_0) - Q(x)\beta_{\varepsilon}(w + u_0)\right|\\
&= \left|\int_{\Omega}Q(x)\int^1_0\beta'_{\varepsilon}((1-t)w + tv + u_0)\,dt\,(v(x) - w(x))\,dx \right|\\
&\leq \sup|\beta'_{\varepsilon}|\int_{\Omega}\left|Q(x)\left(v(x) - w(x)\right)\right|\,dx \\
&\leq \frac{C}{\varepsilon^2}\left(\int_{\Omega}Q^{p'}(x)\right)^{\frac{1}{p'}}\left(\int_{\Omega}|v(x) - w(x)|^p\,dx\right)^{\frac{1}{p}} \end{alignat*} and \begin{alignat*}{1}
&\ \ \ \ \left|\int_{\Omega}|\nabla v + \nabla u_0|^{p-2}(\nabla v + \nabla u_0)\cdot \nabla \varphi - |\nabla w + \nabla u_0|^{p-2}(\nabla w + \nabla u_0)\cdot \nabla \varphi\right|\\
&\leq \left|\int_{\Omega}|\nabla v + \nabla u_0|^{p-2}(\nabla v - \nabla w)\cdot \nabla \varphi\right| \\
&\ \ \ \ + \left|\int_{\Omega}\left(|\nabla v + \nabla u_0|^{p-2} - |\nabla w + \nabla u_0|^{p-2}\right)(\nabla w + \nabla u_0)\cdot \nabla \varphi\right|. \end{alignat*} In addition, \begin{alignat*}{1}
&\ \ \ \ \left|\int_{\Omega}|\nabla v + \nabla u_0|^{p-2}(\nabla v - \nabla w)\cdot \nabla \varphi\right|\\
&\leq \left(\int_{\Omega}|\nabla v + \nabla u_0|^p\right)^{\frac{p-2}{p}}\left(\int_{\Omega}|\nabla\varphi|^p\right)^{\frac{1}{p}}\left(\int_{\Omega}|\nabla v - \nabla w|^p\right)^{\frac{1}{p}}, \end{alignat*} and \begin{alignat*}{1}
&\ \ \ \ \left|\int_{\Omega}\left(|\nabla v + \nabla u_0|^{p-2} - |\nabla w + \nabla u_0|^{p-2}\right)\left(\nabla w + \nabla u_0\right)\cdot\nabla\varphi\right|\\
&\leq C(p)\int_{\Omega}\left(|\nabla v + \nabla u_0|^{p-3} + |\nabla w + \nabla u_0|^{p-3}\right)|\nabla v - \nabla w||\nabla w + \nabla u_0||\nabla\varphi|\\
&\leq C(p)\left(\|\nabla v\|_{L^p} + \|\nabla w\|_{L^p} + \|\nabla u_0\|_{L^p}\right)^{p-2}\|\nabla v - \nabla w\|_{L^p(\Omega)}\|\nabla\varphi\|_{L^p(\Omega)}. \end{alignat*} Therefore $I'$ is Lipschitz continuous on bounded subsets of $\mathfrak{B}$.
We note that $f\in\mathfrak{B}^*$ if and only if there exist $f^0$, $f^1$, $f^2$, ..., $f^n\in L^{p'}(\Omega)$, where $\frac{1}{p} + \frac{1}{p'} = 1$, such that \begin{alignat}{1} &<f,u>\ = \int_{\Omega}f^0u + \sum^n_{i=1}f^iu_{x_i} \ \ \text{ holds for all $u\in\mathfrak{B}$; and}\label{repre}\\
&\|f\|_{\mathfrak{B}^*} = \inf\left\{\left(\int_{\Omega}\sum^n_{i=0}|f^i|^{p'}\,dx\right)^{\frac{1}{p'}}\colon \text{(\ref{repre}) holds.}\right\} \end{alignat}
Next we justify the Palais-Smale condition on the functional $I$. Suppose $\{v_k\}\subset\mathfrak{B}$ is a Palais-Smale sequence in the sense that \begin{equation*}
\left|I[v_k]\right|\leq M\ \ \ \ \text{and\ \ }\ \ I'[v_k]\rightarrow 0\ \ \ \ \text{in $\mathfrak{B}^*$} \end{equation*} for some $M > 0$. Let $u_k = v_k + u_0\in W^{1,p}(\Omega)$, $k = 1, 2, 3, ...$.
That $Q(x)\beta_{\varepsilon}(v + u_0)\in W^{1,p}_0(\Omega)$ implies that the mapping $v\mapsto Q(x)\beta_{\varepsilon}(v + u_0)$ from $W^{1,p}_0(\Omega)$ to $\mathfrak{B}^*$ is compact due to the fact $W^{1,p}_0(\Omega)\subset\subset L^p(\Omega)\subset \mathfrak{B}^*$ following from the Rellich-Kondrachov Compactness Theorem. Then there exists $f\in L^p(\Omega)\subset\mathfrak{B}^*$ such that for a subsequence, still denoted by $\{v_k\}$, of $\{v_k\}$, it holds that \begin{equation*} Q(x)\beta_{\varepsilon}(u_k)\rightarrow -f\ \ \text{ in $L^p(\Omega)$.} \end{equation*} Recall that \begin{equation*}
\left\|I'[v_k]\right\|_{\mathfrak{B}^*} = \sup_{\|\varphi\|_{\mathfrak{B}}\leq 1}\left|\int_{\Omega}|\nabla u_k|^{p-2}\nabla u_k\cdot\nabla\varphi + Q(x)\beta_{\varepsilon}(u_k) \varphi\right|\rightarrow 0. \end{equation*} As a consequence, \begin{equation}\label{test1}
\sup_{\|\varphi\|_{\mathfrak{B}}\leq M}\left|\int_{\Omega}|\nabla u_k|^{p-2}\nabla u_k\cdot\nabla \varphi - f\varphi\right| \rightarrow 0\ \ \ \ \text{for any $M\geq 0$.} \end{equation} Obviously, that $\{I[v_k]\}$ is bounded implies that a subsequence of $\{v_k\}$, still denoted by $\{v_k\}$ by abusing the notation without confusion, converges weakly in $\mathfrak{B} = W^{1, p}_0(\Omega)$. In particular, \begin{equation*} \int_{\Omega}fv_k - fv_m\rightarrow 0\ \ \ \ \text{as $k$, $m\rightarrow\infty$.} \end{equation*} Then by setting $\varphi = v_k - v_m = u_k - u_m$ in (\ref{test1}), one gets \begin{equation}\label{conv}
\left|\int_{\Omega}\left(|\nabla u_k|^{p-2}\nabla u_k - |\nabla u_m|^{p-2}\nabla u_m\right)\cdot \nabla (u_k - u_m)\right| \rightarrow 0\ \ \ \ \text{as $k$, $m\rightarrow\infty$,} \end{equation} since \begin{equation*}
\|u_k - u_m\|^p_{\mathfrak{B}} = \|v_k - v_m\|^p_{\mathfrak{B}} \leq 2pM + 2J_p[u_0]. \end{equation*} In particular, if $p = 2$, $\{v_k\}$ is a Cauchy sequence in $W^{1,2}_0(\Omega)$ and hence converges. We will apply the following elementary inequalities associated with the $p$-Laplacian, \cite{L}, to the general case $p\neq 2$: \begin{alignat}{1}
&<|b|^{p-2}b - |a|^{p-2}a,\,b - a> \geq (p-1)|b-a|^2(1 + |a|^2 + |b|^2)^{\frac{p-2}{2}},\ \ 1\leq p\leq 2;\label{ele1}\\
&\text{and}\ \ \ \ <|b|^{p-2}b - |a|^{p-2}a,\,b - a> \geq 2^{2-p}|b-a|^p,\ \ p\geq 2.\label{ele2} \end{alignat} We assume first $1 < p < 2$. Let $K = 2pM + 2J_p[u_0]$. Then the first elementary inequality (\ref{ele1}) implies \begin{alignat*}{1}
&\ \ \ \ \ (p-1)\int_{\Omega}|\nabla u_k - \nabla u_m|^2\left(1+|\nabla u_k|^2 + |\nabla u_m|^2\right)^{\frac{p-2}{2}}\\ &\leq \int_{\Omega}\left(|\nabla u_k|^{p-2}\nabla u_k - |\nabla u_m|^{p-2}\nabla u_m\right)\cdot \nabla (u_k - u_m) \rightarrow 0 \end{alignat*} Meanwhile H\"{o}lder's inequality implies \begin{alignat*}{1}
&\ \ \ \ \ \int_{\Omega}|\nabla v_k - \nabla v_m|^p = \int_{\Omega}|\nabla u_k - \nabla u_m|^p \\
&\leq \left(\int_{\Omega}|\nabla u_k - \nabla u_m|^2\left(1 + |\nabla u_k|^2 + |\nabla u_m|^2\right)^{\frac{p-2}{2}}\right)^{\frac{p}{2}}
\left(\int_{\Omega}\left(1 + |\nabla u_k|^2 + |\nabla u_m|^2\right)^{\frac{p}{2}}\right)^{\frac{2-p}{2}} \\
&\leq C(p)\left(|\Omega| + K\right)^{\frac{2-p}{2}} \left(\int_{\Omega}|\nabla u_k - \nabla u_m|^2\left(1 + |\nabla u_k|^2 + |\nabla u_m|^2\right)^{\frac{p-2}{2}}\right)^{\frac{p}{2}} \end{alignat*} Therefore, $\{v_k\}$ is a Cauchy sequence in $\mathfrak{B}$ and hence converges.
Suppose $p > 2$. The second elementary inequality (\ref{ele2}) implies \begin{alignat*}{1}
&\ \ \ \ \ \int_{\Omega}|\nabla v_k - \nabla v_m|^p = \int_{\Omega}|\nabla u_k - \nabla u_m|^p \\
&\leq 2^{p-2}\int_{\Omega}\left(|\nabla u_k|^{p-2}\nabla u_k - |\nabla u_m|^{ p-2}\nabla u_m\right)\cdot \left(\nabla u_k - \nabla u_m\right), \end{alignat*} which in turn implies $\{v_k\}$ is a Cauchy sequence in $\mathfrak{B}$ and hence converges, on account of (\ref{conv}). The Palais-Smale condition is verified for $1 < p < \infty$ for the functional $I$ on the Banach space $W^{1,p}_0(\Omega)$.
Before we continue the main proof, let us state an elementary result closely related to the $p$-Laplacian, which follows readily from the Fundamental Theorem of Calculus. \begin{lemma}\label{p-inequalities} For any $a$ and $b\in\mathbb{R}^n$, it holds \begin{equation}\label{ele3}
|b|^p \geq |a|^p + p<|a|^{p-2}a, b-a> +\, C(p)|b - a|^p\ \ \ \ (p\geq 2) \end{equation} where $C(p) > 0$.
If $1 < p < 2$, then \begin{equation}\label{ele4}
|b|^p \geq |a|^p + p<|a|^{p-2}a, b-a> +\, C(p)|b-a|^2\int^1_0\int^t_0\left|(1-s)a+sb\right|^{p-2}\,dsdt, \end{equation} where $C(p) = p(p-1)$. \end{lemma}
We are now in a position to show there is a closed mountain ridge around the origin of the Banach space $\mathfrak{B}$ that separates $v_2$ from the origin with the energy $I$ as the elevation function, which is the content of the following lemma. \begin{lemma}
For all small $\varepsilon > 0$ such that $C\varepsilon \leq \frac{1}{2}\sigma_m$ for a large universal constant $C$, there exist positive constants $\delta$ and $a$ independent of $\varepsilon$, such that, for every $v$ in $\mathfrak{B}$ with $\|v\|_{\mathfrak{B}} = \delta$, the inequality $I[v] \geq a$ holds. \end{lemma} \begin{pf}
It suffices to prove $I[v] \geq a > 0$ for every $v\in C^{\infty}_0(\Omega)$ with $\|v\|_{\mathfrak{B}} = \delta$ for $\delta$ small enough, as $I$ is continuous on $\mathfrak{B}$, and $C^{\infty}_0(\Omega)$ is dense in $\mathfrak{B}$.
Let $\Lambda = \{x\in\Omega\colon u(x)\leq\varepsilon\}$, where $u = v + u_0$. We claim that $\Lambda = \emptyset$ if $\delta$ is small enough. If not, one may pick $z\in\Lambda$. Let $\mathcal{AC}([a,b], S)$ be the set of absolutely continuous functions $\gamma\colon [a,b]\rightarrow S$, where $S\subseteq\mathbb{R}^n$. For each $\gamma\in\mathcal{AC}([a,b], S)$, we define its length to be $L(\gamma) = \int^b_a|\gamma'(t)|\,dt$. For $x_0\in\partial\Omega$, we define the distance from $x_0$ to $z$ to be \begin{equation*} d(x_0,z) = \inf\{L(\gamma): \gamma\in\mathcal{AC}([0,1],\bar{\Omega}), \ \text{s.t.\ }\gamma(0) = x_0, \ \text{and\ }\gamma(1)=z\} \end{equation*} As shown in \cite{CW}, there is a minimizing path $\gamma_{x_0}$ for the distance $d(x_0, z)$.
Suppose the domain $\Omega$ is convex or star-like about $z$. For any $x_0\in\partial\Omega$, let $\gamma = \gamma_{x_0}$ be a minimizing path of $d(x_0, z)$. Then it is clear that $\gamma$ is a straight line segment and $\gamma(t)\neq z$ for $t\in [0,1)$. Furthermore, for any two distinct points $x_1$ and $x_2\in\partial\Omega$, the corresponding minimizing paths do not intersect in $\Omega\backslash\{z\}$. For this reason, we can carry out the following computation. Clearly $v(x_0) = 0$ and $v(\gamma(1)) = \varepsilon - u_0(\gamma(1))\leq \varepsilon - \sigma_m < 0$. So the Fundamental Theorem of Calculus \begin{equation*} v(\gamma(1)) - v(\gamma(0)) = \int^1_0\nabla v(\gamma(t))\cdot\gamma'(t)dt \end{equation*} implies \begin{equation}\label{ineq-ftc}
\sigma_m - \varepsilon \leq \int^1_0|\nabla v(\gamma(t))||\gamma'(t)|dt. \end{equation} For each $x_0\in\partial\Omega$, let $e(x_0)$ be the unit vector in the direction of $x_0 - z$ and $\nu(x_0)$ the outer normal to $\partial\Omega$ at $x_0$. Then $\nu(x_0)\cdot e(x_0) > 0$ everywhere on $\partial\Omega$. Hence the above inequality (\ref{ineq-ftc}) implies \begin{alignat*}{1} &\ \ \ \ (\sigma_m - \varepsilon)\int_{\partial\Omega}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0)\\
&\leq \int_{\partial\Omega}\int^1_0|\nabla v(\gamma(t))| |\gamma'(t)|\,dt\,\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) \\
&\leq \int_{\partial\Omega}\left(\int^1_0|\gamma'(t)|\,dt\right)^{\frac{1}{p'}}\left(\int^1_0|\nabla v(
\gamma(t))|^p|\gamma'(t)|\,dt\right)^{\frac{1}{p}}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0),\\ &\hspace{3.5in}\text{ where $\frac{1}{p} + \frac{1}{p'} = 1$,} \\
&= \int_{\partial\Omega} L(\gamma_{x_0})^{\frac{1}{p'}}\left(\int^1_0|\nabla v(\gamma(t))|^p|\gamma'(t)| \,dt\right)^{\frac{1}{p}}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) \\ &\leq \left(\int_{\partial\Omega}L(\gamma_{x_0})\nu(x_0)\cdot e(x_0)\,dH^{n-1}\right)^{\frac{1}{p'}}\left(\int_{\partial\Omega}
\int^1_0|\nabla v(\gamma(t))|^p|\gamma'(t)|\nu \cdot e\,dt\,dH^{n-1}\right)^{\frac{1}{p}} \\
&= C|\Omega|^{\frac{1}{p'}}\left(\int_{\Omega}|\nabla v|^p\,dx\right)^{\frac{1}{p}}\\
&\leq C|\{u > \varepsilon\}|^{\frac{1}{p'}}\delta \leq C|\{u>0\}|^{\frac{1}{p'}}\delta, \end{alignat*} where the second and third inequalities are due to the application of the H\"{o}lder's inequality, and the constant $C$ depends on $n$ and $p$. The second equality follows from the two representation formulas \begin{equation*}
\left|\Omega\right| = C(n)\int_{\partial\Omega}L(\gamma_{x_0})\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) \end{equation*} and \begin{equation*}
\int_{\Omega}\left|\nabla v(x)\right|^p\,dx = C(n)\int_{\partial\Omega}\int^1_0\left|\nabla v(\gamma_{x_0}(t))\right|^p\,\left|\gamma'_{x_0}(t)\right|\nu(x_0) \cdot e(x_0)\,dt\,dH^{n-1}(x_0). \end{equation*} If we take $\delta$ sufficiently small and independent of $\varepsilon$ in the preceding inequality \begin{equation*}
(\sigma_m - \varepsilon)\int_{\partial\Omega}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) \leq C|\{u>0\}|^{\frac{1}{p'}}\delta, \end{equation*}
the measure $|\{u > 0\}|$ of the positive domain would be greater than that of $\Omega$, which is impossible, provided that \begin{equation}\label{direction-normal-ineq} \int_{\partial\Omega}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) \geq C, \end{equation}
for a constant $C$ which depends on $n$, $p$ and $|\Omega|$, but not on $z$ or $v$. Hence $\Lambda$ must be empty. So we need to justify the inequality (\ref{direction-normal-ineq}). To fulfil that condition, for $e = e(x_0)$, we set $l(e,z) = l(e) = L(\gamma_{x_0})$. Then \begin{equation*} \int_{\partial\Omega}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) = \int_{e\in \partial B}\left(l(e)\right)^{n-1}\,d\sigma(e), \end{equation*} where $B$ is the unit ball about $z$ and $d\sigma(e)$ is the surface area element on the unit sphere $\partial B$ which is invariant under rotation and reflection. Clearly, \begin{equation*} \left(\int_{\partial B}\left(l(e)\right)^{n-1}\,d\sigma(e)\right)^{\frac{2}{n-1}}\geq C(n)\int_{\partial B}l^2(e)\,d\sigma(e) \end{equation*} Consequently, in order to prove (\ref{direction-normal-ineq}), one needs only to prove \begin{equation}\label{equiv-integ}
\int_{\partial B}l^2(e)\,d\sigma(e) \geq C(n, p, |\Omega|). \end{equation}
Next, we show the integral on the left-hand-side of (\ref{equiv-integ}) is minimal if $\Omega$ is a ball while its measure is kept unchanged. In fact, this is almost obvious if one notices the following fact. Let $\pi$ be any hyperplane passing through $z$, and $x_1$ and $x_2$ be the points on $\partial\Omega$ which lie on a line perpendicular to $\pi$. Let $x^*_1$ and $x^*_2$ be the points on the boundary $\partial\Omega_{\pi}$, where $\Omega_{\pi}$ is the symmetrized image of $\Omega$ about the hyperplane $\pi$, which lie on the line $\overline{x_1x_2}$. Let $2a = |\overline{x_1x_2}| = |\overline{x^*_1x^*_2}|$ and $d$ be the distance from $z$ to the line $\overline{x_1x_2}$. Then for some $t$ in $-a \leq t \leq a$, it holds that \begin{equation*} L^2(\gamma_{x_1}) + L^2(\gamma_{x_2}) = \left(d^2+(a-t)^2\right) + \left(d^2+(a+t)^2\right) \geq 2(d^2 + a^2) = 2\left(L^*(\gamma_{x^*_1})\right)^2. \end{equation*} As a consequence, if $\Omega^*$ is the symmetrized ball with measure equal to that of $\Omega$, then \begin{equation*}
\int_{\partial B}l^2(e)\,d\sigma(e) \geq \int_{\partial B}\left(l^*(e)\right)^2\,d\sigma(e) = C(n,|\Omega|), \end{equation*} where $l^*$ is the length from $z$ to a point on the boundary $\partial\Omega^*$ which is constant. This finishes the proof of the fact that $\Lambda = \emptyset$.
In case the domain $\Omega$ is not convex, the minimizing paths of $d(x_1, z)$ and $d(x_2, z)$ for distinct $x_1$, $x_2\in\partial\Omega$ may partially coincide. We form the set $\mathcal{DA}(\partial\Omega)$ of the points $x_0$ on $\partial\Omega$ so that a minimizing path $\gamma$ of $d(x_0, z)$
satisfies $\gamma(t)\in\Omega\backslash\{z\}$ for $t\in (0,1)$. We call a point in $\mathcal{DA}(\partial\Omega)$ a \textbf{directly accessible} boundary point. Let $\Omega_1$ be the union of these minimizing paths for the directly accessible boundary points. It is not difficult to see that $|\Omega_1| > 0$ and hence $H^{n-1}(\mathcal{DA}(\partial\Omega)) > 0$. Then we may apply the above computation to the star-like domain $\Omega_1$ with minimal modification. We have \begin{equation}
(\sigma_m - C\varepsilon)\int_{\partial\Omega}\nu(x_0)\cdot e(x_0)\,dH^{n-1}(x_0) \leq C|\Omega_1|^{\frac{1}{p'}}\delta \leq C|\Omega|^{\frac{1}{p'}}\delta. \end{equation}
For small enough $\delta$, this leads to the contradiction $|\Omega| > |\Omega|$. So $\Lambda = \emptyset$.
Finally we prove that $\|v\|_{\mathfrak{B}} = \delta$ implies \begin{equation}
I[v]= \int_{\Omega}\frac{1}{p}|\nabla v + \nabla u_0|^p - \frac{1}{p}|\nabla u_0|^p \geq a\ \ \text{for a certain $a > 0$.} \end{equation}
If $p\geq 2$, then the elementary inequality (\ref{ele3}) implies that \begin{alignat*}{1}
I[v] &= \int_{\Omega}\frac{1}{p}\left|\nabla v+ \nabla u_0\right|^p - \frac{1}{p}\left|\nabla u_0\right|^p \\
&\geq \int_{\Omega}<\left|\nabla u_0\right|^{p-2}\nabla u_0, \nabla v> + C(p)\left|\nabla v\right|^p \\
&= C(p)\int_{\Omega}\left|\nabla v\right|^p = C(p)\delta^p > 0, \end{alignat*} while if $1 < p < 2$, then the elementary inequality (\ref{ele4}) implies \begin{alignat*}{1}
I[v] &\geq p(p-1)\int_{\Omega}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left|\nabla u_0 + s\nabla v\right|^{2-p}}\,dsdtdx \\
&\geq p(p-1)\int_{\Omega}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left(\left|\nabla u_0\right| + s\left|\nabla v\right|\right)^{2-p}}\,dsdtdx.
\end{alignat*}
If $\int_{\Omega}|\nabla u_0|^p = 0$, then $I[v] = \frac{1}{p}\delta^p > 0$. So in the following, we assume $\int_{\Omega}|\nabla u_0|^p > 0$.
Let $S = S_{\lambda} = \{x\in\Omega\colon |\nabla v| > \lambda\delta\}$, where the constant $\lambda = \lambda(p,|\Omega|)$ is to be taken. Then \begin{alignat*}{1}
\delta^p &= \int_{\Omega}|\nabla v|^p = \int_{\{|\nabla v|\leq \lambda\delta\}}|\nabla v|^p + \int_S|\nabla v|^p \\
&\leq (\lambda\delta)^p|\Omega| + \int_S|\nabla v|^p \end{alignat*} and hence \begin{equation*}
\int_S|\nabla v|^p \geq \delta^p\left(1 - \lambda^p|\Omega|\right) \geq \frac{1}{2}\delta^p,\ \ \text{if $\lambda$ satisfies\ }\frac{1}{4} < \lambda^p|\Omega| \leq \frac{1}{2}. \end{equation*} Meanwhile, for $1 < p < 2$, it holds that \begin{alignat*}{1}
I[v] &\geq C(p)\int_{S}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left(\left|\nabla u_0\right| + s\left|\nabla v\right|\right)^{2-p}}\,dsdtdx \\
&=C(p)\left(\int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left(|\nabla u_0| + s|\nabla v|\right)^{2-p}}\,dsdtdx \right. \\
&\ \ \ \ + \left.\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left(\left|\nabla u_0\right| + s\left|\nabla v\right|\right)^{2-p}}\,dsdtdx\right). \end{alignat*} The first integral on the right satisfies \begin{alignat*}{1}
&\ \ \ \ \int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left(|\nabla u_0| + s|\nabla v|\right)^{2-p}}\,dsdtdx \\
&\geq \int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}\left|\nabla v\right|^p\int^1_0\int^t_0\frac{1}{\left(1 + s\right)^{2-p}}\,dsdtdx \\
&= C(p)\int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}\left|\nabla v\right|^p\,dx, \end{alignat*} while the second integral on the right satisfies \begin{alignat*}{1}
&\ \ \ \ \int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^2\int^1_0\int^t_0\frac{1}{\left(\left|\nabla u_0\right| + s\left|\nabla v\right|\right)^{2-p}}\,dsdtdx \\
&\geq \int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\frac{\left|\nabla v\right|^2}{|\nabla u_0|^{2-p}}\int^1_0\int^t_0\frac{ds\,dt}{(1+s)^{2-p}}\,dx \\
&= C(p) \int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\frac{\left|\nabla v\right|^2}{|\nabla u_0|^{2-p}}\,dx. \end{alignat*} The H\"{o}lder's inequality applied with exponents $\frac{2}{p}$ and $\frac{2}{2-p}$ implies that \begin{equation*}
\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^p \leq \left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\frac{|\nabla v|^2}{|\nabla u_0|^{2-p}}\right)^{\frac{p}{2}}\left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}|\nabla u_0|^p\right)^{\frac{2-p}{2}}, \end{equation*} or equivalently \begin{alignat*}{1}
\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\frac{|\nabla v|^2}{|\nabla u_0|^{2-p}} &\geq \frac{\left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^p\right)^{\frac{2}{p}}}{\left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}|\nabla u_0|^p\right)^{\frac{2-p}{p}}} \\
&\geq \frac{\left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^p\right)^{\frac{2}{p}}}{\left(\int_{\Omega}|\nabla u_0|^p\right)^{\frac{2-p}{p}}}. \end{alignat*} Consequently, \begin{alignat*}{1}
I[v] &\geq C(p)\int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}|\nabla v|^p + C(p)\frac{\left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^p\right)^{\frac{2}{p}}}{\left(\int_{\Omega}|\nabla u_0|^p\right)^{\frac{2-p}{p}}} \\
&\geq C(p)\left(\int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}|\nabla v|^p\right)^{\frac{2}{p}} + C(p)\frac{\left(\int_{S\cap\{|\nabla u_0| > |\nabla v|\}}\left|\nabla v\right|^p\right)^{\frac{2}{p}}}{\left(\int_{\Omega}|\nabla u_0|^p\right)^{\frac{2-p}{p}}},\ \ \text{as $\delta$ is small} \\
&\geq C(p)A(u_0)\left(\left(\int_{S\cap\{|\nabla u_0|\leq |\nabla v|\}}|\nabla v|^p\right)^{\frac{2}{p}} + \left(\int_{S\cap\{|\nabla u_0|> |\nabla v|\}}|\nabla v|^p\right)^{\frac{2}{p}}\right) \\
&\geq C(p)A(u_0)\left(\int_{S}|\nabla v|^p\right)^{\frac{2}{p}} = C(p)A(u_0)\delta^2, \end{alignat*} where the last inequality is a consequence of the elementary inequality \begin{equation*} a^{\frac{2}{p}} + b^{\frac{2}{p}} \geq C(p)\left(a+b\right)^{\frac{2}{p}}\ \ \text{for\ }\ a, b\geq 0, \end{equation*} and the constant \begin{equation*}
A(u_0) = \min\left\{1,\frac{1}{\left(\int_{\Omega}|\nabla u_0|^p\right)^{\frac{2-p}{p}}}\right\}. \end{equation*}
So we have proved $I[v] \geq a > 0$ for some $a> 0$ whenever $v\in C^{\infty}_0(\Omega)$ satisfies $\|v\|_{\mathfrak{B}} = \delta$, for any $p\in (1, \infty)$. \end{pf}
Let \begin{equation*} \mathcal{G} = \{\gamma\in C([0,1],\mathfrak{B}): \gamma(0) = 0\ \text{and\ }\gamma(1) = v_2\} \end{equation*} and \begin{equation*} c = \inf_{\gamma\in\mathcal{G}}\max_{0\leq t\leq 1}I[\gamma(t)]. \end{equation*} The verified Palais-Smale condition and the preceding lemma allow us to apply the Mountain Pass Theorem as stated, for example, in \cite{J} to conclude that there is a $v_1\in \mathfrak{B}$ such that $I[v_1] = c$, and $I'[v_1] = 0$ in $\mathfrak{B}^*$. That is \begin{equation*}
\int_{\Omega}\left|\nabla u_1\right|^{p-2}\nabla u_1\cdot\nabla \varphi + Q(x)\beta_{\varepsilon}(u_1)\varphi dx = 0 \end{equation*} for any $\varphi\in \mathfrak{B} = W^{1,p}_0(\Omega)$, where $u_1 = v_1 + u_0$. So $u_1$ is a weak solution of the problem (\ref{eulereq}) and (\ref{bdrycondition}). In essence, the Mountain Pass Theorem is a way to produce a saddle point solution. Therefore, in general, $u_1$ tends to be an unstable solution in contrast to the stable solutions $u_0$ and $u_2$.
In this section, we have proved the following theorem. \begin{theorem} If $\varepsilon \ll \sigma_m$ and $J_p(u_2) < J_p(u_0)$, then there exists a third weak solution $u_1$ of the problem (\ref{eulereq}) and (\ref{bdrycondition}). Moreover, $J_p(u_1) \geq J_p(u_0) + a$, where $a$ is independent of $\varepsilon$. \end{theorem}
\section{A Comparison Principle for Evolution}\label{comparison} In this section, we prove a comparison theorem for the following evolution problem. \begin{equation}\label{evolution2} \left\{\begin{array}{ll} w_t - \bigtriangleup_p w + \alpha(x,w) = 0 &\ \text{in\ }\Omega\times(0, T)\\ w(x,t) = \sigma(x) &\ \text{on\ }\partial \Omega\times (0, T)\\ w(x,0) = v_0(x) &\ \text{for\ }x\in\bar{\Omega},\end{array}\right. \end{equation} where $T>0$ may be finite or infinite, and $\alpha$ is a continuous function satisfying $0 \leq \alpha(x,w) \leq Kw$ and \begin{equation*}
\left|\alpha(x,r_2) - \alpha(x,r_1)\right| \leq K\left|r_2 - r_1\right| \end{equation*} for all $x\in\Omega$, $r_1$ and $r_2\in\mathbb{R}$, and some $K \geq 0$. Let us introduce the notation $H_pw = w_t - \bigtriangleup_p w + \alpha(x,w)$. We recall a weak sub-solution $w\in L^2(0,T; W^{1,p}(\Omega))$ satisfies \begin{equation*}
\left.\int_Vw\varphi\ \right|_{t_1}^{t_2} + \int^{t_2}_{t_1}\int_V-w\varphi_t + |\nabla w|^{p-2}\nabla w\cdot\nabla\varphi + \alpha(x,w)\varphi \leq 0 \end{equation*} for any region $V\subset\subset\Omega$ and any test function $\varphi\in L^2(0,T; W^{1,p}(\Omega))$ such that $\varphi_t\in L^2(\Omega\times\mathbb{R}_T)$ and $\varphi\geq 0$ in $\Omega\times\mathbb{R}_T$, where $L^2_0(0,T; W^{1,p}(\Omega))$ is the subset of $L^2(0,T; W^{1,p}(\Omega))$ that contains functions which vanish on the boundary of $\Omega\times\mathbb{R}_T$, where $\mathbb{R}_T = [0, T]$. For convenience, we let $\mathfrak{T}_+$ denote this set of test functions in the following.
In particular, it holds that \begin{equation*}
\int^T_0\int_{\Omega}-w\varphi_t + <|\nabla w|^{p-2}\nabla w, \nabla\varphi> + \alpha(x,w)\varphi \leq 0 \end{equation*} for any test function $\varphi\in L^2_0(0,T; W^{1,p}(\Omega))$ such that $\varphi_t\in L^2(\Omega\times\mathbb{R}_T)$ and $\varphi\geq 0$ in $\Omega\times\mathbb{R}_T$.
The comparison principle for weak sub- and super-solutions is stated as follows. \begin{theorem}\label{paraboliccomparison} Suppose $w_1$ and $w_2$ are weak sub- and super-solutions of the evolutionary problem (\ref{evolution2}) respectively with $w_1\leq w_2$ on the parabolic boundary $(\bar{\Omega}\times\{0\}) \cup(\partial \Omega\times (0, +\infty))$. Then $w_1\leq w_2$ in $\Omega\times(0,+\infty)$. \end{theorem} Uniqueness of a weak solution of (\ref{evolution2}) follows from the comparison principle, Theorem \ref{paraboliccomparison}, immediately. \begin{lemma} For $T > 0$ small enough, if $H_pw_1 \leq 0 \leq H_pw_2$ in the weak sense in $\Omega\times \mathbb{R}_T$ and $w_1 < w_2$ on $\partial_p(\Omega \times \mathbb{R}_T)$, then $w_1\leq w_2$ in $\Omega\times\mathbb{R}_T$. \end{lemma} \begin{pf} For any given small number $\delta > 0$, we define a new function $\tilde{w}_1$ by $$\tilde{w}_1(x,t) = w_1(x,t) - \frac{\delta}{T-t},$$ where $x\in\bar{\Omega}$ and $0\leq t < T$. In order to prove $w_1\leq w_2$ in $\Omega\times\mathbb{R}_T$, it suffices to prove $\tilde{w}_1\leq w_2$ in $\Omega\times\mathbb{R}_T$ for all small $\delta > 0$. Clearly, $\tilde{w}_1 < w_2$ on $\partial_p(\Omega\times \mathbb{R}_T)$, and $\lim_{t\rightarrow T}\tilde{w}_1(x,t) = -\infty$ uniformly on $\Omega$. Moreover, the following holds for any $\varphi\in\mathfrak{T}_+$: \begin{alignat*}{1}
&\ \ \ \ \ \ \int^T_0\int_{\Omega}-\tilde{w}_1\varphi_t + <|\nabla\tilde{w}_1|^{p-2}\nabla\tilde{w}_1, \nabla\varphi> + \alpha(x,\tilde{w}_1)\varphi \\
&= \int^T_0\int_{\Omega}-w_1\varphi_t + <|\nabla w_1|^{p-2}\nabla w_1, \nabla\varphi> + \frac{\delta}{T-t}\varphi_t + \left(\alpha(x,\tilde{w}_1) - \alpha(x, w_1)\right)\varphi \\ &\leq \int^T_0\int_{\Omega}\frac{\delta}{T-t}\varphi_t + K\frac{\delta}{T-t}\varphi, \ \ \text{as $w_1$ is a weak sub-solution}\\ &= \int^T_0\int_{\Omega}\left(-\frac{\delta}{(T-t)^2} + K\frac{\delta}{T-t}\right)\varphi \\ &\leq \int^T_0\int_{\Omega} -\frac{\delta}{2(T-t)^2}\varphi,\ \ \text{for $T\leq\frac{1}{2K}$ so that $2K\leq \frac{1}{T-t}$} \\ &< 0, \end{alignat*} i.\,e.\, \begin{equation*} H_p\tilde{w}_1 \leq -\frac{\delta}{2(T-t)^2} \leq -\frac{\delta}{2T^2} < 0\ \ \text{in the weak sense.} \end{equation*}
That is, if we abuse the notation a little by denoting $\tilde{w}_1$ by $w_1$ in the following for convenience, it holds for any $\varphi\in\mathfrak{T}_+$, \begin{equation*}
\int^T_0\int_{\Omega}-w_1\varphi_t + <|\nabla w_1|^{p-2}\nabla w_1, \nabla\varphi> + \alpha(x,w_1)\varphi \leq \int^T_0\int_{\Omega}-\frac{\delta}{2T^2}\varphi < 0. \end{equation*} Meanwhile, for any $\varphi\in \mathfrak{T}_+$, $w_2$ satisfies \begin{equation*}
\int^T_0\int_{\Omega}-w_2\varphi_t + <|\nabla w_2|^{p-2}\nabla w_2, \nabla\varphi> + \alpha(x,w_2)\varphi \geq 0. \end{equation*}
Define, for $j = 1, 2$, $v_j(x,t) = e^{-\lambda t}w_j(x, t)$, where the constant $\lambda > 2K$. Then $w_j(x,t) = e^{\lambda t}v_j(x, t)$, and it is clear that $w_1\leq w_2$ in $\Omega\times\mathbb{R}_T$ is equivalent to $v_1\leq v_2$ in $\Omega\times\mathbb{R}_T$. In addition, for any $\varphi\in \mathfrak{T}_+$, the following inequalities hold: \begin{alignat*}{1}
&\int^T_0\int_{\Omega}-e^{\lambda t}v_1\varphi_t + e^{\lambda(p-1)t}<|\nabla v_1|^{p-2}\nabla v_1, \nabla\varphi> + \alpha(x,e^{\lambda t}v_1)\varphi \leq -\int^T_0\int_{\Omega}\frac{\delta}{2T^2}\varphi \\
&\text{and\ }\ \int^T_0\int_{\Omega}-e^{\lambda t}v_2\varphi_t + e^{\lambda(p-1)t}<|\nabla v_2|^{p-2}\nabla v_2, \nabla\varphi> + \alpha(x,e^{\lambda t}v_2)\varphi \geq 0. \end{alignat*} Consequently, it holds for any $\varphi\in \mathfrak{T}_+$ \begin{alignat*}{1}
&\int^T_0\int_{\Omega}-e^{\lambda t}(v_1 - v_2)\varphi_t + e^{\lambda(p-1)t}<|\nabla v_1|^{p-2}\nabla v_1 - |\nabla v_2|^{p-2}\nabla v_2, \nabla\varphi> \\
&\ \ \ \ \ \ \ \ + \left( \alpha(x, e^{\lambda t}v_1) - \alpha(x, e^{\lambda t}v_2)\right)\varphi \leq -\int^T_0\int_{\Omega}\frac{\delta}{2T^2}\varphi. \end{alignat*} We take $\varphi = \left(v_1 - v_2\right)^+ = \max\{v_1 - v_2, 0\}$ as the test function, since it vanishes on the boundary of $\Omega\times\mathbb{R}_T$. Then \begin{alignat*}{1}
&\int^T_0\int_{\{v_1>v_2\}}-e^{\lambda t}(v_1 - v_2)(v_1 - v_2)_t + e^{\lambda(p-1)t}<|\nabla v_1|^{p-2}\nabla v_1 - |\nabla v_2|^{p-2}\nabla v_2, \nabla v_1 - \nabla v_2> \\ &\ \ \ \ \ \ \ \ + \left(\alpha(x,e^{\lambda t}v_1) - \alpha(x,e^{\lambda t}v_2)\right)(v_1 - v_2) \leq -\frac{\delta}{2T^2}\int^T_0\int_{\{v_1>v_2\}}(v_1-v_2). \end{alignat*} Since \begin{equation*} \{v_1>v_2\}\subset\Omega\times(0,T)\ \ \text{due to the facts $v_1\leq v_2$ on $\partial_p(\Omega\times\mathbb{R}_T)$ and $v_1\rightarrow -\infty$ as $t\uparrow T$,} \end{equation*} the divergence theorem implies \begin{equation*} \int^T_0\int_{\{v_1 > v_2\}}-e^{\lambda t}(v_1-v_2)(v_1-v_2)_t = \int^T_0\int_{\{v_1>v_2\}}\lambda e^{\lambda t}\frac{1}{2}(v_1-v_2)^2. \end{equation*} On the other hand, \begin{equation*} \left(\alpha(x,e^{\lambda t}v_1) - \alpha(x,e^{\lambda t}v_2)\right)(v_1 - v_2)\geq -Ke^{\lambda t}(v_1-v_2)^2\ \ \text{on $\{v_1 > v_2\}$.} \end{equation*} As a consequence, it holds that \begin{alignat*}{1}
&\int^T_0\int_{\{v_1>v_2\}}\left(\frac{\lambda}{2} - K\right)e^{\lambda t}(v_1 - v_2)^2 + e^{\lambda(p-1)t}<|\nabla v_1|^{p-2}\nabla v_1 - |\nabla v_2|^{p-2}\nabla v_2, \nabla v_1 - \nabla v_2> \\ & \leq -\frac{\delta}{2T^2}\int^T_0\int_{\{v_1>v_2\}}(v_1-v_2). \end{alignat*} We call into play two elementary inequalities (\cite{L}) associated with the $p$-Laplacian: \begin{equation*}
<|b|^{p-2}b - |a|^{p-2}a, b-a> \geq (p-1)|b-a|^2\left(1 + |b|^2 + |a|^2\right)^{\frac{p-2}{2}}\ \ (1\leq p\leq 2), \end{equation*} and \begin{equation*}
<|b|^{p-2}b - |a|^{p-2}a, b-a> \geq 2^{2-p}|b-a|^p\ \ (p\geq 2)\ \ \text{for any $a$, $b\in\mathbb{R}^n$.} \end{equation*} By applying them with $b = \nabla v_1$ and $a = \nabla v_2$ in the preceding inequalities, we obtain \begin{alignat*}{1}
&\int^T_0\int_{\{v_1>v_2\}}\left(\frac{\lambda}{2} - K\right)e^{\lambda t}(v_1 - v_2)^2 + (p-1)e^{\lambda(p-1)t}\left|\nabla v_1 - \nabla v_2\right|^2\left( 1 + |\nabla v_1|^2 + |\nabla v_2|^2\right)^{\frac{p-2}{2}} \\ & \leq -\frac{\delta}{2T^2}\int^T_0\int_{\{v_1>v_2\}}(v_1-v_2)\ \ \ \ \text{for $1 < p <2$} \end{alignat*} and \begin{alignat*}{1}
&\int^T_0\int_{\{v_1>v_2\}}\left(\frac{\lambda}{2} - K\right)e^{\lambda t}(v_1 - v_2)^2 + 2^{2-p}e^{\lambda(p-1)t}\left|\nabla v_1 - \nabla v_2\right|^p \\ & \leq -\frac{\delta}{2T^2}\int^T_0\int_{\{v_1>v_2\}}(v_1-v_2)\ \ \ \ \text{for $p\geq 2$.} \end{alignat*} One can easily see in either case the respective inequality is true only if the measure of the set $\{v_1>v_2\}$ is zero. The proof is complete. \end{pf}
In the next lemma, we show the strict inequality on the boundary data can be relaxed to a non-strict one. \begin{lemma} For $T > 0$ sufficiently small, if $H_pw_1 \leq 0 \leq H_pw_2$ in the weak sense in $\Omega\times\mathbb{R}_T$ and $w_1\leq w_2$ on $\partial_p(\Omega \times \mathbb{R}_T)$, then $w_1\leq w_2$ on $\overline{\Omega\times\mathbb{R}_T}$. \end{lemma} \begin{pf} For any $\delta > 0$, take $\tilde{\delta} > 0$ such that $\tilde{\delta} \leq\frac{\delta}{4K}$ and define \begin{equation*} \tilde{w}_1(x,t) = w_1(x,t) - \delta t - \tilde{\delta}\ \ (x,t)\in\bar{\Omega}\times\mathbb{R}_T. \end{equation*} Then $\tilde{w}_1 < w_1 \leq w_2$ on $\partial_p(\Omega\times\mathbb{R}_T)$, and for any $\varphi\in\mathfrak{T}_+$, the following holds: \begin{alignat*}{1}
&\ \ \ \ \int^T_0\int_{\Omega}-\tilde{w}_1\varphi_t + <|\nabla\tilde{w}_1|^{p-2}\nabla\tilde{w}_1,\nabla\varphi> + \alpha(x,\tilde{w}_1)\varphi \\
&= \int^T_0\int_{\Omega}-w_1\varphi_t + <|\nabla w_1|^{p-2}\nabla w_1,\nabla\varphi> + \alpha(x,w_1)\varphi\\ &\ \ \ \ \ \ \ \ \ \ - \delta \varphi + \left(\alpha(x,w_1 - \delta t - \tilde{\delta}) - \alpha(x,w_1)\right)\varphi \\ &\leq \int^T_0\int_{\Omega} -\delta\varphi + K\left(\delta t + \tilde{\delta}\right)\varphi \leq \int^T_0\int_{\Omega} -\delta\varphi + K\left(\delta T + \tilde{\delta}\right)\varphi \\ &\leq \int^T_0\int_{\Omega}\left(-\delta + \frac{\delta}{2} + \frac{\delta}{4}\right)\varphi\ \ \text{for $T$ small} \\ &= -\frac{\delta}{4}\int^T_0\int_{\Omega}\varphi. \end{alignat*} The preceding lemma implies $\tilde{w}_1\leq w_2$ in $\overline{\Omega\times\mathbb{R}_T}$ for small $T$ and for any small $\delta > 0$, and whence the conclusion of this lemma. \end{pf}
Now the parabolic comparison theorem (\ref{paraboliccomparison}) follows from the preceding lemma quite easily as shown by the following argument: Let $T_0 > 0$ be any small value of $T$ in the preceding lemma so that the conclusion of the preceding lemma holds. Then $w_1\leq w_2$ on $\overline{\Omega\times (0, T_0)}$. In particular, $w_1 \leq w_2$ on $\partial_p(\Omega\times (T_0,2T_0))$. The preceding lemma may be applied again to conclude that $w_1 \leq w_2$ on $\overline{\Omega\times (T_0, 2T_0)}$. And so on. This recursion allows us to conclude that $w_1 \leq w_2$ on $\overline{\Omega\times\mathbb{R}_T}$.
\section{Convergence of Evolution}
Define $\mathfrak{S}$ to be the set of weak solutions of the stationary problem (\ref{eulereq}) and (\ref{bdrycondition}). The $p$-harmonic function $u_0$ is the maximum element in $\mathfrak{S}$, while $u_2$ denotes the least solution which may be constructed as the infimum of super-solutions. We also use the term \textit{non-minimal solution} with the same definition as in \cite{CW}. That is, $u$ is a non-minimal solution of the problem (\ref{eulereq}) and (\ref{bdrycondition}) if it is a viscosity solution but not a local minimizer in the sense that for any $\delta > 0$, there exists $v$ in the admissible set of the functional $J_p$ with $v = \sigma$ on $\partial\Omega$ such that
$\|v - u\|_{L^{\infty}} < \delta$, and $J_p(v) < J_p(u)$.
In this section, we consider the evolutionary problem \begin{equation}\label{evolution} \left\{\begin{array}{ll} w_t - \bigtriangleup_p w + Q(x)\beta_{\varepsilon}(w) = 0 &\ \text{in\ }\Omega\times(0,+\infty)\\ w(x,t) = \sigma(x) &\ \text{on\ }\partial \Omega\times (0, +\infty)\\ w(x,0) = v_0(x) &\ \text{for\ }x\in\bar{\Omega},\end{array}\right. \end{equation} and will apply the parabolic comparison principle (\ref{paraboliccomparison}) proved in Section \ref{comparison} to prove the following convergence of evolution theorem. One just notes that the parabolic problem (\ref{evolution2}) includes the above problem (\ref{evolution}) as a special case so that the comparison principle (\ref{paraboliccomparison}) applies in this case. \begin{theorem}\label{convergence} If the initial data $v_0$ falls into any of the categories specified below, the corresponding conclusion of convergence holds. \begin{enumerate} \item If $v_0 \leq u_2$ on $\bar{\Omega}$, then $\lim_{t\rightarrow +\infty}w(x,t) = u_2(x)$ locally uniformly for $x\in\bar{\Omega}$; \item Define \begin{equation*} \bar{u}_2(x) = \inf_{u\in\mathfrak{S}, u \geq u_2, u \neq u_2}u(x),\ x\in\bar{\Omega}. \end{equation*} If $\bar{u}_2 > u_2$, then for $v_0$ such that $u_2 < v_0 < \bar{u}_2$, $\lim_{t\rightarrow +\infty}w(x,t) = u_2(x)$ locally uniformly for $x\in\bar{\Omega}$; \item Define $\bar{u}_0(x) = \sup_{u\in\mathfrak{S}, u \leq u_0, u\neq u_0}u(x)$, $x\in\bar{\Omega}$. If $\bar{u}_0 < u_0$, then for $v_0$ such that $\bar{u}_0 < v_0 < u_0$, $\lim_{t\rightarrow +\infty}w(x,t) = u_0(x)$ locally uniformly for $x\in\bar{\Omega}$; \item If $v_0 \geq u_0$ in $\bar{\Omega}$, then $\lim_{t\rightarrow +\infty}w(x,t) = u_0(x)$ locally uniformly for $x\in\bar{\Omega}$;
\item Suppose $u_1$ is a non-minimal solution of (\ref{eulereq}) and (\ref{bdrycondition}). For any small $\delta > 0$, there exists $v_0$ such that $\|v_0 - u_1\|_{L^{\infty}(\Omega)} < \delta$ and the solution $w$ of the problem (\ref{evolution}) does not satisfy $$\lim_{t\rightarrow \infty} w(x,t) = u_1(x)\ \text{\ in\ } \Omega.$$ \end{enumerate} \end{theorem} \begin{pf}
We first take care of case 4. We may take as new initial data a smooth function $\tilde{v}_0$ so that $D^2\tilde{v}_0 < -KI$ and $|\nabla \tilde{v}_0| \geq \delta > 0$ on $\bar{\Omega}$. According to the parabolic comparison principle (\ref{paraboliccomparison}), it suffices to prove the solution $\tilde{w}$ generated by the initial data $\tilde{v}_0$ converges locally uniformly to $u_0$ if we also take $\tilde{v}_0$ larger than $v_0$, which can easily be done. So we use $v_0$ and $w$ for the new functions $\tilde{v}_0$ and $\tilde{w}$ without any confusion.
For any $V\subset\subset\Omega$ and any nonnegative function $\varphi$ which is independent of the time variable $t$ and supported in $V$, it holds that \begin{equation*} \begin{split}
\int_V|\nabla v_0|^{p-2}\nabla v_0\cdot\nabla\varphi &= \int_V - div\left(|\nabla v_0|^{p-2}\nabla v_0\right)\varphi \\ &\geq \int_V M\varphi\ \ \ \ \text{for some $M = M(n,p,K,\delta) > 0$.} \end{split} \end{equation*} The H\"{o}lder continuity of $\nabla w$ up to $t = 0$ as stated in \cite{DiB}, then implies \begin{equation*}
\int_V|\nabla w|^{p-2}\nabla w\cdot\nabla\varphi \geq \frac{M}{2}\int_V\varphi \end{equation*} for any small $t$ in $(0,t_0)$, and any nonnegative function $\varphi$ which is independent of $t$, supported in $V$ and subject to the condition \begin{equation}\label{conda}
\frac{\int_V|\nabla \varphi|}{\int_V\varphi}\leq A \end{equation} for a fixed constant $A > 0$ and some $t_0 > 0$ dependent on $A$. Then the sub-solution condition on $w$ \begin{equation*}
\left.\int_Vw\varphi\right|_{t=t_2} - \left.\int_Vw\varphi\right|_{t=t_1} + \int^{t_2}_{t_1}\int_V|\nabla w|^{p-2}\nabla w\cdot\nabla\varphi \leq 0 \end{equation*} implies that \begin{equation*}
\left.\int_Vw\varphi\right|_{t=t_2} - \left.\int_Vw\varphi\right|_{t=t_1} \leq -\frac{M}{2}(t_2-t_1)\int_V\varphi \end{equation*}
for any small $t_2 > t_1$ in $(0, t_0)$, and any nonnegative function $\varphi$ which is independent of $t$, supported in $V$ and subject to (\ref{conda}). In particular, $\left.\int_Vw\varphi\right|^{t_2}_{t_1} \leq 0$ for any nonnegative function $\varphi$ independent of $t$, supported in $V$ and subject to (\ref{conda}). So $$w(x, t_2) \leq w(x, t_1)$$ for any $x\in\Omega$ and $0\leq t_1\leq t_2$. Then the parabolic comparison principle readily implies $w$ is decreasing in $t$ for $t$ in $[0, \infty)$. Therefore $w(x,t)\rightarrow u^{\infty}(x)$ locally uniformly as $t\rightarrow\infty$ and hence $u^{\infty}$ is a solution of (\ref{eulereq}) and (\ref{bdrycondition}). Furthermore, the parabolic comparison principle also implies $w(x,t)\geq u_0(x)$ at any time $t > 0$. Consequently, $u^{\infty} = u_0$ as $u_0$ is the greatest solution of (\ref{eulereq}) and (\ref{bdrycondition}).
Next, we briefly explain the proof for case 1. We may take a new smooth initial data $\tilde{v}_0$ such that $\tilde{v}_0$ is very large negative, $D^2\tilde{v}_0 \geq KI$ and $|\nabla \tilde{v}_0| \geq\delta$ on $\bar{\Omega}$ for large constant $K>0$ and constant $\delta > 0$. It suffices to prove the solution $\tilde{w}$ generated by the initial data $\tilde{v}_0$ converges to $u_2$ locally uniformly on $\bar{\Omega}$ as $t\rightarrow\infty$. Following a computation exactly parallel to that in case 4, we can prove $w$ is increasing in $t$ in $[0, \infty)$. So $w$ converges locally uniformly to a solution $u^{\infty}$ of (\ref{eulereq}) and (\ref{bdrycondition}). As $u^{\infty}\leq u_2$ and $u_2$ is the least solution of (\ref{eulereq}) and (\ref{bdrycondition}), we conclude $u^{\infty} = u_2$.
In case 2, we may replace $v_0$ by a strict super-solution of $\bigtriangleup_p v - Q\beta_{\varepsilon}(v) = 0$ in $\bar{\Omega}$ between $u_2$ and $\bar{u}_2$, by employing the fact that $u_2$ is the infimum of super-solutions of (\ref{eulereq}) and (\ref{bdrycondition}). Using $v_0$ as the initial data, we obtain a solution $w(x,t)$ of (\ref{evolution}). Then one argues as in case 4 that for any $V\subset\subset\Omega$, there exist constants $A > 0$ and $t_0 > 0$ such that for $t_1 < t_2$ with $t_1$, $t_2\in [0, t_0)$, $\int_Vw\varphi\,|^{t_2}_{t_1} \leq 0$ for any nonnegative function $\varphi$ independent of $t$, supported in $V$ and subject to the condition $\frac{\int_V|\nabla\varphi|}{\int_V\varphi} \leq A$. As a consequence, $w(x,t_1) \geq w(x,t_2)\ \ (x\in\Omega)$. Then the parabolic comparison principle implies $w$ is decreasing in $t$ over $[0, +\infty)$. Therefore $w(x,t)$ converges locally uniformly to some function $u^{\infty}$ as $t\rightarrow\infty$ which solves (\ref{eulereq}) and (\ref{bdrycondition}). Clearly $u_2(x) \leq w(x,t) \leq \bar{u}_2(x)$ from which $u_2(x) \leq u^{\infty}(x) \leq \bar{u}_2(x)$ follows. As $w$ is decreasing in $t$ and $v_0\neq \bar{u}_2$, $u^{\infty} \neq \bar{u}_2$. Hence $u^{\infty} = u_2$.
The proof of case 3 is parallel to that of case 2 with the switch of sub- and super-solutions. Hence we skip it.
In case 5, we pick $v_0$ with $\|v_0 - u_1\|_{L^{\infty}} < \delta$ and $J_p(v_0) < J_p(u_1)$. Let $w$ be the solution of (\ref{evolution}) with $v_0$ as the initial data. Clearly, we may change the value of $v_0$ slightly if necessary so that it is not a solution of the equation
$$-\nabla\cdot\left(\left(\varepsilon + |\nabla u|^2\right)^{p/2-1}\nabla u\right) + Q(x)\beta(u) = 0$$ for any small $\varepsilon > 0$.
Let $w^{\varepsilon}$ be the smooth solution of the uniformly parabolic boundary-value problem \begin{equation*} \left\{\begin{array}{ll}
w_t - \nabla\cdot\left(\left(\varepsilon + |\nabla w|^2\right)^{p/2-1}\nabla w\right) + Q\beta(w) = 0 &\ \ \text{in $\Omega\times (0, +\infty)$}\\ w(x,t) = \sigma(x) &\ \ \text{on $\partial\Omega\times (0, +\infty)$}\\ w(x,0) = v_0(x) &\ \ \text{on $\bar{\Omega}$.} \end{array}\right. \end{equation*} $w^{\varepsilon}$ converges to $w$ in $W^{1,p}(\Omega)$ for every $t\in [0, \infty)$ as $\varepsilon\rightarrow 0$.
We define the functional
$$J_{\varepsilon, p}(u) = \int_{\Omega}\frac{1}{p}\left(\varepsilon + |\nabla u|^2\right)^{p/2} + Q(x)\Gamma(u)\,dx.$$ It is easy to see that
$$\int^t_0\int_{\Omega}\left(w^{\varepsilon}_t\right)^2 - \nabla\cdot\left(\left(\varepsilon + |\nabla w^{\varepsilon}|^2\right)^{p/2-1}\nabla w^{\varepsilon}\right)w^{\varepsilon}_t + Q\beta(w^{\varepsilon})w^{\varepsilon}_t = 0.$$ As $w^{\varepsilon}_t = 0$ on $\partial\Omega\times (0,\infty)$, one gets
$$\int^t_0\int_{\Omega}\left(w^{\varepsilon}_t\right)^2 + \left(\varepsilon + |\nabla w^{\varepsilon}|^2\right)^{p/2-1}\nabla w^{\varepsilon}\cdot \nabla w^{\varepsilon}_t + Q(x)\Gamma(w^{\varepsilon})_t = 0,$$ which implies
$$\int^t_0\int_{\Omega}\left(w^{\varepsilon}_t\right)^2 + \frac{1}{p}\left(\left(\varepsilon + |\nabla w^{\varepsilon}|^2\right)^{p/2}\right)_t + Q(x)\Gamma(w^{\varepsilon})_t = 0.$$ Consequently, it holds \begin{equation*} \begin{split}
&\ \ \ \ \int^t_0\int_{\Omega}\left(w^{\varepsilon}_t\right)^2 + \frac{1}{p}\int_{\Omega}\left(\varepsilon + |\nabla w^{\varepsilon}(x,t)|^2\right)^{p/2} + Q\Gamma(w^{\varepsilon}(x,t)) \\
&= \frac{1}{p}\int_{\Omega}\left(\varepsilon + |\nabla w^{\varepsilon}(x,0)|^2\right)^{p/2} + Q\Gamma(w^{\varepsilon}(x,0)) \end{split} \end{equation*} i.\,e.\, \begin{equation*} \int^t_0\int_{\Omega}\left(w^{\varepsilon}_t\right)^2 + J_{\varepsilon, p}(w^{\varepsilon}(\cdot,t)) = J_{\varepsilon, p}(w^{\varepsilon}(\cdot,0)). \end{equation*} Therefore $$J_{\varepsilon, p}(w^{\varepsilon}(\cdot, t)) \leq J_{\varepsilon, p}(v_0),$$ which in turn implies $$J_p(w(\cdot, t)) \leq J_p(v_0) < J_p(u_1).$$ In conclusion, $w$ does not converge to $u_1$ as $t\rightarrow\infty$. \end{pf}
\end{document} |
\begin{document} \sloppy \title{Counting Words Avoiding a Short Increasing Pattern and the Pattern 1k\dots2}
\begin{abstract} We find finite-state recurrences to enumerate the words on the alphabet $[n]^r$ which avoid the patterns 123 and $1k(k-1)\dots2$, and, separately, the words which avoid the patterns 1234 and $1k(k-1)\dots2$. \end{abstract}
\section{Introduction} A word $W = w_1w_2 \dots w_n$ on an ordered alphabet \emph{contains} the pattern $p_1p_2\dots p_k$ if there exists a (strictly) increasing sequence $i_1,i_2,\dots, i_k$ such that $w_{i_r} < w_{i_s}$ if and only if $p_r < p_s$ and $w_{i_r} > w_{i_s}$ if and only if $p_r > p_s$. If both $W$ and $p_1p_2 \dots p_k$ are permutations, then $w_{i_r} < w_{i_s}$ if and only if $p_r < p_s$ is an equivalent and more common definition. If $W$ does not contain $p_1p_2 \dots p_k$, then $W$ \emph{avoids} it. The study of pattern-avoiding permutations began with Donald Knuth in \textit{The Art of Computer Programming}, and has become an active area of combinatorial research. See \cite{Vatter} for an in-depth survey of the major results in this field.
The study of pattern-avoiding words other than permutations is comparatively recent, being inaugurated in \cite{Regev} and greatly expanded in \cite{Burstein}. Much is known about avoidance properties for specific patterns and families of patterns; for instance, Burstein counted the number of length-$n$ words with letters in $[k]=\{1,2,\dots,k\}$ which avoid all patterns in $S$ for all $S \subseteq S_3$. Meanwhile, \cite{Mansour} found generating functions for the number of such words which avoid both 132 and one of a large family of other patterns including $12\dots l$ and $l12\dots(l-1)$.
Other authors have looked at words with letters in $[n]$ where each letter must appear exactly $r$ times (we will say that these are the words on $[n]^r$). These words are a direct generalization of permutations, which are given by the $r=1$ case. In \cite{Shar}, the authors created an algorithm to find the ordinary generating functions enumerating words on $[n]^r$ which avoid 123, while \cite{Zeil1d} found that the generating functions enumerating words on $[n]^r$ avoiding $12\dots l$ are D-finite.
We study this second type of word. Our contribution is to find finite, linear recurrences for the numbers of words on $[n]^r$ that avoid 123 and $1k(k-1)\dots2$ as well as the ones that avoid 1234 and $1k(k-1)\dots2$. It is well known (see \cite{Cfinite} for instance) that this fact implies that these quantities have rational generating functions, and, moreover, gives a way to compute them (in principle if not always in practice - see Section \ref{compute}). While generating functions were previously found in \cite{Kratten} for the permutations avoiding 123 and $1k(k-1)\dots2$, this is the first time that such a result has been extended to these more general words. In the 1234 and $1k(k-1)\dots2$ case, the result was, to the best of our knowledge, previously not even known for permutations with $k$ as small as 5.
\section{Words Avoiding 123}\label{123} We begin this section with an algorithm for counting 123-avoiding permutations from \cite{DrZ}. For $L = [l_1,l_2,\dots, l_n]$, let $A(L)$ be the number of words containing $l_i$ copies of $i$ for $1 \le i \le n$ which avoid 123. The following result allows us to quickly compute $A(L)$. \begin{thm} The following recurrence holds: \begin{displaymath} A(L) = \sum_{i=1}^n A([l_1,l_2,\dots,l_{i-1}, l_i-1, l_{i+1}+l_{i+2}+\dots+l_n]). \end{displaymath} \end{thm} \begin{proof} Let $A_i(L)$ be the number of words with letter counts $l_1,l_2,\dots,l_n$ which avoid 123 and begin with the letter $i$. We will biject the words counted by $A_i(L)$ with those counted by $A([l_1,l_2,\dots,l_{i-1}, l_i-1, l_{i+1}+l_{i+2}+\dots+l_n])$. Let $W=iw_2\dots w_t$ have letter counts in $L$ and let $f(W)$ be given by removing the initial $i$ from $W$ and then replacing all letters greater than $i$ with $i+1$. Also, for some word $V = v_1v_2\dots v_{t-1}$ with letter counts in $[l_1,l_2,\dots,l_{i-1},l_i-1,l_{i+1}+\dots+l_n]$, let $f^{-1}$ be given by replacing the sequence of $i+1$'s with $l_{n} \text{ }n$'s, $l_{n-1} \text{ }n-1$'s, and so on in that order, and then prepending $i$ to this word.
We claim that $f^{-1}$ is the inverse of $f$. To find $f(f^{-1}(V))$, we would replace the sequence of $i+1$s with $l_{n} \text{ }n$'s, $l_{n-1} \text{ }n-1$'s, and so on, and then prepend an $i$, before removing that $i$ and replacing all those letters larger than $i$ with $i+1$ again, giving us back $V$. To find $f^{-1}(f(W))$, we would replace all the letters larger than $i$ with $i+1$ and remove the initial $i$, before replacing that $i$ and putting back all the letters larger than $i$ (note that they had to be in descending order to begin with or else $W$ would contain a 123 pattern). Thus, $A_i(L)=A([l_1,l_2,\dots,l_{i-1}, l_i-1, l_{i+1}+l_{i+2}+\dots+l_n])$ and summing over all $i$ gives the promised equality.
This technique can be extended to many more avoidance classes. In this section we use it to count words avoiding both $123$ and $1k(k-1)\dots2$ simultaneously. We first fix $k \ge 3$, choose integers $n$ and $r$, and consider the number of words on the alphabet $[n]^r$ which avoid both $123$ and $1k(k-1)\dots2$. This time, however, we will need to keep track of more information than just the letter counts. To that end, we consider the set of words $A(r,a,b,L)$ where $r,a,$ and $b$ are integers, and $L=[l_1,l_2,\dots,l_t]$. This is the number of words with $r$ copies of the letters $1, \dots, a$, $b$ copies of the letter $a+1$, and $l_i$ copies of the letter $a+1+i$ which not only avoid both 123 and $1k(k-1)\dots2$, but would still avoid both those patterns if $a+1$ were prepended to the word. Note that this condition implies $t \le k-2$ because any sequence of $k-1$ distinct letters will either contain an increasing subsequence of length 2 (and hence create a 123 pattern) or will be entirely decreasing (and hence create a $1k(k-1)\dots2$ pattern).
Before we state the next theorem, we describe in more human-friendly language the algorithm that it suggests. Suppose that we are building a word $W$. Up to this point, the smallest letter which has been used is $a+1$, and $r-b$ copies of it have been used. We have a list $L=[l_1,l_2,\dots l_t]$ indicating how many copies of each letter greater than $a+1$ remain to be added, and we note that all these letters must be added in reverse order. To complete $W$, we need to add $r$ copies each of $1,2,\dots,a$, $b$ copies of $a+1$, and $l_i$ copies of $a+1+i$ for all $1 \le i \le t$. Examine $W$'s next letter $w_1$; considering only the requirement that $w_1$ be succeeded by at most $k-2$ distinct letters larger than it, we find that $w_1$ can be any element of $\{a+2, a+3,\dots,a+1+t\}$ or else it can be an element of $\{a-(k-2)+t+1, a-(k-2)+t+2, \dots, a+1\}$. But, we also need to consider the requirement that prepending $a+1$ to the new word will not create a 123 pattern. Therefore, $w_1 \in \{a-(k-2)+t+1, a-(k-2)+t+2, \dots, a+1, a+1+t\}$. If $w_1 = a+1+t$ or $a+1$, then removing it gives a word counted by $A(r,a,b,L')$ or $A(r,a,b-1,L)$ respectively where $L' = [l_1,\dots,l_{t-1},l_t -1]$. Otherwise, we need to add all the letters larger than $w_1$ to $L$ in order to ensure that future letters don't create 123 patterns.
Since we want $L$ to contain only letter counts for letters which will be added to $W$, i.e. we don't want it to contain 0, define the operator $R$ which removes all the zeroes from the list $L$.
\begin{thm} If $b \ge 1$, then \begin{align*} A(r,a,b,L) &= \sum_{i = a-(k-2)+t+1}^{a} A(r,i-1,r-1,[\underbrace{r,r,\dots,r,}_{a-i \text{ copies}} b, l_1,\dots,l_t]) \\ &+ A(r,a,b-1,L) + A(r,a,b,R([l_1,l_2,\dots, l_t -1])). \end{align*} If $b =0$, then \begin{align*} A(r,a,b,L) &= \sum_{i = a-(k-2)+t+1}^{a} A(r,i-1,r-1,[\underbrace{r,r,\dots,r,}_{a-i \text{ copies}} l_1,\dots,l_t]) \\ &+A(r,a,b,R([l_1,l_2,\dots, l_t -1])). \end{align*} \end{thm}
\begin{proof} As noted in the previous paragraph, $w_1 \in \{a - (k-2) +t +1, a-(k-2)+t+2,\dots,a+1,a+1+t\}$; each member of this set corresponds to a term of the summation. Fix $i$ with $a-(k-2)+t+1 \le i \le a$, and consider those words $W$ with $w_1 =i$. Suppose we remove $w_1$ from one of these words to form a word $W'$. We are left with $r$ copies of the letters 1 through $i-1$, $r-1$ copies of $i$ (because $i \le a$ there were $r$ copies of it including $w_1$), and $l'_j$ copies of $(i-1)+1+j$ where $L'=[l'_1,\dots,l'_u]=[\underbrace{r,r,\dots,r}_{a-i \text{ copies}},b,l_1,\dots,l_t]$. We are left with a word which avoids 123 and $1k(k-1)\dots2$, and, moreover, avoids 123 even when $i$ is prepended. Furthermore, prepending an $i$ to any word fitting this description gives a word counted by $A(r,a,b,L)$, and so the number of words counted by $A(r,a,b,L)$ which begin with $i$ is $A(r,i-1,r-1,[\underbrace{r,r,\dots,r,}_{a-i \text{ copies}} b, l_1,\dots,l_t])$ for all $a-(k-2)+t+1 \le i \le a$.
This leaves two other possibilities for $w_1$: $a+1$ and $a+t+1$. If $w_1 = a+1$, then the only difference between the letter counts of $W$ and $W'$ is that $W$ has $b$ copies of $a+1$ and $W'$ has only $b-1$. In terms of avoidance, both $W$ and $W'$ avoid 123 and $1k(k-1)\dots2$ even with $a+1$ prepended. Thus, the number of $W$ with $w_1=a+1$ is $A(r,a,b-1,L)$ as long as $b \ge 1$, and, 0 if $b=0$.
Similarly, if $w_1=a+t+1$, then the only difference between the letter counts of $W$ and $W'$ is that $W$ has $l_t$ copies of $a+t+1$ and $W'$ has only $l_t-1$. Just as in the previous case, the avoidance properties are identical and so the number of $W$ with $w_1=a+t+1$ is $A(r,a,b,R([l_1,l_2,\dots,l_t-1]))$ where we needed to remove $l_t-1$ if it is zero so that we know that the next letter is allowed to be $a+t$.
Summing over all possible $w_1$ now gives the promised result.
\end{proof}
\section{Words Avoiding 1234}\label{1234} Just as we can find recurrences, and therefore generating functions, for words avoiding 123 and $1k(k-1)\dots2$, we can (in principle at least) find a similar system of recurrences and generating functions for words on $[n]^r$ avoiding 1234 and $1k(k-1)\dots2$. The idea is to construct a word $W$ one letter at a time, and with each letter see if we have made a forbidden pattern. Unfortunately, doing this naively would require keeping track of all previous letters in $W$, denying us a finite recurrence. By only paying attention to the letters that could actually contribute to a forbidden pattern, though, we find that we actually only need to retain a bounded quantity of information regarding $W$.
\subsection{The Existence of a Finite Recurrence} In order to discuss the structure of words avoiding 1234 and $1k(k-1)\dots2$, we recall one common definition and introduce some new ones. A \emph{left-to-right minimum} (LTR min) is a letter of a word which is (strictly) smaller than all the letters which precede it. To an LTR min, we associate an \emph{activated sequence} which consists of all the letters following the LTR min which are (again strictly) larger. Notice that, since 1234 is forbidden, anytime a letter $w$ is preceded by some smaller letter, all the letters larger than and following $w$ must occur in reverse order. We call these letters \emph{fixed}. If all the letters greater than an LTR min are fixed, then it is either guaranteed or impossible that the LTR min and its activated sequence form a $1k(k-1)\dots2$ pattern; in this case we say that the activated sequence has been \emph{deactivated} and we no longer consider it an activated sequence. If an LTR min with an empty activated sequence is followed by another LTR min (or another copy of itself), then any forbidden pattern using the first LTR min could also be made using the second LTR min; we say that the first LTR min is \emph{superseded} and no longer consider it an LTR min.
With these definitions, we are nearly ready to state the actual set we will be recursively enumerating. Let $r,k,$ and $a$ be integers, let $\mathcal{S}=[S_1=[s_{1,1},\dots,s_{1,q_1}],\dots, S_u=[s_{u,1},\dots,s_{u,q_u}]]$ be a list of lists whose elements are in $[t]$, let $M=[m_1,\dots,m_u]$ be a list with elements in $[t]$, and let $L=[l_1,\dots,l_t]$ be a list with elements in $\{0\} \cup [r]$. Suppose we are building a word, and so far the letters $1,\dots,a$ have never been used, while the letters greater than $a+t$ have been entirely used up and, moreover, are not LTR mins or in any activated sequence. Suppose this word has LTR mins $m_1+a,\dots,m_u+a$ with corresponding activated sequences $[s_{1,1}+a,\dots,s_{1,q_1}+a],\dots,[s_{u,1}+a,\dots,s_{u,q_u}+a]$ (excluding LTR mins which have been superseded or whose sequences have been deactivated). Finally, assume that the word so far avoids 1234 and $1k(k-1)\dots2$, and that $l_i$ copies of $a+i$ remain to be placed for all $1 \le i \le t$ (recall that $r$ copies of $1,\dots,a$ and 0 copies of $a+t+1,a+t+2,\dots$ remain to be placed). Then, the number of ways of completing the word is defined to be $A(r,k,a,M,\mathcal{S},L)$.
Our plan is to show that (i) $A$ is well defined, (ii) all arguments of $A$ besides $a$ take on finitely many values, and (iii) $A$ satisfies a recurrence in which a particular non-negative function of its arguments is always reduced (until the base case). Before carrying out this plan, though, we provide an example to make sure our definitions are clear.
\begin{eg}\label{ex} Suppose we are building a word on the alphabet $[9]^2$ to avoid 1234 and 15432, and so far have 69945. The LTR mins are 6 and 4 with corresponding activated strings 99 and 5. However, 99 has been deactivated because all the letters greater than its LTR min are fixed and must occur in decreasing order. Thus, the number of ways to complete this word is given by $A(2,5,3,[1],[[2]],[1,1,1,2,2]).$ \end{eg}
Notice that the number of ways is also given by $A(2,5,2,[2],[[3]],[2,1,1,1,2,2])$. While this is not a problem in principle, it would be nice to have a canonical way of expressing this quantity, and so we will eventually insist that $L$ have a particular length given by a function of $r$ and $k$.
\begin{thm}\label{well-defined} $A$ is well defined. \end{thm} \begin{proof} Suppose that $W_1$ and $W_2$ are two partial words that give the same arguments to $A$.
Given the LTR mins of a partial word, it is easy to see in which order they occurred. It is similarly easy to see in which order the elements of activated sequences occurred, since they are all listed in order in the activated sequence corresponding to the first LTR min (any element of some other activated sequence lower than or equal to the first LTR min would fix that LTR min's activated sequence and thus deactivate it). Finally, we can see how the sequence of LTR mins and the sequence of other elements are interweaved by noting that a non-LTR min occurs after an LTR min if and only if it appears in that min's activated sequence. Therefore, the subwords formed by the LTR mins and activated strings of $W_1$ and $W_2$ are identical.
Suppose $A$ is not well-defined; then there is some string which can be added to (without loss of generality) $W_1$ without creating a forbidden pattern, but which does create a forbidden pattern when added to $W_2$. By the argument of the previous paragraph, there is an element of $W_2$ which is neither an LTR min nor part of an activated sequence, but which does participate in this pattern.
But, this is not possible. Every element in $W_2$ is an LTR min, part of an activated sequence, fixed, or a copy of the LTR min immediately preceding it. We have assumed that the first two cases do not hold. The third case similarly cannot hold because when an element is fixed, so are all the elements larger than its LTR min, which is to say all the elements which could conceivably be part of a forbidden pattern with it. Therefore, every fixed element either must participate in a forbidden pattern or it cannot possibly do so. Finally the fourth case cannot hold because any forbidden pattern involving a copy of the immediately preceding LTR min could also be formed with that LTR min. Thus we have a contradiction. \end{proof}
Next, we want to establish bounds on $u$ and $t$ as well as on the number of elements in any $S_i$. These bounds should depend only on $r$ and $k$.
\begin{thm}
Bounds for $t$, $u$, and all $|S_i|$ are as follows: $t \le 6(k-2)+2$, $u \le 2r(k-2)+1$, and $|S_i| \le 2r(k-2)$ for all $1 \le i \le u$. \end{thm}
\begin{proof} Recall that the Erd\H os-Szekeres Theorem states that any sequence of distinct real numbers of length $(p-1)(q-1)+1$ must contain either a length-$p$ increasing sequence or a length-$q$ decreasing sequence (see \cite{ESz}). Since every activated sequence must avoid both 123 and $(k-1)(k-2)\dots1$, it follows that no activated sequence can have more than $2(k-2)$ distinct letters. Since there are at most $r$ copies of any single letter, the longest an activated sequence could possibly be is $2r(k-2)$. Each activated sequence must either have some element that the next one lacks or correspond to the most recent LTR min (or else its LTR min would be superseded), and in the proof of Theorem \ref{well-defined} we showed that the first activated sequence must contain all the elements of every other activated sequence. Therefore, there can be at most $2r(k-2)+1$ activated sequences, and we have successfully bounded both $u$ and the size of any $S_i$.
To find a bound on $t$, note that as soon as we have used all $r$ copies of a letter and none of those copies remain as either LTR mins or in activated sequences, we can ignore that letter entirely, secure in the knowledge that if it is not already part of a forbidden pattern, it never will be. Thus, we only need to keep track of letters which are LTR mins, are in activated sequences, or are among the largest $2(k-2)+1$ letters still available to be used. By the reasoning of the previous paragraph, at most $2(k-2)+1$ distinct letters are LTR mins, at most $2(k-2)$ are in activated sequences, and so we need to keep track of $6(k-2)+2$ letters all together; all other letters either have never been used and so have $r$ copies remaining or else have had all copies used and can no longer participate in forbidden patterns. \end{proof}
After Example \ref{ex}, we commented that there may be several ways to describe a given partial word using $a,L,M,$ and $\mathcal{S}$. To allow for unique descriptions, we adopt the convention that $|L| = t = 6(k-2)+2$.
\subsection{Finding the Recurrence}
At this point, we have finished parts (i) and (ii) of our program; all that's left to show is that $A$ can be computed using a recurrence. To this end, we introduce three new functions. $\Fix(m,S,L)$ returns $S$ with all available letters (with counts determined by $L$) which exceed $m$ appended in decreasing order, $\Red(L,i)$ returns $L$ with the $i\tss{th}$ element decreased by one, and $\Rem(r,M,\mathcal{S},L,i)$ returns the tuple $M,\mathcal{S},L$ with the following changes: all elements of $M$ and all elements of every $S$ in $\mathcal{S}$ that are below $i$ are incremented by one, the $i\tss{th}$ element of $L$ is deleted and $r$ is prepended to $L$.
\begin{eg}\label{fix} $\Fix(1,[2,3],[1,1,1,2,2]) = [2,3,5,5,4,4,3,2].$ \end{eg}
\begin{eg}\label{reduce} $\Red([1,1,1,2,2],2) = [1,0,1,2,2].$ \end{eg}
\begin{eg}\label{remove} $\Rem(2,[1],[[2,4]],[1,1,0,1,2],3) = ([2],[[3,4]],[2,1,1,1,2]).$ \end{eg}
For fixed $r$ and $k$, the base cases are $A(r,k,a,M,\mathcal{S},L)$ for all $M,\mathcal{S}, L$ and $0\le a \le t$. Otherwise, there are no more than $2(k-2)+1$ possibilities for the next letter $i$ (corresponding to the largest $2(k-2)+1$ nonzero entries of $L$) which we divide into $u+1$ cases: when $i \le m_u$, when $m_h < i \le m_{h-1}$ for $2 \le h \le u$, and when $m_1 < i$. Suppose that $i \le m_u$; then we calculate the number of ways to complete the word after adding an $i$ as follows.
Suppose that $S_u = []$; then adding an element less than or equal to the current LTR min will supersede that LTR min. Symbolically, we have $\mathcal{S}' = [S_1,\dots,S_{u-1},[]]$, $M' = [m_1,\dots,m_{u-1},i]$, and $L' = \Red(L,i)$. If $S_u \neq []$, then we are adding a new LTR min while leaving all existing ones in place. This gives arguments $\mathcal{S}' = [S_1,\dots,S_{u},[]]$, $M'=[m_1,\dots,m_u,i]$, and $L' = \Red(L,i)$. Now, suppose that $J = \{j_1,\dots,j_w\}$ is the set of all integers $j \in [t]$ such that $L'_j = 0$, and $j$ fails to appear in $M'$ or in any $S \in \mathcal{S}'$. For all $j \in J$ from smallest to largest, update $\mathcal{S}',M',$ and $L'$ by setting $M',\mathcal{S}',L' = \Rem(r,M',\mathcal{S}',L',j)$. We finally have that the number of ways to complete the word after adding an $i$ is $A(r,k,a-|J|,M',\mathcal{S}',L')$.
Alternatively, we may add $i$ such that $i > m_h$ with $h$ chosen as small as possible. Either this $i$ is no larger than the smallest element of $S_1$, or else it is the largest letter that still remains to be added (otherwise a 1234 pattern is inevitable once the largest remaining letter is added). For all $m_j \ge i$, check to see if $\Fix(m_j,S_j,L)$ contains a $(k-1)(k-2)\dots1$ pattern. If so, this choice of $i$ contributes nothing to $A(r,k,a,M,\mathcal{S},L)$. If this is not true for any $j$, then we can add $i$ to our word, but doing so deactivates $S_1,S_2,\dots,S_{h-1}$, and so we forget about those activated sequences and their LTR mins. Thus, we take $\mathcal{S}' = [S_h,\dots,S_u]$, $M' = [m_h,\dots,m_u]$, and $L' = \Red(L,i)$. As before, suppose that $J = \{j_1,\dots,j_w\}$ is the set of all integers $j \in [t]$ such that $L'_j = 0$, and $j$ fails to appear in $M'$ or in any $S \in \mathcal{S}'$. For all $j \in J$ from smallest to largest, update $M',\mathcal{S}',$ and $L'$ by setting $M',\mathcal{S}',L' = \Rem(r,M',\mathcal{S}',L',j)$. Again we have that the number of ways to complete the word after adding an $i$ is $A(r,k,a-|J|,M',\mathcal{S}',L')$.
We have expressed $A(r,k,a,M,\mathcal{S},L)$ as a sum of other terms. Notice that in each of these other terms, the number of letters left to be added (given by $r\cdot a +\sum_{i=1}^t L_i$) decreases by 1; eventually it will decrease below $r\cdot t$ and a base case will apply.
While all the base cases could in principle be computed individually, this would probably be a long and unpleasant task. Fortunately, our recurrence can be easily tweaked to calculate base cases. To do so, simply run the recurrence as given, but anytime $A$ would be called with a negative $a$, replace the first $|a|$ nonzero entries of $L$ with 0 and change $a$ to 0. As it turns out, the only base case that we really need is $A(r,k,0,M,\mathcal{S},[0,0,\dots,0])=1$. A full implementation of this recurrence is available in an accompanying Maple package -- see Section \ref{maple}.
\subsection{From Recurrences to Generating Functions}\label{rigor} This subsection contains an algorithm for turning the recurrences found in this section and Section \ref{123} into generating functions. Readers interested in a more complete exposition should consult Chapter 4 in \cite{Kauers}.
Suppose the different terms in our system of recurrences are given by $A_1(n), A_2(n),\dots A_m(n)$. While we could choose $n$ like before and let it be the number of distinct unused letters whose counts do not appear in $L$, the rest of this process will be easier if each $A_i(n)$ depends only on $A_j(n-1)$. To make this happen, we interpret $n$ as the total number of unused letters. For example, we might fix $r=2,k=5$ (note that $t=|L|$ is then chosen to be 20), and let $A_1(n) = A(2,5,n,[],[],[2,2,\dots,2]).$ If we let $A_2(n) = A(2,5,n,[1],[[]],[1,2,\dots,2])$, $A_3(n) = A(2,5,n,[2],[[]],[2,1,2,\dots,2])$ and so on until $A_{21}(n) = A(2,5,n,[20],[[]],[2,\dots,2,1])$, we find the recurrence relation $A_1(n) = \sum_{i=2}^{21} A_i(n-1)$.
Let $M$ be the matrix whose $i,j$ entry is the coefficient of $A_j(n-1)$ in the recurrence for $A_i(n)$, and let $f_i(x)$ be the generating function $\sum_{n=0}^\infty A_i(n) x^n$. It follows that $f_i(x)$ is a rational function with denominator $\text{det}(xI-M)$ for all $i$. The numerator of each generating function has degree less than the number of rows of $M$, and the coefficients of each one can be determined using the system of recurrences' initial conditions.
Since we are treating $n$ as the total number of letters in a word, we must make the substitution $x^r \mapsto x$ in order to obtain the generating function for the number of words on the alphabet $[n]^r$ avoiding the two patterns.
\section{Computational Results}\label{compute} The first algorithm presented in this paper, the one which enumerates words avoiding 123 and $1k(k-1)\dots2$, runs very quickly. With $r = 2$, we are able to get generating functions for $k$ as large as 8 (and we could go even further if we chose to). With $r = 3$, we are able to get generating functions for $k$ as large as 7.
Unfortunately, the algorithm in Section \ref{1234} is much slower. In the simplest open case of $r=1, k=5$, we are able to conjecture the generating function to be $\ds \frac{-2x^3+7x^2-6x+1}{2x^4-11x^3+17x^2-8x+1}$, but rigorously deriving it seems to be out of the question without carefully pruning the recurrence. We can also use the recurrence to just generate terms without worrying about finding generating functions. With $r =1$, i.e. in the permutation case, we find ten terms apiece in the enumeration sequences for $k=3,4 \dots, 10$, and could easily get more terms; in fact in the particular case of $k=5$ we found 16 in 20 minutes.
All these results can be found in the output files on this paper's webpage (see Section \ref{maple}).
\section{Future Work} The driving force behind the argument in this paper is the Erd\H os-Szekeres theorem; it ensures that we only have finitely many possible letters to add to a word at any point in time. For any pair of patterns which are not of the form $12\dots l, 1k(k-1)\dots 2$, this theorem will not apply, and so it is difficult to see how strategies like those in this paper could work.
It does seem reasonable to hope that they would work for other patterns of the form $12\dots l, 1k(k-1)\dots 2$. The only problem with applying them to the pair 12345, $1k(k-1)\dots 2$ is that we lose the fact that any element greater than an LTR min immediately fixes all elements above it. As a result, it is possible to have multiple activated strings, neither of which is a subset of the other. However, we are hopeful that some clever idea can get around this obstacle.
\section{Maple Implementation}\label{maple} This paper is accompanied by three Maple packages available from the paper's website: \url{http://sites.math.rutgers.edu/~yb165/SchemesForWords/SchemesForWords.html}. The packages are {\tt 123Avoid.txt} which implements the recurrence described in Section \ref{123}, {\tt 123Recurrences.txt} which uses this recurrence to rigorously find the generating functions enumerating the words on $[n]^r$ avoiding 123 and $1k(k-1)\dots2$, and {\tt 1234Avoid.txt} which implements the recurrence described in Section \ref{1234}. It also uses Doron Zeilberger's package Cfinite to automatically conjecture generating functions for the sequences of numbers of words on $[n]^r$ avoiding 1234 and $1k(k-1)\dots2$.
After loading any of these packages, type {\tt Help();} to see a list of available functions. You can get more details about any function by calling Help again with the function's name as an argument. This will also give an example of the function's usage.
\end{document} |
\begin{document}
\tolerance2500
\title{\Large{\textbf{On some groupoids of small orders with Bol-Moufang type of identities}}} \author{\normalsize {Vladimir Chernov, Alexander Moldovyan, Victor Shcherbacov} }
\maketitle
\begin{abstract} We count the number of groupoids of order 3 satisfying some Bol-Moufang type identities.
\noindent \textbf{2000 Mathematics Subject Classification:} 20N05 20N02
\noindent \textbf{Key words and phrases:} groupoid, Bol-Moufang type identity. \end{abstract}
\section{Introduction}
A binary groupoid $(G, \cdot)$ is a non-empty set $G$ together with a binary operation \lq\lq $\cdot$\rq\rq. This definition is very general, therefore usually groupoids with some identities are studied. For example, groupoids with identity associativity (semi-groups) are researched.
We continue the study of groupoids with some Bol-Moufang type identities \cite{NOVIKOV_08, VD, 2017_Scerb}. Here we present results published in \cite{CHErnov, CHErnov_2018}.
{\bf Definition.} \label{Bol_Moufang_TYpe_Id} Identities that involve three variables, two of which appear once on both sides of the equation and one of which appears twice on both sides are called Bol-Moufang type identities. \index{identity!Bol-Moufang type}
Various properties of Bol-Moufang type identities in quasigroups and loops are studied in \cite{Fenyves_1, Ph_2005, Cote, AKHTAR}.
Groupoid $(Q, \ast)$ is called a quasigroup, if the following conditions are true \cite{VD}: $(\forall u, v \in Q) (\exists ! \, x, y \in Q) (u * x = v \, \& \, y * u = v)$.
For groupoids the following natural problems are researched: how many groupoids with some identities of small order there exist? A list of numbers of semigroups of orders up to 8 is given in \cite{Satoh}; a list of numbers of quasigroups up to 11 is given in \cite{HOP, WIKI_44}.
\section{Some results}
An original algorithm was elaborated and a corresponding program was written for generating groupoids of small orders (2 and 3) satisfying some Bol-Moufang identities which are well known in quasigroup theory.
To verify the correctness of the written program, the number of semigroups of order 3 was counted. The obtained result coincided with the well-known one, namely, that there exist 113 semigroups of order 3.
The following identities have the property that any of them define a commutative Moufang loop \cite{BRUCK_46, VD, HOP, 2017_Scerb} in the class of loops: left (right) semimedial identity, Cote identity and its dual identity, Manin identity and its dual identity or in the class of quasigroups (identity (\ref{Comm_Muf_quas_Id}) and its dual identity).
\subsection{Groupoids with left semi-medial identity}
Left semi-medial identity in a groupoid $(Q, \ast)$ has the following form: $xx*yz=xy*xz$. Bruck \cite{BRUCK_46, VD, 2017_Scerb} uses namely this identity to define commutative Moufang loops in the class of loops.
There exist 10 left semi-medial groupoids of order 2. There exist 7 non-isomorphic left semi-medial groupoids of order 2. The first five of them are semigroups \cite{WIKI_44}.
\[ \begin{array}{lcrr}
\begin{array}{l|ll} \ast&1&2\\ \hline 1&1&1\\ 2&1&1\\ \end{array} &
\begin{array}{l|ll} \star&1&2\\ \hline 1&1&1\\ 2&1&2\\ \end{array} &
\begin{array}{l|ll} \circ&1&2\\ \hline 1&1&1\\ 2&2&2\\ \end{array} &
\begin{array}{l|ll} \cdot&1&2\\ \hline 1&1&2\\ 2&1&2\\ \end{array} \end{array} \]
\[ \begin{array}{lcr}
\begin{array}{l|ll} \diamond&1&2\\ \hline 1&1&2\\ 2&2&1\\ \end{array} &
\begin{array}{l|ll} \odot&1&2\\ \hline 1&2&1\\ 2&2&1\\ \end{array} &
\begin{array}{l|ll} \bullet&1&2\\ \hline 1&2&2\\ 2&1&1\\ \end{array} \end{array} \]
There exist 399 left semi-medial groupoids of order 3.
The similar results are true for groupoids with right semi-medial identity $xy*zz=xz*yz$. It is clear that the identities of left and right semi-mediality are dual. In other language they are (12)-parastrophes of each other \cite{VD, 2017_Scerb}.
It is clear that groupoids with dual identities have similar properties, including the number of groupoids of a fixed order.
\subsection{Groupoids with Cote identity}
The identity $x(xy*z) = (z*xx)y$ was discovered in \cite{Cote}. Here we call this identity the Cote identity.
There exist 6 groupoids of order 2 with Cote identity. There exist 3 non-isomorphic in pairs groupoids of order 2 with Cote identity.
There exist 99 groupoids of order 3 with Cote identity.
The similar results are true for groupoids with the following identity $(z\ast yx)x = y(xx \ast z)$. The last identity is (12)-parastrophe of Cote identity.
\subsection{Groupoids with Manin identity}
The identity $x(y*xz) = (xx*y)z$ we call Manin identity \cite{MANIN}. The following identity is dual identity to Manin identity: $(zx\ast y)x = z(y\ast xx)$.
There exist 10 groupoids of order 2 with Manin identity. There exist 7 non-isomorphic in pairs groupoids of order 2 with Manin identity.
There exist 167 groupoids of order 3 with Manin identity.
\subsection{Groupoids with identity $(xy\ast x)z = (y\ast xz) x$ \label{Comm_Muf_quas_Id} (identity (\ref{Comm_Muf_quas_Id}))}
Some properties of identity (\ref{Comm_Muf_quas_Id}) are given in \cite{VS_2014_Kiev, 2017_Scerb}.
The following identity is dual identity to identity (\ref{Comm_Muf_quas_Id}): $z(x\ast yx) = x(zx\ast y)$.
There exist 6 groupoids of order 2 with identity (\ref{Comm_Muf_quas_Id}).
There exist 3 non-isomorphic in pairs groupoids of order 2 with (\ref{Comm_Muf_quas_Id}) identity. Any of these groupoids is a semigroup.
There exist 117 groupoids of order 3 with identity (\ref{Comm_Muf_quas_Id}).
\subsection{Number of groupoids of order 3 with some identities}
We count number of groupoids of order 3 with some identities. We use list of Bol-Moufang type identities given in \cite{Cote}. In Table 1 we present number of groupoids of order 3 with the respective identity.
\begin{table} \centering \caption{Number of groupoids of order 3 with some identities.} \footnotesize{ \[
\begin{array}{|c||c| c| c| c|} \hline
Name & Abbreviation & Identity & Number \\ \hline\hline Semigroups & SGR & x(yz) = (xy)z & 113\\ \hline
Extra & EL & x(y(zx)) = ((xy)z)x & 239\\ \hline Moufang & ML & (xy)(zx) = (x(yz))x & 196\\ \hline Left Bol & LB & x(y(xz)) = (x(yx))z & 215\\ \hline Right Bol & RB & y((xz)x) = ((yx)z)x & 215\\ \hline C-loops & CL & y(x(xz)) = ((yx)x)z & 133\\ \hline LC-loops & LC & (xx)(yz) = (x(xy))z & 220\\ \hline RC-loops & RC & y((zx)x) = (yz)(xx) & 220\\ \hline Middle Nuclear Square & MN & y((xx)z) = (y(xx))z & 350\\ \hline Right Nuclear Square & RN & y(z(xx)) = (yz)(xx) & 932\\ \hline Left Nuclear Square & LN & ((xx)y)z = (xx)(yz) & 932\\ \hline Comm. Moufang & CM & (xy)(xz) = (xx)(zy) & 297\\ \hline Abelian Group & AG & x(yz) = (yx)z & 91\\ \hline Comm. C-loop & CC & (y(xy))z = x(y(yz)) & 169\\ \hline Comm. Alternative & CA & ((xx)y)z = z(x(yx)) & 110\\ \hline Comm. Nuclear square & CN & ((xx)y)z = (xx)(zy) & 472\\ \hline Comm. loops & CP & ((yx)x)z = z(x(yx)) & 744\\ \hline Cheban \, 1 & C1 & x((xy)z) = (yx)(xz) & 219\\ \hline Cheban \, 2 & C2 & x((xy)z) = (y(zx))x & 153\\ \hline Lonely \, I & L1 & (x(xy))z = y((zx)x) & 117\\ \hline Cheban\, I\, Dual & CD & (yx)(xz) = (y(zx))x & 219\\ \hline Lonely \, II & L2 & (x(xy))z = y((xx)z) & 157\\ \hline Lonely \, III & L3 & (y(xx))z = y((zx)x) & 157\\ \hline Mate \, I & M1 & (x(xy))z = ((yz)x)x & 111\\ \hline Mate \, II & M2 & (y(xx))z = ((yz)x)x & 196\\ \hline Mate \, III & M3 & x(x(yz)) = y((zx)x) & 111\\ \hline Mate \, IV & M4 & x(x(yz)) = y((xx)z) & 196\\ \hline Triad \, I & T1 & (xx)(yz) = y(z(xx)) & 162\\ \hline Triad \, II & T2 & ((xx)y)z = y(z(xx)) & 180\\ \hline Triad \, III & T3 & ((xx)y)z = (yz)(xx) & 162\\ \hline Triad \, IV & T4 & ((xx)y)z = ((yz)x)x & 132\\ \hline Triad \, V & T5 & x(x(yz)) = y(z(xx)) & 132\\ \hline Triad \, VI & T6 & (xx)(yz) = (yz)(xx) & 1419\\ \hline Triad \, VII & T7 & ((xx)y)z = ((yx)x)z & 428\\ \hline Triad \, VIII & T8 & (xx)(yz) = y((zx)x) & 120\\ \hline Triad \, IX & T9 & (x(xy))z = y(z(xx)) & 102\\ \hline Frute & FR & (x(xy))z = (y(zx))x & 129\\ \hline Crazy Loop & CR & (x(xy))z = (yx)(xz) 
& 136\\ \hline Krypton & KL & ((xx)y)z = (x(yz))x & 268\\ \hline \end{array} \]} \end{table}
\textbf{Acknowledgments.} Authors thank Dr. V.D. Derech for his information on semigroups of small orders.
\begin{center} \begin{parbox}{118mm}{\footnotesize Vladimir Chernov$^{1}$, Nicolai Moldovyan$^{2}$, Victor Shcherbacov$^{3}$
\noindent $^{1}$Master/Shevchenko Transnistria State University
\noindent Email: [email protected]
\noindent $^{2}$Professor/St. Petersburg Institute for Informatics and Automation of Russian Academy of Sciences
\noindent Email: [email protected]
\noindent $^{3}$Principal Researcher/Institute of Mathematics and Computer Science of Moldova
\noindent Email: [email protected]
}
\end{parbox} \end{center}
\end{document} |
\begin{document}
\title{Noether's Theorem and the Willmore Functional} \author{Yann Bernard\footnote{Departement Mathematik, ETH-Zentrum, 8093 Z\"urich, Switzerland.}} \date{ } \maketitle
{\bf Abstract :} {\it Noether's theorem and the invariances of the Willmore functional are used to derive conservation laws that are satisfied by the critical points of the Willmore energy subject to generic constraints. We recover in particular previous results independently obtained by R. Capovilla and J. Guven, and by T. Rivi\`ere. Several examples are considered in details.}
\reset
\section{Introduction}
Prior to establishing herself as a leading German mathematician of the early 20$^\text{th}$ century through her seminal work in abstract algebra, Emmy Noether had already made a significant contribution to variational calculus and its applications to physics. Proved in 1915 and published in 1918 \cite{Noe}, what was to become known as {\it Noether's theorem}, is a fundamental tool in modern theoretical physics and in the calculus of variations \cite{GM, Kos, Run}. Generalizing the idea of constants of motion found in classical mechanics, Noether's theorem provides a deep connection between symmetries and conservation laws. It is a recipe to construct a divergence-free vector field from a solution of a variational problem whose corresponding action (i.e. energy) is invariant under a continuous symmetry. For example, in 1-dimensional problems where the independent variable represents time, these vector fields are quantities which are conserved in time, such as the total energy, the linear momentum, or the angular momentum. We now precisely state one version of Noether's theorem\footnote{further generalizations may be found {\it inter alia} in \cite{Kos, Run}.}.
Let $\Omega$ be an open subset of $\mathcal{D}\subset\mathbb{R}^s$, and let $\mathcal{M}\subset\mathbb{R}^m$. Suppose that $$
L\,:\,\Big\{(x,q,p)\,\big|\,(x,q)\in\mathcal{D}\times\mathcal{M}\;,\;p\in T_q\mathcal{M}\otimes T^*_x\mathcal{D} \Big\}\;\longmapsto\;\mathbb{R} $$ is a continuously differentiable function. Choosing a $C^1$ density measure $d\mu(x)$ on $\Omega$, we can define the {\it action functional} $$ \mathcal{L}(u)\;:=\;\int_\Omega L(x,u(x),du(x))\,d\mu(x) $$ on the set of maps $u\in C^1(\Omega,\mathcal{M})$. A tangent vector field $X$ on $\mathcal{M}$ is called an {\it infinitesimal symmetry} for $\mathcal{L}$ if it satisfies $$ \dfrac{\partial L}{\partial q^i}(x,q,p)X^i(q)+\dfrac{\partial L}{\partial p^i_\alpha}(x,q,p)\dfrac{\partial X^i}{\partial q^j}(q)p^j_\alpha\;=\;0\:. $$ \begin{Th} Let $X$ be a Lipschitz tangent vector field on $\mathcal{M}$ which is an infinitesimal symmetry for the action $\mathcal{L}$. If $u:\Omega\rightarrow\mathcal{M}$ is a critical point of $\mathcal{L}$, then \begin{equation}\label{noether} \sum_{\alpha=1}^{s}\,\dfrac{\partial}{\partial x^\alpha}\bigg(\rho(x)X^j(u)\dfrac{\partial L}{\partial p^j_\alpha}(x,u,du) \bigg)\;=\;0\:, \end{equation} where $\{x^\alpha\}_{\alpha=1,\ldots,s}$ are coordinates on $\Omega$ such that $d\mu(x)=\rho(x)dx^1\cdot\cdot\cdot dx^s$. \end{Th} Equation (\ref{noether}) is the conservation law associated with the symmetry represented by $X$. The quantity $$ \rho(x)X^j(u)\dfrac{\partial L}{\partial p^j_\alpha}(x,u,du) $$ is often called {\it Noether current}, especially in the physics literature. \\
Whether in the form given above or in analogous forms, Noether's theorem has long been recognized as a fundamental tool in variational calculus. In the context of harmonic map theory, Noether's theorem was first used by \cite{Raw} in the mid 1980s. A few years later, several authors \cite{YMC, KRS, Sha} have independently used it to replace the harmonic map equation into spheres, where derivatives of the solution appear in a quadratic way, by an equation in divergence form, where derivatives of the solution appear in a linear way. This gives a particularly helpful analytical edge when studying harmonic maps with only very weak regularity hypotheses. Fr\'ed\'eric H\'elein made significant contributions to the analysis of harmonic maps using conservation laws via Noether's theorem \cite{Hel}. In the same vein, Tristan Rivi\`ere used conservation laws to study conformally invariant variational problems \cite{Riv1}. \\
We will in this paper also make use of Noether's theorem\footnote{not directly in the form (\ref{noether}), but the spirit behind our derivations is the same.}, this time in the context of fourth-order geometric problems in connection with the {\it Willmore functional}. We now briefly recall the main historical landmarks that led to the discovery -- and rediscovery, indeed -- of the Willmore functional. \\
Imagine that you had at your disposal the bow of a violin and a horizontal thin metallic plate covered with grains of sand. What would you observe if you were to rub the bow against the edge of the plate? In 1680, the English philosopher and scientist Robert Hooke was the first to try to answer this question (then posed in slightly different experimental terms). Some 120 years later, the German physicist and musician Ernst Chladni repeated the experiment in a systematic way \cite{Chl}. Rubbing the bow with varying frequency, he observed that the grains of sand arrange themselves in remarkable patterns -- nowadays known as {\it Chladni figures}. Those who witnessed Chladni's experiment were fascinated by the patterns, as was in 1809 the French emperor Napol\'eon I. Eager to understand the physical phenomenon at the origin of the Chladni figures, the emperor mandated Pierre-Simon de Laplace of the Acad\'emie des Sciences to organize a competition whose goal would be to provide a mathematical explanation for the figures. The winner would receive one kilogram of solid gold. Joseph-Louis Lagrange discouraged many potential candidates as he declared that the solution of the problem would require the creation of a new branch of mathematics. Only two contenders remained in the race: the very academic Sim\'eon-Denis Poisson and one autodidactic outsider: Sophie Germain. It is unfortunately impossible to give here a detailed account of the interesting events that took place in the following years (see \cite{Dah}). In 1816, Sophie Germain won the prize -- which she never claimed. Although Germain did not answer Napol\'eon's original question, and although she did not isolate the main phenomenon responsible for the Chladni figures, namely resonance, her work proved fundamental, for, as predicted by Lagrange, she laid down the foundations of a whole new branch of applied mathematics: the theory of elasticity of membranes. 
For the sake of brevity, one could synthesize Germain's main idea by isolating one single decisive postulate which can be inferred from her work \cite{Ger}. Having found her inspiration in the works of Daniel Bernoulli \cite{DBer} and Leonhard Euler \cite{Eul} on the elastica (flexible beams), Sophie Germain postulates that the density of elastic energy stored in a thin plate is proportional to the square of the mean curvature\footnote{Incidentally, the notion of mean curvature was first defined and used in this context; it is a creation which we owe to Germain.} $H$. In other words, the elastic energy of a bent thin plate $\Sigma$ can be expressed in the form $$ \int_{\Sigma}H^2(p)d\sigma(p)\:, $$ where $d\sigma$ denotes the area-element. In the literature, this energy is usually referred to as {\it Willmore energy}. It bears the name of the English mathematician Thomas Willmore who rediscovered it in the 1960s \cite{Wil1}. Prior to Willmore and after Germain, the German school of geometers led by Wilhelm Blaschke considered and studied the Willmore energy in the context of conformal geometry. Blaschke observed that minimal surfaces minimize the Willmore energy and moreover that the Willmore energy is invariant under conformal transformations of $\mathbb{R}^3\cup\{\infty\}$. In his nomenclature, critical points of the Willmore energy were called {\it conformal minimal surfaces} \cite{Bla}. Gerhard Thomsen, a graduate student of Blaschke, derived the Euler-Lagrange equation corresponding to the Willmore energy \cite{Tho} (this was further generalized to higher codimension in the 1970s by Joel Weiner \cite{Wei}). It is a fourth-order nonlinear partial differential equation for the immersion. Namely, let $\vec{\Phi}:\Sigma\rightarrow\mathbb{R}^{m\ge3}$ be a smooth immersion of an oriented surface $\Sigma$. The pull-back metric $g:=\vec{\Phi}^*g_{\mathbb{R}^3}$ is represented in local coordinates with components $g_{ij}$. 
We let $\nabla_j$ denote the corresponding covariant derivative. The second fundamental form is the normal valued 2-tensor with components $\vec{h}_{ij}:=\nabla_i\nabla_j\vec{\Phi}$. Its half-trace is the mean curvature vector $\vec{H}:=\dfrac{1}{2}\vec{h}^{j}_{j}$. The {\it Willmore equation} reads \begin{equation}\label{will0}
\Delta_\perp\vec{H}+\big(\vec{h}^{i}_{j}\cdot\vec{H}\big)\vec{h}^{j}_{i}-2|\vec{H}|^2\vec{H}\;=\;\vec{0}\:, \end{equation} where $\Delta_\perp$ is the negative covariant Laplacian for the connection $\nabla$ in the normal bundle derived from the ambient scalar product in $\mathbb{R}^m$. Note, in passing, that it is not at all clear how one could define a weak solution of (\ref{will0}) using only the requirement that $\vec{H}$ be square-integrable (i.e. that the Willmore energy be finite). \\
The Willmore energy appears in various areas of science: general relativity, as the main contributor to the Hawking mass \cite{Haw} ; in cell biology (see below) ; in nonlinear elasticity theory \cite{FJM} ; in optical design and lens crafting \cite{KR} ; in string theory, in the guise of a string action \`a la Polyakov \cite{Pol}. As mentioned earlier, the Willmore energy also plays a distinguished role in conformal geometry, where it has given rise to many interesting problems and where it has stimulated too many elaborate works to be cited here. We content ourselves with mentioning the remarkable tours de force of Fernando Marques and Andr\'e Neves \cite{MN} to solve the celebrated Willmore conjecture stating that, up to M\"obius transformations, the Clifford torus\footnote{obtained by rotating a circle of radius 1 around an axis located at a distance $\sqrt{2}$ of its center.} minimizes the Willmore energy amongst immersed tori in $\mathbb{R}^3$. \\
Aiming at solving the Willmore conjecture, Leon Simon initiated the ``modern" variational study of the Willmore functional \cite{Sim} when proving the existence of an embedded torus into $\mathbb{R}^{m\ge3}$ minimizing the $L^2$ norm of the second fundamental form. As this norm does not provide any control of the $C^1$ norm of the surface, speaking of ``immersion" is impossible. Simon thus had to weaken the geometric notion of immersion, and did so by using varifolds and their local approximation by biharmonic graphs. In the following years, this successful ``ambient approach" was used by various authors \cite{BK, KS1, KS2, KS3} to solve important questions about Willmore surfaces. \\
Several authors \cite{CDDRR, Dal, KS2, Pal, Rus} have observed that the Willmore equation in codimension 1 is cognate with a certain divergence form. We will prove below (Theorem \ref{Th1}) a pointwise equality to that effect in any codimension. The versions found in the aforementioned works are weaker in the sense that they only identify an integral identity. \\ In 2006, Tristan Rivi\`ere \cite{Riv2} showed that the fourth-order Willmore equation (\ref{will0}) can be written in divergence form and eventually recast as a system of two second-order equations enjoying a particular structure useful to the analysis of the critical points of the Willmore energy. This observation proved to be decisive in the resolution of several questions pertaining to Willmore surfaces \cite{YBer, BR1, BR2, BR3, KMR, MR, Riv2, Riv3, Riv4}. It also led to the so-called ``parametric approach'' of the problem. In contrast with the ambient approach where surfaces are viewed as subsets of $\mathbb{R}^m$, the parametric approach favors viewing surfaces as images of (weak) immersions, and the properties of these immersions become the analytical point of focus. \\
We briefly review the results in \cite{Riv2} (in codimension 1), adapting slightly the original notation and statements to match the orientation of our paper. With the same notation as above, the first conservation law in \cite{Riv2} states that a smooth immersion $\vec{\Phi}:\Sigma\rightarrow\mathbb{R}^3$ is a critical point of the Willmore functional if and only if \begin{equation}\label{ri1}
\nabla_j\big(\nabla^j\vec{H}-2(\vec{n}\cdot\nabla^j\vec{H})\vec{n}+|\vec{H}|^2\nabla^j\vec{\Phi}\big)\;=\;\vec{0}\:, \end{equation} where $\vec{n}$ is the outward unit normal. \\ Locally about every point, (\ref{ri1}) may be integrated to yield a function $\vec{L}\in\mathbb{R}^3$ satisfying $$
|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{L}\;=\;\nabla^j\vec{H}-2(\vec{n}\cdot\nabla^j\vec{H})\vec{n}+|\vec{H}|^2\nabla^j\vec{\Phi}\:, $$
where $|g|$ is the volume element of the pull-back metric $g$, and $\epsilon^{kj}$ is the Levi-Civita symbol. The following equations hold: \begin{equation}\label{ri2} \left\{\begin{array}{rcl}
\nabla_j\big(|g|^{-1/2}\epsilon^{kj}\vec{L}\times\nabla_k\vec{\Phi}-\vec{H}\times\nabla^j\vec{\Phi}\big)&=&\vec{0}\\[1ex]
\nabla_j\big(|g|^{-1/2}\epsilon^{kj}\vec{L}\cdot\nabla_k\vec{\Phi}\big)&=&0\:. \end{array}\right. \end{equation} These two additional conservation laws give rise (locally about every point) to two potentials $\vec{R}\in\mathbb{R}^3$ and $S\in\mathbb{R}$ satisfying \begin{equation*} \left\{\begin{array}{rcl}
\nabla_k\vec{R}&=&\vec{L}\times\nabla_k\vec{\Phi}-|g|^{1/2}\epsilon_{kj}\vec{H}\times\nabla^j\vec{\Phi}\\[1ex] \nabla_k S&=&\vec{L}\cdot\nabla_k\vec{\Phi}\:. \end{array}\right. \end{equation*} A computation shows that these potentials are related to each other via the system \begin{equation}\label{ri3} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j\vec{n}\cdot\partial_k\vec{R}\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j\vec{n}\,\partial_kS+\partial_j\vec{n}\times\partial_k\vec{R}\Big]\:.
\end{array}\right. \end{equation} This system is linear in $S$ and $\vec{R}$. It enjoys the particularity of being written in flat divergence form, with the right-hand side comprising Jacobian-type terms. The Willmore energy is, up to a topological constant, the $W^{1,2}$-norm of $\vec{n}$. For an immersion $\vec{\Phi}\in W^{2,2}\cap W^{1,\infty}$, one can show that $S$ and $\vec{R}$ belong to $W^{1,2}$. Standard Wente estimates may thus be performed on (\ref{ri3}) to thwart criticality and regularity statements ensue \cite{BR1, Riv2}. Furthermore, one verifies that (\ref{ri3}) is stable under weak limiting process, which has many nontrivial consequences \cite{BR1,BR3}. \\
In 2013, the author found that the divergence form and system derived by Rivi\`ere can be obtained by applying Noether's principle to the Willmore energy\footnote{The results were first presented at Oberwolfach in July 2013 during the mini-workshop {\it The Willmore functional and the Willmore conjecture}.}. The translation, rotation, and dilation invariances of the Willmore energy yield via Noether's principle the conservation laws (\ref{ri1}) and (\ref{ri2}).
\begin{Th}\label{Th1} Let $\vec{\Phi}:\Sigma\rightarrow\mathbb{R}^m$ be a smooth immersion of an oriented surface $\Sigma$. Introduce the quantities $$ \left\{\begin{array}{lcl}
\vec{\mathcal{W}}&:=&\Delta_\perp\vec{H}+\big(\vec{h}^{i}_{j}\cdot\vec{H}\big)\vec{h}^{j}_{i}-2|\vec{H}|^2\vec{H}\\[1ex]
\vec{T}^j&:=&\nabla^j\vec{H}-2\pi_{\vec{n}}\nabla^j\vec{H}+|\vec{H}|^2\nabla^j\vec{\Phi}\:, \end{array}\right. $$ where $\pi_{\vec{n}}$ denotes projection onto the normal space. \\ Via Noether's theorem, the invariance of the Willmore energy by translations, rotations, and dilations in $\mathbb{R}^m$ implies respectively the following three conservation laws: \begin{equation}\label{laws} \left\{\begin{array}{rcl} \nabla_j\vec{T}^j&=&-\,\vec{\mathcal{W}}\\[1ex] \nabla_j\big(\vec{T}^j\wedge\vec{\Phi}+\vec{H}\wedge\nabla^j\vec{\Phi}\big)&=&-\,\vec{\mathcal{W}}\wedge\vec{\Phi}\\[1ex] \nabla_j\big(\vec{T}^j\cdot\vec{\Phi}\big)&=&-\,\vec{\mathcal{W}}\cdot\vec{\Phi}\:. \end{array}\right. \end{equation} In particular, the immersion $\vec{\Phi}$ is Willmore if and only if the following conservation law holds: \begin{equation*}
\nabla_j\big(\nabla^j\vec{H}-2\pi_{\vec{n}}\nabla^j\vec{H}+|\vec{H}|^2\nabla^j\vec{\Phi} \big)\;=\;\vec{0}\:. \end{equation*} \end{Th}
For the purpose of local analysis, one may apply Hodge decompositions in order to integrate the three conservation laws (\ref{laws}). Doing so yields ``potential'' functions related to each other in a very peculiar way, which we state below. The somewhat unusual notation -- the price to pay to work in higher codimension -- is clarified in Section \ref{nota}. \begin{Th}\label{Th2} Let $\vec{\Phi}:D^2\rightarrow\mathbb{R}^m$ be a smooth\footnote{in practice, this strong hypothesis is reduced to $\vec{\Phi}\in W^{2,2}\cap W^{1,\infty}$ without modifying the result.} immersion of the flat unit disk $D^2\subset\mathbb{R}^2$. We denote by $\vec{n}$ the Gauss-map, by $g:=\vec{\Phi}^*g_{\mathbb{R}^m}$ the pull-back metric, and by $\Delta_g$ the associated negative Laplace-Beltrami operator. Suppose that $\vec{\Phi}$ satisfies the fourth-order equation $$
\Delta_\perp\vec{H}+\big(\vec{h}^{i}_{j}\cdot\vec{H}\big)\vec{h}^{j}_{i}-2|\vec{H}|^2\vec{H}\;=\;\vec{\mathcal{W}}\:, $$ for some given $\vec{\mathcal{W}}$. Let $\vec{V}$, $\vec{X}$, and $Y$ solve the problems $$ \Delta_g\vec{V}\;=\;-\,\vec{\mathcal{W}}\qquad,\qquad \Delta_g\vec{X}\;=\;\nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}\qquad,\qquad\Delta_gY\;=\;\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}\:. $$ Then $\vec{\Phi}$ is a solution of the second-order equation \begin{equation}\label{eqphi}
|g|^{1/2}\Delta_g\vec{\Phi}\;=\;-\,\epsilon^{jk}\Big[\partial_jS\partial_k\vec{\Phi}+\partial_j\vec{R}\bullet\partial_k\vec{\Phi}\Big]+|g|^{1/2}\big(\nabla^jY\nabla_j\vec{\Phi}+\nabla^j\vec{X}\bullet\nabla_j\vec{\Phi} \big)\:, \end{equation} where $S$ and $\vec{R}$ satisfy the system \begin{equation}\label{thesys000} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j(\star\,\vec{n})\cdot\partial_k\vec{R}
\,+\,|g|^{1/2}\nabla_j\big((\star\,\vec{n})\cdot\nabla^j\vec{X}\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j(\star\,\vec{n})\partial_kS+\partial_j(\star\,\vec{n})\bullet\partial_k\vec{R}\Big]+|g|^{1/2}\nabla_j\big((\star\,\vec{n})\nabla^jY+(\star\,\vec{n})\bullet\nabla^j\vec{X}\big)\:. \end{array}\right. \end{equation} \end{Th} In the special case when $\vec{\Phi}$ is Willmore, we have $\mathcal{\vec{W}}\equiv\vec{0}$, and we may choose $\vec{V}$, $\vec{X}$, and $Y$ to identically vanish. Then (\ref{thesys000}) becomes the conservative Willmore system found originally in \cite{Riv2}. \\
Although perhaps at first glance a little cryptic, Theorem \ref{Th2} turns out to be particularly useful for local analytical purposes. If the given $\mathcal{\vec{W}}$ is sufficiently regular, the non-Jacobian terms involving $Y$ and $\vec{X}$ on the right-hand side of (\ref{thesys000}) form a subcritical perturbation of the Jacobian terms involving $S$ and $\vec{R}$ (see \cite{BWW1} for details). From an analytic standpoint, one is left with studying a linear system of Jacobian-type. Wente estimates provide fine regularity information on the potential functions $S$ and $\vec{R}$, which may, in turn, be bootstrapped into (\ref{eqphi}) to yield regularity information on the immersion $\vec{\Phi}$ itself. \\
In \cite{DDW}, the authors study Willmore surfaces of revolution. They use the invariances of the Willmore functional to recast the Willmore ODE in a form that is a special case of (\ref{ri1}). Applying Noether's principle to the Willmore energy had already been independently done and used in the physics community \cite{CG, Mue}. As far as the author understands, these references are largely unknown in the analysis and geometry community. One goal of this paper is to bridge the gap, as well as to present results which do not appear in print. The author hopes it will increase in the analysis/geometry community the visibility of results, which, he believes, are useful to the study of fourth-order geometric problems associated with the Willmore energy. \\ For the sake of brevity, the present work focuses only on computational derivations and on examples. A second work \cite{BWW1} written jointly with Glen Wheeler and Valentina-Mira Wheeler will shortly be available. It builds upon the reformulations given in the present paper to derive various local analytical results.
\paragraph{Acknowledgments.} The author is grateful to Daniel Lengeler for pointing out to him \cite{CG} and \cite{Mue}. The author would also like to thank Hans-Christoph Grunau, Tristan Rivi\`ere, and Glen Wheeler for insightful discussions. The excellent working conditions of the welcoming facilities of the Forschungsinstitut f\"ur Mathematik at the ETH in Z\"urich are duly acknowledged.
\section{Main Result}
After establishing some notation in Section \ref{nota}, the contents of Theorem \ref{Th1} and of Theorem \ref{Th2} will be proved simultaneously in Section \ref{proof}.
\subsection{Notation}\label{nota}
In the sequel, $\vec{\Phi}:\Sigma\rightarrow\mathbb{R}^{m\ge3}$ denotes a smooth immersion of an oriented surface $\Sigma$ into Euclidean space. The induced metric is $g:=\vec{\Phi}^*g_{\mathbb{R}^m}$ with components $g_{ij}$ and with volume element $|g|$. The components of the second fundamental form are denoted $\vec{h}_{ij}$. The mean curvature is $\vec{H}:=\dfrac{1}{2}\,g^{ij}\vec{h}_{ij}$. At every point on $\Sigma$, there is an oriented basis $\{\vec{n}_\alpha\}_{\alpha=1,\ldots,m-2}$ of the normal space. We denote by $\pi_{\vec{n}}$ the projection on the space spanned by the vectors $\{\vec{n}_\alpha\}$, and by $\pi_T$ the projection on the tangent space (i.e. $\pi_T+\pi_{\vec{n}}=\text{id}$). The Gauss map $\vec{n}$ is the $(m-2)$-vector defined via $$
\star\,\vec{n}\;:=\;\dfrac{1}{2}\,|g|^{-1/2}\epsilon^{ab}\nabla_a\vec{\Phi}\wedge\nabla_b\vec{\Phi}\:, $$ where $\star$ is the usual Hodge-star operator, and $\epsilon^{ab}$ is the Levi-Civita symbol\footnote{Recall that the Levi-Civita is {\it not} a tensor. It satisfies $\epsilon_{ab}=\epsilon^{ab}$.} with components $\epsilon^{11}=0=\epsilon^{22}$ and $\epsilon^{12}=1=-\epsilon^{21}$. Einstein's summation convention applies throughout. We reserve the symbol $\nabla$ for the covariant derivative associated with the metric $g$. Local flat derivatives will be indicated by the symbol $\partial$. \\
\noindent As we work in any codimension, it is helpful to distinguish scalar quantities from vector quantities. For this reason, we append an arrow to the elements of $\Lambda^p(\mathbb{R}^m)$, for all $p>0$. The scalar product in $\mathbb{R}^m$ is denoted by a dot. We also use dot to denote the natural extension of the scalar product in $\mathbb{R}^m$ to multivectors (see \cite{Fed}).\\ Two operations between multivectors are useful. The interior multiplication $\res$ maps the pair comprising a $q$-vector $\gamma$ and a $p$-vector $\beta$ to the $(q-p)$-vector $\gamma\res\beta$. It is defined via \begin{equation*} \langle \gamma\res\beta\,,\alpha\rangle\;=\;\langle \gamma\,,\beta\wedge\alpha\rangle\:\qquad\text{for each $(q-p)$-vector $\alpha$.} \end{equation*} Let $\alpha$ be a $k$-vector. The first-order contraction operation $\bullet$ is defined inductively through \begin{equation*} \alpha\bullet\beta\;=\;\alpha\res\beta\:\:\qquad\text{when $\beta$ is a 1-vector}\:, \end{equation*} and \begin{equation*} \alpha\bullet(\beta\wedge\gamma)\;=\;(\alpha\bullet\beta)\wedge\gamma\,+\,(-1)^{pq}\,(\alpha\bullet\gamma)\wedge\beta\:, \end{equation*} when $\beta$ and $\gamma$ are respectively a $p$-vector and a $q$-vector.
\subsection{Variational Derivations}\label{proof}
Consider a variation of the form: \begin{equation*} \vec{\Phi}_t\;:=\;\vec{\Phi}\,+\,t\big(A^j\nabla_j\vec{\Phi}+\vec{B}\big)\:, \end{equation*} for some $A^j$ and some normal vector $\vec{B}$. We have \begin{equation*} \nabla_i\nabla_j\vec{\Phi}\;=\;\vec{h}_{ij}\:. \end{equation*} Denoting for notational convenience by $\delta$ the variation at $t=0$, we find: \begin{equation*} \delta\nabla_j\vec{\Phi}\;\equiv\;\nabla_j\delta\vec{\Phi}\;=\;(\nabla_jA^s)\nabla_s\vec{\Phi}+A^s\vec{h}_{js}+\nabla_j\vec{B}\:. \end{equation*} Accordingly, we find \begin{eqnarray*} \pi_{\vec{n}}\nabla^j\delta\nabla_j\vec{\Phi}&=&2(\nabla_jA^s)\vec{h}^j_{s}+A^s\pi_{\vec{n}}\nabla^j\vec{h}_{js}+\pi_{\vec{n}}\nabla^j\pi_{\vec{n}}\nabla_j\vec{B}+\pi_{\vec{n}}\nabla^j\pi_T\nabla_j\vec{B}\\ &=&2(\nabla_jA^s)\vec{h}^j_{s}+2A^s\nabla_s\vec{H}+\Delta_\perp\vec{B}+\pi_{\vec{n}}\nabla^j\pi_T\nabla_j\vec{B}\:, \end{eqnarray*} where we have used the definition of the normal Laplacian $\Delta_\perp$ and the contracted Codazzi-Mainardi equation \begin{equation*} \nabla^j\vec{h}_{js}\;=\;2\nabla_s\vec{H}\:. \end{equation*} Since $\vec{B}$ is a normal vector, one easily verifies that \begin{equation*} \pi_T\nabla_j\vec{B}\;=\;-\,(\vec{B}\cdot\vec{h}_j^s)\,\nabla_s\vec{\Phi}\:, \end{equation*} so that \begin{equation*} \pi_{\vec{n}}\nabla^j\pi_T\nabla_j\vec{B}\;=\;-\,(\vec{B}\cdot\vec{h}_j^s)\,\vec{h}_s^j\:. \end{equation*} Hence, the following identity holds \begin{equation}\label{eq1} \pi_{\vec{n}}\nabla^j\delta\nabla_j\vec{\Phi}\;=\;2(\nabla_jA^s)\vec{h}_{js}+2A^s\nabla_s\vec{H}+\Delta_\perp\vec{B}-(\vec{B}\cdot\vec{h}_j^s)\,\vec{h}_s^j\:. \end{equation} Note that \begin{equation}\label{eq2} \delta g^{ij}\;=\;-2\nabla^jA^i+2\vec{B}\cdot\vec{h}^{ij}\:, \end{equation} which, along with (\ref{eq1}) and (\ref{eq2}), then gives \begin{eqnarray*}
\delta|\vec{H}|^2&=&\vec{H}\cdot\delta\nabla^j\nabla_j\vec{\Phi}\;\;=\;\;\vec{H}\cdot\big[(\delta g^{ij})\partial_i\nabla_j\vec{\Phi}+\nabla^j\delta\nabla_j\vec{\Phi}\big]\\[1ex] &=&\vec{H}\cdot\big[(\delta g^{ij})\vec{h}_{ij}+\pi_{\vec{n}}\nabla^j\delta\nabla_j\vec{\Phi}\big]\\[1ex] &=&\vec{H}\cdot\big[\Delta_\perp\vec{B}+(\vec{B}\cdot\vec{h}^i_j)\vec{h}^j_i+2A^j\nabla_j\vec{H} \big]\:. \end{eqnarray*} Finally, since \begin{equation*}
\delta|g|^{1/2}\;=\;|g|^{1/2}\big[\nabla_jA^j-2\vec{B}\cdot\vec{H} \big]\:, \end{equation*} we obtain \begin{eqnarray*}
\delta\big(|\vec{H}|^2|g|^{1/2}\big)&=&|g|^{1/2}\Big[\vec{H}\cdot\Delta_\perp\vec{B}+(\vec{B}\cdot\vec{h}^i_j)(\vec{h}^j_i\cdot\vec{H})-2(\vec{B}\cdot\vec{H})|\vec{H}|^2+\nabla_j\big(|\vec{H}|^2A^j\big) \Big]\\[1ex]
&=&|g|^{1/2}\Big[\vec{B}\cdot\vec{\mathcal{W}}+\nabla_j\big(\vec{H}\cdot\nabla^j\vec{B}-\vec{B}\cdot\nabla^j\vec{H}+|\vec{H}|^2A^j\big) \Big]\:, \end{eqnarray*} where \begin{equation*}
\vec{\mathcal{W}}\;:=\;\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}^i_j)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\:. \end{equation*} Therefore, \begin{equation}\label{diffen}
\delta\int_{\Sigma_0}|\vec{H}|^2\;=\;\int_{\Sigma_0}\Big[\vec{B}\cdot\vec{\mathcal{W}}+\nabla_j\big(\vec{H}\cdot\nabla^j\vec{B}-\vec{B}\cdot\nabla^j\vec{H}+|\vec{H}|^2A^j\big) \Big]\:. \end{equation} This identity holds for every piece of surface $\Sigma_0\subset\Sigma$. We will now consider specific deformations which are known to preserve the Willmore energy (namely translations, rotations, and dilations \cite{BYC3}), and thus for which the right-hand side of (\ref{diffen}) vanishes.
\paragraph{Translations.}
We consider a deformation of the form \begin{equation*} \vec{\Phi}_t\;=\;\vec{\Phi}+t\vec{a}\qquad\text{for some fixed $\vec{a}\in\mathbb{R}^m$}\:. \end{equation*} Hence \begin{equation*} \vec{B}\;=\;\pi_{\vec{n}}\vec{a}\qquad\text{and}\qquad A^j\;=\;\vec{a}\cdot\nabla^j\vec{\Phi}\:. \end{equation*} This gives \begin{eqnarray*}
&&\vec{H}\cdot\nabla^j\vec{B}-\vec{B}\cdot\nabla^j\vec{H}+ |\vec{H}|^2A^j\\[1ex]
&=&\vec{a}\cdot\Big[(\vec{H}\cdot\nabla^j\vec{n}_\alpha-\vec{n}_\alpha\cdot\nabla^j\vec{H})\vec{n}_\alpha+H^\alpha\nabla^j\vec{n}_\alpha+|\vec{H}|^2\nabla^j\vec{\Phi}\Big]\\[1ex]
&=&\vec{a}\cdot\Big[\nabla^j\vec{H}-2\pi_{\vec{n}}\nabla^j\vec{H}+|\vec{H}|^2\nabla^j\vec{\Phi}\Big]\:, \end{eqnarray*} so that (\ref{diffen}) yields \begin{equation*}
\vec{a}\cdot\int_{\Sigma_0}\vec{\mathcal{W}}+\nabla_j\Big[\nabla^j\vec{H}-2\pi_{\vec{n}}\nabla^j\vec{H}+|\vec{H}|^2\nabla^j\vec{\Phi} \Big]\;=\;0\:. \end{equation*} As this holds for all $\vec{a}$ and all $\Sigma_0$, letting \begin{equation}\label{defT}
\vec{T}^j\;:=\;\nabla^j\vec{H}-2\pi_{\vec{n}}\nabla^j\vec{H}+|\vec{H}|^2\nabla^j\vec{\Phi} \end{equation} gives \begin{equation}\label{trans} \nabla_j\vec{T}^j\;=\;-\,\vec{\mathcal{W}}\:. \end{equation} This is equivalent to the conservation law derived in \cite{Riv2} in the case when $\vec{\mathcal{W}}=\vec{0}$ and when the induced metric is conformal with respect to the identity. At the equilibrium, i.e. when $\vec{\mathcal{W}}$ identically vanishes, $\vec{T}^j$ plays in the problem the role of stress-energy tensor. \\
For future convenience, we formally introduce the Hodge decomposition\footnote{Naturally, this is only permitted when working locally, or on a domain whose boundary is contractible to a point.} \begin{equation}\label{defLL}
\vec{T}^j\;=\;\nabla^j\vec{V}+|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{L}\:, \end{equation} for some $\vec{L}$ and some $\vec{V}$ satisfying \begin{equation}\label{defV} -\,\Delta_g\vec{V}\;=\;\vec{\mathcal{W}}\:. \end{equation}
\paragraph{Rotations.}
We consider a deformation of the form \begin{equation*} \vec{\Phi}_t\;=\;\vec{\Phi}+t\star(\vec{b}\wedge\vec{\Phi})\qquad\text{for some fixed $\vec{b}\in\Lambda^{m-2}(\mathbb{R}^m)$}\:. \end{equation*} In this case, we have \begin{equation*} B^\alpha\;=\;-\,\vec{b}\cdot\star(\vec{n}_\alpha\wedge\vec{\Phi})\qquad\text{and}\qquad A^j\;=\;-\,\vec{b}\,\cdot\star\big(\nabla^j\vec{\Phi}\wedge\vec{\Phi}\big)\:. \end{equation*} Hence \begin{eqnarray*}
&&\vec{H}\cdot\nabla^j\vec{B}-\vec{B}\cdot\nabla^j\vec{H}+ |\vec{H}|^2A^j\\[1ex]
&&\hspace{-1cm}=\;\;-\,\vec{b}\cdot\star\Big[(\vec{H}\cdot\nabla^j\vec{n}_\alpha-\vec{n}_\alpha\cdot\nabla^j\vec{H})(\vec{n}_\alpha\wedge\vec{\Phi})+H^\alpha\nabla^j(\vec{n}_\alpha\wedge\vec{\Phi})+|\vec{H}|^2\nabla^j\vec{\Phi}\wedge\vec{\Phi}\Big]\\ &&\hspace{-1cm}=\;\;-\,\vec{b}\cdot\star\big(\vec{T}^j\wedge\vec{\Phi}+\vec{H}\wedge\nabla^j\vec{\Phi}\big)\:, \end{eqnarray*} where we have used the tensor $\vec{T}^j$ defined in (\ref{defT}). Putting this last expression in (\ref{diffen}) and proceeding as in the previous paragraph yields the pointwise equalities \begin{eqnarray}\label{ach1} \vec{\mathcal{W}}\wedge\vec{\Phi}&=&-\,\nabla_j\big(\vec{T}^j\wedge\vec{\Phi}+\vec{H}\wedge\nabla^j\vec{\Phi}\big)\nonumber\\
&\stackrel{\text{(\ref{defLL})}}{\equiv}&-\,\nabla_j\big(\nabla^j\vec{V}\wedge\vec{\Phi}+|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{L}\wedge\vec{\Phi}+\vec{H}\wedge\nabla^j\vec{\Phi}\big)\nonumber\\
&=&-\,\nabla_j\big(\nabla^j\vec{V}\wedge\vec{\Phi}-|g|^{-1/2}\epsilon^{kj}\vec{L}\wedge\nabla_k\vec{\Phi}+\vec{H}\wedge\nabla^j\vec{\Phi}\big)\nonumber\\
&=&-\Delta_g\vec{V}\wedge\vec{\Phi}-\nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}+\nabla_j\big(|g|^{-1/2}\epsilon^{kj}\vec{L}\wedge\nabla_k\vec{\Phi}-\vec{H}\wedge\nabla^j\vec{\Phi}\big)\:. \end{eqnarray} Owing to (\ref{defV}), we thus find \begin{equation*}
\nabla_j\big(|g|^{-1/2}\epsilon^{kj}\vec{L}\wedge\nabla_k\vec{\Phi}-\vec{H}\wedge\nabla^j\vec{\Phi}\big)\;=\;\nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}\:. \end{equation*} It will be convenient to define two 2-vectors $\vec{X}$ and $\vec{R}$ satisfying the Hodge decomposition \begin{equation}\label{defR}
|g|^{-1/2}\epsilon^{kj}\vec{L}\wedge\nabla_k\vec{\Phi}-\vec{H}\wedge\nabla^j\vec{\Phi}\;=\;\nabla^j\vec{X}+|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{R}\:, \end{equation} with thus \begin{equation}\label{defX} \Delta_g\vec{X}\;=\;\nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}\:. \end{equation}
\paragraph{Dilations.}
We consider a deformation of the form \begin{equation*} \vec{\Phi}_t\;=\;\vec{\Phi}+t\lambda\vec{\Phi}\qquad\text{for some fixed $\lambda\in\mathbb{R}$}\:, \end{equation*} from which we obtain \begin{equation*} B^\alpha\;=\;\lambda\,\vec{n}_\alpha\cdot\vec{\Phi}\qquad\text{and}\qquad A^j\;=\;\lambda\,\nabla^j\vec{\Phi}\cdot\vec{\Phi}\:. \end{equation*} Hence \begin{eqnarray*}
&&\vec{H}\cdot\nabla^j\vec{B}-\vec{B}\cdot\nabla^j\vec{H}+ |\vec{H}|^2A^j\\[1ex]
&&\hspace{-1cm}=\;\;\lambda\Big[(\vec{H}\cdot\nabla^j\vec{n}_\alpha-\vec{n}_\alpha\cdot\nabla^j\vec{H})(\vec{n}_\alpha\cdot\vec{\Phi})+H^\alpha\nabla^j(\vec{n}_\alpha\cdot\vec{\Phi})+|\vec{H}|^2\nabla^j\vec{\Phi}\cdot\vec{\Phi} \Big]\\ &&\hspace{-1cm}=\;\;\lambda\,\vec{T}^j\cdot\vec{\Phi}\:, \end{eqnarray*} where we have used that $\vec{H}\cdot\nabla^j\vec{\Phi}=0$, and where $\vec{T}^j$ is as in (\ref{defT}). \\ Putting this last expression in (\ref{diffen}) and proceeding as before gives the pointwise equalities \begin{eqnarray}\label{ach2} \vec{\mathcal{W}}\cdot\vec{\Phi}&=&-\,\nabla_j\big(\vec{T}^j\cdot\vec{\Phi}\big)\\
&\equiv&-\,\nabla_j\big(\nabla^j\vec{V}\cdot\vec{\Phi}+|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{L}\cdot\vec{\Phi} \big)\nonumber\\
&=&-\,\Delta_g\vec{V}\cdot\vec{\Phi}-\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}+\nabla_j\big(|g|^{-1/2}\epsilon^{kj}\vec{L}\cdot\nabla_k\vec{\Phi}\big)\nonumber\:.
\nabla_j\big(|g|^{-1/2}\epsilon^{kj}\vec{L}\cdot\nabla_k\vec{\Phi}\big)\;=\;\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}\:. \end{equation*} We again use a Hodge decomposition to write \begin{equation}\label{defS}
|g|^{-1/2}\epsilon^{kj}\vec{L}\cdot\nabla_k\vec{\Phi}\;=\;\nabla^jY+|g|^{-1/2}\epsilon^{kj}\nabla_k S\:, \end{equation} where \begin{equation}\label{defY} \Delta_g Y\;=\;\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}\:. \end{equation}
Our next task consists in relating to each other the ``potentials'' $\vec{R}$ and $S$ defined above. Although this is the fruit of a rather elementary computation, the result it yields has far-reaching consequences which, as far as the author knows, have no direct empirical justification. Recall (\ref{defR}) and (\ref{defS}), namely: \begin{equation}\label{defRS} \left\{\begin{array}{lcl}
\nabla_k\vec{R}&=&\vec{L}\wedge\nabla_k\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}\big(\vec{H}\wedge\nabla^j\vec{\Phi}+\nabla^j\vec{X}\big)\\[1ex]
\nabla_k S&=&\vec{L}\cdot\nabla_k\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}\nabla^jY\:. \end{array}\right. \end{equation} Define the Gauss map \begin{equation*}
\star\,\vec{n}\;:=\;\dfrac{1}{2}\,|g|^{-1/2}\epsilon^{ab}\nabla_a\vec{\Phi}\wedge\nabla_b\vec{\Phi}\:. \end{equation*} We have \begin{eqnarray*}
(\star\,\vec{n})\cdot\nabla_k\vec{R}&=&\dfrac{1}{2}\,|g|^{-1/2}\epsilon^{ab}\big(\nabla_a\vec{\Phi}\wedge\nabla_b\vec{\Phi}\big)\cdot\Big[\vec{L}\wedge\nabla_k\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}\big(\vec{H}\wedge\nabla^j\vec{\Phi}+\nabla^j\vec{X}\big) \Big]\\[1ex]
&=&|g|^{-1/2}\epsilon^{ab}g_{bk}\vec{L}\cdot\nabla_a\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}(\star\,\vec{n})\cdot\nabla^j\vec{X}\\[1ex]
&=&|g|^{-1/2}\epsilon^{ab}g_{bk}\big(\nabla_aS+|g|^{1/2}\epsilon_{aj}\nabla^jY \big)\,-\,|g|^{1/2}\epsilon_{kj}(\star\,\vec{n})\cdot\nabla^j\vec{X}\\[1ex]
&=&|g|^{1/2}\epsilon_{bk}\nabla^bS+\nabla_kY\,-\,|g|^{1/2}\epsilon_{kj}(\star\,\vec{n})\cdot\nabla^j\vec{X}\:, \end{eqnarray*} where we have used that $\vec{H}$ is a normal vector, along with the elementary identities \begin{equation*}
|g|^{-1/2}\epsilon^{ab}g_{bk}\;=\;|g|^{1/2}\epsilon_{bk}g^{ab}\qquad\text{and}\qquad\epsilon^{ab}\epsilon_{aj}\;=\;\delta^{b}_{j}\:. \end{equation*} The latter implies \begin{equation}\label{nablaS}
\nabla^jS\;=\;|g|^{-1/2}\epsilon^{jk}\big((\star\,\vec{n})\cdot\nabla_k\vec{R}-\nabla_kY\big)\,+\,(\star\,\vec{n})\cdot\nabla^j\vec{X}\:. \end{equation} Analogously, we find\footnote{$ (\omega_1\wedge\omega_2)\bullet(\omega_3\wedge\omega_4)\;=\;(\omega_2\cdot\omega_4)\omega_1\wedge\omega_3-(\omega_2\cdot\omega_3)\omega_1\wedge\omega_4-(\omega_1\cdot\omega_4)\omega_2\wedge\omega_3+(\omega_1\cdot\omega_3)\omega_2\wedge\omega_4\:. $} \begin{eqnarray*}
(\star\,\vec{n})\bullet\nabla_k\vec{R}&=&\dfrac{1}{2}\,|g|^{-1/2}\epsilon^{ab}\big(\nabla_a\vec{\Phi}\wedge\nabla_b\vec{\Phi}\big)\bullet\Big[\vec{L}\wedge\nabla_k\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}\big(\vec{H}\wedge\nabla^j\vec{\Phi}+\nabla^j\vec{X}\big) \Big]\\[1ex]
&=&|g|^{-1/2}\epsilon^{ab}g_{bk}\nabla_a\vec{\Phi}\wedge\vec{L}\,+\,|g|^{-1/2}\epsilon^{ab}(\vec{L}\cdot\nabla_a\vec{\Phi})(\nabla_b\vec{\Phi}\wedge\nabla_k\vec{\Phi})\\
&&-\:\epsilon^{ab}\epsilon_{kb}\nabla_a\vec{\Phi}\wedge\vec{H}\,-\,|g|^{1/2}\epsilon_{kj}(\star\,\vec{n})\bullet\nabla^j\vec{X}\\[1ex]
&=&|g|^{1/2}\epsilon_{kb}\vec{L}\wedge\nabla^b\vec{\Phi}\,-\,(\star\,\vec{n})(\vec{L}\cdot\nabla_k\vec{\Phi})\,-\,\nabla_k\vec{\Phi}\wedge\vec{H}\,-\,|g|^{1/2}\epsilon_{kj}(\star\,\vec{n})\bullet\nabla^j\vec{X}\\[1ex]
&=&|g|^{1/2}\epsilon_{kj}\nabla^j\vec{R}+\nabla_k\vec{X}-(\star\,\vec{n})\big(\nabla_kS+|g|^{1/2}\epsilon_{kj}\nabla^jY \big)\,-\,|g|^{1/2}\epsilon_{kj}(\star\,\vec{n})\bullet\nabla^j\vec{X}\:. \end{eqnarray*} It hence follows that there holds \begin{equation}\label{nablaR}
\nabla^j\vec{R}\;=\;|g|^{-1/2}\epsilon^{kj}\big((\star\,\vec{n})\nabla_kS+(\star\,\vec{n})\bullet\nabla_k\vec{R}-\nabla_k\vec{X}\big)\,+\,(\star\,\vec{n})\nabla^jY\,+\,(\star\,\vec{n})\bullet\nabla^j\vec{X}\:. \end{equation} Applying divergence to each of (\ref{nablaS}) and (\ref{nablaR}) gives rise to the {\it conservative Willmore system}: \begin{equation}\label{conswillsys} \left\{\begin{array}{lcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j(\star\,\vec{n})\cdot\partial_k\vec{R}
\,+\,|g|^{1/2}\nabla_j\big((\star\,\vec{n})\cdot\nabla^j\vec{X}\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j(\star\,\vec{n})\partial_kS+\partial_j(\star\,\vec{n})\bullet\partial_k\vec{R}\Big]+|g|^{1/2}\nabla_j\big((\star\,\vec{n})\nabla^jY+(\star\,\vec{n})\bullet\nabla^j\vec{X}\big)\:, \end{array}\right. \end{equation} where $\partial_j$ denotes the derivative in flat local coordinates. \\
\noindent This system is to be supplemented with (\ref{defX}) and (\ref{defY}), which, in turn, are solely determined by the value of the Willmore operator $\vec{\mathcal{W}}$ via equation (\ref{defV}). There is furthermore another useful equation to add to the conservative Willmore system, namely one relating the potentials $S$ and $\vec{R}$ back to the immersion $\vec{\Phi}$. We now derive this identity. Using (\ref{defRS}), it easily follows that \begin{eqnarray*}
&&\hspace{-2cm}\epsilon^{km}\big(\nabla_k\vec{R}+|g|^{1/2}\epsilon_{kj}\nabla^j\vec{X}\big)\bullet\nabla_m\vec{\Phi}\\[1ex]
&=&\epsilon^{km}\big(\vec{L}\wedge\nabla_k\vec{\Phi}-|g|^{1/2}\epsilon_{kj}\vec{H}\wedge\nabla^j\vec{\Phi}\big)\bullet\nabla_m\vec{\Phi}\\[1ex]
&=&\epsilon^{km}\Big[\big(\vec{L}\cdot\nabla_m\vec{\Phi}\big)\nabla_k\vec{\Phi}\,-\,g_{mk}\vec{L}\,-\,|g|^{1/2}\epsilon_{km}\vec{H}\Big]\\[1ex]
&=&\epsilon^{km}\big(\nabla_mS+|g|^{1/2}\epsilon_{mj}\nabla^jY\big)\nabla_k\vec{\Phi}\,-\,2\,|g|^{1/2}\vec{H}\:. \end{eqnarray*} Since $\Delta_g\vec{\Phi}=2\vec{H}$, we thus find \begin{equation}\label{law4}
\partial_j\Big[\epsilon^{jk}\big(S\partial_k\vec{\Phi}+\vec{R}\bullet\partial_k\vec{\Phi} \big)+|g|^{1/2}\nabla^j\vec{\Phi}\Big]\;=\;|g|^{1/2}\big(\nabla^jY\nabla_j\vec{\Phi}+\nabla^j\vec{X}\bullet\nabla_j\vec{\Phi} \big)\:.
\end{equation} At the equilibrium (i.e. when $\vec{\mathcal{W}}=\vec{0}$), the right-hand side of the latter identically vanishes and we recover a conservation law. With the help of a somewhat tedious computation, one verifies that this conservation law follows from the invariance of the Willmore energy by inversion and Noether's theorem. To do so, one may for example consider an infinitesimal variation of the type $\delta\vec{\Phi}\;=\;|\vec{\Phi}|^2\vec{a}-2(\vec{\Phi}\cdot\vec{a})\vec{\Phi}$, for some fixed constant vector $\vec{a}\in\mathbb{R}^m$.\\
We summarize our results in the following pair of systems. \begin{equation}\label{side}
\left\{\begin{array}{rcl}\vec{\mathcal{W}}&=&\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\\[1ex] \Delta_g\vec{V}&=&-\,\vec{\mathcal{W}}\\[1ex] \Delta_g\vec{X}&=&\nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}\\[1ex] \Delta_g{Y}&=&\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi} \end{array}\right. \end{equation} and \begin{equation}\label{thesys} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j(\star\,\vec{n})\cdot\partial_k\vec{R}
\,+\,|g|^{1/2}\nabla_j\big((\star\,\vec{n})\cdot\nabla^j\vec{X}\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j(\star\,\vec{n})\partial_kS+\partial_j(\star\,\vec{n})\bullet\partial_k\vec{R}\Big]+|g|^{1/2}\nabla_j\big((\star\,\vec{n})\nabla^jY+(\star\,\vec{n})\bullet\nabla^j\vec{X}\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{\Phi}&=&-\,\epsilon^{jk}\Big[\partial_jS\partial_k\vec{\Phi}+\partial_j\vec{R}\bullet\partial_k\vec{\Phi}\Big]+|g|^{1/2}\big(\nabla^jY\nabla_j\vec{\Phi}+\nabla^j\vec{X}\bullet\nabla_j\vec{\Phi} \big)\:. \end{array}\right. \end{equation}
\noindent In the next section, we will examine more precisely the structure of this system through several examples.
\begin{Rm} Owing to the identities \begin{equation*} \vec{u}\bullet\vec{v}\;=\;(\star\vec{u})\times\vec{v}\quad\text{and}\quad \vec{u}\bullet\vec{w}\;=\;\star\big[(\star\vec{u})\times(\star\vec{w})\big]\:\quad\text{for}\:\:\:\vec{u}\in\Lambda^2(\mathbb{R}^3), \vec{v}\in\Lambda^1(\mathbb{R}^3), \vec{w}\in\Lambda^2(\mathbb{R}^3)\:, \end{equation*} we can in $\mathbb{R}^3$ recast the above systems as \begin{equation}\label{side1}
\left\{\begin{array}{rcl}\vec{\mathcal{W}}&=&\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\\[1ex] \Delta_g\vec{V}&=&-\,\vec{\mathcal{W}}\\[1ex] \Delta_g\vec{X}&=&\nabla^j\vec{V}\times\nabla_j\vec{\Phi}\\[1ex] \Delta_g{Y}&=&\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi} \end{array}\right. \end{equation} and \begin{equation}\label{thesys1} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j\vec{n}\cdot\partial_k\vec{R}
\,+\,|g|^{1/2}\nabla_j\big(\vec{n}\cdot\nabla^j\vec{X}\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j\vec{n}\,\partial_kS+\partial_j\vec{n}\times\partial_k\vec{R}\Big]+|g|^{1/2}\nabla_j\big(\vec{n}\,\nabla^jY+\vec{n}\times\nabla^j\vec{X}\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{\Phi}&=&-\,\epsilon^{jk}\Big[\partial_jS\,\partial_k\vec{\Phi}+\partial_j\vec{R}\times\partial_k\vec{\Phi}\Big]+|g|^{1/2}\big(\nabla^jY\nabla_j\vec{\Phi}+\nabla^j\vec{X}\times\nabla_j\vec{\Phi} \big)\:. \end{array}\right. \end{equation} In this setting, $\vec{X}$ and $\vec{R}$ are no longer 2-vectors, but rather simply vectors of $\mathbb{R}^3$. \end{Rm}
\section{Examples}
\subsection{Willmore Immersions}
A smooth immersion $\vec{\Phi}:\Sigma\rightarrow\mathbb{R}^{m\ge3}$ of an oriented surface $\Sigma$ with induced metric $g=\vec{\Phi}^*g_{\mathbb{R}^m}$ and corresponding mean curvature vector $\vec{H}$, is said to be Willmore if it is a critical point of the Willmore energy $\int_\Sigma|\vec{H}|^2d\text{vol}_g$. They are known \cite{Wil2,Wei} to satisfy the Euler-Lagrange equation $$
\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\;=\;\vec{0}\:. $$ In the notation of the previous section, this corresponds to the case $\vec{\mathcal{W}}=\vec{0}$. According to (\ref{side}), we have the freedom to set $\vec{V}$, $\vec{X}$, and $Y$ to be identically zero. The Willmore equation then yields the second-order system in divergence form \begin{equation}\label{thesyswillmore} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j(\star\,\vec{n})\cdot\partial_k\vec{R} \\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j(\star\,\vec{n})\partial_kS+\partial_j(\star\,\vec{n})\bullet\partial_k\vec{R}\Big]\\[1ex]
|g|^{1/2}\Delta_g\vec{\Phi}&=&-\,\epsilon^{jk}\Big[\partial_jS\partial_k\vec{\Phi}+\partial_j\vec{R}\bullet\partial_k\vec{\Phi}\Big]\:. \end{array}\right.
\end{equation} This system was originally derived by Rivi\`ere in \cite{Riv2}. For notational reasons, the detailed computations were carried out only in local conformal coordinates, that is when $g_{ij}=\text{e}^{2\lambda}\delta_{ij}$, for some conformal parameter $\lambda$. The analytical advantages of the Willmore system (\ref{thesyswillmore}) have been exploited in numerous works \cite{BR1, BR2, BR3, Riv2, Riv3, Riv4}. The flat divergence form of the operator $|g|^{1/2}\Delta_g$ and the Jacobian-type structure of the right-hand side enable using fine Wente-type estimates in order to produce non-trivial local information about Willmore immersions (see aforementioned works).
\begin{Rm} Any Willmore immersion will satisfy the system (\ref{thesyswillmore}). The converse is however not true. Indeed, in order to derive (\ref{thesyswillmore}), we first obtained the existence of some ``potential" $\vec{L}$ satisfying the first-order equation \begin{equation}\label{ceteq}
|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{L}\;=\;\nabla^j\vec{H}-2\pi_{\vec{n}}\nabla^j\vec{H}+|\vec{H}|^2\nabla^j\vec{\Phi}\:. \end{equation} In doing so, we have gone from the Willmore equation, which is second-order for $\vec{H}$, to the above equation, which is only first-order in $\vec{H}$, thereby introducing in the problem an extraneous degree of freedom. As we shall see in the next section, (\ref{ceteq}) is in fact equivalent to the conformally-constrained Willmore equation, which, as one might suspect, is the Willmore equation supplemented with an additional degree of freedom appearing in the guise of a Lagrange multiplier. \end{Rm}
\subsection{Conformally-Constrained Willmore Immersions}
Varying the Willmore energy $\int_\Sigma|\vec{H}|^2d\text{vol}_g$ in a fixed conformal class (i.e. with infinitesimal, smooth, compactly supported, conformal variations) gives rise to a more general class of surfaces called {\it conformally-constrained Willmore surfaces} whose corresponding Euler-Lagrange equation \cite{BPP, KL, Sch} is expressed as follows. Let $\vec{h}_0$ denote the trace-free part of the second fundamental form, namely \begin{equation*} \vec{h}_0\;:=\;\vec{h}\,-\,\vec{H} g\:. \end{equation*} A conformally-constrained Willmore immersion $\vec{\Phi}$ satisfies \begin{equation}\label{cwe}
\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\;=\;\big(\vec{h}_0\big)_{ij}q^{ij}\:, \end{equation} where $q$ is a transverse\footnote{i.e. $q$ is divergence-free: $\nabla^j q_{ji}=0\:\:\forall i$.} traceless symmetric 2-form. This tensor $q$ plays the role of Lagrange multiplier in the constrained variational problem. \\
In \cite{KS3}, it is shown that under a suitable ``small enough energy" assumption, a minimizer of the Willmore energy in a fixed conformal class exists and is smooth. The existence of a minimizer without any restriction on the energy is also obtained in \cite{Riv3} where it is shown that the minimizer is either smooth (with the possible exclusion of finitely many branch points if the energy is large enough to grant their formation) or else isothermic\footnote{the reader will find in \cite {Riv5} an interesting discussion on isothermic immersions.}. One learns in \cite{Sch} that non-degenerate critical points of the Willmore energy constrained to a fixed conformal class are solutions of the conformally constrained Willmore equation. Continuing along the lines of \cite{Riv3}, further developments are given in \cite{Riv6}, where the author shows that if either the genus of the surface satisfies $g\le2$, or else if the Teichm\"uller class of the immersion is not hyperelliptic\footnote{A class in the Teichm\"uller space is said to be {\it hyperelliptic} if the tensor products of holomorphic 1-forms do not generate the vector space of holomorphic quadratic forms.}, then any critical point $\vec{\Phi}$ of the Willmore energy for $C^1$ perturbations included in a submanifold of the Teichm\"uller space is in fact analytic and (\ref{cwe}) is satisfied for some transverse traceless symmetric 2-tensor $q$. \\
The notion of conformally constrained Willmore surfaces clearly generalizes that of Willmore surfaces, obtained via all smooth compactly supported infinitesimal variations (setting $q\equiv0$ in (\ref{cwe})). In \cite{KL}, it is shown that CMC Clifford tori are conformally constrained Willmore surfaces. In \cite{BR1}, the conformally constrained Willmore equation (\ref{cwe}) arises as that satisfied by the limit of a Palais-Smale sequence for the Willmore functional. \\
Minimal surfaces are examples of Willmore surfaces, while parallel mean curvature surfaces\footnote{parallel mean curvature surfaces satisfy $\pi_{\vec{n}} d\vec{H}\equiv\vec{0}$. They generalize to higher codimension the notion of constant mean curvature surfaces defined in $\mathbb{R}^3$. See \cite{YBer}.} are examples of conformally-constrained Willmore surfaces\footnote{{\it a non}-minimal parallel mean curvature surface is however {\it not} Willmore (unless of course it is the conformal transform of a Willmore surface; e.g. the round sphere).}. Not only is the Willmore energy invariant under reparametrization of the domain, but more remarkably, it is invariant under conformal transformations of $\mathbb{R}^m\cup\{\infty\}$. Hence, the image of a [conformally-constrained] Willmore immersion through a conformal transformation is again a [conformally-constrained] Willmore immersion. It comes thus as no surprise that the class of Willmore immersions [resp. conformally-constrained Willmore immersions] is considerably larger than that of immersions whose mean curvature vanishes [resp. is parallel], which is {\it not} preserved through conformal diffeomorphisms. \\
Comparing (\ref{cwe}) to the first equation in (\ref{side}), we see that $\vec{\mathcal{W}}=-\,\big(\vec{h}_0\big)_{ij}q^{ij}$. Because $q$ is traceless and transverse, we have $$ \big(\vec{h}_0\big)_{ij}q^{ij}\;\equiv\;\vec{h}_{ij}q^{ij}-\vec{H}g_{ij}q^{ij}\;=\;\vec{h}_{ij}q^{ij}\;=\;\nabla_j(q^{ij}\nabla_i\vec{\Phi})\:. $$ Accordingly, choosing $\nabla^j\vec{V}=-\,q^{ij}\nabla_i\vec{\Phi}$ will indeed solve the second equation (\ref{side}). Observe next that $$ \nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}\;=\;-\,q^{ij}g_{ij}\;=\;0\:, $$ since $q$ is traceless. Putting this into the fourth equation of (\ref{side}) shows that we may choose $Y\equiv0$. Furthermore, as $q$ is symmetric, it holds $$ \nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}\;=\;-\,q^{ij}\nabla_i\vec{\Phi}\wedge\nabla_j\vec{\Phi}\;=\;\vec{0}\:, $$ so that the third equation in (\ref{side}) enables us to choose $\vec{X}\equiv\vec{0}$. \\ Altogether, we see that a conformally-constrained Willmore immersion, just like a ``plain" Willmore immersion (i.e. with $q\equiv0$) satisfies the system (\ref{thesyswillmore}). In fact, it was shown in \cite{BR1} that to any smooth solution $\vec{\Phi}$ of (\ref{thesyswillmore}), there corresponds a transverse, traceless, symmetric 2-form $q$ satisfying (\ref{cwe}).
\subsection{Bilayer Models} Erythrocytes (also called red blood cells) are the body's principal means of transporting vital oxygen to the organs and tissues. The cytoplasm (i.e. the ``inside") of an erythrocyte is rich in hemoglobin, which chemically tends to bind to oxygen molecules and retain them. To maximize the possible intake of oxygen by each cell, erythrocytes -- unlike all the other types of cells of the human body -- have no nuclei\footnote{this is true for all mammals, not just for humans. However, the red blood cells of birds, fish, and reptiles do contain a nucleus.}. The membrane of an erythrocyte is a bilayer made of amphiphilic molecules. Each molecule is made of a ``head" (rather large) with a proclivity for water, and of a ``tail" (rather thin) with a tendency to avoid water molecules. When such amphiphilic molecules congregate, they naturally arrange themselves in a bilayer, whereby the tails are isolated from water by being sandwiched between two rows of heads. The membrane can then close over itself to form a vesicle. Despite the great biochemical complexity of erythrocytes, some phenomena may be described and explained with the sole help of physical mechanisms\footnote{The most celebrated such phenomenon was first observed by the Polish pathologist Tadeusz Browicz in the late 19$^{\text{th}}$ century \cite{Bro}. Using a microscope, he noted that the luminous intensity reflected by a red blood cell varies erratically along its surface, thereby giving the impression of flickering. In the 1930s, the Dutch physicist Frits Zernike invented the phase-contrast microscope which revealed that the erratic flickering found by Browicz is in fact the result of very minute and very rapid movements of the cell's membrane. 
These movements were finally explained in 1975 by French physicists Fran\c{c}oise Brochard and Jean-Fran\c{c}ois Lennon: they are the result of a spontaneous thermic agitation of the membrane, which occurs independently of any particular biological activity \cite{BL}.}. For example, to understand the various shapes an erythrocyte might assume, it is sensible to model the red blood cell by a drop of hemoglobin (``vesicle") whose membrane is made of a lipid bilayer \cite{Lip}. Such ``simple" objects, called {\it liposomes}, do exist in Nature, and they can be engineered artificially\footnote{The adjective ``simple" is to be understood with care: stable vesicles with non-trivial topology can be engineered and observed. See \cite{MB} and \cite{Sei}. }. The membrane of a liposome may be seen as a viscous fluid separated from water by two layers of molecules. Unlike a solid, it can undergo shear stress. Experimental results have however shown that vesicles are very resistant and the stress required to deform a liposome to its breaking-point is so great that in practice, vesicles evolving freely in water are never submitted to such destructive forces. One may thus assume that the membrane of a liposome is incompressible: its area remains constant. The volume it encloses also stays constant, for the inside and the outside of the vesicle are assumed to be isolated. As no shearing and no stretching are possible, one may wonder which forces dictate the shape of a liposome. To understand the morphology of liposomes, Canham \cite{Can}, Evans \cite{Eva}, and Helfrich \cite{Hef} made the postulate that, just as any other elastic material does, the membrane of a liposome tends to minimize its elastic energy. As we recalled in the introduction, the elastic energy of a surface is directly proportional to the Willmore energy. 
Accordingly, to understand the shape of a liposome, one would seek minimizers of the Willmore energy subject to constraints on the area and on the enclosed volume. A third constraint could be taken into account. As the area of the inner layer of the membrane is slightly smaller than the area of its outer layer, it takes fewer molecules to cover the former than it does to cover the latter. This difference, called {\it asymmetric area difference}, is fixed once and for all when the liposome forms. Indeed, no molecule can move from one layer to the other, for that would require its hydrophilic head to be, for some time, facing away from water. From a theoretical point of view, the relevant energy to study is thus: \begin{equation}\label{canham} E\;:=\;\int_{\Sigma}H^2d\text{vol}_g+\alpha A(\Sigma)+\beta V(\Sigma)+\gamma M(\Sigma)\:, \end{equation} where $H$ denotes the mean curvature scalar\footnote{in this section, we will content ourselves with working in codimension 1.}, $A(\Sigma)$, $V(\Sigma)$, $M(\Sigma)$ denote respectively the area, volume, and asymmetric area difference of the membrane $\Sigma$, and where $\alpha$, $\beta$, and $\gamma$ are three Lagrange multipliers. Depending on the authors' background, the energy (\ref{canham}) bears the names Canham, Helfrich, Canham-Helfrich, bilayer coupling model, spontaneous curvature model, among others.\\ This energy -- in the above form or analogous ones -- appears prominently in the applied sciences literature. It would be impossible to list here a comprehensive account of relevant references. The interested reader will find many more details in \cite{Sei} and \cite{Voi} and the references therein. In the more ``analytical" literature, the energy (\ref{canham}) is seldom found (except, of course, in the case when all three Lagrange multipliers vanish). We will, in time, recall precise instances in which it has been studied. 
But prior to doing so, it is interesting to pause for a moment and better understand a term which might be confusing to the mathematician reader, namely the asymmetric area difference $M(\Sigma)$. In geometric terms, it is simply the total curvature of $\Sigma$: \begin{equation}\label{defM} M(\Sigma)\;:=\;\int_{\Sigma}H\,d\text{vol}_g\:. \end{equation} This follows from the fact that the infinitesimal variation of the area is the mean curvature, and thus the area difference between two nearby surfaces is the first moment of curvature. Hence, we find the equivalent expression \begin{equation}\label{canam} E\;=\;\int_{\Sigma}\bigg(H+\dfrac{\gamma}{2}\bigg)^{\!2}d\text{vol}_g+\bigg(\alpha-\dfrac{\gamma^2}{4}\bigg) A(\Sigma)+\beta V(\Sigma)\:. \end{equation} This form of the bilayer energy is used, inter alia, in \cite{BWW2, Whe1}, where the constant $-\gamma/2$ is called {\it spontaneous curvature}. \\
From a purely mathematical point of view, one may study the energy (\ref{canham}) not just for embedded surfaces, but more generally for immersions. An appropriate definition for the volume $V$ must be assigned to such an immersion $\vec{\Phi}$. As is shown in \cite{MW}, letting as usual $\vec{n}$ denote the outward unit-normal vector to the surface, one defines \begin{equation*} V(\Sigma)\;:=\;\int_\Sigma\vec{\Phi}^*(d\mathcal{H}^3)\;=\;\int_{\Sigma}\vec{\Phi}\cdot\vec{n}\,d\text{vol}_g\:, \end{equation*} where $d\mathcal{H}^3$ is the Hausdorff measure in $\mathbb{R}^3$. Introducing the latter and (\ref{defM}) into (\ref{canham}) yields \begin{equation}\label{veneer} E\;=\;\int_{\Sigma}\big(H^2+\gamma H+\beta\,\vec{\Phi}\cdot\vec{n}+\alpha\big)\,d\text{vol}_g\:. \end{equation}
We next vary the energy $E$ along a normal variation of the form $\delta\vec{\Phi}=\vec{B}\equiv B\vec{n}$. Using the computations from the previous section, it is not difficult to see that \begin{equation}\label{varr1} \delta\int_\Sigma d\text{vol}_g\;=\;-\,2\int_{\Sigma}\vec{B}\cdot\vec{H} \,d\text{vol}_g\qquad\text{and}\qquad \delta\int_\Sigma H\,d\text{vol}_g\;=\;\int_{\Sigma} (\vec{B}\cdot\vec{n})\bigg(\dfrac{1}{2}h^i_jh^j_i-2H^2\bigg)\,d\text{vol}_g\:. \end{equation} With a bit more effort, in \cite{MW}, it is shown that \begin{equation}\label{varr2} \delta\int_\Sigma \vec{\Phi}\cdot\vec{n}\, d\text{vol}_g\;=\;-\int_{\Sigma}\vec{B}\cdot\vec{n}\,d\text{vol}_g\:. \end{equation} Putting (\ref{diffen}), (\ref{varr1}), and (\ref{varr2}) into (\ref{veneer}) yields the corresponding Euler-Lagrange equation \begin{equation}\label{ELhelf}
\vec{\mathcal{W}}\;:=\;\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\;=\;2\,\alpha\vec{H}+\beta\,\vec{n}-\gamma\bigg(\dfrac{1}{2}h^i_jh^j_i-2H^2 \bigg)\,\vec{n}\:. \end{equation} We now seek a solution to the second equation in (\ref{side1}), namely a vector $\vec{V}$ satisfying $\Delta_g\vec{V}=-\vec{\mathcal{W}}$. To do so, it suffices to observe that \begin{equation*} 2\vec{H}\;=\;\Delta_g\vec{\Phi}\qquad\text{and}\qquad \big(h^i_jh^j_i-4H^2\big)\vec{n}\;=\;\nabla_j\Big[(h^{ij}-2Hg^{ij})\nabla_i\vec{\Phi}\Big]\:, \end{equation*} where we have used the Codazzi-Mainardi identity. Furthermore, it holds \begin{equation*}
2\,\vec{n}\;=\;|g|^{-1/2}\epsilon^{ij}\nabla_i\vec{\Phi}\times\nabla_j\vec{\Phi}\;=\;\nabla_j\big[-|g|^{-1/2}\epsilon^{ij}\vec{\Phi}\times\nabla_i\vec{\Phi} \big]\:. \end{equation*} Accordingly, we may choose \begin{equation*}
\nabla^j\vec{V}\;=\;-\,\alpha\,\nabla^j\vec{\Phi}\,+\,\dfrac{\beta}{2}\,|g|^{-1/2}\epsilon^{ij}\,\vec{\Phi}\times\nabla_i\vec{\Phi}\,+\,\dfrac{\gamma}{2}\big(h^{ij}-2Hg^{ij}\big)\nabla_i\vec{\Phi}\:. \end{equation*} Introduced into the third equation of (\ref{side1}), the latter yields \begin{eqnarray}
\Delta_g\vec{X}&=&\nabla^j\vec{V}\times\nabla_j\vec{\Phi}\;\;=\;\;\dfrac{\beta}{2}\,|g|^{-1/2}\epsilon^{ij}\big(\vec{\Phi}\times\nabla_i\vec{\Phi}\big)\times\nabla_j\vec{\Phi}\nonumber\\[0ex]
&=&\dfrac{\beta}{2}\,|g|^{-1/2}\epsilon^{ij}\big[(\vec{\Phi}\cdot\nabla_j\vec{\Phi})\nabla_i\vec{\Phi}-g_{ij}\vec{\Phi}\big]\;\;=\;\;\dfrac{\beta}{4}\,|g|^{-1/2}\epsilon^{ij}\,\nabla_j|\vec{\Phi}|^2\,\nabla_i\vec{\Phi}\:,\nonumber \end{eqnarray} so that we may choose \begin{equation*}\label{XHel}
\nabla^j\vec{X}\;=\;\dfrac{\beta}{4}\,|g|^{-1/2}\epsilon^{ij}|\vec{\Phi}|^2\,\nabla_i\vec{\Phi}\:. \end{equation*} Then we find \begin{equation}\label{Xprop}
\vec{n}\times\nabla^j\vec{X}\;=\;\dfrac{\beta}{4}\,|\vec{\Phi}|^2\nabla^j\vec{\Phi}\qquad\text{and}\qquad\nabla^j\vec{X}\times\nabla_j\vec{\Phi}\;=\;\dfrac{\beta}{2}\,|\vec{\Phi}|^2\,\vec{n}\:. \end{equation} Analogously, the fourth equation of (\ref{side1}) gives \begin{eqnarray}\label{YHel}
\Delta_gY&=&\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}\;\;=\;\;-\,2\alpha\,-\,\gamma H\,+\,\dfrac{\beta}{2}\,|g|^{-1/2}\epsilon^{ij}\big(\vec{\Phi}\times\nabla_i\vec{\Phi}\big)\cdot\nabla_j\vec{\Phi}\nonumber\\[0ex] &=&-\,2\alpha\,-\,\gamma H\,+\,\beta\,\vec{\Phi}\cdot\vec{n}\:. \end{eqnarray}
With (\ref{Xprop}) and (\ref{YHel}), the system (\ref{thesys1}) becomes \begin{equation}\label{thesyshel} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j\vec{n}\cdot\partial_k\vec{R}\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j\vec{n}\,\partial_kS+\partial_j\vec{n}\times\partial_k\vec{R}\Big]+|g|^{1/2}\nabla_j\bigg(\vec{n}\,\nabla^jY+\dfrac{\beta}{4}\,|\vec{\Phi}|^2\nabla^j\vec{\Phi} \bigg)\\[1.5ex]
|g|^{1/2}\Delta_g\vec{\Phi}&=&-\,\epsilon^{jk}\Big[\partial_jS\,\partial_k\vec{\Phi}+\partial_j\vec{R}\times\partial_k\vec{\Phi}\Big]+|g|^{1/2}\bigg(\nabla_j\vec{\Phi}\nabla^jY+\dfrac{\beta}{2}\,|\vec{\Phi}|^2\,\vec{n} \bigg)\:. \end{array}\right. \end{equation}
Under suitable regularity hypotheses (e.g. that the immersion be locally Lipschitz and lie in the Sobolev space $W^{2,2}$), one can show that the non-Jacobian term in the second equation, namely $$
|g|^{1/2}\nabla_j\bigg(\vec{n}\,\nabla^jY+\dfrac{\beta}{4}\,|\vec{\Phi}|^2\nabla^j\vec{\Phi} \bigg)\:, $$ is a subcritical perturbation of the Jacobian term. Analyzing (\ref{thesyshel}) becomes then very similar to analyzing the Willmore system (\ref{thesyswillmore}). Details may be found in \cite{BWW1}. \\
In some cases, our so-far local considerations can yield global information. If the vesicle we consider has the topology of a sphere, every loop on it is contractible to a point. The Hodge decompositions which we have performed in Section II to deduce the existence of $\vec{V}$, and subsequently that of $\vec{X}$ and $Y$ hold globally. Integrating (\ref{YHel}) over the whole surface $\Sigma$ then gives the {\it balancing condition}: $$ 2\alpha A(\Sigma)+\gamma M(\Sigma)\;=\;\beta V(\Sigma)\:. $$ This condition is well-known in the physics literature \cite{CG, Sei}.
\begin{Rm} Another instance in which minimizing the energy (\ref{veneer}) arises is the isoperimetric problem \cite{KMR, Scy}, which consists in minimizing the Willmore energy under the constraint that the dimensionless isoperimetric ratio $\sigma:=A^3/V^2$ be a given constant in $(0,1]$. As both the Willmore energy and the constraint are invariant under dilation, one might fix the volume $V=1$, forcing the area to satisfy $A=\sigma^{1/3}$. This problem is thus equivalent to minimizing the energy (\ref{veneer}) with $\gamma=0$ (no constraint imposed on the total curvature, but the volume and area are prescribed separately). One is again led to the system (\ref{thesyshel}) and local analytical information may be inferred. \end{Rm}
\subsection{Chen's Problem}
An isometric immersion $\vec{\Phi}:N^{n}\rightarrow\mathbb{R}^{m>n}$ of an $n$-dimensional Riemannian manifold $N^n$ into Euclidean space is called {\it biharmonic} if the corresponding mean-curvature vector $\vec{H}$ satisfies \begin{equation}\label{chen} \Delta_g\vec{H}\;=\;\vec{0}\:. \end{equation} The study of biharmonic submanifolds was initiated by B.-Y. Chen \cite{BYC1} in the mid 1980s as he was seeking a classification of the finite-type submanifolds in Euclidean spaces. Independently, G.Y. Jiang \cite{Jia} also studied (\ref{chen}) in the context of the variational analysis of the biharmonic energy in the sense of Eells and Lemaire. Chen conjectures that a biharmonic immersion is necessarily minimal\footnote{The conjecture as originally stated is rather analytically vague: no particular hypotheses on the regularity of the immersion are {\it a priori} imposed. Many authors consider only smooth immersions.}. Smooth solutions of (\ref{chen}) are known to be minimal for $n=1$ \cite{Dim1}, for $(n,m)=(2,3)$ \cite{Dim2}, and for $(n,m)=(3,4)$ \cite{HV}. A growth condition allows a simple PDE argument to work in great generality \cite{Whe2}. Chen's conjecture has also been solved under a variety of hypotheses (see the recent survey paper \cite{BYC2}). The statement remains nevertheless open in general, and in particular for immersed surfaces in $\mathbb{R}^m$. In this section, we show how our reformulation of the Willmore equation may be used to recast the fourth-order equation (\ref{chen}) in a second-order system with interesting analytical features. \\
Let us begin by inspecting the tangential part of (\ref{chen}), namely, \begin{eqnarray}\label{tgtchen} \vec{0}&=&\pi_T\Delta_g\vec{H}\;=\;\big(\nabla_k\vec{\Phi}\cdot\nabla_j\nabla^j\vec{H}\big)\nabla^k\vec{\Phi}\nonumber\\[1ex] &=&\Big[\nabla_j\big(\nabla_k\vec{\Phi}\cdot\nabla^j\vec{H} \big)-\vec{h}_{jk}\cdot\nabla^j\vec{H} \Big]\nabla^k\vec{\Phi}\;\;=\;\;-\,\Big[\vec{H}\cdot\nabla_j\vec{h}^{j}_{k}+2\,\vec{h}_{jk}\cdot\nabla^j\vec{H} \Big]\nabla^k\vec{\Phi}\nonumber\\[1ex]
&=&-\,\Big[\nabla_k|\vec{H}|^2+2\,\vec{h}_{jk}\cdot\nabla^j\vec{H} \Big]\nabla^k\vec{\Phi}\:, \end{eqnarray} where we have used that $\vec{H}$ is a normal vector, as well as the Codazzi-Mainardi identity. With the help of this equation, one obtains a decisive identity, which we now derive, and which too makes use of the Codazzi-Mainardi equation. \begin{eqnarray}\label{deci}
\nabla_j\Big[|\vec{H}|^2\nabla^j\vec{\Phi}-2(\vec{H}\cdot\vec{h}^{jk})\nabla_k\vec{\Phi}\Big]&=&-\,\nabla_j|\vec{H}|^2\nabla^j\vec{\Phi}+2|\vec{H}|^2\vec{H}-2(\vec{h}^{jk}\cdot\nabla_j\vec{H})\nabla_k\vec{\Phi}-2(\vec{H}\cdot\vec{h}^{jk})\vec{h}_{jk}\nonumber\\[1ex]
&\stackrel{\text{(\ref{tgtchen})}}{=}&2|\vec{H}|^2\vec{H}-2(\vec{H}\cdot\vec{h}^{jk})\vec{h}_{jk}\:. \end{eqnarray} Note that, in general, there holds \begin{eqnarray}\label{bidule} \pi_T\nabla^j\vec{H}&=&\big[\nabla_k\vec{\Phi}\cdot\nabla^j\vec{H}\big]\nabla^k\vec{\Phi}\;\;=\;\;-\,\big[\vec{H}\cdot\vec{h}^{j}_{k}\big]\nabla^k\vec{\Phi}\:. \end{eqnarray} An immersion whose mean curvature satisfies (\ref{chen}) has thus the property that \begin{eqnarray*} \Delta_\perp\vec{H}&:=&\pi_{\vec{n}}\nabla_j\pi_{\vec{n}}\nabla^j\vec{H}\;\;=\;\;\pi_{\vec{n}}\Delta_g\vec{H}\,-\,\pi_{\vec{n}}\nabla_j\pi_T\nabla^j\vec{H}\;\;=\;\;\pi_{\vec{n}}\nabla_j\Big[\big(\vec{H}\cdot\vec{h}^{j}_{k}\big)\nabla^k\vec{\Phi} \Big]\nonumber\\[0ex] &=&\big(\vec{H}\cdot\vec{h}^{j}_{k}\big)\vec{h}^{k}_{j}\:. \end{eqnarray*} Putting the latter into the first equation of (\ref{side}) yields \begin{equation}\label{ELchen}
\vec{\mathcal{W}}\;:=\;\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\;=\;2(\vec{H}\cdot\vec{h}^{jk})\vec{h}_{jk}-2|\vec{H}|^2\vec{H}\:. \end{equation} We now seek a solution to the second equation in (\ref{side1}), namely a vector $\vec{V}$ satisfying $\Delta_g\vec{V}=-\vec{\mathcal{W}}$. To do so, it suffices to compare (\ref{ELchen}) and (\ref{deci}) to see that we may choose \begin{equation*}
\nabla^j\vec{V}\;=\;|\vec{H}|^2\nabla^j\vec{\Phi}-2(\vec{H}\cdot\vec{h}^{jk})\nabla_k\vec{\Phi}\:. \end{equation*} Introduced into the third equation of (\ref{side1}), the latter yields immediately \begin{equation*} \Delta_g\vec{X}\;=\;\nabla^j\vec{V}\wedge\nabla_j\vec{\Phi}\;=\;\vec{0}\:, \end{equation*} thereby prompting us to choose $\vec{X}\equiv\vec{0}$. The fourth equation of (\ref{side1}) gives \begin{equation*}
\Delta_gY\;=\;\nabla^j\vec{V}\cdot\nabla_j\vec{\Phi}\;=\;-\,2|\vec{H}|^2\:. \end{equation*} On the other hand, owing to (\ref{chen}) and (\ref{bidule}), one finds \begin{equation*}
\Delta_g(\vec{\Phi}\cdot\vec{H})\;=\;-\,2|\vec{H}|^2\:, \end{equation*} so that we may choose $Y=\vec{\Phi}\cdot\vec{H}$. Introducing these newly found facts for $\vec{X}$ and $Y$ into the system (\ref{thesys1}) finally gives \begin{equation}\label{thesyschen} \left\{\begin{array}{rcl}
|g|^{1/2}\Delta_g S&=&\epsilon^{jk}\partial_j(\star\,\vec{n})\cdot\partial_k\vec{R}\\[1ex]
|g|^{1/2}\Delta_g\vec{R}&=&\epsilon^{jk}\Big[\partial_j(\star\,\vec{n})\partial_kS+\partial_j(\star\,\vec{n})\bullet\partial_k\vec{R}\Big]+|g|^{1/2}\nabla_j\big((\star\,\vec{n})\nabla^j(\vec{\Phi}\cdot\vec{H})\big)\\[1ex]
|g|^{1/2}\Delta_g\vec{\Phi}&=&-\,\epsilon^{jk}\Big[\partial_jS\partial_k\vec{\Phi}+\partial_j\vec{R}\bullet\partial_k\vec{\Phi}\Big]+|g|^{1/2}\big(\nabla^j(\vec{\Phi}\cdot\vec{H})\nabla_j\vec{\Phi} \big)\:. \end{array}\right. \end{equation}
As previously noted, the reformulation of (\ref{trans}) in the form (\ref{thesys1}) is mostly useful to reduce the nonlinearities present in (\ref{trans}). Moreover, a local analysis of (\ref{thesys1}) is possible when the non-Jacobian terms on the right-hand side are subcritical perturbations of the Jacobian terms. This is not {\it a priori} the case for (\ref{thesyschen}), and the equation (\ref{chen}) is linear to begin with. Nevertheless, the system (\ref{thesyschen}) has enough suppleness\footnote{owing mostly to the fact that the function $Y:=\vec{\Phi}\cdot\vec{H}$ satisfies $\Delta_gY\le 0$ and $\Delta_g^2Y\le0$.} and enough structural features to deduce interesting analytical facts about solutions of (\ref{chen}), under mild regularity requirements. This is discussed in detail in a work to appear \cite{BWW1}\footnote{see also \cite{Whe3} which contains interesting estimates for equation (\ref{chen}).}.
\subsection{Point-Singularities}
As was shown in \cite{YBer, BR2, Riv2}, the Jacobian-type system (\ref{side}) is particularly suited to the local analysis of point-singularities. The goal of this section is not to present a detailed account of the local analysis of point-singularities -- this is one of the topics of \cite{BWW1} -- but rather to give the reader a few pertinent key arguments on how this could be done. \\
Let $\vec{\Phi}:D^2\setminus\{0\}\rightarrow\mathbb{R}^m$ be a smooth immersion of the unit-disk, continuous at the origin (the origin will be the point-singularity in question). In order to make sense of the Willmore energy of the immersion $\vec{\Phi}$, we suppose that $\int_{D^2}|\vec{H}|^2d\text{vol}_g<\infty$. Our immersion is assumed to satisfy the problem \begin{equation}\label{ptprob}
\Delta_\perp\vec{H}+(\vec{H}\cdot\vec{h}_j^i)\vec{h}^j_i-2|\vec{H}|^2\vec{H}\;=\;\vec{\mathcal{W}}\qquad\text{on}\:\:D^2\setminus\{0\}\:, \end{equation} where the vector $\vec{\mathcal{W}}$ is given. It may depend only on geometric quantities (as is the case in the Willmore problem or in Chen's problem), but it may also involve ``exterior" quantities (as is the case in the conformally-constrained Willmore problem). To simplify the presentation, we will not in this paper discuss the integrability assumptions that must be imposed on $\vec{\mathcal{W}}$ to carry out the procedure that will be outlined. The interested reader is invited to consult \cite{BWW1} for more details on this topic. \\
\noindent As we have shown in (\ref{defT}), equation (\ref{ptprob}) may be rephrased as \begin{equation*}
\partial_j\big(|g|^{1/2}\vec{T}^j\big)\;=\;-\,\vec{\mathcal{W}}\qquad\text{on}\:\:D^2\setminus\{0\}\:, \end{equation*} for some suitable tensor $\vec{T}^j$ defined solely in geometric terms. Consider next the problem \begin{equation*} \Delta_g\vec{V}\;=\;-\,\vec{\mathcal{W}}\qquad\text{on}\:\:D^2\:. \end{equation*} As long as $\vec{\mathcal{W}}$ is not too wildly behaved, this equation will have at least one solution. Let next $\mathcal{L}_g$ satisfy \begin{equation*}
\partial_j\big(|g|^{1/2}\nabla^j\mathcal{L}_g\big)\;=\;\delta_0\qquad\text{on}\:\:D^2\:. \end{equation*} If the immersion is correctly chosen (e.g. $\vec{\Phi}\in W^{2,2}\cap W^{1,\infty}$), the solution $\mathcal{L}_g$ exists and has suitable analytical properties (see \cite{BWW1} for details). \\ We have \begin{equation}\label{poinc1}
\partial_j\Big[|g|^{1/2}\big(\vec{T}^j-\nabla^j\vec{V}-\vec{\beta}\,\nabla^j\mathcal{L}_g\big)\Big]\;=\;\vec{0}\qquad\text{on}\:\:D^2\setminus\{0\}\:, \end{equation} for any constant $\vec{\beta}\in\mathbb{R}^m$, and in particular for the unique $\vec{\beta}$ fulfilling the circulation condition that \begin{equation}\label{poinc2} \int_{\partial D^2}\vec{\nu}\cdot\big(\vec{T}^j-\nabla^j\vec{V}-\vec{\beta}\,\nabla^j\mathcal{L}_g\big)\;=\;0\:, \end{equation} where $\vec{\nu}\in\mathbb{R}^2$ denotes the outer unit normal vector to the boundary of the unit-disk. This vector $\vec{\beta}$ will be called {\it residue}.\\ Bringing together (\ref{poinc1}) and (\ref{poinc2}) and calling upon the Poincar\'e lemma, one infers the existence of an element $\vec{L}$ satisfying \begin{equation}
\vec{T}^j-\nabla^j\vec{V}-\vec{\beta}\,\nabla^j\mathcal{L}_g\;=\;|g|^{-1/2}\epsilon^{kj}\nabla_k\vec{L}\:, \end{equation} with the same notation as before. We are now in the position of repeating {\it mutatis mutandis} the computations derived in the previous section, taking into account the presence of the residue. We define $\vec{X}$ and $Y$ via: \begin{equation}\label{ptside} \left\{\begin{array}{rcl}
\Delta_g\vec{X}&=&\nabla^j\big(\vec{V}+\vec{\beta}\mathcal{L}_g\big)\wedge\nabla_j\vec{\Phi}\\[1ex] \Delta_g{Y}&=&\nabla^j\big(\vec{V}+\vec{\beta}\mathcal{L}_g\big)\cdot\nabla_j\vec{\Phi} \end{array}\right.\qquad\text{on}\:\:D^2\:. \end{equation} One verifies that the following equations hold \begin{equation*} \left\{\begin{array}{rcl}
\nabla^k\Big[\vec{L}\wedge\nabla_k\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}\big(\vec{H}\wedge\nabla^j\vec{\Phi}+\nabla^j\vec{X}\big)\Big]&=&\vec{0}\\[2ex]
\nabla^k\Big[\vec{L}\cdot\nabla_k\vec{\Phi}\,-\,|g|^{1/2}\epsilon_{kj}\nabla^jY\Big]&=&0 \end{array}\right.\qquad\text{on}\:\:D^2\setminus\{0\}\:. \end{equation*} Imposing suitable hypotheses on the integrability of $\vec{\mathcal{W}}$ yields that the bracketed quantities in the latter are square-integrable. With the help of a classical result of Laurent Schwartz \cite{Scw}, the equations may be extended without modification to the whole unit-disk. As before, this grants the existence of two potential functions $S$ and $\vec{R}$ which satisfy (\ref{defRS}) and the system (\ref{thesys}) on $D^2$. The Jacobian-type/divergence-type structure of the system sets the stage for a local analysis argument, which eventually yields a local expansion of the immersion $\vec{\Phi}$ around the point-singularity. This expansion involves the residue $\vec{\beta}$. The procedure was carried out in detail for Willmore immersions in \cite{BR2}\footnote{An equivalent notion of residue was also identified in \cite{KS2}.}, and for conformally constrained Willmore immersions in \cite{YBer}. Further considerations can be found in \cite{BWW1}.
\end{document} |
\begin{document}
\title{The pleasures and pains of studying the two-type Richardson model}
\author{Maria Deijfen \thanks{Department of Mathematics, Stockholm University, 106 91 Stockholm, Sweden. E-mail: [email protected]} \and Olle H\"{a}ggstr\"{o}m \thanks{Department of Mathematical Sciences, Chalmers University of Technology, 412 96 G\"oteborg, Sweden. E-mail: [email protected]}}
\date{July 2007}
\maketitle
\thispagestyle{empty}
\begin{abstract}
\noindent This paper provides a survey of known results and open problems for the two-type Richardson model, which is a stochastic model for competition on $\mathbb{Z}^d$. In its simplest formulation, the Richardson model describes the evolution of a single infectious entity on $\mathbb{Z}^d$, but more recently the dynamics have been extended to comprise two competing growing entities. For this version of the model, the main question is whether there is a positive probability for both entities to simultaneously grow to occupy infinite parts of the lattice, the conjecture being that the answer is yes if and only if the entities have the same intensity. In this paper attention focuses on the two-type model, but the most important results for the one-type version are also described.
\noindent \emph{Keywords:} Richardson model, first-passage percolation, asymptotic shape, competing growth, coexistence.
\noindent AMS 2000 Subject Classification: 60K35, 82B43. \end{abstract}
\section{Introduction}
Consider an interacting particle system in which, at any time $t$, each site $x\in\mathbb{Z}^d$ is in either of two states, denoted by 0 and 1. A site in state 0 flips to a 1 at rate proportional to the number of nearest neighbors in state 1, while a site in state 1 remains a 1 forever. We may think of sites in state 1 as being occupied by some kind of infectious entity, and the model then describes the propagation of an infection where each infected site tries to infect each of its nearest neighbors on $\mathbb{Z}^d$ at some constant rate $\lambda>0$. More precisely, if at time $t$ a vertex $x$ is infected and a neighboring vertex $y$ is uninfected, then, conditional on the dynamics up to time $t$, the probability that $x$ infects $y$ during a short time window $(t, t+h)$ is $\lambda h + o(h)$. Here and in what follows, sites in state 0 and 1 are referred to as uninfected and infected respectively. This is the intuitive description of the model; a formal definition is given in Section \ref{sect:one-type}.
The model is a special case of a class of models introduced by Richardson (1973), and is commonly referred to as the Richardson model. It has several cousins among processes from mathematical biology, see e.g.\ Eden (1961), Williams and Bjerknes (1972) and Bramson and Griffeath (1981). The model is also a special case of so called first-passage percolation, which was introduced in Hammersley and Welsh (1965) as a model for describing the passage of a fluid through a porous medium. In first-passage percolation, each edge of the $\mathbb{Z}^d$-lattice is equipped with a random variable representing the time it takes for the fluid to traverse the edge, and the Richardson model is obtained by letting these passage times be i.i.d.\ exponential.
Since an infected site stays infected forever, the set of infected sites in the Richardson model increases to cover all of $\mathbb{Z}^d$ as $t\to\infty$, and attention focuses on \emph{how} this set grows. The main result is roughly that the infection grows linearly in time in each fixed direction and that, scaled by a factor $1/t$, the set of infected points converges to a non-random asymptotic shape as $t\to\infty$. To prove that the growth is linear in a fixed direction involves Kingman's subadditive ergodic theorem -- in fact, the study of first-passage percolation was one of the main motivations for the development of subadditive ergodic theory. That the linear growth is preserved when all directions are considered simultaneously is stated in the celebrated shape theorem (Theorem \ref{thm:shape} in Section \ref{sect:two-type_bounded}) which originates from Richardson (1973).
Now consider the following extension of the Richardson model, known as the two-type Richardson model and introduced in H\"{a}ggstr\"{o}m and Pemantle (1998). Instead of two possible states for the sites there are three states, which we denote by 0, 1 and 2. The process then evolves in such a way that, for $i=1,2$, a site in state 0 flips to state $i$ at rate $\lambda_i$ times the number of nearest neighbors in state $i$ and once in state 1 or 2, a site remains in that state forever. Interpreting states 1 and 2 as two different types of infection and state 0 as absence of infection, this gives rise to a model describing the simultaneous spread of two infections on $\mathbb{Z}^d$. To rigorously define the model requires a bit more work; see Section \ref{sect:two-type_bounded}. In what follows we will always assume that $d\geq 2$; the model makes sense also for $d=1$ but the questions considered here become trivial.
A number of similar extensions of (one-type) growth models to (two-type) competition models appear in the literature; see for instance Neuhauser (1992), Durrett and Neuhauser (1997), Kordzakhia and Lalley (2005) and Ferrari et al.\ (2006). These tend to require somewhat different techniques, and results tend not to be easily translated from these other models to the two-type Richardson model (and vice versa). Closer to the latter are (non-Markovian) competition models based on first-passage percolation models with non-exponential passage time variables -- Garet and Marchand (2005), Hoffman (2005:1), Hoffman (2005:2), Garet and Marchand (2006), Gou\'er\'e (2007), Pimentel (2007) -- and a certain continuum model -- Deijfen et al.\ (2004), Deijfen and H\"aggstr\"om (2004), Gou\'er\'e (2007). For ease of exposition, we shall not consider these variations even in cases where results generalize.
The behavior of the two-type Richardson model depends on the initial configuration of the infection and on the ratio between the intensities $\lambda_1$ and $\lambda_2$ of the infection types. Assume first, for simplicity, that the model is started at time 0 from two single sites, the origin being type 1 infected and the site $(1,0,\ldots,0)$ next to the origin being type 2 infected. Three different scenarios for the development of the infection are conceivable:
\begin{itemize} \item[(a)] The type 1 infection at some point completely surrounds type 2, thereby preventing type 2 from growing any further.
\item[(b)] Type 2 similarly strangles type 1.
\item[(c)] Both infections grow to occupy infinitely many sites. \end{itemize}
\noindent It is not hard to see that outcomes (a) and (b), where one of the infection types at some point encloses the other, have positive probability regardless of $\lambda_1$ and $\lambda_2$. This is because each of (a) and (b) can be guaranteed through some finite initial sequence of infections. In contrast, scenario (c) -- referred to as infinite coexistence -- can never be guaranteed from any finite sequence of infections, and is therefore harder to deal with: the main challenge is to decide whether, for given values of the parameters $\lambda_1$ and $\lambda_2$, this event (c) has positive probability or not. Intuitively, infinite coexistence represents some kind of power balance between the infections, and it seems reasonable to suspect that such a balance is possible if and only if the infections are equally powerful, that is, when $\lambda_1=\lambda_2$. This is Conjecture \ref{samex_conj} in Section \ref{sect:two-type_bounded}, which goes back to H\"{a}ggstr\"{o}m and Pemantle (1998), and, although a lot of progress has been made, it is not yet fully proved. We describe the state of the art in Sections \ref{sect:symmetric} and \ref{sect:nonsymmetric}.
As mentioned above, apart from the intensities, the development of the infections in the two-type model also depends on the initial state of the model. However, if we are only interested in deciding whether the event of infinite coexistence has positive probability or not, it turns out that, as long as the initial configuration is bounded and one of the sets does not completely surround the other, the precise configuration does not matter, that is, whether infinite coexistence is possible or not is determined only by the relation between the intensities. This is proved in Deijfen and H\"{a}ggstr\"{o}m (2006:1); see Theorem \ref{th:startomr} in Section \ref{sect:two-type_bounded} for a precise formulation. Of course one may also consider unbounded initial configurations. Starting with both infection types occupying infinitely many sites means -- apart from in very labored cases -- that they will both infect infinitely many sites. A more interesting case is when one of the infection types starts from an infinite set and the other one from a finite set. We may then ask if outcomes where the finite type infects infinitely many sites have positive probability or not. This question is dealt with in Deijfen and H\"{a}ggstr\"{o}m (2007), and we describe the results in Section \ref{sect:unbounded}.
The dynamics of the two-type Richardson model is deceptively simple, and yet gives rise to intriguing phenomena on a global scale. In this lies a large part of the pleasure indicated in the title. Furthermore, proofs tend to involve elegant probabilistic techniques such as coupling, subadditivity and stochastic comparisons, adding more pleasure. The pain alluded to (which by the way is not so severe that it should dissuade readers from entering this field) comes from the stubborn resistance that some of the central problems have so far put up against attempts to solve them. A case in point is the ``only if'' direction of the aforementioned Conjecture \ref{samex_conj}, saying that infinite coexistence starting from a bounded initial configuration does not occur when $\lambda_1 \neq \lambda_2$.
\section{The one-type model} \label{sect:one-type}
As mentioned in the introduction, the one-type Richardson model is equivalent to first-passage percolation with i.i.d.\ exponential passage times. To make the construction of the model more precise, first define $E_{\mathbb{Z}^d}$ as the edge set for the $\mathbb{Z}^d$ lattice (i.e., each pair of vertices $x,y \in \mathbb{Z}^d$ at Euclidean distance $1$ from each other have an edge $e \in E_{\mathbb{Z}^d}$ connecting them). Then attach i.i.d.\ non-negative random variables $\{\tau(e)\}_{e \in E_{\mathbb{Z}^d}}$ to the edges. We take each $\tau(e)$ to be exponentially distributed with parameter $\lambda>0$, meaning that \[ P(\tau(e)>t) = \exp(-\lambda t) \] for all $t\geq 0$. For $x,y \in \mathbb{Z}^d$, define \begin{equation} \label{eq:path_time} T(x,y) = \inf_\Gamma \sum_{e \in \Gamma} \tau(e) \end{equation} where the infimum is over all paths $\Gamma$ from $x$ to $y$. The Richardson model with a given set $S_0 \subset \mathbb{Z}^d$ of initially infected sites is now defined by taking the set $S_t$ of sites infected at time $t$ to be \begin{equation} \label{eq:infected_at_time_t} S_t = \{ x \in \mathbb{Z}^d : T(y,x) \leq t\mbox{ for some } y \in S_0\} \, . \end{equation} It turns out that the infimum in (\ref{eq:path_time}) is a.s.\ a minimum and attained by a unique path. That $S_t$ grows in the way described in the introduction is a consequence of the memoryless property of the exponential distribution: for any $s,t >0$ we have that
$P(\tau(e)>s+t \, | \tau(e)>s) = \exp(-\lambda t)$.
Note that for any $x,y,z \in\mathbb{Z}^d$ we have $T(x,y)\leq T(x,z)+T(z,y)$. This subadditivity property opens up for the use of subadditive ergodic theory in analyzing the model. To formulate the basic result, let $T(x)$ be the time when the point $x\in\mathbb{Z}^d$ is infected when starting from a single infected site at the origin and write $\mathbf{n}=(n,0,\ldots,0)$. It then follows from the subadditive ergodic theorem -- see e.g.\ Kingman (1968) -- that there is a constant $\mu_\lambda$ such that $T(\mathbf{n})/n\to\mu_\lambda$ almost surely and in $L_1$ as $n\to\infty$. Furthermore, a simple time scaling argument implies that $\mu_\lambda=\lambda\mu_1$ and hence, writing $\mu_1=\mu$, we have that
\begin{equation}\label{eq:time_constant} \lim_{n\to\infty}\frac{T(\mathbf{n})}{n}=\lambda\mu\quad\textrm{a.s. and in }L_1. \end{equation}
\noindent The constant $\mu$ indicates the inverse asymptotic speed of the growth along the axes in a unit rate process and is commonly referred to as the time constant. It turns out that $\mu>0$, so that indeed the growth is linear in time. Similarly, an analog of (\ref{eq:time_constant}) holds in any direction, that is, for any $x\in\mathbb{Z}^d$, there is a constant $\mu(x)>0$ such that $T(nx)/n\to\lambda\mu(x)$. The infection hence grows linearly in time in each fixed direction and the asymptotic speed of the growth in a given direction is an almost sure constant.
We now turn to the shape theorem, which asserts roughly that the linear growth of the infection is preserved also when all directions are considered simultaneously. More precisely, when scaled down by a factor $1/t$ the set $S_t$ converges to a non-random shape $A$. To formalize this, let $\tilde{S}_t\subset \mathbb{R}^d$ be a continuum version of $S_t$ obtained by replacing each $x\in S_t$ by a unit cube centered at $x$.
\begin{theorem}[Shape Theorem] \label{thm:shape} There is a compact convex set $A$ such that, for any $\varepsilon>0$, almost surely $$ (1-\varepsilon)\lambda A\subset\frac{\tilde{S}_t}{t} \subset (1+\varepsilon)\lambda A $$ for large $t$. \end{theorem}
\noindent In the above form, the shape theorem was proved in Kesten (1973) as an improvement on the original ``in probability" version, which appears already in Richardson (1973). See also Cox and Durrett (1988) and Boivin (1991) for generalizations to first-passage percolation processes with more general passage times. Results concerning fluctuations around the asymptotic shape can be found, e.g., in Kesten (1993), Alexander (1993) and Newman and Piza (1995), and, for certain other passage time distributions, in Benjamini et al.\ (2003).
Working out exactly, or even approximately, what the asymptotic shape $A$ is has turned out to be difficult. Obviously the asymptotic shape inherits all symmetries of the $\mathbb{Z}^d$ lattice -- invariance under reflection and permutation of coordinate hyperplanes -- and it is known to be compact and convex, but, apart from this, not much is known about its qualitative features. These difficulties with characterizing the shape revolve around the fact that $\mathbb{Z}^d$ is not rotationally invariant, which causes the growth to behave differently in different directions. For instance, simulations on $\mathbb{Z}^2$ indicate that the asymptotic growth is slightly faster along the axes as compared to the diagonals. There is however no formal proof of this.
Before proceeding with the two-type model, we mention some work concerning properties of the time-minimizing paths in (\ref{eq:path_time}), also known as geodesics. Starting at time $0$ with a single infection at the origin ${\bf 0}$, we denote by $\Gamma(x)$ the (unique) path $\Gamma$ for which the infimum $T({\bf 0}, x)$ in (\ref{eq:path_time}) is attained. Define $\Psi=\cup_{x\in\mathbb{Z}^d} \Gamma(x)$, making $\Psi$ a graph specifying which paths the infection actually takes. It is not hard to see that $\Psi$ is a tree spanning all of $\mathbb{Z}^d$ and hence there must be at least one semi-infinite self-avoiding path from the origin (called an end) in $\Psi$. The issue of whether $\Psi$ has more than one end was noted by H\"aggstr\"om and Pemantle (1998) to be closely related to the issue of infinite coexistence in the two-type Richardson model with $\lambda_1=\lambda_2$: such infinite coexistence happens with positive probability starting from a finite initial configuration if and only if $\Psi$ has at least two ends with positive probability.
We say that an infinite path $x_1,x_2,\ldots$ has asymptotic direction $\hat{x}$ if $x_k/|x_k|\to\hat{x}$ as $k\to\infty$. In $d=2$, it has been conjectured that every end in $\Psi$ has an asymptotic direction and that, for every $x\in\mathbb{R}^2$, there is at least one end (but never more than two) in $\Psi$ with asymptotic direction $\hat{x}$. In particular, this would mean that $\Psi$ has uncountably many ends. For results supporting this conjecture, see Newman (1995) and Newman and Licea (1996). In the former of these papers, the conjecture is shown to be true provided an unproven but highly plausible assumption on the asymptotic shape $A$, saying roughly that the boundary is sufficiently smooth. See also Lalley (2003) for related work.
Results not involving unproven assumptions are comparatively weak: The coexistence result of H\"aggstr\"om and Pemantle (1998) shows for $d=2$ that $\Psi$ has at least two ends with positive probability. This was later improved to $\Psi$ having almost surely at least $2d$ ends, by Hoffman (2005:2) for $d=2$ and by Gou\'er\'e (2007) for higher dimensions.
\section{Introducing two types} \label{sect:two-type_bounded}
The definition of the two-type Richardson model turns out to be simplest in the symmetric case $\lambda_1=\lambda_2$, where the same passage time variables $\{\tau(e)\}_{e \in E_{\mathbb{Z}^d}}$ as in the one-type model can be used, with $\lambda= \lambda_1=\lambda_2$. Suppose we start with an initial configuration $(S^1_0, S^2_0)$ of infected sites, where $S^1_0 \subset \mathbb{Z}^d$ are those initially containing type $1$ infection, and $S^2_0 \subset \mathbb{Z}^d$ are those initially containing type $2$ infection. We wish to define the sets $S_t^1$ and $S_t^2$ of type 1 and type $2$ infected sites for all $t>0$. To this end, set $S_0=S^1_0 \cup S^2_0$, and take the set $S_t=S_t^1 \cup S_t^2$ of infected sites at time $t$ to be given by precisely the same formula (\ref{eq:infected_at_time_t}) as in the one-type model; a vertex $x\in S_t$ is then assigned infection $1$ or $2$ depending on whether the $y \in S_0$ for which \[ \inf\{T(y,x):y\in S_0\} \] is attained is in $S_0^1$ or $S_0^2$.
As in the one-type model, it is a straightforward exercise involving the memoryless property of the exponential distribution to verify that $(S_t^1, S_t^2)_{t \geq 0}$ behaves in terms of infection intensities as described in the introduction.
This construction demonstrates an intimate link between the one-type and the symmetric two-type Richardson model: if we watch the two-type model wearing a pair of glasses preventing us from distinguishing the two types of infection, what we see behaves exactly as the one-type model. The link between infinite coexistence in the two-type model and the number of ends in the tree of infection $\Psi$ of the one-type model claimed in the previous section is also a consequence of the construction.
In the asymmetric case $\lambda_1 \neq \lambda_2$, the two-type model is somewhat less trivial to define due to the fact that the time it takes for infection to spread along a path depends on the type of infection. There are various ways to deal with this, one being to assign, independently to each $e \in E_{{\mathbb{Z}^d}}$, two independent random variables $\tau_1(e)$ and $\tau_2(e)$, exponentially distributed with respective parameters $\lambda_1$ and $\lambda_2$, representing the time it takes for infections $1$ resp.\ $2$ to traverse $e$. Starting from an initial configuration $(S^1_0, S^2_0)$, we may picture the infections as spreading along the edges, taking time $\tau_1(e)$ or $\tau_2(e)$ to cross $e$ depending on the type of infection, with the extra condition that once a vertex becomes hit by one type of infection it becomes inaccessible for the other type. This is intuitively clear, but readers with a taste for detail may require a more rigorous definition, which however we refrain from here; see H\"aggstr\"om and Pemantle (2000) and Deijfen and H\"aggstr\"om (2006:1).
We now move on to describing conjectures and results. Write $G_i$ for the event that type $i$ infects infinitely many sites on $\mathbb{Z}^d$ and define $G=G_1\cap G_2$. The question at issue is:
\begin{equation}\label{eq:coex?} \textrm{Does $G$ have positive probability?} \end{equation}
\noindent A priori, the answer to this question may depend both on the initial configuration -- that is, on the choice of the sets $S_0^1$ and $S_0^2$ -- and on the ratio between the infection intensities $\lambda_1$ and $\lambda_2$. However, it turns out that, if we are not interested in the actual value of the probability of $G$, but only in whether it is positive or not, then the initial configuration is basically irrelevant, as long as neither of the initial sets completely surrounds the other. This motivates the following definition.
\begin{defn} Let $\xi_1$ and $\xi_2$ be two disjoint finite subsets of $\mathbb{Z}^d$. We say that one of the sets ($\xi_i$) \emph{strangles} the other ($\xi_j$) if there exists no infinite self-avoiding path in $\mathbb{Z}^d$ that starts at a vertex in $\xi_j$ and that does not intersect $\xi_i$. The pair $(\xi_1,\xi_2)$ is said to be \emph{fertile} if neither of the sets strangles the other. \end{defn}
Now write $P^{\lambda_1,\lambda_2}_{\xi_1,\xi_2}$ for the distribution of a two-type process started from $S_0^1=\xi_1$ and $S_0^2=\xi_2$. We then have the following result.
\begin{theorem}\label{th:startomr} Let $(\xi_1,\xi_2)$ and $(\xi_1',\xi_2')$ be two fertile pairs of disjoint finite subsets of $\mathbb{Z}^d$, where $d\geq 2$. For all choices of $(\lambda_1,\lambda_2)$, we have $$ P^{\lambda_1,\lambda_2}_{\xi_1,\xi_2}(G)>0\Leftrightarrow P^{\lambda_1,\lambda_2}_{\xi_1',\xi_2'}(G)>0. $$ \end{theorem}
For connected initial sets $\xi_1$ and $\xi_2$ and $d=2$, this result is proved in H\"{a}ggstr\"{o}m and Pemantle (1998). The idea of the proof in that case is that, by controlling the passage times of only finitely many edges, two processes started from $(\xi_1,\xi_2)$ and $(\xi'_1,\xi'_2)$ respectively can be made to evolve to the same total infected set after some finite time, with the same configuration of the infection types on the boundary. Coupling the processes from this time on and observing that the development of the infections depends only on the boundary configuration yields the result. This argument however breaks down when the initial sets are not connected (since it is then not sure that the same boundary configuration can be obtained in the two processes) and it is unclear whether it applies for $d\geq 3$. Theorem \ref{th:startomr} is proved in full generality in Deijfen and H\"{a}ggstr\"{o}m (2006:1), using a more involved coupling construction.
It follows from Theorem \ref{th:startomr} that the answer to (\ref{eq:coex?}) depends only on the value of the intensities $\lambda_1$ and $\lambda_2$. Hence it is sufficient to consider a process started from $S_0^1=\mathbf{0}$ and $S_0^2=\mathbf{1}$ (recall that $\mathbf{n}=(n,0,\ldots,0)$), and in this case we drop subscripts and write $P^{\lambda_1,\lambda_2}$ for $P^{\lambda_1,\lambda_2}_{{\bf 0}, {\bf 1}}$. Also, by time-scaling, we may assume that $\lambda_1=1$. The following conjecture, where we write $\lambda_2=\lambda$, goes back to H\"{a}ggstr\"{o}m and Pemantle (1998).
\begin{conj}\label{samex_conj} In any dimension $d\geq 2$, we have that $P^{1,\lambda}(G)>0$ if and only if $\lambda=1$. \end{conj}
\noindent The conjecture is no doubt true, although proving it has turned out to be a difficult task. In fact, the ``only if'' direction is not yet fully established. In the following two sections we describe the existing results for $\lambda=1$ and $\lambda\neq 1$ respectively.
\section{The case $\lambda=1$} \label{sect:symmetric}
When $\lambda=1$, we are dealing with two equally powerful infections and Conjecture \ref{samex_conj} predicts a positive probability for infinite coexistence. This part of the conjecture has been proved:
\begin{theorem}\label{th:lambda=1} If $\lambda=1$, we have, for any $d\geq 2$, that $P^{1,\lambda}(G)>0$. \end{theorem}
\noindent This was first proved in the special case $d=2$ by H\"{a}ggstr\"{o}m and Pemantle (1998). That proof has a very ad hoc flavor, and heavily exploits not only the two-dimensionality but also other specific properties of the square lattice, including a lower bound on the time constant $\mu$ in (\ref{eq:time_constant}) that just happens to be good enough. When eventually the result was generalized to higher dimensions, which was done simultaneously and independently by Garet and Marchand (2005) and Hoffman (2005:1), much more appealing proofs were obtained. Yet another distinct proof of Theorem \ref{th:lambda=1} was given by Deijfen and H\"{a}ggstr\"{o}m (2007). All four proofs are different, though if you inspect them for a smallest common denominator you find that they all make critical use of the fact that the time constant $\mu$ is strictly positive. We will give the Garet--Marchand proof below. In Hoffman's proof ergodic theory is applied to the tree of infection $\Psi$ and a so-called Busemann function which is shown to exhibit contradictory behavior under the assumption that infinite coexistence has probability zero. The Deijfen--H\"{a}ggstr\"{o}m proof proceeds via the two-type Richardson model with certain infinite initial configurations (cf.\ Section \ref{sect:unbounded}).
\noindent {\bf Proof of Theorem \ref{th:lambda=1}:} The following argument is due to Garet and Marchand (2005), though our presentation follows more closely the proof of an analogous result in a continuum setting in Deijfen and H\"{a}ggstr\"{o}m (2004) -- a paper that, despite the publication dates, was preceded by and also heavily influenced by Garet and Marchand (2005).
Fix a small $\varepsilon>0$. By Theorem \ref{th:startomr}, we are free to choose any finite starting configuration we want, and here it turns out convenient to begin with a single type $1$ infection at the origin ${\bf 0}$, and a single type $2$ infection at a vertex ${\bf n}=(n,0,\ldots, 0)$, where $n$ is large enough so that \begin{description} \item{(i)} $E[T({\bf 0}, {\bf n})] \leq (1 + \varepsilon) n \mu$, and \item{(ii)} $P(T({\bf 0}, {\bf n}) < (1 - \varepsilon) n \mu) < \varepsilon$; \end{description} note that both (i) and (ii) hold for $n$ large enough due to the asymptotic speed result (\ref{eq:time_constant}). The reader may easily check, for later reference, that (i) and (ii) together with the nonnegativity of $T({\bf 0}, {\bf n})$ imply for any event $B$ with $P(B)=\alpha$ that \begin{equation} \label{eq:key_estimate_GM}
E[T({\bf 0}, {\bf n})\, | \, \neg B] \, \leq \, \left(1 + \frac{3\varepsilon}{1-\alpha}\right) n \mu \, . \end{equation} Next comes an important telescoping idea: for any positive integer $k$ we have \begin{eqnarray*} E[T({\bf 0}, k{\bf n})] & = & E[T({\bf 0}, {\bf n})] + E[T({\bf 0}, 2{\bf n}) - T({\bf 0}, {\bf n})] + E[T({\bf 0}, 3{\bf n}) - T({\bf 0}, 2{\bf n})] \\ & & + \ldots + E[T({\bf 0}, k{\bf n}) - T({\bf 0}, (k-1){\bf n})] \, . \end{eqnarray*} Since $\lim_{k \rightarrow \infty}k^{-1}E[T({\bf 0}, k{\bf n})] = n \mu$, there must exist arbitrarily large $k$ such that \[ E[T({\bf 0}, (k+1){\bf n}) - T({\bf 0}, k{\bf n})] \geq (1 - \varepsilon)n \mu \, . \] By taking ${\bf m}= k{\bf n}$, and by translation and reflection invariance, we may deduce that \begin{equation} \label{eq:for_arbitrarily_large_m} E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m})] \geq (1 - \varepsilon)n \mu \end{equation} for some arbitrarily large $m$. We will pick such an $m$; how large will soon be specified.
The goal is to show that $P(G)>0$, so we may assume for contradiction that $P(G)=0$. By symmetry of the initial configuration, we then have that $P(G_1)=P(G_2)=\frac{1}{2}$. This implies that \[ \lim_{m \rightarrow \infty} P({\bf -m} \mbox{ gets infected by type 2})= \lim_{m \rightarrow \infty} P(T({\bf n}, -{\bf m}) < T({\bf 0}, -{\bf m})) =\frac{1}{2} \] so let us pick $m$ in such a way that \begin{equation} \label{eq:our_chosen_m} P(T({\bf n}, -{\bf m}) < T({\bf 0}, -{\bf m})) \geq \frac{1}{4} \end{equation} while also (\ref{eq:for_arbitrarily_large_m}) holds. Write $B$ for the event in (\ref{eq:our_chosen_m}). The expectation $E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m})]$ may be decomposed as \begin{eqnarray*} E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m})] & = &
E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m}) \, | B]P(B) \\
& & +E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m}) \, | \neg B]P(\neg B) \\
& \leq & E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m}) \, | \neg B]P(\neg B) \\ & \leq & \frac{3}{4}
E[T({\bf n}, -{\bf m}) - T({\bf 0}, -{\bf m}) \, | \neg B] \\
& \leq & \frac{3}{4} E[T({\bf n}, {\bf 0}) | \neg B] \\ & \leq & \frac{3}{4} (1 + 4 \varepsilon) n \mu \end{eqnarray*} where the second-to-last inequality is due to the triangle inequality $T({\bf n}, -{\bf m}) \leq T({\bf n}, {\bf 0}) + T({\bf 0}, - {\bf m})$, and the last one uses (\ref{eq:key_estimate_GM}). For small $\varepsilon$, this contradicts (\ref{eq:for_arbitrarily_large_m}), so the proof is complete. ${
\Box}$
\section{The case $\lambda\neq 1$} \label{sect:nonsymmetric}
Let us move on to the case when $\lambda\neq 1$, that is, when the type 2 infection has a different intensity than type 1. It then seems unlikely that the kind of equilibrium which is necessary for infinite coexistence to occur would persist in the long run. However, this part of Conjecture \ref{samex_conj} is not proved. The best result to date is the following theorem from H\"{a}ggstr\"{o}m and Pemantle (2000).
\begin{theorem}\label{th:pain} For any $d\geq 2$, we have $P^{1,\lambda}(G)=0$ for all but at most countably many values of $\lambda$. \end{theorem}
\noindent We leave it to the reader to decide whether this is a very strong or a very weak result: it is very strong in the sense of showing that infinite coexistence has probability $0$ for (Lebesgue)-almost all $\lambda$, but very weak in the sense that infinite coexistence is not ruled out for any given $\lambda$.
The result may seem a bit peculiar at first sight and we will spend some time explaining where it comes from and where the difficulties arise when one tries to strengthen it. Indeed, as formulated in Conjecture \ref{samex_conj}, the belief is that the set $\{\lambda:P^{1,\lambda}(G)>0\}$ in fact consists of the single point $\lambda=1$, but Theorem \ref{th:pain} only asserts that the set is countable.
First note that, by time-scaling and symmetry, we have $P^{1,\lambda}(G)=P^{1,1/\lambda}(G)$ and hence it is enough to consider $\lambda\leq 1$. An essential ingredient in the proof of Theorem \ref{th:pain} is a coupling of the two-type processes $\{P^{1,\lambda}\}_{\lambda\in(0,1]}$ obtained by associating two independent exponential mean 1 variables $\tau_1(e)$ and $\tau_2'(e)$ to each edge $e\in E_{\mathbb{Z}^d}$ and then letting the type 2 passage time at parameter value $\lambda$ be given by $\tau_2(e)=\lambda^{-1}\tau_2'(e)$ and the type 1 time (for any $\lambda$) by $\tau_1(e)$. Write $Q$ for the probability measure underlying this coupling and let $G^\lambda$ be the event that infinite coexistence occurs at parameter value $\lambda$. Theorem \ref{th:pain} is obtained by showing that \begin{equation} \label{eq:in_the_coupling} \begin{array}{l} \mbox{with $Q$-probability 1 the event $G^\lambda$ occurs} \\ \mbox{for at most one value of $\lambda\in(0,1]$}. \end{array} \end{equation} Hence, $Q(G^\lambda)$ can be positive for at most countably many $\lambda$, and Theorem \ref{th:pain} then follows by noting that $P^{1,\lambda}(G)=Q(G^\lambda)$.
But why is (\ref{eq:in_the_coupling}) true? Let $G^\lambda_i$ be the event that the type $i$ infection grows unboundedly at parameter value $\lambda$. Then the coupling defining $Q$ can be shown to be monotone in the sense that $G^\lambda_1$ is decreasing in $\lambda$ -- that is, if $G^\lambda_1$ occurs then $G^{\lambda'}_1$ occurs for all $\lambda'<\lambda$ as well -- and $G^\lambda_2$ is increasing in $\lambda$. This kind of monotonicity of the coupling is crucial for proving (\ref{eq:in_the_coupling}), as is the following result, which asserts that, on the event that the type 2 infection survives, the total infected set in a two-type process with distribution $P^{1,\lambda}$, where $\lambda<1$, grows to a first approximation like a one-type process with intensity $\lambda$. More precisely, the speed of the growth in the two-type process is determined by the weaker type 2 infection. We take $\tilde{S}_t^i$ to denote the union of all unit cubes centered at points in $S_t^i$ and $A$ is the limiting shape for a one-type process with rate 1.
\begin{theorem}\label{th:svag_bestammer} Consider a two-type process with distribution $P^{1,\lambda}$ for some $\lambda\leq 1$. On the event $G_2$ we have, for any $\varepsilon>0$, that almost surely $$ (1-\varepsilon)\lambda A\subset\frac{\tilde{S}_t^1\cup \tilde{S}_t^2}{t}\subset (1+\varepsilon)\lambda A $$ for large $t$. \end{theorem}
\noindent Theorem \ref{th:pain} follows readily from this result and the monotonicity properties of the coupling $Q$. Indeed, fix $\varepsilon>0$ and suppose $G^\lambda$ occurs. Then Theorem \ref{th:svag_bestammer} guarantees that on level $\lambda$ the type 1 infection is eventually contained in $(1+ \varepsilon)\lambda tA$, a conclusion that extends to all $\lambda'>\lambda$, because increasing the type 2 infection rate does not help type 1. On the other hand, for any $\lambda'>\lambda$ we get on level $\lambda'$ that the union of the two infections will -- again by Theorem \ref{th:svag_bestammer} -- eventually contain $(1- \varepsilon)\lambda' tA$, so by taking $\varepsilon$ sufficiently small we see that the type 1 infection is strangled on level $\lambda'$, implying (\ref{eq:in_the_coupling}), and Theorem \ref{th:pain} follows.
We will not prove Theorem \ref{th:svag_bestammer}, but mention that the hard work in proving it lies in establishing a certain key result (Proposition 2.2 in H\"{a}ggstr\"{o}m and Pemantle (2000)) that asserts that if the strong infection type reaches outside $(1 + \varepsilon) \lambda tA$ infinitely often, then the weak type is doomed. The proof of this uses geometrical arguments, the most important ingredient being a certain spiral construction, emanating from the part of the strong infection reaching beyond $(1 + \varepsilon) \lambda tA$, and designed to allow the strong type to completely surround the weak type before the weak type catches up from inside.
How would one go about to strengthen Theorem \ref{th:pain} and rule out infinite coexistence for all $\lambda \neq 1$? One possibility would be to try to derive a contradiction with Theorem \ref{th:svag_bestammer} from the assumption that the strong infection type grows unboundedly. For instance, intuitively it seems likely that the strong type occupying a positive fraction of the boundary of the infected set would cause the speed of the growth to exceed the speed prescribed by the weak infection type. This type of argument is indeed used in Garet and Marchand (2007) to show, for $d=2$, that on the event of infinite coexistence the fraction of infected sites occupied by the strong infection will tend to $0$ as $t\rightarrow \infty$. This feels like a strong indication that infinite coexistence does not happen.
Another approach to strengthening Theorem \ref{th:pain} in order to obtain the only-if direction of Conjecture \ref{samex_conj} is based on the observation that, since coexistence represents a power balance between the infections, it is reasonable to expect that $P^{1,\lambda}(G)$ decreases as $\lambda$ moves away from 1. We may formulate that intuition as a conjecture:
\begin{conj}\label{conj:monotonitet} For the two-type Richardson model on $\mathbb{Z}^d$ with $d\geq 2$, we have, for $\lambda<\lambda'\in(0,1]$, that $P^{1,\lambda}(G)\leq P^{1,\lambda'}(G)$. \end{conj}
\noindent A confirmation of this conjecture would, in combination with Theorem \ref{th:pain}, clearly establish the only-if direction of Conjecture \ref{samex_conj}: If $P^{1,\lambda}(G)>0$ for some $\lambda<1$, then, according to Conjecture \ref{conj:monotonitet}, we would have $P^{1,\lambda'}(G)>0$ for all $\lambda'\in(\lambda,1]$ as well. But the interval $(\lambda,1]$ is uncountable, yielding a contradiction to Theorem \ref{th:pain}.
Although Conjecture \ref{conj:monotonitet} might seem close to obvious, it has turned out to be very difficult to prove. A natural first attempt would be to use coupling. Consider for instance the coupling $Q$ described above. As pointed out, the events $G^\lambda_1$ and $G^\lambda_2$ that the individual infections grow unboundedly at parameter value $\lambda$ are then monotone in $\lambda$, but one of them is increasing and the other is decreasing, so monotonicity of their intersection $G^\lambda$ does not follow. Hence more sophisticated arguments are needed.
Observing how our colleagues react during seminars and corridor chat, we have noted that it is very tempting to go about trying to prove Conjecture \ref{conj:monotonitet} by abstract and ``easy'' arguments, here meaning arguments that do not involve any specifics about the geometry or graph structure of $\mathbb{Z}^d$. To warn against such attempts, Deijfen and H\"aggstr\"om (2006:2) constructed graphs on which the two-type Richardson model fails to exhibit the monotonicity behavior predicted in Conjecture \ref{conj:monotonitet}. Let us briefly explain the results.
The dynamics of the two-type Richardson model can of course be defined on graphs other than the $\mathbb{Z}^d$ lattice. For a graph $\mathcal{G}$, write Coex$(\mathcal{G})$ for the set of all $\lambda\geq 1$ such that there exists a finite initial configuration $(\xi_1, \xi_2)$ for which the two-type Richardson model with infection intensities $1$ and $\lambda$ started from $(\xi_1, \xi_2)$ yields infinite coexistence with positive probability. Note that, by time-scaling and interchange of the infections, coexistence is possible at parameter value $\lambda$ if and only if it is possible at $\lambda^{-1}$, so no information is lost by restricting to $\lambda\geq 1$. In Deijfen and H\"aggstr\"om (2006:2) examples of graphs $\mathcal{G}$ are given that demonstrate that, among others, the following kinds of coexistence sets Coex$(\mathcal{G})$ are possible:
\begin{itemize} \item[(i)] Coex$(\mathcal{G})$ may be an interval $(a,b)$ with $1<a<b$.
\item[(ii)] For any positive integer $k$ the set Coex$(\mathcal{G})$ may consist of exactly $k$ points.
\item[(iii)] Coex$(\mathcal{G})$ may be countably infinite.
\end{itemize}
\noindent All these phenomena show that the monotonicity suggested in Conjecture \ref{conj:monotonitet} fails for general graphs. However, a reasonable guess is that Conjecture \ref{conj:monotonitet} is true on transitive graphs. Indeed, all counterexamples provided by Deijfen and H\"aggstr\"om are highly non-symmetric (one might even say ugly) with certain parts of the graph being designed specifically with propagation of type 1 in mind, while other parts are meant for type 2. We omit the details.
\section{Unbounded initial configurations} \label{sect:unbounded}
Let us now go back to the $\mathbb{Z}^d$ setting and describe some results from our most recent paper, Deijfen and H\"aggstr\"om (2007), concerning the two-type model with unbounded initial configurations. Roughly, the model will be started from configurations where one of the infections occupies a single site in an infinite ``sea'' of the other type. The dynamics is as before and also the question at issue is the same: can both infection types simultaneously infect infinitely many sites? With both types initially occupying infinitely many sites the answer is (apart from in particularly silly cases) obviously yes, so we will focus on configurations where type 1 starts with infinitely many sites and type 2 with finitely many -- for simplicity only one. The question then becomes whether type 2 is able to survive.
To describe the configurations in more detail, write $(x_1,\ldots,x_d)$ for the coordinates of a point $x\in\mathbb{Z}^d$, and define $\mathcal{H}=\{x:x_1=0\}$ and $\mathcal{L}=\{x:x_1 \leq 0\textrm{ and }x_i=0\textrm{ for }i =2,\ldots, d\}$. We will consider the following starting configurations.
\begin{equation}\label{initial_configurations} \begin{array}{rl} I(\mathcal{H}): & \mbox{all points in $\mathcal{H}\backslash\{{\bf 0}\}$ are type 1 infected and}\\ & {\bf 0} \mbox{ is type 2 infected, and}\\ I(\mathcal{L}): & \mbox{all points in $\mathcal{L}\backslash\{{\bf 0}\}$ are type 1 infected and}\\ & {\bf 0} \mbox{ is type 2 infected.} \end{array} \end{equation}
\noindent Interestingly, it turns out that the set of parameter values for which type 2 is able to grow indefinitely is slightly different for these two configurations. First note that, as before, we may restrict to the case $\lambda_1=1$. Write $P^{1,\lambda}_{\mathcal{H},\mathbf{0}}$ and $P^{1,\lambda}_{\mathcal{L},\mathbf{0}}$ for the distribution of the process started from $I(\mathcal{H})$ and $I(\mathcal{L})$ respectively and with type 2 intensity $\lambda$. The following result, where $G_2$ denotes the event that type 2 grows unboundedly, is proved in Deijfen and H\"aggstr\"om (2007).
\begin{theorem}\label{th:unbounded} For the two-type Richardson model in $d\geq 2$ dimensions, we have
\begin{itemize} \item[\rm{(a)}] $P^{1,\lambda}_{\mathcal{H},{\bf 0}}(G_2)>0$ if and only if $\lambda>1$; \item[\rm{(b)}]$P^{1,\lambda}_{\mathcal{L},{\bf 0}}(G_2)>0$ if and only if $\lambda\geq 1$. \end{itemize} \end{theorem}
\noindent In words, a strictly stronger type 2 infection will be able to survive in both configurations, but, when the infections have the same intensity, type 2 can survive only in the configuration $I(\mathcal{L})$.
The proof of the if-direction of Theorem \ref{th:unbounded} (a) is based on a lemma stating roughly that the speed of a hampered one-type process, living only inside a tube which is bounded in all directions except one, is close to the speed of an unhampered process when the tube is large. For a two-type process started from $I(\mathcal{H})$, this lemma can be used to show that, if the strong type 2 infection at the origin is successful in the beginning of the time course, it will take off along the $x_1$-axis and grow faster than the surrounding type 1 infection inside a tube around the $x_1$-axis, thereby escaping eradication. The same scenario -- that the type 2 infection rushes away along the $x_1$-axis -- can, by different means, be proved to have positive probability in a process with $\lambda=1$ started from $I(\mathcal{L})$. Infinite growth for type 2 when $\lambda<1$ is ruled out by the key proposition from H\"aggstr\"om and Pemantle (2000) mentioned in Section 3. Proving that type 2 cannot survive in a process with $\lambda=1$ started from $I(\mathcal{H})$ is the most tricky part. The idea is basically to divide $\mathbb{Z}^d$ in different levels, the $l$-th level being all sites with $x_1$-coordinate $l$, and then show that the expected number of type 2 infected sites at level $l$ is constant and equal to 1. It then follows from a certain comparison with a one-type process on each level combined with an application of L\'evy's 0-1 law that the number of type 2 infected sites at the $l$-th level converges almost surely to 0 as $l\to\infty$.
Finally we mention a question formulated by Itai Benjamini as well as by an anonymous referee of Deijfen and H\"aggstr\"om (2007). We have seen that, when $\lambda=1$, the type 2 infection at the origin can grow unboundedly from $I(\mathcal{L})$ but not from $I(\mathcal{H})$. It is then natural to ask what happens if we interpolate between these two configurations. More precisely, instead of letting type 1 occupy only the negative $x_1$-axis (as in $I(\mathcal{L})$), we let it occupy a cone of constant slope around the same axis. The question then is what the critical slope is for this cone such that there is a positive probability for type 2 to grow unboundedly. That type 2 cannot survive when the cone occupies the whole left half-space follows from Theorem \ref{th:unbounded}, as this situation is equivalent to starting the process from $I(\mathcal{H})$. It seems likely, as suggested by Itai Benjamini, that this is actually also the critical case, that is, infinite growth for type 2 most likely has positive probability for any smaller type 1 cone. This however remains to be proved.
\section*{References}
\noindent Alexander, K.\ (1993): A note on some rates of convergence in first-passage percolation, \emph{Ann. Appl. Probab.} \textbf{3}, 81-90.
\noindent Benjamini, I., Kalai, G. and Schramm, O.\ (2003): First passage percolation has sublinear distance variation, \emph{Ann. Probab.} \textbf{31}, 1970-1978.
\noindent Bramson, M. and Griffeath, D. (1981): On the Williams-Bjerknes tumour growth model I, \emph{Ann. Probab.} \textbf{9}, 173-185.
\noindent Cox, J.T. and Durrett, R. (1981): Some limit theorems for percolation processes with necessary and sufficient conditions, \emph{Ann. Probab.} \textbf{9}, 583-603.
\noindent Deijfen, M.\ and H\"aggstr\"om, O.\ (2004): Coexistence in a two-type continuum growth model, \emph{Adv. Appl. Probab.} \textbf{36}, 973-980.
\noindent Deijfen, M.\ and H\"aggstr\"om, O.\ (2006:1): The initial configuration is irrelevant for the possibility of mutual unbounded growth in the two-type Richardson model, {\em Comb. Probab. Computing} \textbf{15}, 345-353.
\noindent Deijfen, M.\ and H\"{a}ggstr\"{o}m, O.\ (2006:2): Nonmonotonic coexistence regions for the two-type Richardson model on graphs, \emph{Electr. J. Probab.} \textbf{11}, 331-344.
\noindent Deijfen, M.\ and H\"aggstr\"om, O.\ (2007): The two-type Richardson model with unbounded initial configurations, \emph{Ann. Appl. Probab.}, to appear.
\noindent Deijfen, M., H\"aggstr\"om, O.\ and Bagley, J.\ (2004): A stochastic model for competing growth on $R^d$, \emph{Markov Proc. Relat. Fields} \textbf{10}, 217-248.
\noindent Durrett, R. (1988): \emph{Lecture Notes on Particle Systems and Percolation}, Wadsworth $\&$ Brooks/Cole.
\noindent Durrett, R.\ and Neuhauser, C. (1997): Coexistence results for some competition models \emph{Ann. Appl. Probab.} \textbf{7}, 10-45.
\noindent Eden, M. (1961): A two-dimensional growth process, \emph{Proceedings of the 4th Berkeley symposium on mathematical statistics and probability} vol. \textbf{IV}, 223-239, University of California Press.
\noindent Ferrari, P., Martin, J.\ and Pimentel, L. (2006), Roughening and inclination of competition interfaces, \emph{Phys Rev E} \textbf{73}, 031602 (4 p).
\noindent Garet, O.\ and Marchand, R.\ (2005): Coexistence in two-type first-passage percolation models, {\em Ann. Appl. Probab.} {\bf 15}, 298-330.
\noindent Garet, O.\ and Marchand, R.\ (2006): Competition between growths governed by Bernoulli percolation, \emph{Markov Proc. Relat. Fields} \textbf{12}, 695-734.
\noindent Garet, O.\ and Marchand, R.\ (2007): First-passage competition with different speeds: positive density for both species is impossible, preprint, ArXiV math.PR/0608667.
\noindent Gou\'er\'e, J.-B. (2007) Shape of territories in some competing growth models, \emph{Ann. Appl. Probab.}, to appear.
\noindent H\"{a}ggstr\"{o}m, O.\ and Pemantle, R.\ (1998): First passage percolation and a model for competing spatial growth, \emph{J. Appl. Probab.} \textbf{35}, 683-692.
\noindent H\"{a}ggstr\"{o}m, O.\ and Pemantle, R.\ (2000): Absence of mutual unbounded growth for almost all parameter values in the two-type Richardson model, \emph{Stoch. Proc. Appl.} \textbf{90}, 207-222.
\noindent Hammersley, J.\ and Welsh D.\ (1965): First passage percolation, subadditive processes, stochastic networks and generalized renewal theory, \emph{1965 Proc. Internat. Res. Semin., Statist. Lab., Univ. California, Berkeley}, 61-110, Springer.
\noindent Hoffman, C.\ (2005:1): Coexistence for Richardson type competing spatial growth models, {\em Ann. Appl. Probab.} {\bf 15}, 739-747.
\noindent Hoffman, C.\ (2005:2): Geodesics in first passage percolation, preprint, ArXiV math.PR/0508114.
\noindent Kesten, H.\ (1973): Discussion contribution, \emph{Ann. Probab.} {\bf 1}, 903.
\noindent Kesten, H.\ (1993): On the speed of convergence in first-passage percolation, \emph{Ann. Appl. Probab.} \textbf{3}, 296-338.
\noindent Kingman, J.F.C.\ (1968): The ergodic theory of subadditive stochastic processes, \emph{J. Roy. Statist. Soc. Ser. B} \textbf{30}, 499-510.
\noindent Kordzakhia, G. and Lalley, S. (2005): A two-species competition model on $Z^d$, \emph{Stoch. Proc. Appl.} \textbf{115}, 781-796.
\noindent Lalley, S. (2003): Strict convexity of the limit shape in first-passage percolation, {\em Electr. Comm. Probab.} {\bf 8}, 135--141.
\noindent Licea, C.\ and Newman, C.\ (1996): Geodesics in two-dimensional first-passage percolation, \emph{Ann. Probab.} \textbf{24}, 399-410.
\noindent Neuhauser, C.\ (1992): Ergodic theorems for the multitype contact process, \emph{Probab. Theory Relat. Fields} \textbf{91}, 467-506.
\noindent Newman, C. (1995): A surface view of first passage percolation, \emph{Proc. Int. Congr. Mathematicians} \textbf{1,2} (Zurich 1994), 1017-1023.
\noindent Newman, C.\ and Piza, M.\ (1995): Divergence of shape fluctuations in two dimensions, \emph{Ann. Probab.} \textbf{23}, 977-1005.
\noindent Pimentel, L. (2007): Multitype shape theorems for first passage percolation models, \emph{Adv. Appl. Probab.} \textbf{39}, 53-76.
\noindent Richardson, D.\ (1973): Random growth in a tessellation, \emph{Proc. Cambridge Phil. Soc.} \textbf{74}, 515-528.
\noindent Williams, T. and Bjerknes R. (1972): Stochastic model for abnormal clone spread through epithelial basal layer, \emph{Nature} \textbf{236}, 19-21.
\end{document} |
\begin{document}
\begin{sloppypar}
\title{The $g$-extra connectivity of the strong product of paths and cycles\footnote{The research is supported by National Natural Science Foundation of China (11861066).}}
\author{Qinze Zhu, Yingzhi Tian\footnote{Corresponding author. E-mail: [email protected] (Q. Zhu); [email protected] (Y. Tian).} \\ {\small College of Mathematics and System Sciences, Xinjiang University, Urumqi, Xinjiang, 830046, PR China}}
\date{} \maketitle
\noindent{\bf Abstract } Let $G$ be a connected graph and $g$ be a non-negative integer. The $g$-extra connectivity of $G$ is the minimum cardinality of a set of vertices in $G$, if it exists, whose removal disconnects $G$ and leaves every component with more than $g$ vertices. The strong product $G_1 \boxtimes G_2$ of graphs $G_1=(V_{1}, E_{1})$ and $G_2=(V_{2}, E_{2})$ is the graph with vertex set $V(G_1 \boxtimes G_2)=V_{1} \times V_{2}$, where two distinct vertices $(x_{1}, x_{2}), (y_{1}, y_{2}) \in V_{1} \times V_{2}$ are adjacent in $G_1 \boxtimes G_2$ if and only if $x_{i}=y_{i}$ or $x_{i} y_{i} \in E_{i}$ for $i=1, 2$. In this paper, we obtain the $g$-extra connectivity of the strong product of two paths, the strong product of a path and a cycle, and the strong product of two cycles.
\noindent{\bf Keywords:} Conditional connectivity; $g$-extra connectivity; Strong product; Paths; Cycles
\section{Introduction}
Let $G$ be a graph with vertex set $V(G)$ and edge set $E(G)$. The $minimum$ $degree$ of $G$ is denoted by $\delta(G)$. A $vertex$ $cut$ in $G$ is a set of vertices whose deletion makes $G$ disconnected. The $connectivity$ $\kappa(G)$ of the graph $G$ is the minimum order of a vertex cut in $G$ if $G$ is not a complete graph; otherwise $\kappa(G)=|V(G)|-1$. Usually, the topology structure of an interconnection network can be modeled by a graph $G$, where $V(G)$ represents the set of nodes and $E(G)$ represents the set of links connecting nodes in the network. Connectivity is used to measure the reliability of the network, while it always underestimates the resilience of large networks.
To overcome this deficiency, Harary \cite{Harary} proposed the concept of conditional connectivity. For a graph-theoretic property $\mathcal{P}$, the $conditional$ $connectivity$ $\kappa(G; \mathcal{P})$ is the minimum cardinality of a set of vertices whose deletion disconnects $G$ and every remaining component has property $\mathcal{P}$. Later, F{\`{a}}brega and Fiol \cite{Fabrega} introduced the concept of $g$-extra connectivity, which is a kind of conditional connectivity. Let $g$ be a non-negative integer. A subset $S\subseteq V(G)$ is called a $g$-$extra$ $cut$ if $G-S$ is disconnected and each component of $G-S$ has at least $g+1$ vertices. The $g$-$extra$ $connectivity$ of $G$, denoted by $\kappa_{g}(G)$, is the minimum order of a $g$-extra cut if $G$ has at least one $g$-extra cut; otherwise define $\kappa_{g}(G)=\infty$. If $S$ is a $g$-extra cut in $G$ with order $\kappa_g(G)$, then we call $S$ a $\kappa_g$-$cut$. Since $\kappa_0(G)=\kappa(G)$ for any connected graph $G$ that is not a complete graph, the $g$-extra connectivity can be seen as a generalization of the traditional connectivity. The authors in \cite{Chang} pointed out that there is no polynomial-time algorithm for computing $\kappa_g$ for a general graph. Consequently, much of the work has been focused on the computing of the $g$-extra connectivity of some given graphs, see [1,4,6,8,10-11,16-21] for examples.
The most studied four standard graph products are the Cartesian product, the direct product, the strong product and the lexicographic product. The $Cartesian$ $product$ of two graphs $G_1$ and $G_2$, denoted by $G_1 \square G_2$, is defined on the vertex sets $V(G_1) \times V(G_2)$, and $(x_1, y_1)(x_2, y_2)$ is an edge in $G_1 \square G_2$ if and only if one of the following is true: ($i$) $x_1=x_2$ and $y_1 y_2 \in E(G_2)$; ($ii$) $y_1=y_2$ and $x_1 x_2 \in E(G_1)$.
The $strong$ $product$ $G_1 \boxtimes G_2$ of $G_1$ and $G_2$ is the graph with the vertex set $V(G_1 \boxtimes G_2)=V(G_1) \times V(G_2)$, where two vertices $(x_{1}, y_{1})$, $(x_{2}, y_{2}) \in V(G_1) \times V(G_2)$ are adjacent in $G_1 \boxtimes G_2$ if and only if one of the following holds: ($i$) $x_1=x_2$ and $y_1 y_2 \in E(G_2)$; ($ii$) $y_1=y_2$ and $x_1 x_2 \in E(G_1)$; ($iii$) $x_1 x_2 \in E(G_1)$ and $y_1 y_2 \in E(G_2)$.
{\v{S}}pacapan \cite{Spacapan1} proved that for any nontrivial graphs $G_1$ and $G_2$, $\kappa(G_1 \square G_2)=\min \{\kappa(G_1)|V(G_2)|, \kappa(G_2)|V(G_1)|, \delta(G_1 \square G_2)\}$. L{\"{u}}, Wu, Chen and Lv \cite{Lu} provided bounds for the 1-extra connectivity of the Cartesian product of two connected graphs. Tian and Meng \cite{Tian} determined the exact values of the 1-extra connectivity of the Cartesian product for some class of graphs. In \cite{Chen}, Chen, Meng, Tian and Liu further studied the 2-extra connectivity and the 3-extra connectivity of the Cartesian product of graphs.
Bre{\v{s}}ar and {\v{S}}pacapan \cite{Bresar} determined the edge-connectivity of the strong products of two connected graphs. For the connectivity of the strong product graphs, {\v{S}}pacapan \cite{Spacapan2} obtained $Theorem\ \ref{2}$ in the following. Let $S_i$ be a vertex cut in $G_i$ for $i=1,2$, and let $A_i$ be a component of $ G_i-S_i$ for $i=1,2$. Following the definitions in \cite{Spacapan2}, $I=S_1\times V_2$ or $I=V_1\times S_2$ is called an $I$-set in $G_1\boxtimes G_2$, and $L=(S_1\times A_2)\cup(S_1\times S_2)\cup(A_1\times S_2)$ is called an $L$-set in $G_1\boxtimes G_2$.
\begin{theorem}\label{2} (\cite{Spacapan2}) Let $G_1$ and $G_2$ be two connected graphs. Then every minimum vertex cut in $G_1\boxtimes G_2$ is either an $I$-set or an $L$-set in $G_1\boxtimes G_2$. \end{theorem}
Motivated by the results above, we will study the $g$-extra connectivity of the strong product graphs. In the next section, we introduce some definitions and lemmas. In Section 3, we will give the $g$-extra connectivity of the strong product of two paths, the strong product of a path and a cycle, and the strong product of two cycles. Conclusion will be given in Section 4.
\section{Preliminary}
For graph-theoretical terminology and notations not defined here, we follow \cite{Bondy}. Let $G$ be a graph with vertex set $V(G)$ and edge set $E(G)$. The $neighborhood$ of a vertex $u$ in $G$ is $N_G(u)=\{v\in V(G)\ |\ v\;\text{is}\; \text{adjacent}\;\text{to} \;\text{the}\; \text{vertex}\; u\}$. Let $A$ be a subset of $V(G)$, the neighborhood of $A$ in $G$ is $N_{G}(A)=\{v \in V(G) \backslash A \ |\ v\;\text{is}\; \text{adjacent}\;\text{to}\; \text{a}\; \text{vertex}\; \text{in}\; A\}$. The subgraph induced by $A$ in $G$ is denoted by $G[A]$. We use $P_n$ to denote the path with order $n$ and $C_n$ to denote the cycle with order $n$.
Let $G_1$ and $G_2$ be two graphs. Define two natural projections $p_1$ and $p_2$ on $V(G_1)\times V(G_2)$ as follows: $p_1(x,y)=x$ and $p_2(x,y)=y$ for any $(x,y)\in V(G_1)\times V(G_2)$. The subgraph induced by $\{(u, y)|u\in V(G_1)\}$ in $G_1\boxtimes G_2$, denoted by $G_{1y}$, is called a $G_1$-$layer$ in $G_1\boxtimes G_2$ for each vertex $y\in V(G_2)$. Analogously, the subgraph induced by $\{(x, v)|v\in V(G_2)\}$ in $G_1\boxtimes G_2$, denoted by ${}_{x}G_2$, is called a $G_2$-$layer$ in $G_1\boxtimes G_2$ for each vertex $x\in V(G_1)$. Clearly, a $G_1$-layer in $G_1\boxtimes G_2$ is isomorphic to $G_1$, and a $G_2$-layer in $G_1\boxtimes G_2$ is isomorphic to $G_2$.
Let $S\subseteq V(G_1\boxtimes G_2)$. For any $x\in V(G_1)$, denote $S\cap V({}_{x}G_2)$ by ${}_{x}S$, and analogously, for any $y\in V(G_2)$, denote $S\cap V(G_{1y})$ by $S_{y}$. Furthermore, we use $\overline{{}_{x}S}=V({}_{x}G_{2})\setminus {}_{x}S$ and $\overline{S_y}=V(G_{1y})\setminus S_y$. By a similar argument as the proof of the second paragraph of $Theorem$ 3.2 in \cite{Spacapan2}, we can obtain the following lemma.
\begin{lemma}\label{1}
Let $G$ be the strong product $G_1\boxtimes G_2$ of two connected graphs $G_1$ and $G_2$, and let $g$ be a non-negative integer. Assume $G$ has $g$-extra cuts and $S$ is a $\kappa_g$-cut of $G$.
(i) If ${}_{x}S\neq\emptyset$ for some $x\in V(G_1)$, then $|{}_{x}S|\geq \kappa(G_2)$.
(ii) If $S_{y}\neq \emptyset$ for some $y\in V(G_2)$, then $|S_{y}|\geq \kappa(G_1)$.
\end{lemma}
\noindent{\bf Proof.} ($i$) Suppose ${ }_{x} S \neq \emptyset$ for some $x\in V(G_1)$. Note that this is obviously true if ${ }_{x} S=V({ }_{x} G_{2})$. If $\overline{{}_{x}S}$ is not contained in one component of $G-S$, then clearly the induced subgraph $G[\overline{{}_{x}S}]$ is not connected, and hence $|{ }_{x} S| \geq \kappa(G_{2})$. If $\overline{{}_{x}S}$ is contained in one component of $G-S$, then choose an arbitrary fixed vertex $(x, y)$ from ${}_{x}S$. Let $H_{1}$ be the component of $G-S$ such that $\overline{{}_{x}S} \subseteq V(H_{1})$ and let $H_{2}=G-(S\cup V(H_{1}))$. Since $S$ is a $\kappa_{g}$-cut, we find that the vertex $(x, y) \in{ }_{x} S$ has a neighbor $(x_{1}, y_{1}) \in V(H_{2})$. Since $(x_{1}, y_{1}) \in V(H_{2})$, we find that $(x, y_{1}) \in {}_{x} S$, moreover, for any $(x, u) \in \overline{{}_{x}S}$, we find that $(x, u)$ is not adjacent to $(x, y_{1})$, otherwise, $(x, u)$ would be adjacent to $(x_{1}, y_{1})$, which is not true since those two vertices are in different components of $G-S$. Thus if $R={ }_{x} S\setminus\{(x, y_{1})\}$, then $p_{2}(R)$ is a vertex cut in $G_{2}$ and one component of $G_{2}-p_{2}(R)$ is $\{y_{1}\}$. Thus $|{ }_{x} S|=|R|+1 \geq \kappa(G_{2})+1$. Analogously, we can get $|S_{y}| \geq \kappa(G_{1})$ if ($ii$) holds. $\Box$
\section{Main results}
Let $H$ be a subgraph of $G_1\boxtimes G_2$. For the sake of simplicity, we use ${}_{x}H$ instead of ${}_{x}V(H)$ to represent $V(H)\cap V({}_{x}G_2)$ for any $x\in V(G_1)$ and $H_{y}$ to represent $V(H)\cap V(G_{1y})$ for any $y\in V(G_2)$. Since $\kappa_g(P_1\boxtimes P_n)=1$ for $g\leq \lfloor\frac{n-1}{2}\rfloor-1$ and $\kappa_g(P_2\boxtimes P_n)=2$ for $g\leq 2\lfloor\frac{n-1}{2}\rfloor-1$, we assume $m,n\geq3$ in the following theorem.
\begin{theorem}\label{5}
Let $g$ be a non-negative integer and $G=P_m\boxtimes P_n$, where $m,n\geq 3$. If $g\leq \min\{n\lfloor\frac{m-1}{2}\rfloor-1, m\lfloor\frac{n-1}{2}\rfloor-1 \}$, then $\kappa_g(G)=\min\{m, n, \lceil 2\sqrt{g+1}\ \rceil+1\}$.
\end{theorem}
\noindent{\bf Proof. }Denote $P_m=x_1x_2\dots x_m$ and $P_n=y_1y_2\dots y_n$. Let $S_1=V(P_m)\times \{y_{\lfloor\frac{n-1}{2}\rfloor+1}\}$ and $S_2=\{x_{\lfloor\frac{m-1}{2}\rfloor+1}\}\times V(P_n)$. Since $g\leq min\{n\lfloor\frac{m-1}{2}\rfloor-1, m\lfloor\frac{n-1}{2}\rfloor-1 \}$, we verify that $S_1$ and $S_2$ are two $g$-extra cuts of $G$. Thus $\kappa_g(G)\leq$ min$\{m, n\}$. If $\lceil 2\sqrt{g+1}\ \rceil+1\geq$ min$\{m, n\}$, then $\kappa_g(G)\leq$ min$\{m, n, \lceil 2\sqrt{g+1}\ \rceil+1\}$. If $\lceil 2\sqrt{g+1}\ \rceil+1<$ min$\{m, n\}$, then let $S_3=(J_1\times K_2)\cup (J_1\times J_2)\cup (K_1\times J_2)$, where
$J_1=\{x_{\lceil \sqrt{g+1}\ \rceil+1}\}$, $K_1=\{x_1, x_2, \dots, x_{\lceil \sqrt{g+1}\ \rceil}\}$, $J_2=\{y_{\lceil\frac{g+1}{\lceil \sqrt{g+1}\ \rceil}\rceil+1}\}$ and $K_2=\{y_1, y_2, \dots, y_{\lceil\frac{g+1}{\lceil \sqrt{g+1}\ \rceil}\rceil}\}$. It is routine to verify that $S_3$ is a $g$-extra cut of $G$. By $|S_3|=\lceil \sqrt{g+1}\ \rceil+\lceil\frac{g+1}{\lceil \sqrt{g+1}\ \rceil}\rceil+1=\lceil 2\sqrt{g+1}\ \rceil+1$, we have $\kappa_g(G)\leq \lceil 2\sqrt{g+1}\ \rceil+1$. Therefore, $\kappa_g(G)\leq$ min$\{m, n, \lceil 2\sqrt{g+1}\ \rceil+1\}$ holds.
Now, it is sufficient to prove $\kappa_g(G)\geq$ min$\{m, n, \lceil 2\sqrt{g+1}\ \rceil+1\}$. Assume $S$ is a $\kappa_g$-cut of $G$. We consider two cases in the following.
\noindent{\bf Case 1. } ${}_{x}S\neq\emptyset$ for all $x\in V(P_m)$, or $S_y\neq \emptyset$ for all $y\in V(P_n)$.
Assume ${}_{x}S\neq\emptyset$ for all $x\in V(P_m)$. By $Lemma$ 2.1, $|S|= \sum_{x\in V(P_m)}|{}_{x}S|\geq \kappa(P_n)|V(P_m)|=m$. Analogously, if $S_y\neq \emptyset$ for all $y\in V(P_n)$, then $|S|=\sum_{y\in V(P_n)}|S_y|\geq \kappa(P_m)|V(P_n)|=n$.
\noindent{\bf Case 2. } There exist a vertex $x_a\in V(P_m)$ and a vertex $y_b\in V(P_n)$ such that ${}_{x_a}S=S_{y_b}=\emptyset$.
By the assumption ${}_{x_a}S=S_{y_b}=\emptyset$, we know $V({}_{x_a}G_2)$ and $V(G_{1y_b})$ are contained in a component $H'$ of $G-S$. Let $H$ be another component of $G-S$. Let $p_1(V(H))=\{x_{s+1},x_{s+2},\cdots,x_{s+k}\}$ and $p_2(V(H))=\{y_{t+1},y_{t+2},\cdots,y_{t+h}\}$. Without loss of generality, assume $s+k<a$ and $t+h< b$. Clearly, $|V(H)|\leq kh$. Since $S$ is a $\kappa_g$-cut, we have $N_G(V(H))=S$ and $|V(H)|\geq g+1$. If we can prove $|N_G(V(H))|\geq k+h+1$, then $\kappa_g(G)=|S|=|N_G(V(H))|\geq k+h+1\geq2\sqrt{kh}+1\geq2\sqrt{g+1}+1$ and the theorem holds. Thus, we only need to show that $|N_G(V(H))|\geq k+h+1$ in the remaining proof.
\begin{figure}
\caption{An illustration for the proof of Theorem 3.1.}
\label{1}
\end{figure}
Let $(x_{s+i}, y_{d_i})$ be the vertex in ${}_{x_{s+i}}H$ such that $d_i$ is maximum for $i=1,\cdots, k$, and let $(x_{r_j}, y_{t+j})$ be the vertex in $H_{y_{t+j}}$ such that $r_j$ is maximum for $j=1,\cdots, h$. Denote $D=\{(x_{s+1}, y_{d_1}),\cdots,(x_{s+k}, y_{d_k})\}$ and $R=\{(x_{r_1}, y_{t+1}),\cdots,(x_{r_h}, y_{t+h})\}$. For the convenience of counting, we will construct an injective mapping $f$ from $D\cup R$ to $N_G(V(H))\setminus\{(x_{s+k+1}, y_{d_k+1})\}$. Although $D$ and $R$ may have common elements, we consider the elements in $D$ and $R$ to be different in defining the mapping $f$ below.
First, the mapping $f$ on $D$ is defined as follows. \begin{center} $f((x_{s+i}, y_{d_i}))=(x_{s+i}, y_{d_i+1})$ for $i=1,\cdots, k$. \end{center} Denote $F_1=\{(x_{s+1}, y_{d_1+1}),\cdots,(x_{s+k}, y_{d_k+1})\}$.
Second, for each vertex $(x_{r_j}, y_{t+j})$ satisfying $(x_{r_j+1}, y_{t+j})\notin F_1$, define $f((x_{r_j}, y_{t+j}))=(x_{r_j+1}, y_{t+j})$.
If $(x_{r_j}, y_{t+j})$ satisfies $(x_{r_j+1}, y_{t+j})\notin F_1$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{r_{j'}}, y_{t+j'})$ satisfying $(x_{r_{j'}+1}, y_{t+j'})\in F_1$, we give the definition as follows. By the definitions of $D$ and $R$, we have $(x_{r_{j'}+1+i}, y_{t+j'+j})\notin V(H)$ for all $i, j\geq 0$, and $\{(x_{r_{j'}},y_{t+j'}),\cdots,(x_{r_{j'}}, y_{d_{r_{j'}-s}})\}\subseteq R$ (see Figure 1 for an illustration). Now, we define $f((x_{r_{j'}}, y_{t+j'}))=(x_{r_{j'}+1}, y_{t+j'+1})$ and change the images of $(x_{r_{j'}}, y_{t+j'+1}),\cdots,(x_{r_{j'}}, y_{d_{r_{j'}-s}})$ to $(x_{r_{j'}+1}, y_{t+j'+2}),\cdots,(x_{r_{j'}+1}, y_{d_{r_{j'}-s}+1})$, respectively. The images of $f$ on $R$ are well defined.
Finally, we have an injective mapping $f$ from $D\cup R$ to $N_G(V(H))\setminus\{(x_{s+k+1}, y_{d_k+1})\}$. Then $\kappa_g(G)=|S|=|N_G(V(H))|\geq |D|+|R|+1\geq k+h+1\geq2\sqrt{kh}+1\geq2\sqrt{g+1}+1$. The proof is thus complete. $\Box$
Since $\kappa_g(C_3\boxtimes P_n)=3$ for $g\leq 3\lfloor\frac{n-1}{2}\rfloor-1$, we assume $m\geq4$ in the following theorem.
\begin{theorem}\label{6} Let $g$ be a non-negative integer and $G=C_m\boxtimes P_n$, where $m\geq4$, $n\geq 3$. If $g\leq \min\{n\lfloor\frac{m-2}{2}\rfloor-1, m\lfloor\frac{n-1}{2}\rfloor-1 \}$, then $\kappa_g(G)= \min\{m, 2n, \lceil2\sqrt{2(g+1)}\rceil+2\}$. \end{theorem}
\noindent{\bf Proof.} Denote $C_m=x_0x_1\dots x_{m-1} x_{m}$ (where $x_0=x_{m}$) and $P_n=y_1y_2\dots y_n$. The addition of the subscripts of $x$ in the proof is modular $m$ arithmetic. Let $S_1=V(C_m)\times\{y_{\lfloor\frac{n-1}{2}\rfloor+1}\}$ and $S_2=\{x_{0}, x_{\lfloor\frac{m-2}{2}\rfloor+1}\}\times V(P_n)$. Since $g\leq min\{n\lfloor\frac{m-2}{2}\rfloor-1, m\lfloor\frac{n-1}{2}\rfloor-1 \}$, it is routine to check that $S_1$ and $S_2$ are two $g$-extra cuts of $G$. Thus $\kappa_g(G)\leq$ min$\{m, 2n\}$. If $\lceil2\sqrt{2(g+1)}\rceil+2\geq$ min$\{m, 2n\}$, then $\kappa_g(G)\leq$ min$\{m, 2n, \lceil2\sqrt{2(g+1)}\rceil+2\}$. If $\lceil2\sqrt{2(g+1)}\rceil+2<$ min$\{m, 2n\}$, then let $S_3=(J_1\times K_2)\cup (J_1\times J_2)\cup (K_1\times J_2)$, where
$J_1=\{x_{0}, x_{\lceil\sqrt{2(g+1)}\rceil+1} \}$, $K_1=\{x_1, x_2, \dots, x_{\lceil\sqrt{2(g+1)}\rceil}\}$, $J_2=\{y_{\lceil\frac{2(g+1)}{\lceil \sqrt{2(g+1)}\rceil}\rceil+1}\}$ and $K_2=\{y_1, y_2, \dots, y_{\lceil\frac{2(g+1)}{\lceil \sqrt{2(g+1)}\rceil}\rceil}\}$. It is routine to verify that $S_3$ is a $g$-extra cut of $G$. By $|S_3|=\lceil \sqrt{2(g+1)}\rceil+\lceil\frac{2(g+1)}{\lceil \sqrt{2(g+1)}\rceil}\rceil+2=\lceil 2\sqrt{2(g+1)}\rceil+2$, we have $\kappa_g(G)\leq \lceil2\sqrt{2(g+1)}\rceil+2$. Therefore, $\kappa_g(G)\leq$ min$\{m, 2n, \lceil2\sqrt{2(g+1)}\rceil+2\}$.
Now, it is sufficient to prove $\kappa_g(G)\geq$ min$\{m, 2n, \lceil2\sqrt{2(g+1)}\rceil+2\}$. Assume $S$ is a $\kappa_g$-cut of $G$. We consider two cases in the following.
\noindent{\bf Case 1. } ${}_{x}S\neq\emptyset$ for all $x\in V(C_m)$, or $S_y\neq \emptyset$ for all $y\in V(P_n)$.
Assume ${}_{x}S\neq\emptyset$ for all $x\in V(C_m)$. By Lemma~2.1, $|S|= \sum_{x\in V(C_m)}|{}_{x}S|\geq \kappa(P_n)|V(C_m)|=m$. Analogously, if $S_y\neq \emptyset$ for all $y\in V(P_n)$, then $|S|=\sum_{y\in V(P_n)}|S_y|\geq \kappa(C_m)|V(P_n)|=2n$.
\noindent{\bf Case 2. } There exist a vertex $x_a\in V(C_m)$ and a vertex $y_b\in V(P_n)$ such that ${}_{x_a}S=S_{y_b}=\emptyset$.
By the assumption ${}_{x_a}S=S_{y_b}=\emptyset$, we know $V({}_{x_a}G_2)$ and $V(G_{1y_b})$ are contained in a component $H'$ of $G-S$. Let $H$ be another component of $G-S$. Let $p_1(V(H))=\{x_{s+1},x_{s+2},\cdots,x_{s+k}\}$ and $p_2(V(H))=\{y_{t+1},y_{t+2},\cdots,y_{t+h}\}$. Without loss of generality, assume $s+k<a$ and $t+h< b$. Clearly, $|V(H)|\leq kh$. Since $S$ is a $\kappa_g$-cut, we have $N_G(V(H))=S$ and $|V(H)|\geq g+1$. If we can prove $|N_G(V(H))|\geq k+2h+2$, then $\kappa_g(G)=|S|=|N_G(V(H))|\geq k+2h+2\geq2\sqrt{2kh}+2\geq2\sqrt{2(g+1)}+2$ and the theorem holds. Thus, we only need to show that $|N_G(V(H))|\geq k+2h+2$ in the remaining proof.
Let $(x_{s+i}, y_{d_i})$ be the vertex in ${}_{x_{s+i}}H$ such that $d_i$ is maximum for $i=1,\cdots, k$, and let $(x_{l_j}, y_{t+j})$ and $(x_{r_j}, y_{t+j})$ be the vertices in $H_{y_{t+j}}$ such that $l_j$ and $r_j$ are listed in the foremost and in the last along the sequence $(a+1,\cdots,m-1,0,1,\cdots,a-1)$, respectively, for $j=1,\cdots, h$. Denote $D=\{(x_{s+1}, y_{d_1}),\cdots,(x_{s+k}, y_{d_k})\}$, $L=\{(x_{l_1}, y_{t+1}),\cdots,(x_{l_h}, y_{t+h})\}$ and $R=\{(x_{r_1}, y_{t+1}),\cdots,(x_{r_h}, y_{t+h})\}$. For the convenience of counting, we will construct an injective mapping $f$ from $D\cup L\cup R$ to $N_G(V(H))\setminus\{(x_{s}, y_{d_1+1}),(x_{s+k+1}, y_{d_k+1})\}$. Although $D$, $L$ and $R$ may have common elements, we consider the elements in $D$, $L$ and $R$ to be different in defining the mapping $f$ below.
First, the mapping $f$ on $D$ is defined as follows. \begin{center} $f((x_{s+i}, y_{d_i}))=(x_{s+i}, y_{d_i+1})$ for $i=1,\cdots, k$. \end{center} Denote $F_1=\{(x_{s+1}, y_{d_1+1}),\cdots,(x_{s+k}, y_{d_k+1})\}$.
Second, for each vertex $(x_{r_j}, y_{t+j})$ satisfying $(x_{r_j+1}, y_{t+j})\notin F_1$, define $f((x_{r_j}, y_{t+j}))=(x_{r_j+1}, y_{t+j})$.
If $(x_{r_j}, y_{t+j})$ satisfies $(x_{r_j+1}, y_{t+j})\notin F_1$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{r_{j'}}, y_{t+j'})$ satisfying $(x_{r_{j'}+1}, y_{t+j'})\in F_1$, we give the definition as follows. By the definitions of $D$ and $R$, we have $\{(x_{r_{j'}},y_{t+j'}),\cdots,(x_{r_{j'}}, y_{d_{r_{j'}-s}})\}\subseteq R$. Now, we define $f((x_{r_{j'}}, y_{t+j'}))=(x_{r_{j'}+1}, y_{t+j'+1})$ and change the images of $(x_{r_{j'}}, y_{t+j'+1}),\cdots,(x_{r_{j'}}, y_{d_{r_{j'}-s}})$ to $(x_{r_{j'}+1}, y_{t+j'+2}),\cdots,(x_{r_{j'}+1}, y_{d_{r_{j'}-s}+1})$, respectively. The mapping $f$ on $R$ is now well defined.
Third, for each vertex $(x_{l_j}, y_{t+j})$ satisfying $(x_{l_j-1}, y_{t+j})\notin F_1$, define $f((x_{l_j}, y_{t+j}))=(x_{l_j-1}, y_{t+j})$.
If $(x_{l_j}, y_{t+j})$ satisfies $(x_{l_j-1}, y_{t+j})\notin F_1$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{l_{j'}}, y_{t+j'})$ satisfying $(x_{l_{j'}-1}, y_{t+j'})\in F_1$, we give the definition as follows. By the definitions of $D$ and $L$, we have $\{(x_{l_{j'}},y_{t+j'}),\cdots,(x_{l_{j'}}, y_{d_{l_{j'}-s}})\}\subseteq L$. Now, we define $f((x_{l_{j'}}, y_{t+j'}))=(x_{l_{j'}-1}, y_{t+j'+1})$ and change the images of $(x_{l_{j'}}, y_{t+j'+1}),\cdots,(x_{l_{j'}}, y_{d_{l_{j'}-s}})$ to $(x_{l_{j'}-1}, y_{t+j'+2}),\cdots,(x_{l_{j'}-1}, y_{d_{l_{j'}-s}+1})$, respectively. The definition of $f$ on $L$ is complete.
Finally, we construct an injective mapping $f$ from $D\cup L\cup R$ to $N_G(V(H))\setminus\{(x_{s}, y_{d_1+1}),(x_{s+k+1}, y_{d_k+1})\}$. Then $\kappa_g(G)=|S|=|N_G(V(H))|\geq |D|+|L|+|R|+2\geq k+2h+2\geq2\sqrt{2kh}+2\geq2\sqrt{2(g+1)}+2$. The proof is thus complete. $\Box$
Since $\kappa_g(C_3\boxtimes C_n)=6$ for $g\leq 3\lfloor\frac{n-2}{2}\rfloor-1$, we assume $m, n\geq4$ in the following theorem.
\begin{theorem} Let $g$ be a non-negative integer and $G=C_m\boxtimes C_n$, where $m, n\geq 4$. If $g\leq \min\{n\lfloor\frac{m-2}{2}\rfloor-1, m\lfloor\frac{n-2}{2}\rfloor-1 \}$, then $\kappa_g(G)=\min\{2m, 2n, \lceil 4\sqrt{g+1}\ \rceil+4\}$. \end{theorem}
\noindent{\bf Proof.} Denote $C_m=x_0x_1\dots x_{m-1} x_{m}$ (where $x_0=x_{m}$) and $C_n=y_0y_1\dots y_n$ (where $y_0=y_{n}$). The addition of the subscripts of $x$ in the proof is modular $m$ arithmetic, and the addition of the subscripts of $y$ in the proof is modular $n$ arithmetic. Let $S_1=V(C_m)\times\{y_{0}, y_{\lfloor\frac{n-2}{2}\rfloor+1}\}$ and $S_2=\{x_{0}, x_{\lfloor\frac{m-2}{2}\rfloor+1}\}\times V(C_n)$. Since $g\leq min\{n\lfloor\frac{m-2}{2}\rfloor-1, m\lfloor\frac{n-2}{2}\rfloor-1 \}$, we can check that $S_1$ and $S_2$ are two $g$-extra cuts of $G$. Thus $\kappa_g(G)\leq$ min$\{2m, 2n\}$. If $\lceil 4\sqrt{g+1}\ \rceil+4\geq$ min$\{2m, 2n\}$, then $\kappa_g(G)\leq$ min$\{2m, 2n, \lceil 4\sqrt{g+1}\ \rceil+4\}$. If $\lceil 4\sqrt{g+1}\ \rceil+4<$ min$\{2m, 2n\}$, then let $S_3=(J_1\times K_2)\cup (J_1\times J_2)\cup (K_1\times J_2)$, where
$J_1=\{x_{0}, x_{\lceil \sqrt{g+1}\ \rceil+1}\}$, $K_1=\{x_1, x_2, \dots, x_{\lceil \sqrt{g+1}\ \rceil}\}$, $J_2=\{y_0,y_{\lceil\frac{g+1}{\lceil \sqrt{g+1}\ \rceil} \rceil+1}\}$ and $K_2=\{y_1, y_2, \dots, y_{\lceil\frac{g+1}{\lceil \sqrt{g+1}\ \rceil}\rceil}\}$. It is routine to verify that $S_3$ is a $g$-extra cut of $G$. By $|S_3|=2\lceil \sqrt{g+1}\rceil+2\lceil\frac{g+1}{\lceil \sqrt{g+1}\ \rceil}\rceil+4=\lceil 4\sqrt{g+1}\rceil+4$, we have $\kappa_g(G)\leq \lceil 4\sqrt{g+1}\ \rceil+4$. Therefore, $\kappa_g(G)\leq$ min$\{2m, 2n, \lceil 4\sqrt{g+1}\ \rceil+4\}$.
Now, it is sufficient to prove $\kappa_g(G)\geq$ min$\{2m, 2n, \lceil 4\sqrt{g+1}\ \rceil+4\}$. Assume $S$ is a $\kappa_g$-cut of $G$. We consider two cases in the following.
\noindent{\bf Case 1. } ${}_{x}S\neq\emptyset$ for all $x\in V(C_m)$, or $S_y\neq \emptyset$ for all $y\in V(C_n)$.
Assume ${}_{x}S\neq\emptyset$ for all $x\in V(C_m)$. By Lemma~2.1, $|S|= \sum_{x\in V(C_m)}|{}_{x}S|\geq \kappa(C_n)|V(C_m)|=2m$. Analogously, if $S_y\neq \emptyset$ for all $y\in V(C_n)$, then $|S|=\sum_{y\in V(C_n)}|S_y|\geq \kappa(C_m)|V(C_n)|=2n$.
\noindent{\bf Case 2. } There exist a vertex $x_a\in V(C_m)$ and a vertex $y_b\in V(C_n)$ such that ${}_{x_a}S=S_{y_b}=\emptyset$.
By the assumption ${}_{x_a}S=S_{y_b}=\emptyset$, we know $V({}_{x_a}G_2)$ and $V(G_{1y_b})$ are contained in a component $H'$ of $G-S$. Let $H$ be another component of $G-S$. Let $p_1(V(H))=\{x_{s+1},x_{s+2},\cdots,x_{s+k}\}$ and $p_2(V(H))=\{y_{t+1},y_{t+2},\cdots,y_{t+h}\}$. Without loss of generality, assume $s+k<a$ and $t+h< b$. Clearly, $|V(H)|\leq kh$. Since $S$ is a $\kappa_g$-cut, we have $N_G(V(H))=S$ and $|V(H)|\geq g+1$. If we can prove $|N_G(V(H))|\geq 2k+2h+4$, then $\kappa_g(G)=|S|=|N_G(V(H))|\geq 2k+2h+4\geq4\sqrt{kh}+4\geq4\sqrt{g+1}+4$ and the theorem holds. Thus, we only need to show that $|N_G(V(H))|\geq 2k+2h+4$ in the remaining proof.
Let $(x_{s+i}, y_{t_i})$ and $(x_{s+i}, y_{d_i})$ be the vertices in ${}_{x_{s+i}}H$ such that $t_i$ and $d_i$ are listed in the foremost and in the last along the sequence $(b+1,\cdots,n-1,0,1,\cdots,b-1)$, respectively, for $i=1,\cdots, k$, and let $(x_{l_j}, y_{t+j})$ and $(x_{r_j}, y_{t+j})$ be the vertices in $H_{y_{t+j}}$ such that $l_j$ and $r_j$ are listed in the foremost and in the last along the sequence $(a+1,\cdots,m-1,0,1,\cdots,a-1)$, respectively, for $j=1,\cdots, h$. Denote $D=\{(x_{s+1}, y_{d_1}),\cdots,(x_{s+k}, y_{d_k})\}$, $T=\{(x_{s+1}, y_{t_1}),\cdots,(x_{s+k}, y_{t_k})\}$, $L=\{(x_{l_1}, y_{t+1}),\cdots,(x_{l_h}, y_{t+h})\}$ and $R=\{(x_{r_1}, y_{t+1}),\cdots,(x_{r_h}, y_{t+h})\}$. For the convenience of counting, we will construct an injective mapping $f$ from $D\cup T\cup L\cup R$ to $N_G(V(H))\setminus\{(x_{s}, y_{d_1+1}), (x_{s}, y_{t_1-1}), (x_{s+k+1}, y_{d_k+1}), (x_{s+k+1}, y_{t_k-1})\}$. Although $D$, $T$, $L$ and $R$ may have common elements, we consider the elements in $D$, $T$, $L$ and $R$ to be different in defining the mapping $f$ below.
First, the mapping $f$ on $D$ is defined as follows. \begin{center} $f((x_{s+i}, y_{d_i}))=(x_{s+i}, y_{d_i+1})$ for $i=1,\cdots, k$. \end{center} Denote $F_1=\{(x_{s+1}, y_{d_1+1}),\cdots,(x_{s+k}, y_{d_k+1})\}$.
Second, the mapping $f$ on $T$ is defined as follows. \begin{center} $f((x_{s+i}, y_{t_i}))=(x_{s+i}, y_{t_i-1})$ for $i=1,\cdots, k$. \end{center} Denote $F_2=\{(x_{s+1}, y_{t_1-1}),\cdots,(x_{s+k}, y_{t_k-1})\}$.
Third, for each vertex $(x_{r_j}, y_{t+j})$ satisfying $(x_{r_j+1}, y_{t+j})\notin F_1$, define $f((x_{r_j}, y_{t+j}))=(x_{r_j+1}, y_{t+j})$.
If $(x_{r_j}, y_{t+j})$ satisfies $(x_{r_j+1}, y_{t+j})\notin F_1$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{r_{j'}}, y_{t+j'})$ satisfying $(x_{r_{j'}+1}, y_{t+j'})\in F_1$, we define as follows. By the definitions of $D$ and $R$, we have $\{(x_{r_{j'}},y_{t+j'}),\cdots,(x_{r_{j'}}, y_{d_{r_{j'}-s}})\}\subseteq R$. Now, we define $f((x_{r_{j'}}, y_{t+j'}))=(x_{r_{j'}+1}, y_{t+j'+1})$ and change the images of $(x_{r_{j'}}, y_{t+j'+1}),\cdots,(x_{r_{j'}}, y_{d_{r_{j'}-s}})$ to $(x_{r_{j'}+1}, y_{t+j'+2}),\cdots,(x_{r_{j'}+1}, y_{d_{r_{j'}-s}+1})$, respectively.
Fourth, for each vertex $(x_{r_j}, y_{t+j})$ satisfying $(x_{r_j+1}, y_{t+j})\notin F_2$, define $f((x_{r_j}, y_{t+j}))=(x_{r_j+1}, y_{t+j})$.
If $(x_{r_j}, y_{t+j})$ satisfies $(x_{r_j+1}, y_{t+j})\notin F_2$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{r_{j'}}, y_{t+j'})$ satisfying $(x_{r_{j'}+1}, y_{t+j'})\in F_2$, we define as follows. By the definitions of $T$ and $R$, we have $\{(x_{r_{j'}},y_{t+j'}),\cdots,(x_{r_{j'}}, y_{d_{t_{j'}-s}})\}\subseteq R$. Now, we define $f((x_{r_{j'}}, y_{t+j'}))=(x_{r_{j'}+1}, y_{t+j'-1})$ and change the images of $(x_{r_{j'}}, y_{t+j'-1}),\cdots,(x_{r_{j'}}, y_{d_{t_{j'}-s}})$ to $(x_{r_{j'}+1}, y_{t+j'-2}),\cdots,(x_{r_{j'}+1}, y_{d_{r_{j'}-s}-1})$, respectively.
Note that the proof of four paragraphs above gives the definition of the mapping $f$ on $R$. In the following proof, we will give the definition of the mapping $f$ on $L$.
Fifth, for each vertex $(x_{l_j}, y_{t+j})$ satisfying $(x_{l_j-1}, y_{t+j})\notin F_1$, define $f((x_{l_j}, y_{t+j}))=(x_{l_j-1}, y_{t+j})$.
If $(x_{l_j}, y_{t+j})$ satisfies $(x_{l_j-1}, y_{t+j})\notin F_1$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{l_{j'}}, y_{t+j'})$ satisfying $(x_{l_{j'}-1}, y_{t+j'})\in F_1$, we define as follows. By the definitions of $D$ and $L$, we have $\{(x_{l_{j'}},y_{t+j'}),\cdots,(x_{l_{j'}}, y_{d_{l_{j'}-s}})\}\subseteq L$. Now, we define $f((x_{l_{j'}}, y_{t+j'}))=(x_{l_{j'}-1}, y_{t+j'+1})$ and change the images of $(x_{l_{j'}}, y_{t+j'+1}),\cdots,(x_{l_{j'}}, y_{d_{l_{j'}-s}})$ to $(x_{r_{j'}-1}, y_{t+j'+2}),\cdots,(x_{l_{j'}-1}, y_{d_{r_{j'}-s}+1})$, respectively.
Sixth, for each vertex $(x_{l_j}, y_{t+j})$ satisfying $(x_{l_j-1}, y_{t+j})\notin F_2$, define $f((x_{l_j}, y_{t+j}))=(x_{l_j-1}, y_{t+j})$.
If $(x_{l_j}, y_{t+j})$ satisfies $(x_{l_j-1}, y_{t+j})\notin F_2$ for any $j\in\{1,\cdots, h\}$, then we are done. Otherwise, for each $(x_{l_{j'}}, y_{t+j'})$ satisfying $(x_{l_{j'}-1}, y_{t+j'})\in F_2$, we define as follows. By the definitions of $L$ and $T$, we have $\{(x_{l_{j'}},y_{t+j'}),\cdots,(x_{l_{j'}}, y_{d_{t_{j'}-s}})\}\subseteq L$. Now, we define $f((x_{l_{j'}}, y_{t+j'}))=(x_{l_{j'}-1}, y_{t+j'-1})$ and change the images of $(x_{l_{j'}}, y_{t+j'-1}),\cdots,(x_{l_{j'}}, y_{d_{t_{j'}-s}})$ to $(x_{l_{j'}-1}, y_{t+j'-2}),\cdots,(x_{l_{j'}-1}, y_{d_{t_{j'}-s}-1})$, respectively.
Finally, we construct an injective mapping $f$ from $D\cup T\cup L\cup R$ to $N_G(V(H))\setminus\{(x_{s}, y_{d_1+1}), (x_{s}, y_{t_1-1}), (x_{s+k+1}, y_{d_k+1}), (x_{s+k+1}, y_{t_k-1})\}$. Then $\kappa_g(G)=|S|=|N_G(V(H))|\geq |D|+|T|+|L|+|R|+4\geq 2k+2h+4\geq4\sqrt{kh}+4\geq4\sqrt{g+1}+4$. The proof is thus complete. $\Box$
\section{Conclusion}
Graph products are used to construct large graphs from small ones. The strong product is one of the four most studied graph products. As a generalization of traditional connectivity, $g$-extra connectivity can be seen as a refined parameter to measure the reliability of interconnection networks. There is no polynomial-time algorithm to compute the $g\ (\geq1)$-extra connectivity for a general graph. In this paper, we determined the $g$-extra connectivity of the strong product of two paths, the strong product of a path and a cycle, and the strong product of two cycles. In the future work, we would like to investigate the $g$-extra connectivity of the strong product of two general graphs.
\end{sloppypar}
\end{document} |
\begin{document}
\title{The variety generated by all the ordinal sums of perfect MV-chains} \author{Matteo Bianchi\\{\small Dipartimento di Informatica e Comunicazione}\\{\small Università degli Studi di Milano}\\{\footnotesize \texttt{\href{mailto:[email protected]}{[email protected]}}}} \date{} \maketitle \begin{quote} \small \it ``To my friend Erika, and to her invaluable talent in finding surprisingly deep connections among poetry, art, philosophy and logic.'' \end{quote} \begin{abstract} We present the logic BL$_\text{Chang}$, an axiomatic extension of BL (see \cite{haj}) whose corresponding algebras form the smallest variety containing all the ordinal sums of perfect MV-chains. We will analyze this logic and the corresponding algebraic semantics in the propositional and in the first-order case. As we will see, moreover, the variety of BL$_\text{Chang}$-algebras will be strictly connected to the one generated by Chang's MV-algebra (that is, the variety generated by all the perfect MV-algebras): we will also give some new results concerning these last structures and their logic. \end{abstract} \section{Introduction} MV-algebras were introduced in \cite{chang} as the algebraic counterpart of \L ukasie\-wicz (infinite-valued) logic. During the years these structures have been intensively studied (for a historical overview, see \cite{cig}): the book \cite{mun} is a reference monograph on this topic.
Perfect MV-algebras were first studied in \cite{dnl2} as a refinement of the notion of local MV-algebras: this analysis was expanded in \cite{dnl1}, where it was also shown that the class of perfect MV-algebras $Perf(MV)$ does not form a variety, and the variety generated by $Perf(MV)$ is also generated by Chang's MV-algebra (see \Cref{sec:mvcha} for the definition). Further studies about this variety and the associated logic have been done in \cite{bdg, bdg1}.
On the other side, Basic Logic BL and its correspondent variety, BL-algebras, were introduced in \cite{haj}: \L ukasiewicz logic results to be one of the axiomatic extensions of BL and MV-algebras can also be defined as a subclass of BL-algebras. Moreover, the connection between MV-algebras and BL-algebras is even stronger: in fact, as shown in \cite{am}, every ordinal sum of MV-chains is a BL-chain.
For these reasons one can ask if there is a variety of BL-algebras whose chains are (isomorphic to) ordinal sums of perfect MV-chains: even if the answer to this question is negative, we will present the smallest variety (whose correspondent logic is called BL$_\text{Chang}$) containing this class of BL-chains.
As we have anticipated in the abstract, there is a connection between the variety of BL$_\text{Chang}$-algebras and the one generated by Chang's MV-algebra. In fact the first one is axiomatized (over the variety of BL-algebras) with an equation that, over MV-algebras, is equivalent to the one that axiomatizes the variety generated by Chang's MV-algebra: however, the two equations are \emph{not} equivalent, over BL. \paragraph*{} The paper is structured as follows: in \Cref{sec:prelim} we introduce the necessary logical and algebraic background: moreover some basic results about perfect MV-algebras and other structures will be listed. In \Cref{sec:blcha} we introduce the main theme of the article: the variety of BL$_\text{Chang}$ and the associated logic. The analysis will be done in the propositional case: completeness results, algebraic and logical properties and also some results about the variety generated by Chang's MV-algebra. We conclude with \Cref{sec:first}, where we will analyze the first-order versions of BL$_\text{Chang}$ and \L$_\text{Chang}$: for the first one the completeness results will be much more negative. \paragraph{} To conclude, we list the main results. \begin{itemize} \item BL$_\text{Chang}$ enjoys the finite strong completeness (but not the strong one) w.r.t. $\omega\mathcal{V}$, where $\omega\mathcal{V}$ represents the ordinal sum of $\omega$ copies of the disconnected rotation of the standard cancellative hoop. \item \L$_\text{Chang}$ (the logic associated to the variety generated by Chang's MV-algebra) enjoys the finite strong completeness (but not the strong one) w.r.t. $\mathcal{V}$, $\mathcal{V}$ being the disconnected rotation of the standard cancellative hoop. \item There are two BL-chains $\mathcal{A}, \mathcal{B}$ that are strongly complete w.r.t., respectively \L$_\text{Chang}$ and BL$_\text{Chang}$. \item Every \L$_\text{Chang}$-chain that is strongly complete w.r.t. 
\L$_\text{Chang}$ is also stron\-gly complete w.r.t \L$_\text{Chang}\forall$. \item There is no BL$_\text{Chang}$-chain to be complete w.r.t. BL$_\text{Chang}\forall$. \end{itemize} \section{Preliminaries}\label{sec:prelim} \subsection{Basic concepts} Basic Logic BL was introduced by P. H\'{a}jek in \cite{haj}. It is based over the connectives $\{\&,\to,\bot\}$ and a denumerable set of variables $VAR$. The formulas are defined inductively, as usual (see \cite{haj} for details).
Other derived connectives are the following.
{\em negation}: $\neg \varphi \mathrel{\mathop:}= \varphi\to\bot$; {\em verum} or {\em top}: $\top \mathrel{\mathop:}=\neg\bot$; {\em meet}: $\varphi\land\psi \mathrel{\mathop:}= \varphi\&(\varphi\to\psi)$; {\em join}: $\varphi\vee\psi \mathrel{\mathop:}= ((\varphi\to\psi)\to\psi)\land((\psi\to\varphi)\to\varphi)$. \paragraph*{} \noindent BL is axiomatized as follows. \begin{align} \tag{A1}&(\varphi \rightarrow \psi)\rightarrow ((\psi\rightarrow \chi)\rightarrow(\varphi\rightarrow \chi))\\ \tag{A2}&(\varphi\&\psi)\rightarrow \varphi\\ \tag{A3}&(\varphi\&\psi)\rightarrow(\psi\&\varphi)\\ \tag{A4}&(\varphi\&(\varphi\to\psi))\to(\psi\& (\psi\to\varphi))\\ \tag{A5a}&(\varphi\rightarrow(\psi\rightarrow\chi))\rightarrow((\varphi\&\psi)\rightarrow \chi)\\ \tag{A5b}&((\varphi\&\psi)\rightarrow \chi)\rightarrow(\varphi\rightarrow(\psi\rightarrow\chi))\\ \tag{A6}&((\varphi\rightarrow\psi)\rightarrow\chi)\rightarrow(((\psi\rightarrow\varphi)\rightarrow\chi)\rightarrow\chi)\\ \tag{A7}&\bot\rightarrow\varphi \end{align} {\em Modus ponens} is the only inference rule: \begin{equation} \tag{MP}\frac{\varphi\quad\varphi\to\psi}{\psi}. \end{equation} Among the extensions of BL (logics obtained from it by adding other axioms) there is the well known \L ukasiewicz (infinitely-valued) logic \L, that is, BL plus \begin{equation} \tag*{(INV)}\neg\neg\varphi\to\varphi. \end{equation} On \L ukasiewicz logic we can also define a strong disjunction connective (in the following sections, we will introduce a strong disjunction connective, for BL, that will be proved to be equivalent to the following, over \L) \begin{equation*} \varphi\curlyvee\psi\mathrel{\mathop:}=\neg(\neg\varphi\&\neg\psi). \end{equation*} The notations $\varphi^n$ and $n\varphi$ will indicate $\underbrace{\varphi\&\dots\&\varphi}_{n\text{ times}}$ and $\underbrace{\varphi\curlyvee\dots\curlyvee\varphi}_{n\text{ times}}$.
Given an axiomatic extension L of BL, a formula $\varphi$ and a theory $T$ (a set of formulas), the notation $T\vdash_L\varphi$ indicates that there is a proof of $\varphi$ from the axioms of L and the ones of $T$. The notion of proof is defined as in the classical case (see \cite{haj}).
\end{definition} In every MV-algebra we define the algebraic equivalent of $\curlyvee$, that is \begin{equation*} x\oplus y\mathrel{\mathop:}=\sim(\sim x * \sim y). \end{equation*} The notations (where $x$ is an element of some BL-algebra) $x^n$ and $nx$ will indicate $\underbrace{x*\dots *x}_{n\text{ times}}$ and $\underbrace{x\oplus\dots\oplus x}_{n\text{ times}}$.
Given a BL-algebra $\mathcal{A}$, the notion of $\mathcal{A}$-evaluation is defined in a truth-functional way (starting from a map $v:\, VAR\to A$, and extending it to formulas), for details see \cite{haj}.
Consider a BL-algebra $\mathcal{A}$, a theory $T$ and a formula $\varphi$. With $\mathcal{A}\models \varphi$ ($\mathcal{A}$ is a model of $\varphi$) we indicate that $v(\varphi)=1$, for every $\mathcal{A}$-evaluation $v$; $\mathcal{A}\models T$ denotes that $\mathcal{A}\models \psi$, for every $\psi\in T$. Finally, the notation $T\models_\mathcal{A}\varphi$ means that if $\mathcal{A}\models T$, then $\mathcal{A}\models \varphi$.
A BL-algebra $\mathcal{A}$ is called an L-algebra, where L is an axiomatic extension of BL, whenever $\mathcal{A}$ is a model for all the axioms of L. \begin{definition} Let L be an axiomatic extension of BL and $K$ a class of L-algebras. We say that L is strongly complete (respectively: finitely strongly complete, complete) with respect to $K$ if for every set $T$ of formulas (respectively, for every finite set $T$ of formulas, for $T=\emptyset$) and for every formula $\varphi$ we have \begin{equation*} T\vdash_L\varphi\quad \text{iff}\quad T\models_K\varphi. \end{equation*} \end{definition} \subsection{Perfect MV-algebras, hoops and disconnected rotations}\label{sec:mvcha} We recall that Chang's \emph{MV}-algebra (\cite{chang}) is a BL-algebra of the form \begin{equation*} C=\left \langle \{a_n:\ n\in\mathbb{N}\}\cup\{b_n:\ n\in\mathbb{N}\}, *, \Rightarrow, \sqcap,\sqcup, b_0, a_0\right \rangle. \end{equation*} Here, for each $n,m\in \mathbb{N}$, it holds that $b_n<a_m$, and, if $n<m$, then $a_m<a_n,\ b_n<b_m$; moreover $a_0=1,\ b_0=0$ (the top and the bottom element).
The operation $*$ is defined as follows, for each $n,m\in \mathbb{N}$: \begin{equation*} b_n*b_m=b_0,\ b_n*a_m=b_{\max(0,n-m)},\ a_n*a_m=a_{n+m}. \end{equation*} \begin{definition}[\cite{dnl2}] Let $\mathcal{A}$ be an MV-algebra and let $x\in\mathcal{A}$: with $ord(x)$ we mean the least (positive) natural $n$ such that $x^n=0$. If there is no such $n$, then we set $ord(x)=\infty$. \begin{itemize} \item An MV-algebra is called \emph{local}\footnote{Usually, the local MV-algebras are defined as MV-algebras having a unique (proper) maximal ideal. In \cite{dnl2}, however, it is shown that the two definitions are equivalent. We have preferred the other definition since it shows in a more transparent way that perfect MV-algebras are particular cases of local MV-algebras.} if for every element $x$ it holds that \\$ord(x)<\infty$ or $ord(\sim x)<\infty$. \item An MV-algebra is called \emph{perfect} if for every element $x$ it holds that $ord(x)<\infty$ iff $ord(\sim x)=\infty$. \end{itemize} \end{definition} An easy consequence of this definition is that every perfect MV-algebra cannot have a negation fixpoint.
With $Perfect(MV)$ and $Local(MV)$ we will indicate the class of perfect and local MV-algebras. Moreover, given a BL-algebra $\mathcal{A}$, with $\mathbf{V}(\mathcal{A})$ we will denote the variety generated by $\mathcal{A}$. \begin{theorem}[\cite{dnl2}] Every MV-chain is local. \end{theorem} Clearly there are local MV-algebras that are not perfect: $[0,1]_\text{\L}$ is an example.
Now, in \cite{dnl1} it is shown that \begin{theorem}\label{teo:perfcha} \begin{itemize} \item[] \item $\mathbf{V}(C)=\mathbf{V}(Perfect(MV))$, \item $Perfect(MV)=Local(MV)\cap\mathbf{V}(C)$. \end{itemize} \end{theorem} It follows that the class of chains in $\mathbf{V}(C)$ coincides with the one of perfect MV-chains. Moreover \begin{theorem}[\cite{dnl1}]\label{teo:chaeq} An MV-algebra is in the variety $\mathbf{V}(C)$ iff it satisfies the equation $(2x)^2=2(x^2)$. \end{theorem} As shown in \cite{bdg}, the logic correspondent to this variety is axiomatized as {\L} plus $(2\varphi)^2\leftrightarrow 2(\varphi^2)$: we will call it {\L}$_\text{Chang}$. \paragraph*{} We now recall some results about hoops \begin{definition}[\cite{f, bf}] A \emph{hoop} is a structure $\mathcal{A}=\langle A, * ,\Rightarrow ,1\rangle $ such that $\langle A, * ,1\rangle $ is a commutative monoid, and $\Rightarrow $ is a binary operation such that \[ x\Rightarrow x=1,\hspace{0.5cm}x\Rightarrow (y\Rightarrow z)=(x * y)\Rightarrow z\hspace{0.5cm}\mathrm{and}\hspace{0.5cm}x * (x\Rightarrow y)=y * (y\Rightarrow x). \] \end{definition}
In any hoop, the operation $\Rightarrow $ induces a partial order $\le $ defined by $x\le y$ iff $x\Rightarrow y=1$. Moreover, hoops are precisely the partially ordered commutative integral residuated monoids (pocrims) in which the meet operation $\sqcap$ is definable by $x\sqcap y=x * (x\Rightarrow y)$. Finally, hoops satisfy the following divisibility condition: \begin{equation} \tag*{(div)}\text{If } x \le y, \text{ then there is an element } z\text{ such that }z * y=x. \end{equation} We recall a useful result. \begin{definition} Let $\mathcal{A}$ and $\mathcal{B}$ be two algebras of the same language. Then we say that \begin{itemize} \item $\mathcal{A}$ is a partial subalgebra of $\mathcal{B}$ if $A\subseteq B$ and the operations of $\mathcal{A}$ are the ones of $\mathcal{B}$ restricted to $A$. Note that $A$ may fail to be closed under these operations (in this case these last ones will be undefined over some elements of $A$): in this sense $\mathcal{A}$ is a partial subalgebra. \item $\mathcal{A}$ is partially embeddable into $\mathcal{B}$ when every finite partial subalgebra of $\mathcal{A}$ is embeddable into $\mathcal{B}$. Generalizing this notion to classes of algebras, we say that a class $K$ of algebras is partially embeddable into a class $M$ if every finite partial subalgebra of a member of $K$ is embeddable into a member of $M$. \end{itemize} \end{definition}
Let $\mathcal{A}$ be a bounded hoop with minimum $a$: with $\mathcal{A}^+$ we mean the (partial) subalgebra of $\mathcal{A}$ defined over the universe $A^+=\{x\in A:\, x>x\Rightarrow a\}$.
A hoop is Wajsberg iff it satisfies the equation $(x\Rightarrow y)\Rightarrow y=(y\Rightarrow x)\Rightarrow x$.
A hoop is cancellative iff it satisfies the equation $x=y\Rightarrow(x*y)$. \end{definition}
\begin{proposition}[\cite{f, bf, afm}]\label{prop:canc} Every cancellative hoop is Wajsberg. Totally ordered cancellative hoops coincide with unbounded totally ordered Wajsberg hoops, whereas bounded Wajsberg hoops coincide with (the $0$-free reducts of) MV-algebras. \end{proposition} We now recall a construction introduced in \cite{jen} (and also used in \cite{eghm, neg}), called \emph{disconnected rotation}.
\begin{definition} Let $\mathcal{A}$ be a cancellative hoop. We define an algebra, $\mathcal{A}^*$, called the \emph{disconnected rotation} of $\mathcal{A}$, as follows. Let $\mathcal{A}\times\{0\}$ be a disjoint copy of A. For every $a\in A$ we write $a'$ instead of $\langle a, 0\rangle$. Consider $\left \langle A'= \{a' : a \in A\}, \leq\right \rangle$ with the inverse order and let $A^*\mathrel{\mathop:}= A\cup A'$. We extend these orderings to an order in $A^*$ by putting $a' < b$ for every $a,b\in A$. Finally, we take the following operations in $A^*$: $1\mathrel{\mathop:}= 1_\mathcal{A}$, $0\mathrel{\mathop:}= 1'$, $\sqcap_{\mathcal{A}^*}, \sqcup_{\mathcal{A}^*}$ as the meet and the join with respect to the order over $A^*$. Moreover, \begin{align*} \sim_{\mathcal{A}^*} a\mathrel{\mathop:}=&\begin{cases} a'&\text{if }a\in A\\ b &\text{if }a=b'\in A'
\end{cases}\\ a*_{\mathcal{A}^*}b\mathrel{\mathop:}=&\begin{cases} a*_\mathcal{A} b&\text{if }a, b\in A\\ \sim_{\mathcal{A}^*}(a\Rightarrow_{\mathcal{A}} \sim_{\mathcal{A}^*}b)&\text{if }a\in A, b\in A'\\ \sim_{\mathcal{A}^*}(b\Rightarrow_{\mathcal{A}} \sim_{\mathcal{A}^*}a)&\text{if }a\in A', b\in A\\ 0&\text{if }a, b\in A'
\end{cases}\\ a\Rightarrow_{\mathcal{A}^*}b\mathrel{\mathop:}=&\begin{cases} a\Rightarrow_\mathcal{A} b&\text{if }a, b\in A\\ \sim_{\mathcal{A}^*}(a *_{\mathcal{A}^*} \sim_{\mathcal{A}^*}b)&\text{if }a\in A, b\in A'\\ 1&\text{if }a\in A', b\in A\\ (\sim_{\mathcal{A}^*}b) \Rightarrow_{\mathcal{A}} (\sim_{\mathcal{A}^*}a)&\text{if }a, b\in A'.
\end{cases} \end{align*} \end{definition}
\begin{theorem}[{\cite[Theorem 9]{neg}}]\label{teo:perfrot} Let $\mathcal{A}$ be an MV-algebra. The following are equivalent: \begin{itemize}
\item $\mathcal{A}$ is a perfect MV-algebra.
\item A is isomorphic to the disconnected rotation of a cancellative hoop. \end{itemize} \end{theorem} To conclude the section, we present the definition of ordinal sum. \begin{definition}[\cite{am}] Let $\langle I,\le \rangle $ be a totally ordered set with minimum $0$. For all $i\in I$, let $\mathcal{A}_{i}$ be a hoop such that for $i\ne j$, $ A_{i}\cap A_{j}=\{1\}$, and assume that $\mathcal{A}_0$ is bounded. Then $\bigoplus_{i\in I}{\mathcal{A}}_{i}$ (the \emph{ordinal sum} of the family $({\mathcal{A}}_{i})_{i\in I}$) is the structure whose base set is $ \bigcup_{i\in I}A_{i}$, whose bottom is the minimum of $\mathcal{A}_0$, whose top is $1$, and whose operations are \begin{align*} x\Rightarrow y&=\begin{cases} x\Rightarrow ^{{\mathcal{A}}_{i}}y & \mathrm{if} \,\,x,y\in A_{i} \\ y & \mathrm{if}\,\, \exists i>j(x\in A_{i}\,\,\mathrm{and}\,\,y\in A_{j}) \\ 1 & \mathrm{if}\,\,\exists i<j(x\in A_{i}\setminus \{1\}\,\,\mathrm{and }\,\,y\in A_{j}) \end{cases}\\ x* y&=\begin{cases} x * ^{{\mathcal{A}}_{i}}y & \mathrm{if}\,\,x,y\in A_{i} \\ x & \mathrm{if}\,\,\exists i<j(x\in A_{i}\setminus\{1\},\,\,y\in A_{j})\\ y & \mathrm{if}\,\,\exists i<j(y\in A_{i}\setminus\{1\},\,x\in A_{j}) \end{cases} \end{align*} When defining the ordinal sum $\bigoplus_{i\in I}{\mathcal{A}}_{i}$ we will tacitly assume that whenever the condition $A_{i}\cap A_{j}=\left\{ 1\right\} $ is not satisfied for all $i,j\in I$ with $i\neq j$, we will replace the $\mathcal{A}_{i}$ by isomorphic copies satisfying such condition. Moreover if all $\mathcal{A}_i$'s are isomorphic to some $\mathcal{A}$, then we will write $I\mathcal{A}$, instead of $\bigoplus_{i \in I}\mathcal{A}_{i}$. Finally, the ordinal sum of two hoops $\mathcal{A}$ and $\mathcal{B}$ will be denoted by $\mathcal{A}\oplus\mathcal{B}$. \end{definition} Note that, since every bounded Wajsberg hoop is the $0$-free reduct of an MV-algebra, then the previous definition also works with these structures. 
\begin{theorem}[{\cite[theorem 3.7]{am}}]\label{teo:am} Every BL-chain is isomorphic to an ordinal sum whose first component is an MV-chain and the others are totally ordered Wajsberg hoops. \end{theorem} Note that in \cite{bus} it is presented an alternative and simpler proof of this result. \section{The variety of BL$_\text{Chang}$-algebras}\label{sec:blcha} Consider the following connective \begin{equation*} \varphi\veebar\psi\mathrel{\mathop:}=((\varphi \rightarrow (\varphi \& \psi ))\rightarrow\psi )\land ((\psi \rightarrow (\varphi \& \psi ))\rightarrow \varphi ) \end{equation*} Call $\uplus$ the algebraic operation, over a BL-algebra, corresponding to $\veebar$; we have that \begin{lemma}\label{lem:disgeq} In every MV-algebra the following equation holds \begin{equation*} x\uplus y=x\oplus y. \end{equation*} \end{lemma} \begin{proof} It is easy to check that $x\uplus y=x\oplus y$, over $[0,1]_{MV}$, for every $x,y\in [0,1]$. \end{proof} We now analyze this connective in the context of Wajsberg hoops. \begin{proposition}\label{prop:disj} Let $\mathcal{A}$ be a linearly ordered Wajsberg hoop. Then \begin{itemize} \item If $\mathcal{A}$ is unbounded (i.e. a cancellative hoop), then $x\uplus y=1$, for every $x,y\in\mathcal{A}$. \item If $\mathcal{A}$ is bounded, let $a$ be its minimum. Then, by defining $\sim x\mathrel{\mathop:}= x\Rightarrow a$ and $x\oplus y=\sim(\sim x*\sim y)$ we have that $x\oplus y=x\uplus y$, for every $x,y\in\mathcal{A}$ \end{itemize} \end{proposition} \begin{proof} An easy check. \end{proof} Now, since the variety of cancellative hoops is generated by its linearly ordered members (see \cite{eghm}), then we have that \begin{corollary}\label{cor:disjcanc} The equation $x\uplus y=1$ holds in every cancellative hoop. \end{corollary} We now characterize the behavior of $\uplus$ for the case of BL-chains. \begin{proposition}\label{prop:disg} Let $\mathcal{A}=\bigoplus_{i\in I}\mathcal{A}_i$ be a BL-chain. 
Then \begin{equation*} x\uplus y=\begin{cases} x\oplus y,&\text{if }x,y\in \mathcal{A}_i\text{ and }\mathcal{A}_i\text{ is bounded }\\ 1,&\text{if }x,y\in \mathcal{A}_i\text{ and }\mathcal{A}_i\text{ is unbounded }\\ \max(x,y),&\text{otherwise}. \end{cases} \end{equation*} for every $x,y\in\mathcal{A}$. \end{proposition} \begin{proof} If $x,y$ belong to the same component of $\mathcal{A}$, then the result follows from \Cref{lem:disgeq} and \Cref{prop:disj}. For the case in which $x$ and $y$ belong to different components of $\mathcal{A}$, this is a direct computation. \end{proof} \begin{remark} From the previous proposition we can argue that $\uplus$ is a good approximation, for BL, of what that $\oplus$ represents for MV-algebras. Note that a similar operation was introduced in \cite{abm}: the main difference with respect to $\uplus$ is that, when $x$ and $y$ belong to different components of a BL-chain, then the operation introduced in \cite{abm} holds $1$. \end{remark} In the following, for every element $x$ of a BL-algebra, with the notation $\overline{n}x$ we will denote $\underbrace{x\uplus\dots\uplus x}_{n\ times}$; analogously $\overline{n}\varphi$ means $\underbrace{\varphi\veebar\dots\veebar\varphi}_{n\ times}$. \begin{definition} We define BL$_\text{Chang}$ as the axiomatic extension of BL, obtained by adding \begin{equation} \tag{cha}(\overline{2}\varphi)^2\leftrightarrow \overline{2}(\varphi^2). \end{equation} That is, writing it in extended form \begin{equation*} (\varphi^2\to(\varphi^2\&\varphi^2)\to\varphi^2)\leftrightarrow((\varphi\to\varphi^2)\to\varphi)^2. \end{equation*} \end{definition} Clearly the variety corresponding to BL$_\text{Chang}$ is given by the class of BL-algebras satisfying the equation $(\overline{2}x)^2=\overline{2}(x^2)$.
Moreover, \begin{definition}\label{def:pseudoperf} We will call pseudo-perfect Wajsberg hoops those Wajsberg hoops satisfying the equation $(\overline{2}x)^2=\overline{2}(x^2)$. \end{definition} \begin{remark}\label{rem:2} Thanks to \Cref{lem:disgeq} we have that \begin{equation*} \vdash_\text{\L}((\overline{2}\varphi)^2\leftrightarrow \overline{2}(\varphi^2))\leftrightarrow ((2\varphi)^2\leftrightarrow 2(\varphi^2)), \end{equation*} that is, if we add $(\overline{2}\varphi)^2\leftrightarrow \overline{2}(\varphi^2)$ or $(2\varphi)^2\leftrightarrow 2(\varphi^2)$ to {\L}, then we obtain the same logic \L$_\text{Chang}$.
These formulas, however, are not equivalent over BL: see \Cref{rem:p0} for details. \end{remark} \begin{theorem}\label{teo:chainpswh} Every totally ordered pseudo-perfect Wajsberg hoop is a totally ordered cancellative hoop or (the $0$-free reduct of) a perfect MV-chain.
More generally, the variety of pseudo-perfect Wajsberg hoops coincides with the class of the $0$-free subreducts of members of $\mathbf{V}(C)$. \end{theorem} \begin{proof} In \cite{eghm} it is shown that the variety of Wajsberg hoops coincides with the class of the $0$-free subreducts of MV-algebras. The results easily follow from this fact and from \Cref{prop:canc}, \Cref{teo:chaeq} and \Cref{def:pseudoperf}. \end{proof} As a consequence, we have \begin{theorem}\label{teo:hoopincl} Let $\mathbb{WH}, \mathbb{CH}, ps\mathbb{WH}$ be, respectively, the varieties of Wajsberg hoops, cancellative hoops, pseudo-perfect Wajsberg hoops. Then we have that \begin{equation*} \mathbb{CH}\subset ps\mathbb{WH} \subset \mathbb{WH}. \end{equation*} \end{theorem} \begin{proof} An easy consequence of \Cref{teo:chainpswh}.
The first inclusion follows from the fact that $ps\mathbb{WH}$ contains all the totally ordered cancellative hoops and hence the variety generated by them. For the second inclusion note that, for example, the $0$-free reduct of $[0,1]_\text{\L}$ belongs to $\mathbb{WH}\setminus ps\mathbb{WH}$. \end{proof} We now describe the structure of BL$_\text{Chang}$-chains, with an analogous of the \Cref{teo:am} for BL-chains. \begin{theorem}\label{teo:chainstruct} Every BL$_\text{Chang}$-chain is isomorphic to an ordinal sum whose first component is a perfect MV-chain and the others are totally ordered pseudo-perfect Wajsberg hoops.
It follows that every ordinal sum of perfect MV-chains is a BL$_\text{Chang}$-chain. \end{theorem} \begin{proof} Thanks to \Cref{teo:perfcha,teo:chaeq}, \Cref{rem:2} and \Cref{def:pseudoperf}, we have that every MV-chain (Wajsberg hoop) satisfying the equation $(\overline{2}x)^2=\overline{2}(x^2)$ is perfect (pseudo-perfect): using these facts and \Cref{prop:disg} we have that a BL-chain satisfies the equation $(\overline{2}x)^2=\overline{2}(x^2)$ iff it holds true in all the components of its ordinal sum. From these facts and \Cref{teo:am} we get the result. \end{proof} As a consequence, we obtain the following corollaries. \begin{corollary}\label{cor:blchacont} The variety of BL$_\text{Chang}$-algebras contains those of product-algebras and G\"{o}del-algebras; however, it does not contain the variety of MV-algebras. \end{corollary} \begin{proof} From the previous theorem it is easy to see that the variety of BL$_\text{Chang}$-algebras contains $[0,1]_\Pi$ and $[0,1]_G$, but not $[0,1]_\text{\L}$. \end{proof} \begin{corollary} Every finite BL$_\text{Chang}$-chain is an ordinal sum of a finite number of copies of the two-element Boolean algebra. Hence the class of finite BL$_\text{Chang}$-chains coincides with that of finite G\"{o}del chains. \end{corollary} For this reason it is immediate to see that the finite model property does not hold for BL$_\text{Chang}$.
We conclude with the following remark. \begin{remark}\label{rem:p0} \begin{itemize} \item One can ask if it is possible to axiomatize the class BL$_\text{perf}$ of BL-algebras, whose chains are the BL-algebras that are ordinal sum of perfect MV-chains: the answer, however, is negative. In fact, the class of bounded Wajsberg hoops does not form a variety: for example, it is easy to check that for every bounded pseudo-perfect Wajsberg hoop $\mathcal{A}$, its subalgebra $\mathcal{A}^+$ (see \Cref{def:bound} ) forms a cancellative hoop. Hence BL$_\text{perf}$ cannot be a variety.
However, as we will see in \Cref{sec:propcompl}, the variety of BL$_\text{Chang}$-algebras is the ``best approximation'' of BL$_\text{perf}$, in the sense that it is the smallest variety to contain BL$_\text{perf}$. \item In \cite{dnl4} (see also \cite{ct}) it is studied the variety, called $P_0$, generated by all the perfect BL-algebras (a BL-algebra $\mathcal{A}$ is perfect if, by calling $MV(\mathcal{A})$ the biggest subalgebra of $\mathcal{A}$ to be an MV-algebra, then $MV(\mathcal{A})$ is a perfect MV-algebra). $P_0$ is axiomatized with the equation
\begin{equation}
\tag*{($p_0$)}\sim((\sim(x^2))^2)=(\sim ((\sim x)^2))^2.\label{eq:p0}
\end{equation}
One can ask what the relation is between $P_0$ and the variety of BL$_\text{Chang}$-algebras. The answer is that the variety of BL$_\text{Chang}$-algebras is strictly contained in $P_0$. In fact, an easy check shows that a BL-chain is perfect if and only if the first component of its ordinal sum is a perfect MV-chain. Hence we have:
\begin{itemize}
\item Every BL$_\text{Chang}$-chain is a perfect BL-chain.
\item There are perfect BL-chains that are not BL$_\text{Chang}$-chains: an example is given by $C\oplus [0,1]_\text{\L}$.
\end{itemize}
Now, since the variety of BL$_\text{Chang}$-algebras is generated by its chains (like any variety of BL-algebras, see \cite{haj}), then we get the result.
Finally note that \ref{eq:p0} is equivalent to $2(x^2)=(2x)^2$: hence, differently to what happens over {\L} (see \Cref{rem:2} ), the equations $2(x^2)=(2x)^2$ and $\overline{2}(x^2)=(\overline{2}x)^2$ are not equivalent, over BL. \end{itemize} \end{remark} \subsection{Subdirectly irreducible and simple algebras} We begin with a general result about Wajsberg hoops. \begin{theorem}[{\cite[Corollary 3.11]{f}}] Every subdirectly irreducible Wajsberg hoop is totally ordered. \end{theorem} As a consequence, we have: \begin{corollary}\label{cor:pssubir} Every subdirectly irreducible pseudo-perfect Wajsberg hoop is totally ordered. \end{corollary} We now move to simple algebras.
It is shown in \cite[Theorem 1]{tur} that the simple BL-algebras coincide with the simple MV-algebras, that is, with the subalgebras of $[0,1]_\text{\L}$ (see \cite[Theorem 3.5.1]{mun}). Therefore we have: \begin{theorem} The only simple BL$_\text{Chang}$-algebra is the two elements boolean algebra $\mathbf{2}$. \end{theorem} An easy consequence of this fact is that the only simple {\L}$_\text{Chang}$-algebra is $\mathbf{2}$. \subsection{Completeness}\label{sec:propcompl} We begin with a result about pseudo-perfect Wajsberg hoops. \begin{theorem} The class $pMV$ of $0$-free reducts of perfect MV-chains generates $ps\mathbb{WH}$. \end{theorem} \begin{proof} From \Cref{teo:perfrot,teo:chainpswh} it is easy to check that the variety generated by $pMV$ contains all the totally ordered pseudo-perfect Wajsberg hoops.
From these facts and \Cref{cor:pssubir}, we have that $pMV$ must be generic for $ps\mathbb{WH}$. \end{proof} \begin{theorem}[\cite{dist}]\label{teo:dist} Let L be an axiomatic extension of BL, then L enjoys the finite strong completeness w.r.t a class $K$ of L-algebras iff every countable L-chain is partially embeddable into $K$. \end{theorem} As shown in \cite{haj} product logic enjoys the finite strong completeness w.r.t $[0,1]_\Pi$ and hence every countable product chain is partially embeddable into $[0,1]_\Pi\simeq\mathbf{2}\oplus (0,1]_C$, with $(0,1]_C$ being the standard cancellative hoop (i.e. the $0$-free reduct of $[0,1]_\Pi\setminus\{0\}$). Since every totally ordered product chain is of the form $\mathbf{2}\oplus\mathcal{A}$, where $\mathcal{A}$ is a cancellative hoop (see \cite{eghm}), it follows that: \begin{proposition}\label{prop:cancemb} Every countable totally ordered cancellative hoop partially embeds into $(0,1]_C$. \end{proposition} \begin{theorem}\label{teo:perfho} Every countable perfect MV-chain partially embeds into $\mathcal{V}=(0,1]^*_C$ (i.e. the disconnected rotation of $(0,1]_C$). \end{theorem} \begin{proof} Immediate from \Cref{prop:cancemb} and \Cref{teo:perfrot}. \end{proof} \begin{corollary}\label{cor:comp} The logic {\L}$_\text{Chang}$ is finitely strongly complete w.r.t. $\mathcal{V}$. \end{corollary} \begin{theorem}\label{teo:blccomp} BL$_\text{Chang}$ enjoys the finite strong completeness w.r.t. $\omega\mathcal{V}$. As a consequence, the variety of BL$_\text{Chang}$-algebras is generated by the class of all ordinal sums of perfect MV-chains and hence is the smallest variety to contain this class of algebras. \end{theorem} \begin{proof} Thanks to \Cref{teo:dist} it is enough to show that every countable BL$_\text{Chang}$-chain partially embeds into $\omega\mathcal{V}$ (i.e. the ordinal sum of ``$\omega$ copies'' of $\mathcal{V}$). 
This fact, however, follows immediately from \Cref{prop:cancemb} and \Cref{teo:chainstruct,teo:perfho}. \end{proof} But we cannot obtain a stronger result: in fact \begin{theorem} BL$_\text{Chang}$ is not strongly complete w.r.t. $\omega\mathcal{V}$. \end{theorem} \begin{proof} Suppose not: from the results of \cite[Theorem 3.5]{dist} this is equivalent to claim that every countable BL$_\text{Chang}$-chain embeds into $\omega\mathcal{V}$. But, this would imply that every countable totally ordered cancellative hoop embeds into $(0,1]_C$: this means that every countable product-chain embeds into $[0,1]_\Pi$, that is product logic is strongly complete w.r.t $[0,1]_\Pi$. As it is well known (see \cite[Corollary 4.1.18]{haj}), this is false. \end{proof} With an analogous proof we obtain \begin{theorem} {\L}$_\text{Chang}$ is not strongly complete w.r.t. $\mathcal{V}$ \end{theorem} However, thanks to \cite[Theorem 3]{monchain} we can claim \begin{theorem}\label{teo:strcompl} There exist a {\L}$_\text{Chang}$-chain $\mathcal{A}$ and a BL$_\text{Chang}$-chain $\mathcal{B}$ such that {\L}$_\text{Chang}$ is strongly complete w.r.t. $\mathcal{A}$ and BL$_\text{Chang}$ is strongly complete w.r.t. $\mathcal{B}$. \end{theorem} \begin{problem} Which can be some concrete examples of such $\mathcal{A}$ and $\mathcal{B}$ ? \end{problem}
\section{First-order logics}\label{sec:first} We assume that the reader is acquainted with the formalization of first-order logics, as developed in \cite{haj, ch}.
Briefly, we work with (first-order) languages without equality, containing only predicate and constant symbols: as quantifiers we have $\forall$ and $\exists$. The notions of terms and formulas are defined inductively as in the classical case.
As regards semantics, given an axiomatic extension L of BL we restrict to L-chains: the first-order version of L is called L$\forall$ (see \cite{haj, ch} for an axiomatization). A first-order $\mathcal{A}$-interpretation ($\mathcal{A}$ being an L-chain) is a structure $\mathbf{M}=\left \langle M, \{r_P\}_{P\in \mathbf{P}}, \{m_c\}_{c\in \mathbf{C}}\right \rangle$, where $M$ is a non-empty set, every $r_P$ is a fuzzy $\mathit{arity}(P)$-ary relation, over $M$, in which we interpret the predicate $P$, and every $m_c$ is an element of $M$, to which we map the constant $c$.
Given a map $v:\, VAR\to M$, the interpretation of $\lVert\varphi\rVert_{\mathbf{M}, v}^\mathcal{A}$ in this semantics is defined in a Tarskian way: in particular the universally quantified formulas are defined as the infimum (over $\mathcal{A}$) of truth values, whereas those existentially quantified are evaluated as the supremum. Note that these $\inf$ and $\sup$ may fail to exist in $\mathcal{A}$: an $\mathcal{A}$-model $\mathbf{M}$ is called \emph{safe} if $\lVert\varphi\rVert^\mathcal{A}_{\mathbf{M}, v}$ is defined for every $\varphi$ and $v$.
A model is called \emph{witnessed} if the universally (existentially) quantified formulas are evaluated by taking the minimum (maximum) of truth values in place of the infimum (supremum): see \cite{witn, ch1, ch} for details.
The notions of soundness and completeness are defined by restricting to safe models (even if in some cases it is possible to enlarge the class of models: see \cite{bm}): see \cite{haj, ch, ch1} for details. \paragraph*{} We begin with a positive result about \L$_\text{Chang}\forall$. \begin{definition} Let L be an axiomatic extension of BL. With L$\forall^w$ we define the extension of L$\forall$ with the following axioms \begin{align} \tag*{(C$\forall$)}&(\exists y)(\varphi(y)\to (\forall x)\varphi(x))\label{cforall}\\ \tag*{(C$\exists$)}&(\exists y)((\exists x)\varphi(x)\to\varphi(y))\label{cexist}. \end{align} \end{definition}
\begin{theorem}[{\cite[Proposition 6]{ch1}}] \L$\forall$ coincides with \L$\forall^w$, that is \\\L$\forall\vdash$\emph{\ref{cforall},\ref{cexist}}. \end{theorem} An immediate consequence is: \begin{corollary}\label{cor:witnluk} Let L be an axiomatic extension of \L. Then L$\forall$ coincides with L$\forall^w$. \end{corollary} \begin{theorem}[{\cite[Theorem 8]{ch1}}]\label{teo:witncompl} Let L be an axiomatic extension of BL. Then L$\forall^w$ enjoys the strong witnessed completeness with respect to the class $K$ of L-chains, i.e. \begin{equation*} T\vdash_{L\forall^w}\varphi\quad\text{iff}\quad \lVert\varphi\rVert_\mathbf{M}^\mathcal{A}=1, \end{equation*} for every theory $T$, formula $\varphi$, algebra $\mathcal{A}\in K$ and witnessed $\mathcal{A}$-model $\mathbf{M}$ such that $\lVert\psi\rVert_\mathbf{M}^\mathcal{A}=1$ for every $\psi\in T$. \end{theorem} \begin{lemma}[{\cite[Lemma 1]{monchain}}]\label{lem:witn} Let L be an axiomatic extension of BL, let $\mathcal{A}$ be an L-chain, let $\mathcal{B}$ be an L-chain such that $A\subseteq B$ and let $\mathbf{M}$ be a witnessed $\mathcal{A}$-structure. Then for every formula $\varphi$ and evaluation $v$, we have $\lVert\varphi\rVert^\mathcal{A}_{\mathbf{M},v}=\lVert\varphi\rVert^\mathcal{B}_{\mathbf{M},v}$. \end{lemma} \begin{theorem} There is a \L$_\text{Chang}$-chain such that \L$_\text{Chang}\forall$ is strongly complete w.r.t. it. More in general, every \L$_\text{Chang}$-chain that is strongly complete w.r.t \L$_\text{Chang}$ is also strongly complete w.r.t. \L$_\text{Chang}\forall$. \end{theorem} \begin{proof} An adaptation of the proof for the analogous result, given in \cite[Theorem 16]{monchain}, for \L$\forall$.
From \Cref{teo:strcompl} we know that there is a \L$_\text{Chang}$-chain $\mathcal{A}$ strongly complete w.r.t. \L$_\text{Chang}$: from \cite[Theorem 3.5]{dist} this is equivalent to claim that every countable \L$_\text{Chang}$-chain embeds into $\mathcal{A}$. We show that $\mathcal{A}$ is also strongly complete w.r.t. \L$_\text{Chang}\forall$.
Suppose that $T\not\vdash_{\text{\L}_\text{Chang}\forall}\varphi$. Thanks to \Cref{cor:witnluk} and \Cref{teo:witncompl} there is a countable \L$_\text{Chang}$-chain $\mathcal{C}$ and a witnessed $\mathcal{C}$-model $\mathbf{M}$ such that $\lVert\psi\rVert_\mathbf{M}^\mathcal{C}=1$, for every $\psi\in T$, but $\lVert\varphi\rVert_\mathbf{M}^\mathcal{C}<1$. Finally, from \Cref{lem:witn} we have that $\lVert\psi\rVert_\mathbf{M}^\mathcal{A}=1$, for every $\psi\in T$ and $\lVert\varphi\rVert_\mathbf{M}^\mathcal{A}=\lVert\varphi\rVert_\mathbf{M}^\mathcal{C}<1$: this completes the proof. \end{proof} For BL$_\text{Chang}\forall$, however, the situation is not so good. \begin{theorem} BL$_\text{Chang}\forall$ cannot enjoy the completeness w.r.t. a single BL$_\text{Chang}$-chain. \end{theorem} \begin{proof} The proof is an adaptation of the analogous result given in \cite[Theorem 17]{monchain} for BL$\forall$.
Let $\mathcal{A}$ be a BL$_\text{Chang}$-chain: call $\mathcal{A}_0$ its first component. We have three cases \begin{itemize} \item $\mathcal{A}_0$ is finite: from \Cref{teo:chainstruct} we have that $\mathcal{A}_0=\mathbf{2}$ and hence $\mathcal{A}\models (\neg\neg x)\rightarrow (\neg\neg x)^2$. However $\mathcal{V}\not\models (\neg\neg x)\rightarrow (\neg\neg x)^2$, where $\mathcal{V}$ is the chain introduced in \Cref{sec:propcompl}, and hence $\mathcal{A}$ cannot be complete w.r.t. BL$_\text{Chang}\forall$. \item $\mathcal{A}_0$ is infinite and dense. As shown in \cite[Theorem 17]{monchain} the formula \\$(\forall x)\neg\neg P(x)\to \neg\neg (\forall x) P(x)$ is a tautology in every BL-chain whose first component is infinite and densely ordered: hence we have that $\mathcal{A}\models (\forall x)\neg\neg P(x)\to \neg\neg (\forall x) P(x)$. However it is easy to check that this formula fails in $[0,1]_G$: take a $[0,1]_G$-model $\mathbf{M}$ with $M=(0,1]$ and such that $r_P(m)=m$. Hence, from \Cref{cor:blchacont}, it follows that BL$_\text{Chang}\forall\not\vdash (\forall x)\neg\neg P(x)\to \neg\neg (\forall x) P(x)$. \item $\mathcal{A}_0$ is infinite and not dense. As shown in \cite[Theorem 17]{monchain} the formula $(\forall x)\neg\neg P(x)\to \neg\neg (\forall x) P(x)\vee \neg (\forall x) P(x)\to ((\forall x) P(x))^2$ is a tautology in every BL-chain whose first component is infinite and not densely ordered: hence we have that $\mathcal{A}\models (\forall x)\neg\neg P(x)\to \neg\neg (\forall x) P(x)\vee \neg (\forall x) P(x)\to ((\forall x) P(x))^2$. Also in this case, however, this formula fails in $[0,1]_G$, using the same model $\mathbf{M}$ of the previous case. \end{itemize} \end{proof}
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Fault-tolerant quantum speedup from constant depth quantum circuits}
\author{Rawad Mezher $^1$$^{,2,3}$}
\email{[email protected]}
\author{Joe Ghalbouni $^2$} \author{Joseph Dgheim $^2$} \author{Damian Markham $^{1,4}$} \email{[email protected]}
\affiliation{(1) Laboratoire d'Informatique de Paris 6, CNRS, Sorbonne Universit\'e, 4 place Jussieu, 75252 Paris Cedex 05, France}
\affiliation{(2) Laboratoire de Physique Appliquée, Faculty of Sciences 2, Lebanese University, 90656 Fanar, Lebanon}
\affiliation{(3) School of Informatics, University of Edinburgh, 10 Crichton Street, Edinburgh, EH8 9AB}
\affiliation {(4) JFLI, National Institute for Informatics, and the University of Tokyo,
Tokyo, Japan.}
\date{\today}
\begin{abstract}
A defining feature in the field of quantum computing is the potential of a quantum device to outperform its classical counterpart for a specific computational task. By now, several proposals exist showing that certain sampling problems can be done efficiently quantumly, but are not possible efficiently classically, assuming strongly held conjectures in complexity theory --- a feature dubbed \emph{quantum speedup}. However, the effect of noise on these proposals is not well understood in general, and in certain cases it is known that simple noise can destroy the quantum speedup.
Here we develop a fault-tolerant version of one family of these sampling problems, which we show can be implemented using quantum circuits of constant depth. We present two constructions, each taking $\mathrm{poly}(n)$ physical qubits, some of which are prepared in noisy magic states. The first of our constructions is a constant depth quantum circuit composed of single and two-qubit nearest neighbour Clifford gates in four dimensions. This circuit has one layer of interaction with a classical computer before final measurements. Our second construction is a constant depth quantum circuit with single and two-qubit nearest neighbour Clifford gates in three dimensions, but with two layers of interaction with a classical computer before the final measurements.
For each of these constructions, we show that there is no classical algorithm which can sample according to its output distribution in $\mathrm{poly}(n)$ time, assuming two standard complexity-theoretic conjectures hold. The noise model we assume is the so-called \emph{local stochastic quantum noise}.
Along the way, we introduce various new concepts such as constant depth magic state distillation (MSD), and constant depth output routing, which arise naturally in measurement based quantum computation (MBQC), but have no constant-depth analogue in the circuit model.
\end{abstract} \maketitle
\textbf{\emph{Introduction }}- Quantum computers promise incredible benefits over their classical counterparts in various areas, from breaking RSA encryption \cite{schor94}, to machine learning \cite{biamonte2017quantum}, and improvements to generic search \cite{grover96}, among others \cite{montanaro2016quantum,olson2017quantum}.
Although these and other examples of quantum algorithms do outperform classical ones, on the practical level, they in general require quantum computers with a high level of fault-tolerance and scalability, the likes of which appear to be out of the reach of current technological developments \cite{Preskill18}.
An interesting question is thus what can be done with so-called \emph{sub-universal} quantum devices, which are not universal, in the sense that they cannot perform every quantum computation, but are realizable in principle by our current technologies.
Several examples of such practically motivated sub-universal models which nevertheless capture a sense of quantum advantage have been discovered in recent years \cite{BJS10,AA11,BMS16PRL,HM18,gao17,BHS+17,HBS+17,MB17,MGDM19,BIS+18,NRK+18,AAB+19,NBG+19,HHB+19,BGK+19,bravyi2018quantum}.
In most of these works, sampling from the output probability distribution of these sub-universal devices has been shown to be classically impossible to do efficiently, provided widely believed complexity theoretic conjectures hold \cite{BJS10,AA11}.
Thus, these devices demonstrate what is known as an exponential \emph{quantum speedup}.
The first experimental demonstration of quantum speedup is a major milestone in quantum information. Recent audacious experimental efforts \cite{AAB+19} and subsequent proposals of their classical simulation \cite{IBM} bring to light the challenges and subtleties of achieving this goal. Statements of quantum speedup are complexity theoretic in nature, making it difficult to pin down when a problem can in practice be simulated or not classically, even if we know in the limit of `infinite size' experiments that efficient classical simulation is impossible. At the same time, the role of noise in simplifying the simulation is ever more important, as systems grow, noise becomes more difficult to control, and it is a subtle question as to when it dominates; and even simple noise can very easily lead to breakdown of quantum speedup. Indeed, in \cite{BMS16,OB18,shchesnovich2019noise,TTT+20,KLF20,yung2017can,gao2018efficient} it was shown that noise generally renders the output probabilities of these devices (which in the noiseless case demonstrate quantum speedup) classically simulable efficiently. There is clearly a great need to understand better the effect of noise, and develop methods of mitigation.
Applying the standard approach to deal with noise in computation, fault-tolerance, is non-trivial in this setting for at least two reasons \cite{fowler2012proof,NC2000,LAR+11,DKL+02}. Firstly, the resources it consumes can be huge.
Secondly, it typically involves operations that step outside of the simplified computational model that makes it attractive in the first place. For example, in \cite{BJS10} the sub-universal model IQP was defined, as essentially the family of circuits where all gates are diagonal in the $X$-basis, and shown to provide sampling problems demonstrating quantum speedup in the noiseless case. However in \cite{BMS16} it was shown that a simple noise model --- each output bit undergoes a bit flip with probability $\varepsilon$ --- renders the output probabilities of sufficiently anti-concentrated IQP circuits efficiently simulable classically. Interestingly, for this special type of noise, they also show that quantum speedup can be recovered using classical fault-tolerance and larger encodings of the problem quantumly, still within the IQP framework \cite{BMS16}. However, for more general noise (for example Pauli noise in all the Pauli bases), this does not appear to work, and it is not obvious if it is possible to do so within the constrained computational model. In this case that would mean maintaining that all gates be diagonal in $X$, which is not obvious as typical encoding and syndrome measurements involve more diverse gates.
In this work, we study how quantum speedup can be demonstrated in the presence of noise for a family of sampling problems. We take the \emph{local stochastic quantum noise} (we will also refer to this noise as local stochastic noise) model, commonly studied in the quantum error correction and fault-tolerance literature \cite{BGK+19,FGL18,gottesman2013fault,aliferis2005quantum,aliferis2007accuracy}. Our sampling problems are built on a family of schemes essentially based on local measurements on regular graph states, which correspond to constant depth 2D nearest neighbor quantum circuits showing quantum speedup \cite{gao17,BHS+17,HBS+17,MGDM18,MGDM19,HHB+19}. We show that these can be made fault-tolerant in a way which maintains constant depth of the quantum circuits, albeit with large (but polynomial) overhead in the number of ancilla systems used, and at most two rounds of (efficient) classical computation during the running of the circuit.
We present two different constructions based on two different techniques of fault-tolerance, the first of which involves the use of transversal gates and topological codes each encoding a single logical qubit \cite{BGK+19,fowler2012proof,wang2011surface}. This construction results in a constant depth quantum circuit demonstrating a quantum speedup, but, because of the need for long range transversal gates, can only be viewed as a quantum circuit with single qubit Clifford gates and nearest neighbor two-qubit Clifford gates in 4D (we will henceforth refer to this as our 4D nearest neighbor (NN) architecture). Our second construction avoids using transversal gates by exploiting topological defect-based quantum computation \cite{RHG07}, thereby resulting in a constant depth quantum circuit which is a 3D NN architecture. The tradeoff, unfortunately, is that our 3D NN architecture requires polynomially more ancillas than our 4D NN architecture, and has two layers of interaction with a classical computer, as compared to one such layer in our 4D NN architecture.
Our first construction in 4D uses several techniques from \cite{BGK+19}, in particular regarding the propagation of noise through Clifford circuits. For the second construction, we also develop techniques from \cite{KT19}. In \cite{KT19}, a construction for fault-tolerant quantum speedup was presented which consisted of a constant depth quantum circuit obtained by using defect-based topological quantum computing \cite{RHG07}. This construction is non-adaptive (no interaction with classical computer during running of circuit), and can be viewed as a 3D NN architecture. The main disadvantage of the construction in \cite{KT19} was the magic state distillation (MSD) procedure employed, which makes the scheme impractical in the sense that one should repeat the experiment an exponential number of times in order to observe an instance which is hard for the classical computer to simulate. In both our 3D and 4D NN constructions, we overcome this problem by optimizing our MSD procedure, thereby making the appearance of a hard instance very likely in only a few repetitions of the experiment, a feature called single-instance hardness \cite{gao17}. This, however, comes at the cost of adding adaptive interactions with the classical computer while running the quantum circuit.
This paper is organised as follows. First, we introduce the family of sampling problems using graph states, on which our constructions are based. After briefly defining the noise model, we describe in detail the encoding procedure for our 4D NN architecture. We then describe the effects of noise on our construction, step by step, starting from the Clifford part of the circuit and ending with the MSD, while introducing our optimized MSD techniques based on MBQC, namely constant depth non-adaptive MSD, and MBQC routing. Finally, we explain how to modify, using our optimized MSD techniques, the 3D NN architecture in \cite{KT19} in order to give rise to the single-instance hardness feature \cite{gao17}. Note that in our 3D NN architecture, we use different (fixed) measurement angles to those in \cite{KT19} to construct a different sampling problem having an anti-concentration property \cite{MGDM18,MB17,HBS+17}.
\textbf{\emph{Graph state sampling}} - Our approach is to construct a fault-tolerant version of the architectures based on measurement based quantum computation (MBQC) \cite{RB01}, which have recently been shown to demonstrate a quantum speedup \cite{gao17,BHS+17,HBS+17,MGDM18,MGDM19,HHB+19}.
In these constructions, the sampling is generated by performing local measurements on a large entangled state, known as a graph state. Given a graph $G$, with vertices $V$ and edges $E$, the associated graph state $|G\rangle$, of $|V|$ qubits is defined as
\begin{equation}
\label{eq1PRL}
|G\rangle:=\prod_{\{i,j\} \in E}CZ_{ij}\bigotimes_ {a \in V}|+\rangle_a, \end{equation}
where $|+\rangle:=\dfrac{|0\rangle+|1\rangle}{\sqrt{2}}$ and $CZ_{ij}$ is the controlled-Z gate ($CZ$) acting on qubits $i$ and $j$ connected by an edge. For certain graphs of regular structure, such as the cluster \cite{RB01} or brickwork \cite{BFK+09} states, applying single qubit measurements, of particular choices of angles on the $XY$-plane, effectively samples distributions, in a way that is impossible to do efficiently classically, up to the standard assumptions \cite{BHS+17,HBS+17,gao17,MGDM18,KT19}.
Although our techniques can be applied to \emph{any} such architecture where the measurement angles in the $XY$-plane of the Bloch sphere are chosen from the set $\left\{0,\dfrac{\pi}{2},\dfrac{\pi}{4}\right\}$ \cite{gao17,BHS+17,HBS+17,MGDM18}, for concreteness we will focus on the architecture of \cite{MGDM18}.
\begin{figure*}
\caption{ Graph state $|G\rangle$ of \cite{MGDM18} together with the pre-specified measurements in the $XY$ plane. This graph state is composed of $n$ rows and $k$ columns as seen in the main text (lower part of figure), and made up of two-qubit gadgets $G_B$ (green rectangles) zoomed in at the upper part of the figure (orange circle and arrow). Blue circles are qubits, blue vertical and horizontal lines are $CZ$ gates, the symbols inside each circle correspond to the angle in the $XY$ plane at which this qubit is measured. The $\pi/4$ symbol is a measurement at an angle $\pi/4$ in the $XY$ plane, similarly for $\pi/2$ and $0$.
In the original construction of \cite{MGDM18}, the red horizontal line is a long range $CZ$; these are used periodically in $|G\rangle$ to connect two consecutive $G_B$ gadgets acting on qubits of either the first row or the last row of $|G\rangle.$ Here, this red horizontal line is a linear cluster of twelve qubits measured at an $XY$ angle of 0; this is in order to make the construction nearest neighbor. Note that this only adds single qubit random Pauli gates to the random gates of \cite{MGDM18}, and therefore does not affect their capacity to implement a $t$-design.}
\label{FIG G original}
\end{figure*}
Following \cite{MGDM18} we start with a regular graph state, closely related to the brickwork state \cite{BFK+09}, composed of $n$ rows and $k$ columns.
Then we (non-adaptively) measure qubits of all but the last column at pre-specified fixed $XY$-angles from the set $\left\{0,\dfrac{\pi}{2},\dfrac{\pi}{4}\right\}$ effectively applying a unitary, on the $n$ unmeasured qubits. This is illustrated in Figure \ref{FIG G original}.
Let $V_1 \subset V$ be the set of qubits which are measured at an $XY$ angle $\dfrac{\pi}{4}$ and let $V_2 \subset V$ be the set of qubits which are measured at an $XY$ angle $\dfrac{\pi}{2}$. One can equivalently perform local rotations to the graph state and measure all systems in the $Z$ basis. In this way, if we define \begin{eqnarray}
|G'\rangle:= \left(\bigotimes_{a \in V}H_a \bigotimes_{b\in V_1} Z_b\left(\pi/4\right)\bigotimes_{c\in V_2}Z_c\left(\pi/2\right) \right)|G\rangle, \nonumber \end{eqnarray} where $H$ is the Hadamard unitary and $Z(\theta):=e^{-i{\dfrac{\theta}{2} Z}}$ is a rotation by $\theta$ around Pauli Z, then one can represent the outcome by a measurement result bit string $s\in \{0,1\}^{n.(k-1)}$, with associated resultant state \begin{eqnarray}
\label{eq2PRL}
\langle s|G'\rangle = \dfrac{1}{\sqrt{2^{n.(k-1)}}} U_s|0\rangle^{\otimes n}. \end{eqnarray}
This procedure effectively samples from the ensemble of unitaries $\left\{\dfrac{1}{2^{n.(k-1)}},U_s\right\}.$ It was shown in \cite{MGDM18} that setting $k=O(t^{9}(nt+\log(\dfrac{1}{\varepsilon})))$, this ensemble has the property of being an $\varepsilon$-approximate unitary $t$-design \cite{DCE+09}---that is, it approximates sampling on the Haar measure up to the $t$-th moments. This property allows us to reduce the requirements for the proof of quantum speedup since it implies anti-concentration for $t=2$ from \cite{HBS+17}.
Measuring qubits of the last column in the computational ($Z$) basis and denoting the outcome by a bit string $x \in \{0,1\}^n$, our construction samples the bit strings $s,x$ with probability given by \begin{equation}
\label{eq3PRL}
D(s,x)= \dfrac{1}{2^{n.(k-1)}}|\langle x|U_s|0\rangle^{\otimes n}|^2. \end{equation} Fixing $t=2$ and $\varepsilon$ to an appropriate value, in this case the value of $k$ becomes $k=O(n)$, we will use this value of $k$ throughout this work. The results of \cite{HBS+17,MB17} directly imply (see also \cite{MGDM19}), that the distribution \begin{equation} \label{eq4PRL}
D:=\{D(s,x)\} \end{equation} satisfies the following anti-concentration property \cite{MB17,HBS+17} \begin{equation} \label{eq5PRL}
Pr_{s,x}\left(D(s,x) \geq \dfrac{\alpha}{2^{k.n}}\right) \geq \beta, \end{equation} where $\alpha$ is a positive constant, $0<\beta \leq 1$, and $Pr_{s,x}(.)$ is the probability over the uniform choice of bit strings $s$ and $x$.
By using the same techniques as \cite{BMS16PRL,HBS+17, MGDM19}, the following proposition can be shown.
\begin{proposition} \label{prop1PRL} Given that the polynomial Hierarchy (PH) does not collapse to its 3rd level, and that the worst-case hardness of approximating the probabilities of $D$ (Equations (\ref{eq3PRL}) and (\ref{eq4PRL})) extends to average-case; there exists a positive constant $\mu$ such that no $poly(n)$-time classical algorithm $C$ exists that can sample from a probability distribution $D_C$ such that \begin{equation} \label{eq6PRL}
\sum_{s,x}|D_C(s,x)-D(s,x)| \leq \mu. \end{equation}
\end{proposition}
Indeed, as shown in \cite{MGDM18}, (\ref{eq2PRL}) can be viewed as implementing a 1D random circuit, as those in \cite{BHH16}.
In this picture the circuits have depth $O(n)$ (for fixed $t$ and $\varepsilon$) and are composed of 2-qubit gates which are universal on $U(4)$.
These circuits are therefore universal under post-selection, implying that there exist probabilities $D(s,x)$ which are hard ($\sharp P$) to approximate up to relative error $1/4+O(1)$ \cite{FT17} (this property is referred to as \emph{worst-case hardness} of approximating the probabilities of $D$, or for simplicity worst-case hardness). Worst-case hardness together with the anti-concentration property of Equation (\ref{eq5PRL}) means that the techniques of \cite{BMS16PRL} directly prove Proposition \ref{prop1PRL}.
Note that Proposition \ref{prop1PRL} is a conditional statement, meaning that it holds provided certain conjectures are true.
The first is that the $PH$ does not collapse to its 3rd level, a generalization of $P \neq NP$, which is widely held to be true \cite{gasarch2012guest}. The second conjecture is that the worst-case hardness of the problem extends to average-case, meaning roughly that $most$ outputs are hard to approximate up to relative error 1/4 + O(1). Although this conjecture is less-widely accepted, there exists evidence to support it mainly in the case of random circuits sampling unitaries from the Haar measure \cite{bouland2018quantum,movassagh2019cayley}. Particularly relevant to our case are arguments in \cite{BHS+17,MGDM19} which give convincing evidence that worst-case hardness should extend to average-case for distributions of the form $D(s,x)$ (Equation (\ref{eq3PRL})), where the uniform distribution over bit-strings $s$ effectively makes $D(s,x)$ \emph{more flat} as compared to, say, the outputs of random quantum circuits \cite{bouland2018quantum,movassagh2019cayley} or standard IQP circuits \cite{BMS16PRL}. Also, in \cite{gao17} an average-case hardness conjecture was stated involving an MBQC construction with fixed $XY$ angles, as is the case here. Furthermore, we note that a worst-to-average-case conjecture is effectively always required in all known proofs of hardness of approximate classical sampling up to a constant error in the $l_1$-norm \cite{HM17}.
The circuit implementing this construction is constant depth. To see this, notice that the regularly structured graph states of \cite{BHS+17,gao17,HBS+17,HHB+19,MGDM18,MGDM19} can be constructed from constant depth quantum circuits composed of Hadamard ($H$) and $CZ$ gates \cite{HDE+06}. The measurements, being non-adaptive, may be performed simultaneously (depth one). The explicit form of the circuit can be seen by re-writing the state $|G'\rangle$ as follows \begin{eqnarray} \label{EQN: G' as circuit on Ts and 0s}
|G'\rangle = \bigotimes_{a \in V}H_a \bigotimes_{b\in V_2} && Z_b \left(\pi/2\right) \prod_{\{i,j\} \in E} CZ_{ij}\nonumber\\
&&\bigotimes_{c\in V_1} |T\rangle_c \bigotimes_{d \in V/V_1} H_d |0\rangle_d, \end{eqnarray}
where $|T\rangle = Z(\pi/4)H|0\rangle =(|0\rangle + e^{i\pi/4}|1\rangle)/\sqrt{2}$ is referred to as the $T$-state or magic state. Taking out the $T$-state explicitly as here will be useful for applying fault-tolerant techniques. In this way, these architectures can be viewed as constant depth 2D circuits with NN two-qubit gates \footnote{A recent paper \cite{NLD+20} shows that the outputs of 2D constant depth circuits are generally efficiently simulable classically. However, we note that the circuits discussed here \cite{BHS+17,gao17,HBS+17,HHB+19,MGDM18,MGDM19} correspond to worst-case instances of the circuits in \cite{NLD+20}, where their efficient classical algorithm fails. Indeed, the $XY$ measurement angles performed effectively induce a 1D dynamics which is purely unitary, and which for the choice of $XY$ angles made in \cite{MGDM18,MGDM19} and here in our case typically evolves an input state onto a \emph{volume law entangled} state. The classical algorithm in \cite{NLD+20} is generally inefficient in simulating such volume law entangled states.}.
We will show that this constant depth property prevails in our fault-tolerant version of these architectures as well, in our case using 4D and 3D circuits with NN two-qubit gates. As a final remark, note that the 2D NN circuit presented here has the single-instance hardness property, because the choice of measurement angles is fixed \cite{gao17}.
\textbf{\emph{Noise model}} - Before going into details of the fault-tolerant techniques, we present the noise model which we adopt. We will consider the \emph{local stochastic quantum noise} model, following \cite{FGL18,BGK+19}. Local stochastic noise can be thought of as a type of noise where the probability of the error $E$ occurring decays exponentially with the size of its support. This noise model encompasses errors that can occur in qubit preparations, gate applications, as well as measurements. It also allows for the errors between successive time steps of the circuit to be correlated \cite{FGL18,BGK+19}. More precisely, following the notation in \cite{FGL18,BGK+19}, a local stochastic noise with rate $p$, where $p$ is a constant satisfying $0<p<1$, is an $m$-qubit Pauli operator $$E=\otimes_{i=1,...,m}P_{i},$$ where $P_{i} \in \{1,X,Y,Z\}$ are the single qubit Pauli operators, such that
$$Pr\big( F \subseteq Supp(E)\big) \leq p^{|F|},$$ for all $F \subseteq \{1,...,m\}$, where $Supp(E) \subseteq \{1,...,m\}$ is the subset of qubits for which $P_{i} \neq 1$. Also following notation in \cite{FGL18,BGK+19}, we will denote a local stochastic noise with rate $0<p<1$ as $E \sim \mathcal{N}(p)$.
We will use the following property of local stochastic noise, shown in \cite{BGK+19}, which says that all errors for constant depth Clifford circuits can be pushed to the end. Consider a constant depth-$d$ noiseless quantum circuit \begin{equation*} U=U_{d}...U_{1}, \end{equation*} which acts on a prepared input state and is followed by measurements, where each $U_{i}$ for $i=\{1,...,d\}$ is a depth-one circuit composed of single and two-qubit Clifford gates. It was shown in \cite{BGK+19} that a noisy version of this circuit satisfies \begin{eqnarray} \label{eq7PRL} U_{noisy}&=& E_{out}.E_{d}U_{d}....E_{1}U_{1}E_{prep} \nonumber \\ \nonumber &=& E(U_d...U_1) \\ &=& EU, \end{eqnarray} where $E_i \sim \mathcal{N}(p_i)$ for $i \in \{1,...,d\}$, with constant $0<p_{i}<1$ is the noisy implementation of depth-one circuit $U_i$, $E_{prep} \sim \mathcal{N}(p_{prep})$ and $E_{out} \sim \mathcal{N}(p_{out})$ with constants $0<p_{prep},p_{out}<1$ are the errors in the preparation and measurement respectively \footnote{Note that by choosing different values of $p_{prep}$, $p_{out}$, and $p_i$ one can differentiate between the noises of preparation, gate application, and measurement. One can also account for scenarios where some operations could be more faulty than others, as is commonly done for example when assuming two-qubit gates are faultier than single qubit gates \cite{Li15}. }.
For constant depth $d$, $E \sim \mathcal{N}(q)$ where $0<q<1$ is a constant which is a function of $p_1,...,p_d,p_{prep},p_{out}$ \cite{BGK+19} \footnote{For example, when $p_{prep}=p_{out}=p_{1}=...=p_{d}=p$, then $q \leq p^{4^{-d-1}}$ \cite{BGK+19}. Note that $p^{4^{-d-1}}$ is a constant when $d$ is a constant, meaning that $q$ is upper-bounded by a non-zero constant. For a suitable choice of $p$ we can therefore tune $q$ to be below the threshold of fault-tolerant computing with the surface code, where the classical decoding fails with a probability decaying exponentially with the code distance \cite{BGK+19}. }. Equation (\ref{eq7PRL}) shows that the errors accumulating in a constant depth quantum circuit composed of single and two qubit Clifford gates can be treated as a single error $E$. Furthermore, for small enough $q$ (i.e.\ small enough $p_1,...,p_d,p_{prep},p_{out}$---typically, these should be smaller than the threshold of fault-tolerant computing with the surface code \cite{BGK+19,DKL+02} or of the 3D cluster state \cite{RHG07} in our case), $E$ can be corrected with high probability by using standard techniques in quantum error correction (QEC) \cite{BGK+19,wang2011surface}. Also, $E$ can be propagated until after the measurements, where the error correction procedure is completely classical.
\section*{4D NN architecture}
In this part of the paper, we will describe the construction of our 4D NN architecture demonstrating a quantum speedup. Our approach takes three ingredients: the sampling based on regular graph states mentioned above \cite{gao17,BHS+17,HBS+17,MGDM18,MGDM19,HHB+19}, fault-tolerant single shot preparations of logical qubit states \cite{BGK+19}, and magic state distillation (MSD) \cite{BK05,HHP+17,Li15}. A large part of our fault-tolerant techniques follows the work of \cite{BGK+19}, where they present a family of constant depth circuits which give statistics that cannot be reproduced by any classical computer of constant depth. To do so they introduce error correcting codes where it is possible to prepare logical states fault-tolerantly with constant depth, and Clifford gates are transversal. Then, they also show that for local stochastic quantum noise, all errors for Clifford circuits can be traced through to effectively be treated as a final error, meaning that errors do not have to be corrected during the circuit. Together these allow for constant depth fault-tolerant versions of constant depth Clifford circuits. Compared to \cite{BGK+19}, the big difference in our work is the need for non-Clifford operations (for the choice of local measurement angle). To address this, we use so-called magic states which can be distilled fault-tolerantly \cite{BK05}. Generally their distillation circuits are not constant depth however, and here we adapt the distillation circuits of \cite{HHP+17} to be constant depth using ideas from MBQC. In particular we do not use feed-forward in the distillation procedure, and instead trade circuit depth for the cost of running many copies of constant depth circuits (each being an MSD circuit with no feed-forward) in parallel.
We show that, for specific MSD techniques \cite{HHP+17,HH18,Jones13,HH182}, a balance can be reached which gives sufficiently many magic states of high enough fidelity to demonstrate quantum speedup in constant depth with polynomial overhead in the number of ancillas. We then use MBQC notions to route the high fidelity magic states into our sampling circuit. This is also done in constant depth. At this point, interaction with a classical computer is required. This is mainly in order to identify which copies of MSD circuits (which are done in parallel) were successful in distilling magic states of sufficiently high fidelity. Afterwards, these high fidelity magic states are taken, together with more ancillas, to make a logical version of the graph state, which is then measured. Effectively we then have two constant depth quantum circuits with an efficient (polynomial) classical computation in between.
The constant depth MBQC distillation, together with the constant depth MBQC routing, will ensure that enough magic states with adequately high fidelity are always injected into our sampling problem, thereby enabling us to observe quantum speedup \emph{deterministically} at each run of the experiment, since we would deterministically recover an encoded version of the 2D NN architecture with the single-instance hardness property described in earlier sections \cite{MGDM18}. This is contrary to what happens in \cite{KT19}, where an encoded version of this 2D NN architecture is constructed \emph{probabilistically}, albeit with exponentially low probability of success.
\textbf{\emph{Logical encoding}} -
Following \cite{BGK+19}, we use the folded surface code \cite{bravyi1998quantum,Moussa16,BGK+19}. A single logical qubit is encoded into $l$ physical qubits. We denote the logical versions of states and fault-tolerant gates using a bar, that is, a state $|\psi\rangle$ of $m$ qubits would be encoded onto its logical version $|\overline{\psi}\rangle$ on $m.l$ qubits and operator $U$ would be replaced by logical operator $\overline{U}$. The choice of encoding onto the folded surface code has two main advantages. Firstly, Clifford gates have transversal fault-tolerant versions, meaning the fault-tolerant versions of a constant depth Clifford circuit are also constant depth and composed of single and two-qubit Clifford gates acting on physical qubits of the code \cite{BGK+19}. For example, $$\overline{X}=\bigotimes_{i \in V_{diag}} X_i,$$
where $V_{diag}$ is the set of physical qubits lying on the main diagonal of the surface code, $X_i$ is a Pauli $X$ operator acting on physical qubit $i$. Similarly for the logical version of the Pauli $Z$ operator
$$\overline{Z}=\bigotimes_{i \in V_{diag}} Z_i.$$
Secondly, the preparation of the logical $|\overline{0}\rangle$ and $|\overline{T}\rangle$ states can be done fault-tolerantly in constant depth \cite{Li15,BGK+19}.
The preparation of the logical $|\overline{0}\rangle$ state can be done fault-tolerantly using the single-shot preparation procedure of \cite{BGK+19}. This requires a constant depth 3D quantum circuit, together with polynomial time classical post-processing, which can be pushed until after measurements of logical qubits of our circuit (see Figure \ref{FIG Overview circuit}).
This constant depth quantum circuit consists of non-adaptive measurements on a 3D cluster state composed of $O(l^{3/2})$ (physical) qubits \cite{BGK+19}.
The 3D cluster state being of regular structure can be prepared in constant depth. The non-adaptive measurements create a two-logical qubit Bell state up to a Pauli operator.
The classical post-processing is in order to trace these Paulis through the Clifford circuits (Figure \ref{FIG Overview circuit}) and correct the measurement results accordingly.
In \cite{BGK+19} it is shown that this preparation process is fault-tolerant, by showing that, in the presence of local stochastic quantum noise the overall noise induced from the preparation, measurements, and Pauli correction is a local stochastic noise with constant rate \cite{BGK+19}.
For our purposes, we will only use one logical qubit of the Bell state
\footnote{One way to do this would be measuring the other logical qubit of the Bell state non-adaptively in $\overline{Z}$, then decoding the result and applying an $\overline{X}$ to the unmeasured logical qubit dependent on the decoded measurement result. This should be done after the recovery Pauli operator of \cite{BGK+19} has been applied. The noise acting on the unmeasured qubit after completion would still be local stochastic with constant rate. Indeed, after applying the recovery operator of \cite{BGK+19}, we are left with a Bell state with some local stochastic noise $E$ \cite{BGK+19}, then after measuring one logical qubit and decoding (which succeeds with high probability if error rates are small), we apply a conditional $\overline{X}$ operator to the unmeasured logical qubit. In the case this $\overline{X}$ is applied, it also introduces a local stochastic noise $E^{'}$, but because $\overline{X}$ is a constant depth Clifford gate with only single qubit gates, $E^{'}$ can be merged with $E$ to give a single local stochastic noise $E^{''}$ which is still local stochastic with constant rate, by arguments like those of Equation (\ref{eq7PRL}). In what remains, we incorporate this operation into the classical post-processing needed to apply the recovery operator of the single-shot preparation procedure of \cite{BGK+19}. We will therefore mean by recovery operator hereafter, the Pauli recovery operator of \cite{BGK+19} together with the conditional $\overline{X}$ which is applied to the unmeasured logical qubit. Note also that, as mentioned in the main text, we will often push applying this recovery operator until later parts of the circuit (for example after measuring the non-outputs of all copies of $zMSD$ as well as after the final measurements of $\overline{C_2}$), in that case the arguments for the overall noise being local stochastic still hold and follow similar reasoning as above. }.
The preparation of the logical $T$-state $|\overline{T}\rangle$ can also be done in constant depth by using a technique similar to \cite{Li15}.
Indeed, in the absence of noise, a perfect logical $T$-state can be prepared by the initialization of $l$ physical qubits (over a constant number of rounds), as well as three rounds of full syndrome measurements; as detailed in \cite{Li15}
\footnote{ The constant depth procedure of \cite{Li15} also requires some post-selection (in the presence of noise).
However, this post-selection is usually over measurement results of a small (constant) number of qubits, and the success probability is also a constant \cite{Li15}. We can therefore implement in parallel $O(1)$ runs of this constant depth procedure, and we are guaranteed with high probability that at least one run corresponds to the desired post-selection. }.
Each of the syndrome measurement rounds, because of the locality of the stabilizers in the surface code, can be scheduled in such a way as to be implemented by a constant depth quantum circuit composed of Controlled Nots and ancilla qubit measurements \cite{LAR+11,Li15}. In the presence of noise, this procedure prepares a noisy logical $T$-state (Equation (\ref{eqnoisyy})), starting from a noisy physical qubit $T$-state, and noisy preparations, gates and measurements \cite{Li15}
\footnote{ Although the noise model used in \cite{Li15} is not the same as the one we use here, where in \cite{Li15} they use independent depolarizing noise for preparations and gate application, and with different rates for single and two-qubit gates, we believe their results hold in our case as well. Indeed, viewing a local stochastic noise with rate $p$ on a single qubit, this qubit could experience an error (after preparation, measurement or gate application) with probability $pr \leq p$ (from the definition of local stochastic noise with $|F|=1$, see main text), this is in line with the noise model of \cite{Li15} where the probability of error is exactly $p$. Furthermore, choosing different error rates for local stochastic noise applied after single and two-qubit gates allows mimicking what happens in the noise model of \cite{Li15}. }. However, distillation is required to get sufficiently high quality $T$-states, which will be dealt with separately later. For simplicity, for now we will assume perfect $T$-states.
Starting with the prepared logical $|\overline{0}\rangle$ and $|\overline{T}\rangle$, the logical version of Equation (\ref{EQN: G' as circuit on Ts and 0s}) is written in terms of the constant depth circuit $\overline{C_2}$, \begin{eqnarray}
|\overline{G'}\rangle = \overline{C_2} \bigotimes_{c\in V_1} |\overline{T}\rangle_c \bigotimes_{d \in V/V_1} |\overline{0}\rangle_d \label{EQN: logical G'} \end{eqnarray} where \begin{equation}
\overline{C_2} := \bigotimes_{a \in V}\overline{H_a} \bigotimes_{b\in V_2} \overline{Z_b} \left(\pi/2\right) \prod_{\{i,j\} \in E} \overline{CZ_{ij}} \bigotimes_{d \in V/V_1} \overline{H_d}.
\label{EQN: C_2} \end{equation} Since all gates are Clifford, the physical circuit implementing $\overline{C_2}$ is constant depth. This circuit is the last circuit element in Figure~\ref{FIG Overview circuit} which combines the elements of our construction.
The logical $\overline{Z}$ measurements are carried out by physical $Z$ measurements on the physical qubits of the surface code, and
several classical decoding algorithms have been established \cite{BGK+19,wang2011surface,LAR+11,DKL+02}.
In the noiseless case, the decoding algorithm consists of calculating the sum (modulo 2) of the measurement result from measuring $Z$ on the physical qubits of the main diagonal of the surface code.
In the presence of noise, the decoding algorithm takes as input the (noisy) measurement results of all the $l$ physical qubits of the surface code which are measured in the same basis as the qubits on the main diagonal, and performs a minimal weight perfect matching to correct for the error induced by the noise \cite{BGK+19,fowler2012proof,DKL+02}.
For small enough error rates (below the threshold of fault-tolerant computing with the surface code), the probability that these decoding algorithms fail, that is, the probability that the noise changes the parity of the $\overline{Z}$ measurement result after decoding, decreases exponentially with the code distance $cd$, which for surface codes scales as $cd=O(\sqrt{l})$ \cite{bravyi1998quantum,BGK+19,DKL+02,fowler2012proof}.
Let $$\overline{s}=\{\overline{s}_1,...,\overline{s}_{n.(k-1)}\},$$ denote the measurement results of the logical qubits of all but the last column of $|\overline{G'}\rangle$. Similarly, let
$$\overline{x}=\{ \overline{x}_1,...,\overline{x}_n\},$$ denote the measurement results of the logical qubits of the last column of $|\overline{G'}\rangle$.
If we call $\overline{D}(\overline{s},\overline{x})$ the probability of getting $(\overline{s},\overline{x})$ in the absence of noise, it follows straightforwardly from the logical encoding that
\begin{equation}
\label{eq9PRL}
\overline{D}(\overline{s},\overline{x})=D(s,x),
\end{equation} for all $\overline{s} \in \{0,1\}^{n.(k-1)}$, and $\overline{x} \in \{0,1\}^{n}$, where $D(s,x)$ is as defined in Equations (\ref{eq3PRL}) and (\ref{eq4PRL}).
That is, in the absence of noise, measuring non-adaptively the logical qubits of $|\overline{G'}\rangle$ in $\overline{Z}$ defines a sampling problem with probability distribution $\overline{D}$ demonstrating a quantum speedup, by Proposition \ref{prop1PRL}.
We will now see that this sampling remains robust under local stochastic noise. Noise must be addressed at each part of the construction.
The first being that each depth-one step of the circuit preparing $|\overline{G'}\rangle$ is now followed by a local stochastic noise, as in the example of Equation (\ref{eq7PRL}).
Also, the single-shot preparation procedure of \cite{BGK+19} becomes noisy, however as shown in \cite{BGK+19} this noise is local stochastic with constant error rate and therefore can be treated as a preparation noise in preparing $|\overline{G'}\rangle$, analogous to $E_{prep}$ in Equation (\ref{eq7PRL}). As seen earlier, the circuit preparing $|\overline{G'}\rangle$ is constant depth and composed of single and two-qubit Clifford gates acting on physical qubits. Therefore, we can use the result of \cite{BGK+19}, which is shown in Equation (\ref{eq7PRL}), and treat all the noise accumulating through different steps of the circuit as a single local stochastic noise $E \sim \mathcal{N}(q)$ with a constant rate $0<q<1$, acting on the (classical) measurement outcomes \cite{BGK+19}. Therefore, when $q$ is low enough \cite{BGK+19}, $E$ can be corrected with high probability using the classical decoding algorithms described earlier \cite{BGK+19}. In appendix \ref{APPC}, we show that when the number of physical qubits per logical qubit $l$ scales as \begin{equation} \label{eq10PRL}
l \geq O(log^2(n)), \end{equation}
where $n$ is the number of rows of $|G\rangle$ \footnote{ This is usually the input part of an MBQC \cite{RB01}, which is the basis of our construction. }, this suffices for our needs.
More precisely, we denote by $\tilde{\overline{D}}_1(\overline{s},\overline{x})$ the probability of getting outcomes $(\overline{s},\overline{x})$ in the presence of stochastic noise, after performing a classical decoding of the measurement results \cite{BGK+19}, but where logical $T$-states are assumed perfect (noiseless). Then, if $l$ satisfies Equation (\ref{eq10PRL}), and for small enough error rates (below the threshold of fault-tolerant computing with the surface code \cite{DKL+02}) of preparations, single and two-qubit gates, as well as measurements, $\tilde{\overline{D}}_1(\overline{s},\overline{x})$ can be made $1/poly(n)$ close in $l_1$-norm to the noiseless version (Equation (\ref{eq9PRL})). That is, \begin{equation}
\label{eq11PRL}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}}_1(\overline{s},\overline{x})-\overline{D}(\overline{s},\overline{x})| \leq \dfrac{1}{poly(n)}. \end{equation}
This means that for a given constant $\mu_1$, there exists a large enough constant $n_0$, such that for all $n \geq n_0$ classically sampling from $\tilde{\overline{D}}_1$ up to $l_1$-norm error $\mu-\mu_1$ implies, by a triangle inequality, sampling from $\overline{D}$ up to $l_1$-norm error $\mu$, which presents a quantum speedup by Proposition \ref{prop1PRL} \cite{BMS16PRL}. Therefore, we have recovered quantum speedup in the presence of local stochastic noise, assuming perfect $T$-states.
\textbf{\emph{Distillation of $T$-states}}- The final ingredient is the distillation of the $T$-states. The analysis we have done so far assumes we can still prepare perfect logical $T$-states. In reality, however, this is not the case. Indeed, in the presence of noise, the constant depth preparation procedure of \cite{Li15} can only prepare a logical $T$-state with error rate $0<\varepsilon<1$ \begin{equation} \label{eqnoisyy}
\overline{\rho_T}_{noisy}:=(1-\varepsilon)|\overline{T}\rangle \langle \overline{T}| + \varepsilon \eta, \end{equation} with $\eta$ an arbitrary $l$-qubit state. In order to get high purity logical $T$-states, one must employ a technique called magic state distillation (MSD) \cite{BK05}. An MSD circuit is a Clifford circuit which usually takes as input multiple copies of noisy $T$-states $\overline{\rho_T}_{noisy}$, together with some ancillas, and involves measurements and post-selection in order to purify these noisy input states \cite{BK05}. The output of an MSD circuit is a logical $T$-state $\overline{\rho_T}_{out}$ with higher purity than the input one. That is, \begin{equation}
\label{eqPRL12}
\overline{\rho_T}_{out}:=(1-\varepsilon_{out})|\overline{T}\rangle\langle\overline{T}| + \varepsilon_{out}\eta^{'}, \end{equation} with $0<\varepsilon_{out}<\varepsilon<1$, and $\eta^{'}$ an arbitrary $l$-qubit state. For small enough $\varepsilon$ \cite{reichardt2005quantum} \footnote{This is guaranteed by using the technique of \cite{Li15} if the error rate of preparations, single and two-qubit gates is low enough, since $\varepsilon$ in \cite{Li15} is generally a function of these error rates.}, $\varepsilon_{out}$ could be made arbitrarily small by repeating the MSD circuit an appropriate number of times \cite{BK05}.
MSD circuits need not in general be constant depth. Our approach to depth is, again, via a translation to the measurement based quantum computing (MBQC) paradigm \cite{RB01}. In MBQC one starts off with a graph state, for example the 2D grid cluster state, and computation is carried out through consecutive measurements on individual qubits. In order to preserve determinism these measurements must be corrected for. For a general computation this must be done round by round (the number of rounds typically scales with the depth of the corresponding circuit, though there can be some separation thereof \cite{browne2007generalized}). If we forgo these corrections, we end up applying different unitaries, depending on the outcome of the measurement results---indeed, this is effectively what happens in Equation (\ref{eq2PRL}). Thinking of MBQC now as a circuit, if one could do all measurements at the same time, one could think of it as a constant depth circuit, since all that is needed is to construct the 2D cluster state followed by one round of measurements and corrections, which can be done in constant depth. This is possible for circuits constructed fully of Clifford operations, but not generally, and not for the MSD circuits we use here because of the $T$ gates (or feedforward), so we are forced to sacrifice determinism.
Now, in order to get constant depth MSD, we translate the MSD circuits in \cite{HHP+17} to MBQC. The choice of this MSD construction is argued in appendix \ref{APP subsec zMSD}. Since we want to maintain constant depth, we want to perform all measurements at the same time, however the cost is that it will only succeed if we get the measurement outcomes corresponding to the original circuit of \cite{HHP+17} with successful syndromes. In order to produce enough $T$ states, the trick is simply to do it many times in parallel. That is, we will effectively implement many copies of the MBQC computation, so that we get enough successes. Effectively we trade depth of the corresponding circuit for number of copies and ancillas. Fortunately, for our specifically chosen MSD protocols \cite{HHP+17,HH182}, we will see that this cost is not too high.
Furthermore, this is all done in the logical encoding of the folded surface code. Our construction for this, which we denote $zMSD$, is designed to take copies of the noisy encoded $T$-states (\cite{Li15}) and ancilla in the encoded $|\overline{0}\rangle$ state, and affect $z$ iterations of the fault-tolerant version of MSD protocol in \cite{HHP+17}. As discussed above, this happens only when the correct results occur in the MBQC. In this case we say the $zMSD$ was \emph{successful}. We denote the circuit version of this as $\overline{C_1}$ (see Figure \ref{FIG Overview circuit}). In appendix \ref{app zMSD}, we show that when $zMSD$ is successful, $\varepsilon_{out}$ satisfies \begin{equation} \label{eq13PRL}
\varepsilon_{out} \leq O(\dfrac{1}{n^4}). \end{equation}
We also show that performing $O(n^3log(n))$ copies of $zMSD$ circuits (which can be done in parallel), each of which is composed of $O(log(n))$ logical qubits as seen in appendix \ref{APP subsec zMSD}, guarantees with high probability \begin{equation} \label{eqpsuccprl}
p_{succ} \geq 1-\dfrac{1}{e^{poly(n)}}, \end{equation}
that at least $O(n^2)$ copies of $zMSD$ will be successful (we will refer to these often as successful instances of $zMSD$). Note that $O(n^2)=O(k \cdot n)$ is the number of perfect logical $T$-states needed to create $|\overline{G'}\rangle$ \cite{MGDM18}. Furthermore, because $zMSD$ is constant depth and composed of single and two-qubit Clifford gates, errors can be treated as a single local stochastic noise after the measurements with constant rate (see Equation (\ref{eq7PRL})), which can be corrected classically with high probability when the error rates are low enough using the standard decoding algorithms described previously \cite{fowler2012proof,BGK+19}.
The remaining task is to route these good states into the inputs of the circuit $\overline{C_2}$ (Equation (\ref{EQN: C_2}))
depending on the measurement outcomes - i.e. make sure that only the good outputs go to make $|\overline{G'}\rangle$. The most obvious approach, using control SWAP gates, results in a circuit whose depth scales with $n$. Here, once more, we use MBQC techniques in order to bypass additional circuit depth. The idea is to feed the outputs through a $2D$ cluster graph state, and dependent on the measurement results of the $zMSD$, the routing can be etched out by Pauli Z measurements. Since the graph is regular, and, since the measurements can be made at the same time, this can be done in constant depth, up to Pauli corrections (which can be efficiently traced and dealt with by the classical computation at the end). We denote the fault-tolerant circuit implementing this as $\overline{C_R}$, see Figure \ref{FIG Overview circuit}. Details of the construction can be found in appendix \ref{APP routing}, where we also show that errors remain manageable.
Finally, we denote $\tilde{\overline{D}}_2(\overline{s},\overline{x})$ to mean the probability of observing the outcome $(\overline{s},\overline{x})$ after measuring all logical qubits after $\overline{C_2}$ (Equation (\ref{EQN: C_2})), in the presence of local stochastic noise, and where each $T$-state fed into $\overline{C_2}$ is replaced by $\overline{\rho_T}_{out}$, and performing a classical decoding of these measurement results \cite{BGK+19}. Then, we show, in appendix \ref{app zMSD}, that when $\varepsilon_{out}$ satisfies Equation (\ref{eq13PRL}), \begin{equation}
\label{eq14PRL}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}}_2(\overline{s},\overline{x})-\overline{D}(\overline{s},\overline{x})| \leq \dfrac{1}{poly(n)}. \end{equation} Therefore, by the same reasoning as that for $\tilde{\overline{D}}_1$, for small enough error rates, for large enough $n$, and with very high probability $p_{succ}$, we can prepare a constant depth quantum circuit sampling from a noisy distribution $\tilde{\overline{D}}_2$ under local stochastic noise, presenting a quantum speedup.
Our main result can therefore be summarized in the following Theorem, whose proof follows directly from showing that Equation (\ref{eq14PRL}) holds and using Proposition \ref{prop1PRL}.
\begin{theorem} \label{TH1} Assume that the $PH$ does not collapse to its third level, and that worst-case hardness of the sampling problem (\ref{eq4PRL}) extends to average-case. Then there exists a positive constant $0<p<1$, and a positive integer $n_0$, such that for all $n \geq n_0$, if the error rates of local stochastic noise in all preparations, gate applications, and measurements in $\overline{C_1}$, $\overline{C_R}$, and $\overline{C_2}$ are upper-bounded by $p$, then with high probability $p_{succ}$ (Equation (\ref{eqpsuccprl})), the sampling problem $\tilde{\overline{D}}_2$ defined by (\ref{eq14PRL}) can be constructed, and no $poly(n)$-time classical algorithm exists which can sample from $\tilde{\overline{D}}_2$ up to a constant $\mu^{'}$ in $l_1$-norm. \end{theorem}
\textbf{\emph{Overview of the 4D NN architecture}} - The overall construction is presented in Figure \ref{FIG Overview circuit} as a combination of the three circuits mentioned above, $\overline{C_1}$ implementing the MSD, the routing of successful $T$-states in $\overline{C_R}$, and the circuit for the construction of the state $|\overline{G'}\rangle$ in $\overline{C_2}$. Overall it takes the noisy logical $|\overline{0}\rangle$ and $\overline{\rho_T}_{noisy}$ states as inputs and the final measurements are fed back to a classical computer ($CC$) to output the error corrected results $\overline{s},\overline{x}$, according to distribution $\tilde{\overline{D}}_2$ (Equation (\ref{eq14PRL})). The preparation of the logical input states is done in constant depth \cite{BGK+19,Li15} and each of these three composite circuits is constant depth, using at most three dimensions. Furthermore, assuming that classical computation is instantaneous, our entire construction can be viewed as a constant depth quantum circuit. Indeed, as already seen $\overline{C_2}$ is constant depth; what remains is to show the same for $\overline{C_1}$ and $\overline{C_R}$. We show this in appendices \ref{APP subsec zMSD} and \ref{APP routing}.
\begin{figure*}\label{FIG Overview circuit}
\end{figure*}
During the circuit, we require some side classical computation, which inputs back into the circuit at one point. Classical information to and from the classical computer are indicated by dotted orange and black lines in Figure \ref{FIG Overview circuit}. First, the measurements of the non-outputs for the $zMSD$ in $\overline{C_1}$, along with measurement results (not illustrated in figure) of (physical) qubits used in preparing $|\overline{0}\rangle$ \cite{BGK+19} states making up the copies of $zMSD$, are fed into the classical computer in order to determine the choice of measurements after the routing circuit $\overline{C_R}$, as indicated by the orange dotted lines in Figure \ref{FIG Overview circuit}. This part simply identifies the successful $zMSD$ outcomes, followed by calculating the routing path.
This is the only point that classical results are fed back into the circuit, all other classical computations can be done after the final measurements. After these final measurements, the remaining measurements are fed back into the computer, indicated by black dotted lines in Figure \ref{FIG Overview circuit}. Together with the measurement results from the state preparations \cite{BGK+19} (not illustrated in the figure) these are incorporated into the classical error correction \cite{BGK+19,edmonds1973operation} giving the outputs $\overline{s},\overline{x}$ with probabilities $\tilde{\overline{D}}_2$. The classical computation can be done in $poly(n)$-time \cite{RBB03,wang2011surface,edmonds1973operation}.
The total number of physical qubits required scales as $O(n^5poly(log(n)))$ (where $n$ scales the size of the original sampling problem (Proposition \ref{prop1PRL})). This breaks down as follows.
$\overline{C_1}$ takes as input $O(n^3log^2(n))$ noisy logical $T$-states $\overline{\rho_T}_{noisy}$ and $O(n^3log^2(n))$ ancillas prepared in $|\overline{0}\rangle$.
$\overline{C_R}$ takes the outputs of $\overline{C_1}$, and additional $O(n^5log(n))$ logical ancillas prepared in $|\overline{0}\rangle$. This dominates the scaling.
$\overline{C_R}$ sends $O(n^2)$ distilled $T$-states to $\overline{C_2}$, which also takes in $O(n^2)$ copies of $|\overline{0}\rangle$. This means that in total we would need $O(n^5log(n))$ logical qubits. Now, each logical qubit is composed of $l \geq O(log^2(n))$ physical qubits (Equation (\ref{eq10PRL})), and some of these logical qubits, which need to be prepared in $|\overline{0}\rangle$, require an additional overhead of $O(l^{\frac{3}{2}}) \geq O(log^3(n))$ physical qubits, as seen previously (see also \cite{BGK+19}). Therefore, the total number of physical qubits needed is $\sim O(n^5log^4(n))=O(n^5poly(log(n))).$
A crucial question relevant to experimental implementations would be calculating the exact values of the error rates of measurements, preparations, and gates needed to achieve fault-tolerant quantum speedup in our construction. Because the quantum depth of our construction is constant and composed of single and two-qubit Clifford gates (as seen previously), we know from \cite{BGK+19} and the likes of Equation (\ref{eq7PRL}) that these error rates are non-zero constants independent of $n$. However, their values may be pessimistically low. A crude estimate of this error rate is $p \sim e^{-4.6 \times 4^{-d-1}}$. This is assuming preparations (including preparation of noisy logical $T$-states for distillation), measurements, and gates all have the same error rate $p$. Here, $d$ is a constant which is the total quantum depth of our construction, which is the sum of the depths of all preparations, gate applications and measurements involved in constructing $zMSD$, routing the outputs of successful instances of $zMSD$, and constructing $|\overline{G'}\rangle$. This expression is obtained by using the same techniques as \cite{BGK+19}, where the error rate $q$ of $E$ in Equation (\ref{eq7PRL}) is chosen such that it satisfies $q \leq 0.01$. This is in order for classical decoding to fail with probability decaying exponentially with the code distance of the surface code \cite{BGK+19,fowler2012proof}.
This construction is a constant depth quantum circuit implementable on a 4D NN architecture (or a 3D architecture with long range gates). The reason for this is that our original (non fault-tolerant) construction is a 2D NN architecture \cite{MGDM18} as seen previously, and the process of making this architecture fault-tolerant requires adding an additional two dimensions \cite{BGK+19}, albeit while keeping the quantum depth constant, as explained earlier. If we do not want to use long range transversal $CZ$ gates in 3D, and want all the $CZ$ gates to be NN, the only way to do this is to work in 4D. Note that this was not a problem in \cite{BGK+19}, as there the original (non fault-tolerant) circuit was a 1D circuit, and introducing fault-tolerance added two additional dimensions, making their construction constant depth with NN gates in 3D \cite{BGK+19}. Nevertheless, we will show in the next section how to make our construction constant depth in 3D with NN two-qubit gates. We will do this by avoiding the use of transversal gates to implement encoded versions of two-qubit gates; a feature which is naturally found in defect-based topological quantum computing \cite{RHG07}. Armed with the ideas of constant depth MSD and MBQC routing, we shall present in this next section a constant quantum depth fault-tolerant construction demonstrating a quantum speedup with only nearest neighbor $CZ$ gates in 3D.
\section*{3D NN architecture} In this part of the paper, we will explain how the construction for fault-tolerant quantum speedup described earlier can be achieved using a 3D NN architecture, based on the construction of Raussendorf, Harrington, and Goyal (RHG) \cite{RHG07}. Note that in this construction (henceforth referred to as RHG construction), two types of magic states need to be distilled, the $T$-states seen previously, as well as the $Y$-states. A perfect (noiseless) $Y$-state is given by \begin{equation*}
|Y\rangle:=\dfrac{1}{\sqrt{2}}(|0\rangle+ e^{i\frac{\pi}{2}}|1\rangle). \end{equation*} This state is a resource for the phase gate $Z(\pi/2)$. The noisy $Y$-state $\rho_{Y_{noisy}}$ is defined analogously to a noisy $T$-state seen earlier \begin{equation*}
\rho_{Y_{noisy}}:=(1-\varepsilon)|Y\rangle \langle Y| + \varepsilon \eta, \end{equation*} with $0<\varepsilon<1$ representing the noise, and $\eta$ an arbitrary single qubit state.
As already mentioned, the RHG construction was also used in \cite{KT19} to achieve fault-tolerant quantum speedup. However, our construction will differ from \cite{KT19} in mainly two ways. The first, as already mentioned, is that our construction deterministically produces a hard instance, whereas that in \cite{KT19} produces such an instance with exponentially low probability. Secondly, our sampling problem verifies the anti-concentration property by construction \cite{MGDM18}, as explained previously, whereas in \cite{KT19}, this anti-concentration was conjectured. Therefore, in our proofs we assume one fewer complexity-theoretic conjecture (we use two conjectures in total, see Theorem \ref{TH1} and Proposition \ref{prop1PRL}) as compared to \cite{KT19}. Note that we assume the minimal number of complexity-theoretic conjectures needed to prove quantum speedup, using all currently known techniques \cite{HM17}.
\begin{figure*}\label{fig3DNN}
\end{figure*}
We now very briefly outline the key points in the RHG construction. More detailed explanations can be found in \cite{RHG07,Fujii15,FG08}. In this construction, one starts out with preparing a 3D regular lattice of qubits (call it RHG lattice). This preparation can be done in constant depth by using nearest neighbor $CZ$ gates \cite{RHG07}. This lattice is composed of elementary cells, which can be thought of as smaller 3D lattices building it up. Elementary cells are of two types, primal and dual, and the RHG lattice is composed of a number of interlocked primal and dual cells \cite{RHG07,FG08}. Each elementary cell can be pictured as a cube, with qubits (usually initialized in $|+\rangle$ state) living on the edges and faces of this cube. The RHG lattice is a graph state, and is thus characterized by a set of (local) stabilizer relations \cite{HDE+06}. Errors can be identified by looking at the parity of these stabilizers. Usually, this is done by entangling extra qubits with the system qubits; these extra qubits are called \emph{syndrome} qubits. However, in the RHG construction this is accounted for by including these syndrome qubits \emph{a priori} when constructing the RHG lattice; this region of syndrome qubits is usually called the \emph{vacuum} region $V$ \cite{RHG07}. Logical qubits in this construction are identified with \emph{defects}. These defects are hole-like regions of the RHG lattice inside of which qubits are measured in the $Z$ basis, effectively eliminating these qubits. Eliminating these qubits (and some of their associated stabilizers) results in extra degrees of freedom which define the logical qubits \cite{RHG07}. Defects can also be primal or dual, depending on whether they are defined on primal or dual lattices. Two defects of the same type (either primal or dual) define a logical qubit. The logical operators $\overline{X}$ and $\overline{Z}$ are products of $X$ operators and $Z$ operators respectively. 
These products of operators act non-trivially on qubits either encircling each of the two defects, or forming a chain joining the two defects, depending on whether the logical qubit is primal or dual \cite{RHG07,FG08}. By measuring single qubits of the RHG lattice at angles $X$, $Y$, $Z$ and $\dfrac{X+Y}{\sqrt{2}}$, one can perform (primal or dual) logical qubit preparation and measurement in $\overline{X}$ and $\overline{Z}$ bases, preparation of (primal or dual) logical $T$-states and $Y$-states, and logical controlled not ($\overline{CNOT}$) gates between two defects of the same type (this however can only be accomplished by an intermediate step of \emph{braiding} two defects of different types \cite{RHG07}, which is one of the main reasons for the need for two types of defects). If performed perfectly (noiseless case), these operations are universal for quantum computation \cite{RB01}. Note that in our case, as in \cite{KT19}, we will replace measuring qubits in $Y$ and $\dfrac{X+Y}{\sqrt{2}}$ by (equivalently) initializing qubits in $|Y\rangle$ and $|T\rangle$, then measuring these qubits in the $X$ basis. In this way, we will only perform single qubit $X$ and $Z$ measurements. One of the spatial dimensions of the 3D RHG lattice is chosen as \emph{simulated time}, allowing one to perform a logical version of MBQC via single qubit measurements \cite{RHG07}.
The preparation and measurement of logical qubits in the $\overline{X}$ and $\overline{Z}$ bases, as well as $\overline{CNOT}$, can all be performed by measuring qubits of the RHG lattice in $X$ and $Z$ \cite{RHG07,FG08}. All these operations can be performed fault-tolerantly, and non-adaptively (up to Pauli corrections, which can be pushed until after measurements, and accounted for, since all our circuits are Clifford \cite{RBB03}), by choosing the defects to have a large enough perimeter, and a large enough separation \cite{FG08,RHG07}. Indeed, in appendix \ref{appRHG1}, we show that when $L_{m}=O(log(n))$, where $L_{m}$ is the minimum (measured in units of length of an elementary cell) of the perimeter of a defect and the separation between two defects in any direction, we would recover the same fault-tolerance results as our 4D NN architecture under local stochastic noise, albeit with different error rates which we will also calculate in appendix \ref{appRHG1}. The noisy logical $Y$-states and $T$-states can also be prepared non-adaptively up to Pauli corrections by performing $X$ and $Z$ measurements on qubits of the RHG lattice, some of which are initialized in $|Y\rangle$ (for logical $Y$-state preparation) or $|T\rangle$ (for logical $T$-state preparation) \cite{FG08}. However, these preparations are unfortunately non-fault-tolerant (introduce logical errors), and therefore these states must be distilled \cite{RHG07}.
If we could somehow obtain perfect logical $Y$-states, then our constant-depth fault-tolerant 3D NN construction under local stochastic noise would follow a similar analysis as our 4D NN case, and have a circuit exactly the same as that in Figure \ref{FIG Overview circuit} (up to using $\overline{X}$ measurements in place of $\overline{H}$ gates followed by $\overline{Z}$ measurements), with one difference being that instead of using concatenated versions of the MSD circuits of \cite{HHP+17} to construct $\overline{C_1}$, we will use concatenated versions of the MSD circuits of \cite{HH182}. This is in order to preserve the transversality of logical $T$-gates, which allows preparation of logical $T$-states in the RHG construction by using only local measurements \cite{RHG07} \footnote{This replacement of MSD circuits does not change anything in our proofs, because both of these families of MSD circuits satisfy a specific condition regarding the number of noisy $T$-states needed to distill a $T$-state of arbitrary accuracy, and our proof hinges only on this condition being verified (see appendix \ref{APP subsec zMSD}). Furthermore, the probability of distillation succeeding for (non-concatenated) MSD circuits in \cite{HH182} approaches one for low enough $\varepsilon$, as in the MSD circuits of \cite{HHP+17}.}. Unfortunately, distilling logical $Y$-states in the RHG construction is essential. What makes matters worse is that using techniques of the likes of those used in the construction of $\overline{C_1}$, on MSD circuits capable of distilling logical $Y$-states up to fidelity $1-\varepsilon_{out}$ (Equation (\ref{eq13PRL})), namely circuits based on the Steane code \cite{RHG07}, leads to circuits with a quasi-polynomial number of ancillas. 
This is much worse than the polynomial number of ancillas used in circuits $\overline{C_1}$ needed to distill logical $T$-states of the same fidelity $1-\varepsilon_{out}$, and based on the MSD circuits of \cite{HHP+17,HH182} (see appendices \ref{APP subsec zMSD} and \ref{appRHG2}).
Happily, we manage to overcome this limitation by observing two facts about our construction. The first is that the $Z(\pi/2)$ rotations (and thus $Y$-states) are not needed in order to construct our sampling problem. Indeed, in Figure \ref{FIG G original} every qubit measured at an $XY$ angle $\pi/2$ in $G_B$ could be replaced by a linear cluster of three qubits measured respectively at $XY$ angles $\pi/4$, $0$, and $\pi/4$ (these measurements can be implemented by only using logical $T$-states in the fault-tolerant version). To make a graph state of regular shape, we should also replace all qubits at the same vertical level as the $\pi/2$-measured qubits in $G_B$ (see Figure \ref{FIG G original}), and which are always measured at an $XY$ angle $0$, with a linear cluster of three qubits measured at an $XY$ angle $0$. By doing this replacement, the new graph gadget $G^{'}_B$ which is an extension of $G_B$ now defines a so-called partially invertible universal set \cite{MGDM19}. Therefore, by results in \cite{MGDM19}, using $G^{'}_B$ instead of $G_B$ in our construction (Figure \ref{FIG G original}) also results in a sampling problem with distribution $D^{'}=\{D^{'}(s,x)\}$ (where $s$ and $x$ are bit strings defined analogously to those in (\ref{eq3PRL}) and (\ref{eq4PRL})) satisfying both worst-case hardness and the anti-concentration property \cite{MGDM19,HBS+17}. Thus, the distribution $D^{'}$, although different from $D$ (Equations (\ref{eq3PRL}) and (\ref{eq4PRL})), can be used in the same way as $D$ to demonstrate a quantum speedup (see Proposition \ref{prop1PRL}). Furthermore, all previous results established for $D$ also hold when $D$ is replaced by $D^{'}$.
To see why $G^{'}_B$ defines a partially invertible universal set, call $\mathcal{U}_1 \subset U(4)$ ($\mathcal{U}_2 \subset U(4)$) the set of all random unitaries which can be sampled by measuring the qubits of $G_B$ ($G^{'}_B$) non-adaptively at their prescribed angles. Straightforward calculation shows that $\mathcal{U}_1 \subset \mathcal{U}_2$. Furthermore, both $\mathcal{U}_1$ and its complement in $\mathcal{U}_2$ (denoted $\mathcal{U}_2-\mathcal{U}_1$) are (approximately) universal in $U(4)$ since they are composed of unitaries from the gate set of Clifford + T \cite{NC2000,MGDM18}. The set $\mathcal{U}_1$ being both universal in $U(4)$ and inverse containing \cite{MGDM18}, implies that $\mathcal{U}_2$ satisfies all the properties of a partially invertible universal set \cite{MGDM19}.
However, note that in using partially invertible universal sets, for technical reasons \cite{MGDM19}, the number of columns of $|G\rangle$ should now satisfy $k=O(n^3)$, resulting in an increase of overhead of ancilla qubits.
One could keep $k=O(n)$ (as in the original construction with $G_B$) while using only $\pi/4$ and $0$ measurements, by using one of the constructions of \cite{BHS+17}. However, the construction of \cite{BHS+17} does not have a provable anti-concentration, although extensive numerical evidence was provided to support the claim that this family of circuits does indeed anti-concentrate \cite{BHS+17}.
Although $Y$-states are not needed in the construction of our sampling problem, they are still needed to construct MSD circuits for distilling logical $T$-states of fidelity $1-\varepsilon_{out}$ (Equation (\ref{eq13PRL})) \cite{HH182}; which brings us to our second observation. In order to distill logical $T$-states of fidelity $1-\varepsilon_{out}$ (Equation (\ref{eq13PRL})), we only need logical $Y$-states of fidelity $1-\varepsilon^{'}_{out}$ with
\begin{equation}
\label{eqepsilonprimee}
\varepsilon^{'}_{out}=\dfrac{1}{O(poly(log(n)))}.
\end{equation}
In other words, the required output fidelity of the logical $Y$-states need not be as high as that of the logical $T$-states. In appendix \ref{appRHG2}, we show that this leads to a construction of a (constant-depth) non-adaptive MSD (analogous to how $\overline{C_1}$ is constructed) which takes as input a \emph{polynomial} number of logical ancillas, initialized in either noisy logical $Y$-states, $|\overline{+}\rangle$, or $|\overline{0}\rangle$, and which outputs enough logical $Y$-states of fidelity $1-\varepsilon^{'}_{out}$ needed in the subsequent distillation of logical $T$-states. This circuit, which we call $\overline{C^{'}_1}$ and which is based on concatenations of the Steane code \cite{RHG07}, is a constant depth Clifford quantum circuit composed of $\overline{CNOT}$ gates, and followed by non-adaptive $\overline{X}$ and $\overline{Z}$ measurements. $\overline{C^{'}_1}$, as $\overline{C_1}$, prepares the graph states needed for non-adaptive MSD via MBQC (as seen previously). Note that here we will use $\overline{CNOT}$ gates instead of $\overline{CZ}$ gates in order to prepare logical graph states, since these gates are more natural in the RHG construction \cite{RHG07}. The preparation procedure is essentially the same as that with $\overline{CZ}$ modulo some $\overline{H}$ gates, but these logical Hadamards can be absorbed into the initialization procedure (where some qubits become initialized in $|\overline{0}\rangle$ instead of $|\overline{+}\rangle$) and the measurements (where some $\overline{X}$ measurements after $\overline{C^{'}_1}$ are changed to $\overline{Z}$ measurements, and vice versa). The same holds for all other circuits based on graph states in this construction.
With the distillation of logical $Y$-states taken care of, we now summarize our constant depth construction based on a 3D NN architecture. The circuit of this construction is found in Figure \ref{fig3DNN}. It takes as input logical qubits initialized in the states $|\overline{+}\rangle$, $|\overline{0}\rangle$, $\overline{\rho_T}_{noisy}$, and $\overline{\rho_Y}_{noisy}$, and outputs a bit string $(\overline{s},\overline{x})$ sampled from the distribution $\tilde{\overline{D^{'}}}_2$ demonstrating a quantum speedup (see Theorem \ref{TH1} and Proposition \ref{prop1PRL}). Note that $\tilde{\overline{D^{'}}}_2$ is the fault-tolerant version of the distribution $D^{'}$ defined earlier. $\tilde{\overline{D^{'}}}_2$ is defined analogously to $\tilde{\overline{D}}_2$ in Equation (\ref{eq14PRL}), which is the fault-tolerant version of the distribution $D$ (Equation (\ref{eq4PRL})). Our 3D NN architecture is composed of five constant depth circuits acting on logical qubits, $\overline{C^{'}_1}$, $\overline{C^{'}_R}$, $\overline{C_1}$, $\overline{C_R}$, and $\overline{C_2}$. $\overline{C^{'}_1}$, $\overline{C_1}$, $\overline{C_R}$, and $\overline{C_2}$ are as defined previously, and $\overline{C^{'}_R}$ is a routing circuit, analogous to $\overline{C_R}$, which routes successfully distilled logical $Y$-states to be used in $\overline{C_1}$. Furthermore, all of these circuits, as well as the preparation of logical qubits, can be constructed by non-adaptive single-qubit $X$ and $Z$ measurements on physical qubits arranged in a 3D RHG lattice, whose preparation is constant depth and involves only nearest neighbor $CZ$ gates. These physical qubits are initialized in the (noisy) states $|+\rangle$, $|Y\rangle$, and $|T\rangle$ \cite{RHG07}. Our construction has two layers of interaction with a classical computer, needed to identify successfully distilled logical $Y$ and $T$-states respectively. 
The number of physical qubits needed is $O(n^{11}poly(log(n)))$; this calculation is performed in appendix \ref{appRHG2}. The additional overhead as compared to our 4D NN construction comes mainly from two sources: the partially invertible universal set condition \cite{MGDM19}, and the circuits $\overline{C^{'}_R}$ and $\overline{C^{'}_1}$ which arise as a result of needing to distill logical $Y$-states in the 3D RHG construction \cite{RHG07}.
As in our 4D NN architecture, the noise model we use here is the local stochastic quantum noise defined earlier \cite{BGK+19,fawzi2018constant}. Since the circuit needed to construct the 3D RHG lattice is composed of single and two-qubit Clifford gates acting on prepared qubits \cite{RHG07}, all errors of preparations and gate applications can be pushed, together with the measurement errors, until after the measurements; as seen previously. Because the circuit preparing the RHG lattice is constant depth, the overall local stochastic noise has a constant rate (see Equation (\ref{eq7PRL})), and therefore could be corrected with high probability for low enough (constant) error rates of preparation, gate application, and measurements \cite{BGK+19} (see appendix \ref{appRHG1} where we calculate an estimate of these error rates). The error correction, as in our 4D NN architecture, is completely classical and involves minimal weight matching \cite{edmonds1973operation}. This error correction is $poly(n)$-time and is performed at each of the two layers of interaction with the classical computer, as well as after the final measurements. Also, as in the 4D NN case, other $poly(n)$-time classical algorithms are included in the classical post processing; these are in order to identify succesful MSD instances, and identify the measurement patterns of the routing circuits. The classical computer at each layer of interaction as well as after the final measurements takes as input measurement results of qubits involved in the computation, as well as measured qubits in the vacuum region $V$. These vacuum qubits give the error syndrome at multiple steps in the computation, and are therefore needed for the minimal weight matching \cite{RHG07}.
$\textbf{\emph{Discussion}}-$ In summary, we have presented a construction sampling from a distribution demonstrating a quantum speedup, which is robust to noise. Our construction has constant depth in its quantum circuit, and can be thought of as a fault-tolerant version of the (noise free) constant depth quantum speedup based on generating and measuring graph states \cite{gao17,BHS+17,HBS+17,MGDM18,MGDM19,HHB+19}. We have shown how to implement this construction both by using a 4D architecture with nearest neighbor two-qubit gates, or by using a 3D architecture with nearest neighbor two-qubit gates. The circuits of each of these architectures interact at most twice with an (efficient) classical device while running, and have different requirements in terms of overhead of physical ancilla qubits, owing to the fact that they are based on two different constructions for fault-tolerance \cite{BGK+19,RHG07}.
The overheads are large in terms of the number of (physical) qubits, however these may be improved. In any case, our construction is considerably simpler than fault-tolerant full blown quantum computation where circuits are scaling in depth and many adaptive layers are required. Therefore our architectures demonstrate potential for interim demonstration of quantum computational advantage, which may be much more practical. Indeed, if one considers classical computation temporally free, our construction represents a constant time implementation of a sampling problem with fault-tolerance.
We note that although we have presented here a fault-tolerant construction for a specific graph state architecture \cite{MGDM18}, the same techniques can be applied to any of the sampling schemes based on making local $XY$ measurements from the set $\{0,\pi/2,\pi/4\}$ on regular graph states \cite{gao17,BHS+17,HBS+17,MGDM18,MGDM19}.
In particular it can be easily adapted to cases where the measurements are not fixed but chosen at random before the running of the circuit \cite{BHS+17,HBS+17,gao17}. This would essentially just fix the locations of the distilled $T$-states, but it could be done beforehand, and would not affect the efficiency of the routing circuits. This has the potential of relating the average-case hardness conjecture to that of other more familiar problems \cite{gao17,BHS+17,BMS16PRL}.
Our work also has potentially another interest, as it can alternatively be viewed as a constant depth quantum circuit which samples from an approximate unitary $t$-design \cite{DCE+09} fault-tolerantly. Indeed, our techniques can be used to directly implement a logical version of Equation (\ref{eq2PRL}), which samples from an approximate $t$-design. These $t$-designs have many useful applications across quantum information theory \cite{DCE+09,emerson2003pseudo,hayden2004randomizing,matthews2015testing,muller2015thermalization,hayden2007black}.
Several interesting approaches for optimization may be considered. One could think of using different quantum error correcting codes, such as those of \cite{LAR+11,fawzi2018constant}, to decrease the overhead of physical qubits. One could also aim to optimize the overhead of both gates and physical qubits of the MSD by using techniques similar to those of \cite{CC19,CN20}.
The ability to efficiently verify quantum speedup is also an important goal. Although this question has already been pursued in the regime of fault-tolerance in \cite{KT19}, and the techniques developped there are directly applicable to our 3D NN architecture; it would be interesting to develop verification techniques more naturally tailored to the graph state approach \cite{HDE+06,RB01} and MBQC \cite{RB01,RBB03}, which we use heavily here. In this direction, the work of \cite{MK18,TMM+19} can be used for this purpose when the measurements (both Clifford and non-Clifford) as well as the $CZ$ and Hadamard gates (needed for the preparation of the graph states \cite{HDE+06}) are assumed $perfect$ (noiseless). Indeed, in this case the verification amounts to verifying that the graph state was correctly prepared, for which \cite{MK18,TMM+19} provide a natural path to do so, by giving good lower bounds (with high confidence) on the fidelity (with respect to the ideal graph state corresponding to the sampling problem) of the prepared graph state in the case where a sufficient amount of stabilizer tests pass \cite{MK18,TMM+19}. These lower bounds on the fidelity, tending asymptotically to one \cite{MK18,TMM+19}, allow one to verify that quantum speedup is being observed, as long as one trusts the local measurement devices (which, being small, can be checked by other means efficiently). This verification of quantum speedup can be done by using the standard relation between the fidelities of two quantum states (which in our case are the ideal state and the state accepted by the verification protocol) and the $l_1$-norm of the two output probability distributions corresponding to measuring the qubits of these two states \cite{NC2000}.
These techniques, however, do not easily extend to the case where the measurements and gates needed for preparation are noisy, since for graph states of size $m$, even for an arbitrarily small (but \emph{constant}, for example below the threshold for fault-tolerant computing) noise strength, the verification protocol might fail (not accept a good state) in the asymptotic ($m \to \infty$) limit (see for example \cite{TMM+19} where the verification accepts with probability one asymptotically only if the noise strength scales as $1/poly(m)$). We leave this problem for future investigation.
\begin{acknowledgments} We thank David Gosset, Elham Kashefi, Theodoros Kapourniotis, Anthony Leverrier, Ashley Montanaro, Micha\l{} Oszmaniec, and Peter Turner for fruitful discussions and comments. The Authors would like to acknowledge the National Council for Scientific Research of Lebanon (CNRS-L) and the Lebanese University (LU) for granting a doctoral fellowship to R. Mezher. We acknowledge support of the ANR through the ANR-17-CE24-0035 VanQute project. \end{acknowledgments} \nocite{*}
\onecolumngrid
\appendix
\section{Size of encoding and intermediate case hardness of sampling.} \label{APPC}
Here we prove our statements regarding the sufficiency of the size of logical encoding $l$ (Equation (\ref{eq10PRL})) and the proof of hardness in the intermediate case where we have noise in the circuit, but assume perfect $T$-states (Equation (\ref{eq11PRL})). As mentioned in the main text, the probability $p_f$ that the classical decoding fails to correct an error $E \sim \mathcal{N}(p)$ affecting a surface code composed of $l$ physical qubits is given by \cite{DKL+02,BGK+19,fowler2012proof} \begin{equation}
\label{eqappC1}
p_f=e^{-O(cd)}=e^{-O(\sqrt{l})}, \end{equation} when the error rate $p$ is below the threshold for fault-tolerant computing with the surface code \cite{DKL+02}. We will assume, as mentioned in the main text, that the error rates of preparation, single and two qubit gates, and measurements in our construction are small enough, that is, below the threshold of fault-tolerant computing with the surface code, and classical postprocessing is instantaneous. We will also assume that the probabilities of failure of the classical decoding algorithms in each logical qubit are independent (see \footnote{This assumption may seem strong, but it is actually very mild and has no effect on our end result. To see this, suppose we drop this assumption, then the probability of success of all $k.n$ decodings should now be calcuated by a union bound. From the properties of local stochastic noise (namely that local stochastic noise on a subset of qubits of the system is still local stochastic with the same rate \cite{BGK+19}) a decoding of a logical qubit succeeds (is able to identify and correct for the error) with probability $p_{single}=1-p_f=1-e^{-O(\sqrt{l})}$ (when the error rates of all local stochastic noise in our construction are adequately low, i.e below the threshold of fault-tolerant computing with the surface code), therefore the probability that all $k.n$ decodings succeed is given by $P=1-k.n+k.n.p_{single}=1-k.n.e^{-O(\sqrt{l})}$, by a standard bound on the intersection of $k.n$ events derived from a union bound. The assumption we make in the main text results in a good approximation of $P$, and is simpler to state (which is why we used it in the main text). Finally, note that this does not mean that errors between physical qubits of two entangled logical qubits are uncorrelated. 
Indeed, the correlation between these qubits is accounted for in the propagation rules of local stochastic noise \cite{BGK+19}, since forward propagating local stochastic noise in Clifford circuits composed of single and two-qubit gates generally results in local stochastic noise with higher error rate \cite{BGK+19}.}). Our construction involves classical decoding of the measurement results of $O(k.n)$ logical qubits \footnote{Actually, it is something like $2.k.n$ if we include decoding of measured logical qubits of the Bell states obtained at the end of the single shot procedure of \cite{BGK+19} (see $[57]$). This changes nothing in the analysis we have done, so we chose to omit it in the main text for simplicity.}. After decoding, the probability of observing outcome $(\overline{s},\overline{x})$ is given by \begin{equation}
\label{eqappC2}
\tilde{\overline{D}_1}(\overline{s},\overline{x})=(1-e^{-O(\sqrt{l})})^{O(k.n)}\overline{D}(\overline{s},\overline{x})+\sum_{i}p_{i}\overline{D}_i^{e}(\overline{s},\overline{x}) \end{equation}
where $\overline{D}_i^{e}$ is a distribution corresponding to sampling from the outputs $(\overline{s},\overline{x})$ of $|\overline{G'}\rangle$ in the presence of local stochastic noise, and where the decoding algorithm has failed in at least one logical qubit. $\sum_{i}p_i\overline{D}_i^{e}(\overline{s},\overline{x})$ enumerates all possible ways in which decoding on the $k.n$ logical qubits of $|\overline{G'}\rangle$ can fail. Note that \begin{equation*}
\sum_{i}p_i= 1-(1-e^{-O(\sqrt{l})})^{O(k.n)}. \end{equation*} Now, \begin{equation} \label{eqappCf}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}_1}(\overline{s},\overline{x})-\overline{D}(\overline{s},\overline{x})| = \sum_{\overline{s},\overline{x}}|(1-e^{-O(\sqrt{l})})^{O(k.n)}\overline{D}(\overline{s},\overline{x})+\sum_{i}p_{i}\overline{D}_i^{e}(\overline{s},\overline{x})-\overline{D}(\overline{s},\overline{x})| \leq 2(1-(1-e^{-O(\sqrt{l})})^{O(k.n)}). \end{equation} The bound on the right hand side is obtained from a triangle inequality and by noting that $\sum_{\overline{s},\overline{x}}\overline{D}(\overline{s},\overline{x})=\sum_{\overline{s},\overline{x}}\overline{D}_i^{e}(\overline{s},\overline{x})=1$. Choosing \begin{equation}
\label{eqblocksize}
l=r.log^{2}(n)=O(log^2(n)), \end{equation} where $r$ is a positive constant chosen large enough so that the following inequality holds \begin{equation}
\label{eqcondblocksize}
deg(e^{O(\sqrt{l})}) > deg(k.n), \end{equation} where $deg(.)$ represents the highest power of $n$ in the expressions of $e^{O(\sqrt{l})}$ and \\ $O(k.n)$. We can now use (for large enough $n$) the approximation \begin{equation} \label{eqappCapprox} 2\big(1-(1-e^{-O(\sqrt{l})})^{k.n}) \sim 2e^{-O(\sqrt{l})}.k.n=O(\dfrac{1}{n^{\beta}}), \end{equation} with $\beta=deg(e^{O(\sqrt{l})})-deg(k.n)$. Plugging Equation (\ref{eqappCapprox}) in Equation (\ref{eqappCf}) we get \begin{equation*}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}_1}(\overline{s},\overline{x})-\overline{D}(\overline{s},\overline{x})| \leq O(\dfrac{1}{n^{\beta}})=\dfrac{1}{poly(n)}. \end{equation*} This completes the proof of Equations (\ref{eq10PRL}) and (\ref{eq11PRL}).
\section{Bounding $\tilde{\overline{D}}_2(\overline{s},\overline{x})$ and Properties of $zMSD$} \label{app zMSD}
\subsection{Bounding $\tilde{\overline{D}}_2(\overline{s},\overline{x})$ (proof of Equation (\ref{eq14PRL}))} Let \begin{equation}
\label{eqappD1}
\tilde{\rho}_{|\overline{G'}\rangle}=\bigotimes_{a\in V} \overline{H}_a \bigotimes_{b \in V_2}\overline{Z(\pi/2)}_b \prod_{\{i,j\}\in E}\overline{CZ}_{ij} \bigotimes_{c\in V/V_1}\overline{H}_c\overline{|0}\rangle_c \langle \overline{0}|_c\overline{H}_c \bigotimes_{d\in V_1}\overline{\rho_T}_{out} \prod_{\{i,j\}\in E}\overline{CZ}_{ij}\bigotimes_{b \in V_2}\overline{Z(-\pi/2)}_b \bigotimes_{a\in V} \overline{H}^{\dagger}_a. \end{equation}
$\tilde{\rho}_{|\overline{G'}\rangle}$ is exactly the same as $|\overline{G'}\rangle$, but with each single logical qubit state $|\overline{T}\rangle$ replaced with $\overline{\rho_T}_{out}$, the output of a succesful instance of $zMSD$ (Equations (\ref{eqPRL12}) and (\ref{eq13PRL})). The probability $\tilde{\overline{D}}_2(\overline{s},\overline{x})$ can be calculated by using the following simple observation \begin{equation}
\label{eqappD2}
\tilde{\overline{D}}_2(\overline{s},\overline{x})=p(\{\overline{s},\overline{x}\}\cap ne)+p(\{\overline{s},\overline{x}\}\cap e), \end{equation}
where $p(\{\overline{s},\overline{x}\}\cap ne)$ is the probability of observing outcome $\{\overline{s},\overline{x}\}$ when no logical error (ne) has occured (that is, that classical decoding did not fail in any logical qubit) , neither in the distillation process, nor in the routing, nor in constructing and measuring $\tilde{\rho}_{|\overline{G'}\rangle}$. $p(\{\overline{s},\overline{x}\}\cap e)$ is the probability of observing $\{\overline{s},\overline{x}\}$ when the decoding algorithm has failed (e) at least on one logical qubit. We will assume that in the case where no logical error has occured, for large enough $n$, the probability $p_{succ}$ (Equation (\ref{eqpsuccprl})) of distilling enough ($O(n^2))$ states $\overline{\rho_T}_{out}$ to construct $\tilde{\rho}_{|\overline{G'}\rangle}$ is equal to one. This is a reasonable assumption since the exponential term in $p_{succ}$ varies much more rapidly than the polynomial terms in our bounds, for large enough $n$. Now, \begin{equation*}
p(\{\overline{s},\overline{x}\}\cap ne)=p(ne).p( \{\overline{s},\overline{x}\}| ne).
\end{equation*}
\begin{equation}
\label{eqpne}
p(ne)=(1-e^{-O(\sqrt{l})})^{O(n^5log^2(n))},
\end{equation} is the probability that the decoding does not fail on all our $O(n^5log^2(n))$ logical qubits (logical qubits of all copies of $zMSD$, the routing circuit, as well as
$\tilde{\rho}_{|\overline{G'}\rangle}$).
Now, \begin{equation} \label{eqpsxne}
p(\{\overline{s},\overline{x}\}|ne)=\sum_{i_1,...,i_{k.n.l}}\langle i_1...i_{k.n.l} |\tilde{\rho}_{|\overline{G'}\rangle}| i_1...i_{k.n.l}\rangle, \end{equation}
where $|i_1...i_{k.n.l}\rangle$ is a state of $k.l.n$ physical qubits, corresponding to the measurement of the $k.n$ logical qubits of $\tilde{\rho}_{|\overline{G'}\rangle}$, which when decoded gives rise to the bit string $(\overline{s},\overline{x})$.
\begin{equation} \label{eqpsxe}
p(\{\overline{s},\overline{x}\}\cap e)=\sum_{j}p_{e_j}p(\{\overline{s},\overline{x}\}|e_j), \end{equation} where the right hand of Equation (\ref{eqpsxe}) enumerates all possible ways in which decoding on the $O(n^5log^2(n))$ logical qubits could fail. Note that \begin{equation}
\label{eqboundpsxe}
\sum_{\overline{s},\overline{x}}p(\{\overline{s},\overline{x}\}\cap e) \leq \sum_jp_{e_j} \leq 1-p(ne) \leq 1-(1-e^{-O(\sqrt{l})})^{O(n^5log^2(n))}. \end{equation} Replacing Equations (\ref{eqpne})-(\ref{eqpsxe}) in Equation (\ref{eqappD2}) we get
\begin{equation}
\label{eqappD7}
\tilde{\overline{D}}_2(\overline{s},\overline{x})=(1-e^{-O(\sqrt{l})})^{O(n^5log^2(n))}p(\{\overline{s},\overline{x}\}|ne)+\sum_{j}p_{e_j}p(\{\overline{s},\overline{x}\}|e_j). \end{equation} By using Equations (\ref{eqboundpsxe}) and (\ref{eqappD7}) as well as a triangle inequality, we get that \begin{equation}
\label{eqappD8}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}}_2(\overline{s},\overline{x})-p(\{\overline{s},\overline{x}\}|ne)| \leq 2(1-(1-e^{-O(\sqrt{l})})^{O(n^5log^2(n))}). \end{equation} As in appendix \ref{APPC}, choosing $$l =r.log^2(n),$$ but now with $r$ chosen so that $$deg(e^{O(\sqrt{l})}) > deg(O(n^5log^2(n))),$$ we get that \begin{equation} \label{eqappD9}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}}_2(\overline{s},\overline{x})-p(\{\overline{s},\overline{x}\}|ne)| \leq \dfrac{1}{poly(n)} \end{equation}
by using the same approximations as in appendix \ref{APPC} to bound $2(1-(1-e^{-O(\sqrt{l})})^{O(n^5log^2(n))})$. Now, remark that the fidelity between $\tilde{\rho}_{|\overline{G'}\rangle}$ and $|\overline{G'}\rangle$, denoted as $F$, satisfies (from Equations (\ref{eqappD1}) and (\ref{EQN: logical G'})) \begin{equation} \label{eqappD10}
F \geq (1-\varepsilon_{out})^{O(n^2)}, \end{equation} with $\varepsilon_{out}$ given by Equation (\ref{eq13PRL}). Furthermore, the probabilities $\overline{D}(\overline{s},\overline{x})$
and $p(\{\overline{s},\overline{x}\}|ne)$ satisfy \cite{NC2000} \footnote{Actually, this relation holds for the $l_1$-norm distance between the probability distributions over physical qubits. However, as the absolute value of the sum is less than the sum of absolute values, this relation also holds for the probabilities in Equation (\ref{eqappD11}).} \begin{equation} \label{eqappD11}
\sum_{\overline{s},\overline{x}}|\overline{D}(\overline{s},\overline{x})-p(\{\overline{s},\overline{x}\}|ne)| \leq 2\sqrt{1-F^2}. \end{equation} When $\varepsilon_{out}$ satisfies Equation (\ref{eq13PRL}), $$2\sqrt{1-F^2} \leq 2\sqrt{1-(1-\varepsilon_{out})^{O(n^2)}} \sim 2 \sqrt{O(n^2)\varepsilon_{out}} \leq \dfrac{1}{poly(n)}.$$ Plugging this into Equation (\ref{eqappD11}), then using Equations (\ref{eqappD9}) and (\ref{eqappD11}) and a triangle inequality, we obtain \begin{equation*}
\sum_{\overline{s},\overline{x}}|\tilde{\overline{D}_2}(\overline{s},\overline{x})-\overline{D}(\overline{s},\overline{x})| \leq \dfrac{1}{poly(n)}. \end{equation*} This completes the proof of Equation (\ref{eq14PRL}).
\subsection{Properties of $zMSD$} \label{APP subsec zMSD}
$zMSD$ implements non-adaptively $z$ iterations of the MSD protocol of Theorem 4.1 in \cite{HHP+17}. Note that in the protocol of \cite{HHP+17}, the MSD circuit was for magic states of the form $|H\rangle=cos(\pi/8)|0\rangle+sin(\pi/8)|1\rangle$ whereas in our case we need distillation circuits for $T$-states $|T\rangle $ defined in the main text. However, since $HZ(-\pi/2)|H\rangle=e^{-i\pi/8}|T\rangle$, the circuits in \cite{HHP+17} can be adapted to our case by adding a constant depth layer of $H$ and $Z(-\pi/2)$ gates, whose logical versions can be done fault-tolerantly and also in constant depth in our construction . We call $1MSD$ a circuit which implements non-adaptively one iteration of the protocol of Theorem 4.1 in \cite{HHP+17}. Note that both $zMSD$ and $1MSD$ will be based on non-adaptive MBQC. We will begin by calculating the number of qubits of $1MSD$.
In Theorem 4.1 in \cite{HHP+17}, the MSD circuit takes as input $O(d)$ qubits, where $d$ is a positive integer, uses $O(d^2)$ noisy input $T$-states with fidelity 1-$\varepsilon$ with respect to an ideal (noiseless) $T$-state, and outputs $O(d)$ distilled $T$-states with fidelity 1-$O(\varepsilon^{d})$ with respect to an ideal $T$-state (note that the ratio of the number of noisy input $T$-states to the number of distilled output $T$-states is $\sim d$ for large enough constant $d$ \cite{HHP+17}.). Each time a noisy $T$-state is inserted it affects a noisy $T$-gate, inducing a so-called $T$-gate depth \cite{HHP+17}. The depth of the entire circuit is $O(d^2.log(d))$, where $O(d)$ is the $T$-gate depth, and $O(d.log(d))$ is the depth of the Clifford part of the circuit, which is composed of long-range Cliffords \cite{HHP+17}. Therefore, the MSD circuit is an $O(d)$-qubit circuit of depth $O(d^2.log(d))$. In order to implement this circuit on a regular graph state (for example, the cluster state \cite{RB01}), one must transform the Clifford circuit composed of long range gates, to that composed of nearest neighbor and single qubit Clifford gates, since these single qubit and nearest neighbor two-qubit gates can be implemented by measuring $O(1)$ qubits of a cluster state in the $X$ and $Y$ bases \cite{RB01,RBB03}. An $m$-qubit Clifford gate can be implemented by an $O(m^2)$-depth circuit composed only of gates from the set $\{CZ_{ij},H,Z(\pi/2)\}$ \cite{gottesman1997stabilizer}. Furthermore, $CZ_{ij}$ could be implemented by a circuit of depth $O(i-j)$ composed of nearest neighbor CZ gates \cite{mantri2017universality}. The same arguments hold in the logical picture by replacing $H$, $CZ$, $Z(\pi/2)$, and noisy input $T$-states with their logical versions $\overline{H}$, $\overline{CZ}$, $\overline{Z(\pi/2)}$, and $\overline{\rho_T}_{noisy}$. $m=O(d)$ in our case, thus the number of columns of the cluster state needed to implement $1MSD$ is \begin{equation}
\label{eqnumberofcolumns}
n_{c}=O(d^2log(d)).O(d^2).O(d)=O(d^5log(d)), \end{equation} where the $O(d^2log(d))$ comes from the depth of the MSD circuit with long range Cliffords, $O(d^2)$ is the depth needed to implement an arbitrary Clifford using $\overline{H}$ gates, $\overline{Z(\pi/2)}$ gates, and long range $\overline{CZ}$'s, and the $O(d)$ is an overestimate and represents the number of nearest neighbor $\overline{CZ}$'s needed to give a long range $\overline{CZ}$. The total number of qubits of the cluster state implementing $1MSD$ is then \begin{equation}
\label{eqtotalqubits}
n_{T}=O(d).n_{c}=O(d^{6}.log(d)). \end{equation} $zMSD$ can be thought of as a concatenation of $z$ layers of $1MSD$, where the output of layer $j$ is the input of layer $j+1$. Because the noisy input $T$-states in the protocol of \cite{HHP+17} are injected at different parts of the circuit, this means that the output qubits of layer $j$ should be connected to layer $j+1$ at different positions by means of long range $\overline{CZ}$ gates. Therefore, the graph state implementing $zMSD$ can be seen as cluster states composed of logical qubits, and connected by long range $\overline{CZ}$ gates, as shown in Figure (\ref{fig1}). One could equivalently replace these long range $\overline{CZ}$ gates with a series of $\overline{SWAP}$ gates, which can be implemented (up to Pauli correction by means of non-adaptive $\overline{X}$ and $\overline{Z}$ measurements) on a 2D cluster state with only nearest neighbor $\overline{CZ}$ gates \cite{RB01,RBB03}. Because these long range $\overline{CZ}$ gates act on qubits separated by a distance $poly(d)$, the introduction of $\overline{SWAP}$ gates introduces an additional (constant) overhead of $O(poly(d))$ qubits to $n_T$, but makes the construction of $1MSD$ implementable on a 2D cluster state with only nearest neighbor $CZ$ gates.
\begin{figure}
\caption{Part of the graph state implementing the circuit $zMSD$. Blue filled circles represent logical qubits in the $|\overline{+}\rangle$ state, which when measured implement the Clifford part of the MSD protocol of Theorem 4.1 in \cite{HHP+17}. The green filled circles are noisy input $T$-states $\overline{\rho_T}_{noisy}$. Purple filled circles are the output qubits of the first layer of $zMSD$. When $zMSD$ is successful, these qubits are in a state with fidelity $1-O(\varepsilon^d)$ with respect to the ideal $T$-state $|\overline{T}\rangle$. The orange lines are $\overline{CZ}$ gates. Note that the output qubits of the first layer (purple circles) are connected to the second layer at different positions by means of long range $\overline{CZ}$ gates. These long range $\overline{CZ}$ gates can be implemented in constant depth, since they act each on distinct pairs of qubits. Also, as mentioned in the main text in this appendix, these long range $\overline{CZ}$ gates can be replaced by a series of $\overline{SWAP}$ gates making this construction a constant-depth 2D construction with only nearest-neighbor $\overline{CZ}$ gates. Measurements consist of non-adaptive $\overline{X}$ measurements, $\overline{Z}$ measurements, as well as $\overline{Y}$ measurements. As described in the main text, we could equivalently perform all measurements in $\overline{Z}$, by introducing additional constant depth layers of $\overline{H}$ and $\overline{Z(\pi/2)}$ gates.}
\label{fig1}
\end{figure}
The first layer consists of $N$ copies of cluster states implementing $1MSD$ (see Figure \ref{fig1}), and outputs, when successful, $N.O(d)=\dfrac{N}{d}.O(d^2)$ $T$-states with fidelity $1-C.\varepsilon^d$ with respect to $|\overline{T}\rangle$, $C$ being a positive constant \cite{HHP+17}. These $T$-states are the input of the second layer which consists of $\dfrac{N}{d}$ copies of cluster states implementing $1MSD$, and outputs, when successful, $\dfrac{N}{d}.O(d)=\dfrac{N}{d^2}.O(d^2)$ $T$-states with fidelity $1-C.(C.\varepsilon^{d})^{d}=1-C^{d+1}.\varepsilon^{d^2}$ with respect to $|\overline{T}\rangle$. Similarly, the $z$th layer will consist of $\dfrac{N}{d^{z-1}}$ copies, and will output, when successful, $\dfrac{N}{d^{z-1}}.O(d)$ $T$-states with error \begin{equation} \label{eqepsilonout} \varepsilon_{out} \sim C^{d^{z-1}}.\varepsilon^{d^z}, \end{equation}
with respect to $|\overline{T}\rangle$. The total number of qubits of the graph state implementing $zMSD$ is then given by \begin{equation}
\label{eqtotalqubitsnmsdl}
n_{NMSD}=(N+\dfrac{N}{d}+\dfrac{N}{d^2}+...).n_T=O(N). \end{equation} $z$ is the last layer, therefore $\dfrac{N}{d^{z-1}} = 1$ and thus \begin{equation} \label{eqN1}
N = d^{z-1}. \end{equation} For a successful instance of $zMSD$, in order to arrive at Equation (\ref{eq13PRL}), choose $$d^z \geq O(log(n));$$ this implies that each copy of $zMSD$ is composed of $$n_{NMSD}=O(N)=O(d^{z-1}) \geq O(log(n)),$$ logical qubits, as mentioned in the main text. Indeed, replacing $d^z = a.log(n)$, with $a$ a positive constant, in Equation (\ref{eqepsilonout}) yields $$\varepsilon_{out} = \dfrac{1}{n^{a.\alpha}},$$ by a direct calculation, where $\alpha=\dfrac{log(\dfrac{1}{C.\varepsilon^d})}{d}$ while noting that $C.\varepsilon^d < 1$ \cite{HHP+17}. Equation (\ref{eq13PRL}) is therefore obtained for an appropriate choice of $a$ or $\varepsilon$.
Now, we will calculate the probability $p_{szMSD}$ of a single successful instance of $zMSD$. We will assume, rather pessimistically, that only one string of non-adaptive measurement results of $zMSD$ corresponds to a successful instance. This string we will take, by convention, to be the one where all the measurement binaries (after decoding ) are zero. In this case, \begin{equation}
\label{eqpszMSD}
p_{szMSD} \geq \dfrac{1}{2^{n_{NMSD}}}. \end{equation} Note that the lower bound is actually higher than that in Equation (\ref{eqpszMSD}) for two reasons. The first is that not all qubits of the graph state implementing $zMSD$ are measured. Indeed, the output qubits of the last layer of $zMSD$ are unmeasured and, in the case when $zMSD$ is successful, are in the state $\overline{\rho_T}_{out}$. The second reason is that some of the measurements correspond, in the successful case, to post-selections which in the protocol of \cite{HHP+17} occur with probability greater than $1/2$. Indeed, for small enough $\varepsilon$, the acceptance rate of the protocol of \cite{HHP+17} is approximately 1. Now, $\varepsilon_{out} = \dfrac{1}{n^{\beta}}$, with $\beta \geq 4$, and $n_{NMSD}=\gamma.d^z$ (Equations (\ref{eqtotalqubitsnmsdl}) and (\ref{eqN1})), with $\gamma$ a positive constant. By choosing $$\varepsilon=\dfrac{e^{-\gamma.\beta.log(2)}}{C^{1/d}},$$ and performing a direct calculation using Equation (\ref{eqepsilonout}), we get that $n_{NMSD}=log_2(n)$. Therefore, \begin{equation}
\label{eqpszmsd2}
p_{szMSD} \geq \dfrac{1}{n}. \end{equation}
One might ask, why do other MSD protocols like those of \cite{BK05,BH12}, for example, not work (using our techniques)? The answer to this question has to do with the number of noisy input $T$ states $n_{noisy}$ with fidelity $1-\varepsilon$ with respect to an ideal $T$-state, needed to distill a single $T$-state of sufficiently high fidelity $1-\varepsilon_{out}$ with respect to an ideal $T$-state. $n_{noisy}$ is usually given by \cite{BK05} \begin{equation} \label{eqnnoisy} n_{noisy}=O \Big (log^{\gamma}(\dfrac{1}{\varepsilon_{out}}) \Big ). \end{equation} $\gamma$ is a constant which depends on the error correcting code from which the MSD protocol is derived \cite{HH18}. In the protocol of \cite{HHP+17} (as well as those in \cite{HH182}), $\gamma \sim 1$. Whereas for the Steane code for example \cite{RHG07}, which we used to distill $Y$-states in our 3D NN architecture, $\gamma >1$. $\gamma \sim 1$ in the protocol of \cite{HHP+17} is what allowed us to get a $p_{szMSD}$ of the form of Equation (\ref{eqpszmsd2}). On the other hand, the protocols of \cite{BK05,BH12,RHG07} have a $\gamma >1$, which leads to a lower bound of $p_{szMSD}$ which looks like $1/qp(n)$- by using similar arguments for calculating $n_{NMSD}$- where $qp(n)$ is $quasi$-$polynomial$ in $n$ (if one requires $\varepsilon_{out}=1/poly(n)$). Indeed, $N$ is proportional to $\alpha.n_{noisy}$, where $\alpha$ is the number of output $T$-states with error $\varepsilon_{out}$. Therefore, it follows that $n_{NMSD}=O(N)=O(n_{noisy})$, and that $2^{n_{NMSD}}=2^{O(n_{noisy})}$, which is a quasi-polynomial when $\gamma > 1$. 
This would mean, using our proof techniques, that we would need a quasi-polynomial in $n$ (which is greater than polynomial in $n$) number of $zMSD$ copies to get a succesful instance, thereby taking us out of the scope of what is considered quantum speedup \footnote{Since quantum speedup is usually defined with respect to quantum devices using polynomial quantum resources \cite{NC2000}.}. Other protocols which we could have used and could have worked are those of \cite{Jones13,HH182} which gives $\gamma \sim 1$, or that of \cite{HH18} which gives $\gamma < 1$, albeit with a huge constant overhead of $2^{58}$ qubits \cite{HH18}.
\subsection{Proof of Equation (\ref{eqpsuccprl})} \label{sec exp fail} We begin by calculating $p_{fail}=1-p_{succ}.$ Suppose we have constructed $M$ copies of $zMSD$, the probability $p_{fail}$ of not getting at least $O(n^2)$ succesful instances of $zMSD$ is given by \begin{equation}
\label{eqappD31}
p_{fail}=\sum_{m=0,...,O(n^2)}{M \choose O(n^2)-m } p^{O(n^2)-m}_{szMSD}(1-p_{szMSD})^{M-O(n^2)+m}. \end{equation} Since $p_{szMSD} \leq 1-p_{szMSD}$ (Equation (\ref{eqpszmsd2})), we have \begin{equation}
\label{eqappD32}
p_{fail} \leq \sum_{m=0,...,O(n^2)}{M \choose O(n^2)-m } (1-p_{szMSD})^{M} \end{equation} Taking $M > 2O(n^2),$ \begin{equation}
\label{eqappD33}
\sum_{m=0,...,O(n^2)}{M \choose O(n^2)-m } \leq O(n^2) {M \choose O(n^2)}. \end{equation} Replacing Equation (\ref{eqappD33}) in Equation (\ref{eqappD32}), and using Equation (\ref{eqpszmsd2}), we get \begin{equation}
\label{eqappD34}
p_{fail} \leq O(n^2) {M \choose O(n^2)}(1-\dfrac{1}{n})^{M}. \end{equation} Also, $${M \choose O(n^2)}<M^{O(n^2)}.$$ Replacing this in Equation (\ref{eqappD34}) we get \begin{equation}
p_{fail} \leq O(n^2) M^{ O(n^2)}(1-\dfrac{1}{n})^{M}. \end{equation} Noting that for large enough $n$ $$(1-\dfrac{1}{n})^{n} \sim \dfrac{1}{e},$$ and taking $\dfrac{M}{n}=p(n).O(n^2)$ \begin{equation}
p_{fail} \leq O(n^2)\big(\dfrac{M}{e^{p(n)}}\big)^{O(n^2)}. \end{equation} Choosing $p(n) \geq \log(M)=O(\log(n))$, we get that $\dfrac{M}{e^{p(n)}} \leq c$, with $c<1$ a constant. In this case, $$p_{fail} \leq O(n^2)c^{O(n^2)} \leq O(n^2)\dfrac{1}{e^{O(n^2)}} \sim \dfrac{1}{e^{O(n^2)}},$$ for large enough $n$. Thus, $$p_{succ} \geq 1-\dfrac{1}{e^{O(n^2)}}.$$ Note that for our choice of $p(n) \geq O(\log(n))$, we get that $M=O(n^3)p(n) \geq O(n^3\log(n)).$ This completes the proof of Equation (\ref{eqpsuccprl}).
\section{The routing circuit $\overline{C_R}$} \label{APP routing}
The main idea of the MBQC based routing is to use the fact that in a graph state, measurements allow us to etch out desired paths. In particular performing a $\overline{Z}$ measurement removes a vertex and its edges \cite{HDE+06}, as illustrated in Figure \ref{FIG Z measurements on GS}. Once a path is etched out, $\overline{X}$ measurements teleport the state along it. Given $m$ systems to route out of a possible $p$, a grid of size $2pm$ is sufficient for unique paths to be etched out. An example of how this works for a grid is illustrated in Figure \ref{FIG routing} for $m=2$ and $p=7$ \footnote{Again, we can measure all qubits only in $\overline{Z}$ if we add a constant-depth layer of $\overline{H}$ gates to the graph state.}. In our case, we have a total of $O(n^3log(n))$ outputs of all the $zMSD$, of which $O(n^2)$ will be successful, hence the number of ancilla we require scales as $O(n^5log(n))$.
\begin{figure}
\caption{Performing a $Z$ measurement on a vertex of a graph state removes it, up to local Pauli corrections.}
\label{FIG Z measurements on GS}
\end{figure}
\begin{figure}
\caption{Routing via etching out from a grid. The purple vertices on the left represent outputs of the $zMSD$. a) The filled purple vertices are identified as the successful distilled $T$ states from previous measurement results, and the paths to the outputs are identified. b) All other qubits are measured out in $Z$ and the succesful outputs are teleported via $X$ measurements.}
\label{FIG routing}
\end{figure}
It is worth explaining why the overall noise on the routed $\overline{\rho_T}_{out}$ will still be local stochastic with constant rate. Firstly, note that $\overline{C_R}$ is a constant depth Clifford circuit composed of single and two-qubit Clifford gates acting on outputs of $zMSD$ circuits, and therefore all local stochastic noise after each depth one step of this circuit can be treated as a single local stochastic noise $E_{d} \sim \mathcal{N}(m)$ with constant rate $m$ at the end of this circuit, as in Equation (\ref{eq7PRL}) \cite{BGK+19}. The outputs of $zMSD$ circuits are acted upon by local stochastic noise with constant rate (as seen earlier overall noise on $zMSD$ is local stochastic with constant rate, therefore noise acting on a subset of qubits of $zMSD$ (the outputs) is also local stochastic with the same rate \cite{BGK+19}), and therefore can be incorporated as preparation noise (analogous to $E_{prep}$ in Equation (\ref{eq7PRL})) with $E_{d}$ to give a net local stochastic noise $E \sim \mathcal{N}(c)$ with constant rate $c$ acting on qubits of $\overline{C_R}$. After measurements, the unmeasured outputs of $\overline{C_R}$ will also be acted upon by $E^{'} \sim \mathcal{N}(c)$ which is local stochastic with same rate as $E$, but with smaller support, from the properties of local stochastic noise \cite{BGK+19}.
\section{Error correction in our 3D NN architecture} \label{appRHG1} In this section we will show how the probability of failure $p_{fail}$ of decoding in our 3D NN architecture can be made $polynomially$ low. $p_{fail}$ here is equivalent to $1-p(ne)=1/poly(n)$ in appendix \ref{app zMSD}. Thus; obtaining $p_{fail}=1/poly(n)$ allows us to recover the same results for error correction as the 4D NN architecture. We will assume that classical postprocessing is instantaneous. We will work with local stochastic noise and, as discussed in the main text, deal with a single local stochastic noise $E \sim \mathcal{N}(q)$ which is pushed forward until after the measurements \cite{BGK+19} (see Equation (\ref{eq7PRL})). The (constant) rate $q$ satisfies $q \leq 0.0075$ \cite{RHG07}, that is, it is below the threshold of fault-tolerant computing in the RHG construction. As argued in \cite{DKL+02}, the probability $p_{fail}$ can be calculated by calculating the number of ways in which the minimal weight matching results in a non-trivial error, that is, an error stretching across at least $L_{m}$ qubits, where $L_{m}$ is the minimum between the perimeter of the defect and the (minimal) distance between two defects \cite{RHG07,DKL+02}. $p_{fail}$ can be calculated by using the following relation \cite{DKL+02}
\begin{equation}
\label{eqSAP1}
p_{fail} \leq P(n) \sum_{L \geq L_{m}} n(L).prob(L). \end{equation} This relation simply counts the number of ways in which a relevant non-trivial error can occur; this type of error is restricted to errors induced by self-avoiding walks (SAWs) on the lattice, as argued in \cite{DKL+02}. $n(L)=6\cdot 5^{L-1}$ calculates all possible SAWs of total length $L$ originating from a fixed point in the lattice \cite{DKL+02}, $P(n)=poly(n)$ is the total number of fixed points (i.e., physical qubits) on the lattice, since SAWs can originate at any fixed point, and $prob(L) \leq (4q)^{\frac{L}{2}}$ is the probability that the minimal matching induces an error chain (SAW) of length $L$; this probability is calculated using the techniques in \cite{DKL+02}, but adapted to local stochastic noise (whereas independent depolarizing noise acting on each qubit was considered in \cite{DKL+02}). The sum is over all non-trivial errors of length $L_m \leq L \leq poly(n)$. Noting that \begin{equation} \label{eqSAP2}
P(n)\sum_{L \geq L_{m}} n(L).prob(L) \leq poly(n)(poly(n)-L_m)\dfrac{6}{5}(100q)^{\frac{L_{m}}{2}} \leq poly^{'}(n).(0.75)^{\dfrac{L_m}{2}} \sim \dfrac{poly^{'}(n)}{e^{0.06L_m}}, \end{equation} where $poly^{'}(n)$ is some polynomial in $n$. Choosing $L_m=\alpha.log(n)$ with $\alpha$ a positive constant, and replacing Equation (\ref{eqSAP2}) in (\ref{eqSAP1}) we get \begin{equation}
\label{eqSAP3}
p_{fail} \leq \dfrac{poly^{'}(n)}{n^{0.06\alpha}}. \end{equation} Finally, choosing $$0.06\alpha > deg(poly^{'}(n)),$$ and replacing this in Equation (\ref{eqSAP3}) we obtain our desired polynomially low bound for $p_{fail}$ \begin{equation}
\label{eqSAP4}
p_{fail} \leq \dfrac{1}{poly(n)}.
\end{equation}
Now, we want to find an estimate of the individual rates of preparation, gate application and measurement in our 3D NN architecture. Assuming at each layer of the circuit, qubits are acted upon by a local stochastic noise $E \sim N(p)$ with $0<p<1$ a constant, we get that $q \leq 4p^{4^{-D-1}}$ \cite{BGK+19}, where $D$ is the total quantum depth of the RHG construction. $D=6$: one step for preparation, one for (non-adaptive) measurements (assuming instantaneous classical computing as mentioned earlier), and four steps for preparing the RHG lattice \cite{RHG05}. Setting $q \leq 0.0075$ \cite{RHG07}, we get that the errors in preparation, gate application, and measurement should satisfy $p \leq \sim e^{-40000}$. Note that, for completeness, the threshold error rate for the distillation $\varepsilon$ should also be taken into account. Usually, $\varepsilon$ should be lower than some constant \cite{reichardt2005quantum} in order for distillation to be possible, but this is accounted for in the chosen value of $q$ \cite{RHG07}.
\section{ Distillation and overhead in our 3D NN architecture} \label{appRHG2} \subsection{Distillation} In this subsection, we will discuss distillation of logical $Y$-states in our 3D NN construction. The distillation of $T$-states in this construction is exactly the same as in appendix \ref{app zMSD}, but instead of using the protocol of Theorem 4.1 in \cite{HHP+17}, we use the protocol with $\gamma \sim 1$ (see appendix \ref{app zMSD}) in \cite{HH182} which allows transversal implementation of logical $T$-gates and is thus compatible with the RHG construction \cite{RHG07,RHG05}.
The distillation of $Y$-states is done with the $[7, 1, 3]$ Steane code \cite{RHG07}. This code has a $\gamma \sim log(7)/log(3) \sim 1.77$ \cite{HH18}. Therefore, the total number of logical ancilla qubits (including qubits prepared in initial noisy logical $Y$-states $\overline{\rho_{Y}}_{noisy}$) needed to distill a logical $Y$-state of fidelity $1-\varepsilon^{'}_{out}$ is given by \cite{BH12} (see appendix \ref{app zMSD}) \begin{equation}
\label{eqdistillY1}
N_Y=O(log^{1.77}(\dfrac{1}{\varepsilon^{'}_{out}})). \end{equation} Choosing $\varepsilon^{'}_{out}=1/O(poly(log(n)))$ as in the main text, we get that \begin{equation}
\label{eqdistillY2}
N_Y=O(log^{1.77}(poly(log(n))) \sim O(log^{1.77}(log(n))). \end{equation} It is straightforward to see that, for high enough $n$, \begin{equation}
\label{eqdistillY3}
N_Y < \log(n). \end{equation} $N_Y$ can be thought of as the number of logical qubits of a 2D logical cluster state needed to distill a logical $Y$-state of fidelity $1-\varepsilon^{'}_{out}$. As in appendix \ref{app zMSD}, if we do this MBQC non-adaptively, we only succeed with probability \begin{equation}
\label{eqdistillY4}
P_s \geq \dfrac{1}{2^{N_Y}} \geq \dfrac{1}{n}. \end{equation}
In our case, we need $O(n^5log^2(n))$ logical $Y$-states of fidelity $1-\varepsilon^{'}_{out}$ in order to distill $O(k.n)=O(n^4)$ $T$-states to be used in the construction of $\overline{C_2}$. $O(n^5log^2(n))$ is the number of qubits of $\overline{C_1}$ when $k=O(n^3)$ (number of columns of $|G\rangle$ ). Therefore, by results in appendix \ref{sec exp fail}, we would need $\overline{C^{'}_1}$ to be composed of $O(n^6log^3(n))$ logical qubits in order to distill, with exponentially high probability of success, enough ($O(n^5log^2(n))$) logical $Y$-states with fidelity $1-\varepsilon^{'}_{out}$.
Now, we will see why logical $Y$-states of fidelity $1-\varepsilon^{'}_{out}=1-1/O(poly(log(n)))$ suffice to distill $O(n^5log^2(n))$ $T$-states with fidelity $1-\varepsilon_{out}=1-1/O(poly(n))$. In the construction of $\overline{C_1}$ in appendix \ref{APP subsec zMSD}, replacing a perfect logical $Y$-state with a logical $Y$-state of fidelity $1-\varepsilon^{'}_{out}$, then measuring this state, results in applying the gate $\overline{H}\overline{Z(\pi/2)}$ with probability $1/2(1-\varepsilon^{'}_{out})$ instead of $1/2$ in the perfect logical $Y$-state case. Therefore, the success probability of $zMSD$ becomes in this case \begin{equation}
\label{eqdistillY5}
p_{zMSD} \geq \dfrac{1}{n}(1-\varepsilon^{'}_{out})^{O(log(n))}, \end{equation} as compared with Equation (\ref{eqpszmsd2}) in the perfect logical $Y$ case. By choosing, as we did, $\varepsilon^{'}_{out}=1/poly(log(n))$, the above equation can be rewritten, for large enough $n$, as \begin{equation}
\label{eqdistillY6}
p_{zMSD} \geq \dfrac{1}{n}(1-\dfrac{1}{O(poly(log(n)))})^{O(log(n))} \sim \dfrac{1}{n}(1-\dfrac{1}{O(poly^{'}(log(n)))}) \sim \dfrac{1}{n}. \end{equation} Thus, we have recovered Equation (\ref{eqpszmsd2}), and therefore can now use the same analysis as in appendix \ref{app zMSD} to distill logical $T$-states of fidelity $1-\varepsilon_{out}$ in our 3D NN construction. This will allow us to construct the sampling problem Equation (\ref{eq14PRL}) showing a quantum speedup.
\subsection{Overhead}
In this subsection, we will estimate the overhead (number of physical qubits in the 3D RHG lattice) of our 3D NN construction. As in \cite{RHG07}, we will make use of the concept of a logical elementary cell. Each logical elementary cell is a 3D cluster state composed of $\lambda \times \lambda \times \lambda$ elementary cells (each of which has eighteen qubits). Logical elementary cells can be either primal or dual. Each logical elementary cell contains a single defect. A defect inside a logical elementary cell has a cross section of $d \times d$ (perimeter $4d$) on any plane perpendicular to the direction of simulated time. For our purposes, we will choose $\lambda=O(d)$, and $d=O(log(n))$. This will ensure that the perimeter of the defect ($4d$) and the distance between two defects ($\lambda-d$) satisfy the conditions in appendix \ref{appRHG1}. In this picture, every logical qubit (composed of two defects of the same type) needs $2\times 18 \times \lambda^{3}=O(log^3(n))$ physical qubits. In order to not talk about primal or dual logical qubits (recall that computation is always carried out on logical qubits of same type, but we need braiding between two defects of different type in order to implement some gates such as $\overline{CNOT}$), we will assume each logical qubit needs four cells (two primal, two dual) to be defined, and therefore the number of physical qubits per logical qubit is $4 \times 18 \times \lambda^{3}=O(log^3(n))$. Now, all we need to do is calculate the number of logical qubits we need in total. Preparations of logical qubits in states $|\overline{+}\rangle$, $\overline{\rho_T}_{noisy}$, and $\overline{\rho_Y}_{noisy}$, and applying $\overline{CNOT}$ gates can be done using a constant number of intermediate elementary logical cells \cite{RHG07}. 
Therefore, we will only need to count the total number of logical qubit inputs for circuits $\overline{C^{'}_1}$, $\overline{C^{'}_R}$, $\overline{C_1}$, $\overline{C_R}$, and $\overline{C_2}$, then multiply this by a constant in order to get the total number of needed logical qubits including preparations and logical CNOT applications. As already calculated in the previous subsection, the total number of logical qubits of $\overline{C^{'}_1}$ is $O(n^6\log^3(n))$. The total overhead of circuits $\overline{C_1}$, $\overline{C_R}$, and $\overline{C_2}$ is $O(n^9 poly(\log(n)))$ logical qubits; this is obtained by the same calculations as done in our 4D NN architecture, but with replacing $k=O(n)$ with $k=O(n^3)$, in order for the partially invertible universal set condition to be satisfied \cite{MGDM19}. Finally, the routing circuit $\overline{C^{'}_R}$ (see appendix \ref{APP routing}) needs $O(n^6\log^3(n)\cdot n^5\log^2(n))=O(n^{11}\log^5(n))$ logical qubits; this term dominates the scaling. Multiplying $O(n^{11}\log^5(n))$ by a constant (to account for preparation and logical CNOT gates overhead), then by $O(\log^3(n))$ (to get the number of physical qubits), we get that the overall number of physical qubits needed is $O(n^{11}poly(\log(n)))$.
\end{document}
\begin{document}
\title{Two cores of a nonnegative matrix} \thanks{This research was supported by EPSRC grant RRAH15735. Serge\u{\i} Sergeev also acknowledges the support of RFBR-CNRS grant 11-0193106 and RFBR grant 12-01-00886. Bit-Shun Tam acknowledges the support of National Science Council of the Republic of China (Project No. NSC 101-2115-M-032-007)}
\author{Peter Butkovi{\v{c}}} \address{Peter Butkovi{\v{c}}, University of Birmingham, School of Mathematics, Watson Building, Edgbaston B15 2TT, UK} \email{[email protected]}
\author{Hans Schneider} \address{Hans Schneider, University of Wisconsin-Madison, Department of Mathematics, 480 Lincoln Drive, Madison WI 53706-1313, USA} \email{[email protected]}
\author{Serge\u{\i} Sergeev} \address{Serge\u{\i} Sergeev, University of Birmingham, School of Mathematics, Watson Building, Edgbaston B15 2TT, UK} \email{[email protected]}
\author{Bit-Shun Tam} \address{Bit-Shun Tam, Tamkang University, Department of Mathematics, Tamsui, Taiwan 25137, R.O.C. } \email{[email protected]}
\begin{abstract} We prove that the sequence of eigencones (i.e., cones of nonnegative eigenvectors) of positive powers $A^k$ of a nonnegative square matrix $A$ is periodic both in max algebra and in nonnegative linear algebra. Using an argument of Pullman, we also show that the Minkowski sum of the eigencones of powers of $A$ is equal to the core of $A$ defined as the intersection of nonnegative column spans of matrix powers, also in max algebra. Based on this, we describe the set of extremal rays of the core.
The spectral theory of matrix powers and the theory of matrix core is developed in max algebra and in nonnegative linear algebra simultaneously wherever possible, in order to unify and compare both versions of the same theory.
{\it{Keywords:}} Max algebra, nonnegative matrix theory, Perron-Frobenius theory, matrix power, eigenspace, core. \vskip0.1cm {\it{AMS Classification:}} 15A80, 15A18, 15A03,15B48
\end{abstract}
\maketitle
\section{Introduction}
The nonnegative reals $\R_+$ under the usual multiplication give rise to two semirings with addition defined in two ways: first with the usual addition, and second where the role of addition is played by maximum.
Thus we consider the properties of nonnegative matrices with entries in two semirings, the semiring of nonnegative numbers with usual addition and multiplication called ``{\bf nonnegative algebra}'', and the semiring called ``{\bf max(-times) algebra}''.
Our chief object of study is the {\bf core} of a nonnegative matrix $A$. This concept was introduced by Pullman in ~\cite{Pul-71}, and is defined as the intersection of the cones generated by the columns of matrix powers $A^k$. Pullman provided a geometric approach to the Perron-Frobenius theory of nonnegative matrices based on the properties of the core. He investigated the action of a matrix on its core showing that it is bijective and that the extremal rays of the core can be partitioned into periodic orbits. In other words, extremal rays of the core of $A$ are nonnegative eigenvectors of the powers of $A$ (associated with positive eigenvalues).
One of the main purposes of the present paper is to extend Pullman's core to max algebra, thereby investigating the periodic sequence of eigencones of max-algebraic matrix powers. However, following the line of~\cite{BSS,BSS-zeq,KSS}, we develop the theory in max algebra and nonnegative algebra simultaneously, in order to emphasize common features as well as differences, to provide general (simultaneous) proofs where this is possible. We do not aim to obtain new results, relative to~\cite{Pul-71,TS-94}, on the usual core of a nonnegative matrix. However, our unifying approach leads in some cases (e.g., Theorem~\ref{t:tam-schneider} (iii)) to new and more elementary proofs than those given previously. Our motivation is closely related to the Litvinov-Maslov correspondence principle~\cite{LM-98}, viewing the idempotent mathematics (in particular, max algebra) as a ``shadow'' of the ``traditional'' mathematics over real and complex fields.
\if{ Pullman's core can be also seen as closely related to the limits of powers of nonnegative matrices. However it is a different concept. Consider the simple example $$ \begin{pmatrix} 1 & 0 \\ 0 & 0.5 \end{pmatrix}. $$ Then, for any nonnegative $x$, $A^kx$ will tend to a multiple of $( 1,\ 0)^T$ while the core of $A$ is the entire nonnegative orthant ${\mathbb R}^2_+$. }\fi
To the authors' knowledge, the core of a nonnegative matrix has not received much attention in linear algebra. However, a more detailed study has been carried out by Tam and Schneider~\cite{TS-94}, who extended the concept of core to linear mappings preserving a proper cone. The case when the core is a polyhedral (i.e., finitely generated) cone was examined in detail in~\cite[Section 3]{TS-94}, and the results were applied to study the case of nonnegative matrix in~\cite[Section 4]{TS-94}. This work has found further applications in the theory of dynamic systems acting on the path space of a stationary Bratteli diagram. In particular, Bezuglyi~et~al.~\cite{BKMS} describe and exploit a natural correspondence between ergodic measures and extremals of the core of the incidence matrix of such a diagram.
On the other hand, there is much more literature on the related but distinct question of the limiting sets of homogeneous and non-homogeneous Markov chains in nonnegative algebra; see the books by Hartfiel~\cite{Har:02} and Seneta~\cite{Sen:81} and, e.g., the works of Chi~\cite{Chi-96} and Sierksma~\cite{Sie-99}. In max algebra, see the results on the ultimate column span of matrix powers for irreducible matrices (\cite[Theorem 8.3.11]{But:10}, \cite{Ser-09}), and by Merlet~\cite{Mer-10} on the invariant max cone of non-homogeneous matrix products.
The theory of the core relies on the behaviour of matrix powers. In the nonnegative algebra, recall the works of Friedland-Schneider~\cite{FS-80} and Rothblum-Whittle~\cite{RW-82} (on the role of distinguished classes which we call ``spectral classes'', algebraic and geometric growth rates, and various applications). The theory of max-algebraic matrix powers is similar. However, the max-algebraic powers have a well-defined periodic ultimate behaviour starting after sufficiently large time. This ultimate behaviour has been known since the work of Cuninghame-Green~\cite[Theorem 27-9]{CG:79}, Cohen~et~al.~\cite{CDQV-83} (irreducible case), and is described in greater generality and detail, e.g., by Akian, Gaubert and Walsh~\cite{AGW-05}, Gavalec~\cite{Gav:04}, De Schutter~\cite{BdS}, and the authors~\cite{But:10,Ser-11,SS-11} of the present paper. In particular, the Cyclicity Theorem of Cohen~et~al.~\cite{BCOQ,But:10,CDQV-83,HOW:05} implies that extremals of the core split into periodic orbits for any irreducible matrix (see Subsection 4.2 below)\footnote{ In fact, many of the cited works and monographs like~\cite{BCOQ,But:10,Gav:04,HOW:05} are written in the setting of \textbf{max-plus algebra}. However, this algebra is isomorphic to the max algebra considered here, so the results can be readily translated to the present (max-times) setting.}.
Some results on the eigenvectors of max-algebraic matrix powers have been obtained by Butkovi\v{c} and Cuninghame-Green~\cite{But:10,CGB-07}. The present paper also aims to extend and complete the research initiated in that work.
This paper is organized as follows. In Section~\ref{s:prel} we introduce the basics of irreducible and reducible Perron-Frobenius theory in max algebra and in nonnegative linear algebra.
In Section~\ref{s:key} we formulate the two key results of this paper. The first key result is Main Theorem~\ref{t:core} stating that the matrix core equals to the Minkowski sum of the eigencones of matrix powers (that is, for each positive integer $k$, we take the sum of the eigencones associated with $A^k$, and then we sum over all $k$). The second key result is Main Theorem~\ref{t:periodicity} stating that the sequence of eigencones of matrix powers is periodic and defining the period. This section also contains a table of notations used throughout the paper. Section~\ref{s:core} is devoted to the proof of Main Theorem~\ref{t:core}, taking in ``credit'' the result of Main Theorem~\ref{t:periodicity} (whose proof is deferred to the end of the paper).
In Section~\ref{s:sameaccess} we explain the relation between spectral classes of different matrix powers, and how the eigencones associated with general eigenvalues can be reduced to the case of the greatest eigenvalue, see in particular Theorems~\ref{t:samespectrum} and~\ref{t:reduction}.
In Section~\ref{s:extremals} we describe extremals of the core in both algebras extending~\cite[Theorem~4.7]{TS-94}, see Theorem~\ref{t:tam-schneider}. Prior to this result we formulate the Frobenius-Victory Theorems~\ref{t:FVnonneg} and~\ref{t:FVmaxalg} giving a parallel description of extremals of eigencones in both algebras. In Section~\ref{s:eigencones}, our first goal is to show that the sequence of eigencones of matrix powers in max algebra is periodic, comparing this result with the case of nonnegative matrix algebra, see Theorem~\ref{t:girls}.
Then we study the inclusion relation on eigencones and deduce Main Theorem~\ref{t:periodicity}. The key results are illustrated by a pair of examples in Section~\ref{s:examples}.
\section{Preliminaries} \label{s:prel}
\subsection{Nonnegative matrices and associated graphs} \label{ss:nonneg}
In this paper we are concerned only with nonnegative eigenvalues and nonnegative eigenvectors of a nonnegative matrix. In order to bring our terminology into line with the corresponding theory for max algebra we use the terms eigenvalue and eigenvector in a restrictive fashion appropriate to our semiring point of view. Thus we shall call $\rho$ an {\em eigenvalue} of a nonnegative matrix $A$ (only) if there is a nonnegative eigenvector $x$ of $A$ for $\rho$. Further $x$ will be called an {\em eigenvector} (only) if it is nonnegative. (In the literature $\rho$ is called a distinguished eigenvalue and $x$ a distinguished eigenvector of $A$.) For $x\in\Rp^n$, the {\em support} of $x$, denoted by $\operatorname{supp}(x)$, is the set of indices where $x_i>0$.
In this paper we are led to state the familiar Perron-Frobenius theorem in slightly unusual terms: An irreducible nonnegative matrix $A$ has a unique eigenvalue denoted by $\rho^+(A)$, which is positive (unless $A$ is the $1\times 1$ matrix $0$). Further, the eigenvector $x$ associated with $\rho^+(A)$ is essentially unique, that is all eigenvectors are multiples of $x$. The nonnegative multiples of $x$ constitute the cone of eigenvectors (in the above sense) $V_+(A,\rho^+(A))$ associated with $\rho^+(A)$.
A general (reducible) matrix $A\in\Rp^{n\times n}$ may have several nonnegative eigenvalues with associated cones of nonnegative eigenvectors ({\em eigencones}), and $\rho^+(A)$ will denote the biggest such eigenvalue, in general. Eigenvalue $\rho^+(A)$ is also called the {\em principal eigenvalue}, and $V_+(A,\rho^+(A))$ is called the {\em principal eigencone}.
Recall that a subset $V\subseteq\Rp^n$ is called a (convex) cone if 1) $\alpha v\in V$ for all $v\in V$ and $\alpha\in\R_+$, 2) $u+v\in V$ for $u,v\in V$. Note that cones in the nonnegative orthant can be considered as ``subspaces'', with respect to the semiring of nonnegative numbers (with usual addition and multiplication). In this vein, a cone $V$ is said to be {\em generated} by $S\subseteq\Rp^n$ if each $v\in V$ can be represented as a nonnegative combination $v=\sum_{x\in S} \alpha_x x$ where only finitely many $\alpha_x\in\R_+$ are different from zero. When $V$ is generated (we also say ``spanned'') by $S$, this is denoted $V=\spann_+(S)$. A vector $z$ in a cone $V$ is called an {\em extremal}, if $z=u+v$ and $u,v\in V$ imply $z=\alpha_u u=\alpha_v v$ for some scalars $\alpha_u$ and $\alpha_v$. Any closed cone in $\Rp^n$ is generated by its extremals; in particular, this holds for any finitely generated cone.
Let us recall some basic notions related to (ir)reducibility, which we use also in max algebra. With a matrix $A=(a_{ij})\in\R_+^{n\times n}$ we associate a weighted (di)graph ${\mathcal G}(A)$ with the set of nodes $N=\{1,\dots,n\}$ and set of edges~$E\subseteq N\times N$ containing a pair~$(i,j)$ if and only if~$a_{ij}\neq 0$; the weight of an edge~$(i,j)\in E$ is defined to be~$w(i,j):=a_{ij}$. A graph with just one node and no edges will be called {\em trivial}. A graph with at least one node and at least one edge will be called {\em nontrivial}.
A path $P$ in ${\mathcal G}(A)$ consisting\footnote{In our terminology, a path can visit some nodes more than once.} of the edges $(i_0,i_1),(i_1,i_2),\ldots,(i_{t-1},i_t)$ has {\em length} $l(P):=t$ and {\em weight} $w(P):=w(i_0,i_1) \cdot w(i_1,i_2) \cdots w(i_{t-1},i_t)$, and is called an $i\;-\;j$ path if $i_0=i$ and $i_t=j$. $P$ is called a {\em cycle} if $i_0=i_t$. $P$ is an {\em elementary cycle}, if, further, $i_k\neq i_l$ for all $k,l\in\{1,\ldots,t-1\}$.
Recall that $A=\left( a_{ij}\right) \in \R_+^{n\times n}$ is irreducible if ${\mathcal G}(A)$ is trivial or for any $i,j\in \{1,\ldots, n\}$ there is an $i\;-\;j$ path. Otherwise $A$ is reducible.
Notation $A^{\times k}$ will stand for the usual $k$th power of a nonnegative matrix.
\subsection{Max algebra} \label{ss:max}
By max algebra we understand the set of nonnegative numbers $\R_+$ where the role of addition is played by taking maximum of two numbers: $a\oplus b:=\max(a,b)$, and the multiplication is as in the usual arithmetics. This is carried over to matrices and vectors like in the usual linear algebra so that for two matrices $A=(a_{ij})$ and $B=(b_{ij})$ of appropriate sizes, $(A\oplus B)_{ij}=a_{ij}\oplus b_{ij}$ and $(A\otimes B)_{ij}=\bigoplus_k a_{ik}b_{kj}$.
Notation $A^{\otimes k}$ will stand for the $k$th max-algebraic power.
In max algebra, we have the following analogue of a convex cone. A set $V\subseteq\Rp^n$ is called a {\em max cone} if 1) $\alpha v\in V$ for all $v\in V$ and $\alpha\in\R_+$, 2) $u\oplus v\in V$ for $u,v\in V$. Max cones are also known as idempotent semimodules~\cite{KM:97,LM-98}. A max cone $V$ is said to be {\em generated} by $S\subseteq\Rp^n$ if each $v\in V$ can be represented as a max combination $v=\bigoplus_{x\in S} \alpha_x x$ where only finitely many (nonnegative) $\alpha_x$ are different from zero. When $V$ is generated (we also say ``spanned'') by $S$, this is denoted $V=\spann_{\oplus}(S)$. When $V$ is generated by the columns of a matrix $A$, this is denoted $V=\spann_{\oplus}(A)$. This cone is closed with respect to the usual Euclidean topology~\cite{BSS}.
A vector $z$ in a max cone $V\subseteq\Rp^n$ is called an {\em extremal} if $z=u\oplus v$ and $u,v\in V$ imply $z=u$ or $z=v$. Any finitely generated max cone is generated by its extremals, see Wagneur~\cite{Wag-91} and~\cite{BSS,GK-07} for recent extensions.
The {\em maximum cycle geometric mean} of~$A$ is defined by \begin{equation}\label{mcgm} \lambda(A)=\max\{ w(C)^{1/l(C)}\colon C \text{ is a cycle in }{\mathcal G}(A)\} \enspace. \end{equation}
The cycles with the cycle geometric mean equal to $\lambda(A)$ are called {\em critical}, and the nodes and the edges of ${\mathcal G}(A)$ that belong to critical cycles are called {\em critical}. The set of critical nodes is denoted by $N_c(A)$, the set of critical edges by $E_c(A)$, and these nodes and edges give rise to the {\em critical graph} of $A$, denoted by ${\mathcal C}(A) = (N_c(A), E_c(A))$.
A maximal strongly connected subgraph of ${\mathcal C}(A)$ is called a strongly connected component of ${\mathcal C}(A)$. Observe that ${\mathcal C}(A)$, in general, consists of several nontrivial strongly connected components, and that it never has any edges connecting different strongly connected components.
\if{ The {\em critical graph} of $A$, denoted by ${\mathcal C}(A)$, consists of all nodes and edges belonging to the cycles which attain the maximum in~\eqref{mcgm}. The set of such nodes will be called {\em critical} and denoted $N_c$; the set of such edges will be called {\em critical} and denoted $E_c$. Observe that the critical graph consists of several strongly connected subgraphs of ${\mathcal G}(A)$. Maximal such subgraphs are the {\em strongly connected components} of ${\mathcal C}(A)$, and there are no critical edges connecting different strongly connected components of ${\mathcal C}(A)$. }\fi
If for $A\in\Rp^{n\times n}$ we have $A\otimes x=\rho x$ with $\rho\in\R_+$ and a nonzero $x\in\Rp^n$, then $\rho$ is a {\em max(-algebraic) eigenvalue} and $x$ is a {\em max(-algebraic) eigenvector} associated with $\rho$. The set of max eigenvectors $x$ associated with~$\rho$, with the zero vector adjoined to it, is a max cone. It is denoted by $V_{\oplus}(A,\rho)$.
An irreducible $A\in\Rp^{n\times n}$ has a unique max-algebraic eigenvalue equal to $\lambda(A)$~\cite{BCOQ,But:10,CG:79,HOW:05}. In general $A$ may have several max eigenvalues, and the greatest of them equals $\lambda(A)$. The greatest max eigenvalue will also be denoted by $\rho^{\oplus}(A)$ (thus $\rho^{\oplus}(A)=\lambda(A)$), and called the {\em principal max eigenvalue} of $A$. In the irreducible case, the unique max eigenvalue $\rho^{\oplus}(A)=\lambda(A)$ is also called the {\em max(-algebraic) Perron root}. When max algebra and nonnegative algebra are considered simultaneously (e.g., Section~\ref{s:key}), the principal eigenvalue is denoted by $\rho(A)$.
Unlike in nonnegative algebra, there is an explicit description of $V_{\oplus}(A,\rho^{\oplus}(A))$, see Theorem~\ref{t:FVmaxalg}. This description uses the {\em Kleene star} \begin{equation} \label{def:kleenestar} A^*=I\oplus A\oplus A^{\otimes 2}\oplus A^{\otimes 3}\oplus\ldots. \end{equation}
Series~\eqref{def:kleenestar} converges if and only if $\rho^{\oplus}(A)\leq 1$, in which case $A^*=I\oplus A\oplus\ldots\oplus A^{\otimes(n-1)}$~\cite{BCOQ,But:10,HOW:05}. Note that if $\rho^{\oplus}(A)\neq 0$, then $\rho^{\oplus}(A/\rho^{\oplus}(A))=1$, hence $(A/\rho^{\oplus}(A))^*$ always converges.
The {\em path interpretation} of max-algebraic matrix powers $A^{\otimes l}$ is that each entry $a^{\otimes l}_{ij}$ is equal to the greatest weight of an $i\;-\;j$ path with length $l$. Consequently, for $i\neq j$, the entry $a^*_{ij}$ of $A^*$ is equal to the greatest weight of an $i\;-\;j$ path (with no length restrictions).
\subsection{Cyclicity and periodicity} \label{ss:cycl} Consider a nontrivial strongly connected graph ${\mathcal G}$ (that is, a strongly connected graph with at least one node and one edge). Define its {\em cyclicity} $\sigma$ as the gcd of the lengths of all elementary cycles. It is known that for any vertices $i,j$ there exists a number $l$ such that $l(P)\equiv l \pmod{\sigma}$
for all $i\;-\;j$ paths $P$.
When the length of an $i\;-\;j$ path is a multiple of $\sigma$ (and hence we have the same for all $j\;-\;i$ paths), $i$ and $j$ are said to belong to the same {\em cyclic class}. When the length of this path is $1$ modulo $\sigma$ (in other words, if $l(P)-1$ is a multiple of $\sigma$), the cyclic class of $i$ (resp., of $j$) is {\em previous} (resp., {\em next}) with respect to the class of $j$ (resp., of $i$).
See~\cite[Chapter 8]{But:10} and~\cite{BR,Ser-09,Ser-11} for more information. Cyclic classes are also known as {\em components of imprimitivity}~\cite{BR}.
The cyclicity of a trivial graph is defined to be $1$, and the unique node of a trivial graph is defined to be its only cyclic class.
We define the cyclicity of a (general) graph containing several strongly connected components to be the lcm of the cyclicities of the components.
For a graph ${\mathcal G}=(N,E)$ with $N=\{1,\ldots,n\}$, define the {\em associated matrix} $A=(a_{ij})\in\{0,1\}^{n\times n}$ by $ a_{ij}=1\Leftrightarrow (i,j)\in E. $ This is a matrix over the Boolean semiring $\mathbb{B}:=\{0,1\}$, where addition is the disjunction and multiplication is the conjunction operation. This semiring is a subsemiring of max algebra, so that it is possible to consider the associated matrix as a matrix in max algebra whose entries are either $0$ or $1$.
For a graph ${\mathcal G}$ and any $k\geq 1$, define ${\mathcal G}^k$ as a graph that has the same vertex set as ${\mathcal G}$ and $(i,j)$ is an edge of ${\mathcal G}^k$ if and only if there is a path of length $k$ on ${\mathcal G}$ connecting $i$ to $j$. Thus, if a Boolean matrix $A$ is associated with ${\mathcal G}$, then the Boolean matrix power $A^{\otimes k}$ is associated with ${\mathcal G}^k$. Powers of Boolean matrices (over the Boolean semiring) are a topic of independent interest, see~Brualdi-Ryser~\cite{BR}, Kim~\cite{Kim}. We will need the following observation.
\begin{theorem}[cf. {\cite[Theorem 3.4.5]{BR}}] \label{t:brualdi} Let ${\mathcal G}$ be a strongly connected graph with cyclicity $\sigma$. \begin{itemize} \item[{\rm (i)}] ${\mathcal G}^k$ consists of gcd $(k,\sigma)$ nontrivial strongly connected components not accessing each other. If ${\mathcal G}$ is nontrivial, then so are all the components of ${\mathcal G}^k$.
\item[{\rm (ii)}] The node set of each component of ${\mathcal G}^k$ consists of $\sigma/$(gcd$(k,\sigma))$ cyclic classes of ${\mathcal G}$. \end{itemize} \end{theorem}
\begin{corollary} \label{c:div-boolean} Let ${\mathcal G}$ be a strongly connected graph with cyclicity $\sigma$, and let $k,l\geq 1$. Then gcd$(k,\sigma)$ divides gcd$(l,\sigma)$ if and only if ${\mathcal G}^k$ and ${\mathcal G}^l$ are such that the node set of every component of ${\mathcal G}^l$ is contained in the node set of a component of ${\mathcal G}^k$. \end{corollary} \begin{proof} Assume that ${\mathcal G}$ is nontrivial.\\
``If''. Since the node set of each component of ${\mathcal G}^k$ consists of $\sigma/$gcd$(k,\sigma)$ cyclic classes of ${\mathcal G}$ and is the disjoint union of the node sets of certain components of ${\mathcal G}^l$, and the node set of each component of ${\mathcal G}^l$ consists of $\sigma/$gcd$(l,\sigma)$ cyclic classes of ${\mathcal G}$, it follows that the node set of each component of ${\mathcal G}^k$ consists of $\frac{\sigma}{\text{gcd}(k,\sigma)}/\frac{\sigma}{\text{gcd}(l,\sigma)}= \frac{\text{gcd}(l,\sigma)}{\text{gcd}(k,\sigma)}$ components of ${\mathcal G}^l$. Therefore, gcd$(k,\sigma)$ divides gcd$(l,\sigma)$.
``Only if.'' Observe that the node sets of the components of ${\mathcal G}^k$ and ${\mathcal G}^{\text{gcd}(k,\sigma)}$ (or ${\mathcal G}^l$ and ${\mathcal G}^{\text{gcd}(l,\sigma)}$) are the same: since gcd$(k,\sigma)$ divides $k$, each component of ${\mathcal G}^{\text{gcd}(k,\sigma)}$ splits into several components of ${\mathcal G}^k$, but the total number of components is the same (as gcd$($gcd$(k,\sigma),\sigma)=$gcd$(k,\sigma)$), hence their node sets are the same. The claim follows since the node set of each component of ${\mathcal G}^{\text{gcd}(k,\sigma)}$ splits into several components of ${\mathcal G}^{\text{gcd}(l,\sigma)}$. \end{proof}
Let us formally introduce the definitions related to periodicity and ultimate periodicity of sequences (whose elements are of arbitrary nature). A sequence $\{\Omega_k\}_{k\geq 1}$ is called {\em periodic} if there exists an integer $p$ such that $\Omega_{k+p}$ is identical with $\Omega_k$ for all $k$. The least such $p$ is called the {\em period} of $\{\Omega_k\}_{k\geq 1}$. A sequence $\{\Omega_k\}_{k\geq 1}$ is called {\em ultimately periodic} if the sequence $\{\Omega_k\}_{k\geq T}$ is periodic for some $T\geq 1$. The least such $T$ is called the {\em periodicity threshold} of $\{\Omega_k\}_{k\geq 1}$.
The following observation is crucial in the theory of Boolean matrix powers.
\begin{theorem}[Boolean Cyclicity~\cite{Kim}] \label{t:BoolCycl} Let ${\mathcal G}$ be a strongly connected graph on $n$ nodes, with cyclicity $\sigma$. \begin{itemize} \item [{\rm (i)}] The sequence $\{{\mathcal G}^k\}_{k\geq 1}$ is ultimately periodic with the period $\sigma$. The periodicity threshold, denoted by $T({\mathcal G})$, does not exceed $(n-1)^2+1$. \item[{\rm (ii)}] If ${\mathcal G}$ is nontrivial, then for $k\geq T({\mathcal G})$ and a multiple of $\sigma$, ${\mathcal G}^k$ consists of $\sigma$ complete subgraphs not accessing each other. \end{itemize} \end{theorem} For brevity, we will refer to $T({\mathcal G})$ as the periodicity threshold of ${\mathcal G}$.
We have the following two max-algebraic extensions of Theorem~\ref{t:BoolCycl}.
\begin{theorem}[Cyclicity Theorem, Cohen~et~al.~\cite{CDQV-83}] \label{t:Cycl1} Let $A\in\Rp^{n\times n}$ be irreducible, let $\sigma$ be the cyclicity of ${\mathcal C}(A)$ and $\rho:=\rho^{\oplus}(A)$. Then the sequence $\{(A/\rho)^{\otimes k}\}_{k\geq 1}$ is ultimately periodic with period $\sigma$. \end{theorem}
\begin{theorem}[Cyclicity of Critical Part, Nachtigall~\cite{Nacht}] \label{t:nacht} Let $A\in\Rp^{n\times n}$, $\sigma$ be the cyclicity of ${\mathcal C}(A)$ and $\rho:=\rho^{\oplus}(A)$. Then the sequences $\{(A/\rho)^{\otimes k}_{i\cdot}\}_{k\geq 1}$ and $\{(A/\rho)^{\otimes k}_{\cdot i}\}_{k\geq 1}$, for $i\in N_c(A)$, are ultimately periodic with period $\sigma$. The greatest of their periodicity thresholds, denoted by $T_c(A)$, does not exceed $n^2$. \end{theorem}
\if{ The least number $T$ (resp. $T_c$) satisfying the condition of Theorem~\ref{t:Cycl1} (resp. Theorem~\ref{t:nacht}) is denoted by $T(A)$ (resp. $T_c(A)$), and called the {\em cyclicity threshold} (resp. the {\em critical cyclicity threshold}) of $A$. }\fi
Theorem~\ref{t:Cycl1} is standard~\cite{BCOQ,But:10,HOW:05}, and Theorem~\ref{t:nacht} can also be found as~\cite[Theorem 8.3.6]{But:10}. Here $A_{i\cdot}$ (resp. $A_{\cdot i}$) denote the $i$th row (resp. the $i$th column) of $A$.
When the sequence $\{(A/\rho)^{\otimes k}\}_{k\geq 1}$ (resp. the sequences $\{(A/\rho)^{\otimes k}_{i\cdot}\}_{k\geq 1}$,\\ $\{(A/\rho)^{\otimes k}_{\cdot i}\}_{k\geq 1}$) are ultimately periodic, we also say that the sequence $\{A^{\otimes k}\}_{k\geq 1}$ (resp. $\{A^{\otimes k}_{i\cdot}\}_{k\geq 1}$, $\{A^{\otimes k}_{\cdot i}\}_{k\geq 1}$) is {\em ultimately periodic with growth rate $\rho$}.
Let us conclude with a well-known number-theoretic result concerning the coin problem of Frobenius, which we see as basic for both Boolean and max-algebraic cyclicity.
\begin{lemma}[e.g.,{\cite[Lemma 3.4.2]{BR}}] \label{l:schur} Let $n_1,\ldots, n_m$ be integers such that\\ gcd$(n_1,\ldots,n_m)=k$. Then there exists a number $T$ such that for all integers $l$ with $kl\geq T$, we have $kl=t_1n_1+\ldots +t_mn_m$ for some $t_1,\ldots,t_m\geq 0$. \end{lemma}
\subsection{Diagonal similarity and visualization} \label{ss:viz}
For any $x\in\Rp^n$, we can define $X=\operatorname{diag}(x)$ as the {\em diagonal matrix} whose diagonal entries are equal to the corresponding entries of $x$, and whose off-diagonal entries are zero. If $x$ does not have zero components, the diagonal similarity scaling $A\mapsto X^{-1}AX$ does not change the weights of cycles and eigenvalues (both nonnegative and max); if $z$ is an eigenvector of $X^{-1}AX$ then $Xz$ is an eigenvector of $A$ with the same eigenvalue. This scaling does not change the critical graph ${\mathcal C}(A)=(N_c(A),E_c(A))$. Observe that $(X^{-1}AX)^{\otimes k}=X^{-1}A^{\otimes k}X$, also showing that the periodicity thresholds of max-algebraic matrix powers (Theorems~\ref{t:Cycl1} and~\ref{t:nacht})
do not change after scaling. Of course, we also have $(X^{-1}AX)^{\times k}=X^{-1}A^{\times k}X$ in nonnegative algebra. The technique of nonnegative scaling can be traced back to the works of Fiedler-Pt\'ak~\cite{FP-67}.
When working with the max-algebraic matrix powers, it is often convenient to ``visualize'' the powers of the critical graph. Let $A$ have $\lambda(A)=1$. A diagonal similarity scaling $A\mapsto X^{-1}AX$ is called a {\em strict visualization scaling}~\cite{But:10,SSB} if the matrix $B=X^{-1}AX$ has $b_{ij}\leq 1$, and moreover, $b_{ij}=1$ if and only if $(i,j)\in E_c(A)(=E_c(B))$. Any matrix $B$ satisfying these properties is called {\em strictly visualized}.
\begin{theorem}[Strict Visualization~\cite{But:10,SSB}] \label{t:strictvis} For each $A\in\Rp^{n\times n}$ with $\rho^{\oplus}(A)=1$ {\rm (}that is, $\lambda(A)=1${\rm )}, there exists a strict visualization scaling. \end{theorem}
If $A=(a_{ij})$ has all entries $a_{ij}\leq 1$, then we define the Boolean matrix $A^{[1]}$ with entries \begin{equation} \label{def:A1} a_{ij}^{[1]}= \begin{cases} 1, &\text{if $a_{ij}=1$},\\ 0, &\text{if $a_{ij}<1$}. \end{cases} \end{equation} If $A$ has all entries $a_{ij}\leq 1$ then \begin{equation} \label{e:unimatrix} (A^{\otimes k})^{[1]}=(A^{[1]})^{\otimes k}. \end{equation} Similarly if a vector $x\in\Rp^n$ has $x_i\leq 1$, we define $x^{[1]}$ having $x^{[1]}_i=1$ if $x_i=1$ and $x^{[1]}_i=0$ otherwise. Obviously if $A$ and $x$ have all entries not exceeding $1$ then $(A\otimes x)^{[1]}=A^{[1]}\otimes x^{[1]}$.
If $A$ is strictly visualized, then $a_{ij}^{[1]}=1$ if and only if $(i,j)$ is a critical edge of ${\mathcal G}(A)$. Thus $A^{[1]}$ can be treated as the associated matrix of ${\mathcal C}(A)$ (disregarding the formal difference in dimension). We now show that ${\mathcal C}(A^{\otimes k})={\mathcal C}(A)^k$ and that any power of a strictly visualized matrix is strictly visualized.
\begin{lemma}[cf.~\cite{CGB-07}, {\cite[Prop.~3.3]{Ser-09}}] \label{l:CAk} Let $A\in\Rp^{n\times n}$ and $k\geq 1$. \begin{itemize} \item[{\rm (i)}] ${\mathcal C}(A)^k={\mathcal C}(A^{\otimes k})$. \item[{\rm (ii)}] If $A$ is strictly visualized, then so is $A^{\otimes k}$. \end{itemize} \end{lemma} \begin{proof} Using Theorem~\ref{t:strictvis}, we can assume without loss of generality that $A$ is strictly visualized. Also note that both in ${\mathcal C}(A^{\otimes k})$ and in ${\mathcal C}(A)^k$, each node has ingoing and outgoing edges, hence for part (i) it suffices to prove that the two graphs have the same set of edges.
Applying Theorem~\ref{t:brualdi} (i) to every component of ${\mathcal C}(A)$, we obtain that ${\mathcal C}(A)^k$ also consists of several isolated nontrivial strongly connected graphs. In particular, each edge of ${\mathcal C}(A)^k$ lies on a cycle, so ${\mathcal C}(A)^k$ contains cycles. Observe that ${\mathcal G}(A^{\otimes k})$ does not have edges with weight greater than $1$, while all edges of ${\mathcal C}(A)^k$ have weight $1$, hence all cycles of ${\mathcal C}(A)^k$ have weight $1$. As ${\mathcal C}(A)^k$ is a subgraph of ${\mathcal G}(A^{\otimes k})$, this shows that $\rho^{\oplus}(A^{\otimes k})=\lambda(A^{\otimes k})=1$ and that all cycles of ${\mathcal C}(A)^k$ are critical cycles of ${\mathcal G}(A^{\otimes k})$. Since each edge of ${\mathcal C}(A)^k$ lies on a critical cycle, all edges of ${\mathcal C}(A)^k$ are critical edges of ${\mathcal G}(A^{\otimes k})$.
${\mathcal G}(A^{\otimes k})$ does not have edges with weight greater than $1$, hence every edge of ${\mathcal C}(A^{\otimes k})$ has weight $1$. Equation~\eqref{e:unimatrix} implies that if $a_{ij}^{\otimes k}=1$ then there is a path from $i$ to $j$ composed of the edges with weight $1$. Since $A$ is strictly visualized, such edges are critical. This shows that if $a_{ij}^{\otimes k}=1$ and in particular if $(i,j)$ is an edge of ${\mathcal C}(A^{\otimes k})$, then $(i,j)$ is an edge of ${\mathcal C}(A)^k$. Hence $A^{\otimes k}$ is strictly visualized, and all edges of ${\mathcal C}(A^{\otimes k})$ are edges of ${\mathcal C}(A)^k$.
Thus ${\mathcal C}(A^{\otimes k})$ and ${\mathcal C}(A)^k$ have the same set of edges, so ${\mathcal C}(A^{\otimes k})={\mathcal C}(A)^k$ (and we also showed that $A^{\otimes k}$ is strictly visualized).
\end{proof}
Let $T({\mathcal C}(A))$ be the greatest periodicity threshold of the strongly connected components of ${\mathcal C}(A)$. The following corollary of Lemma~\ref{l:CAk} will be required in Section~\ref{s:eigencones}.
\begin{corollary} \label{TcaTcrit} Let $A\in\Rp^{n\times n}$. Then $T_c(A)\geq T({\mathcal C}(A))$.
\end{corollary} \if{ \begin{proof} Let $\sigma$ be the cyclicity of ${\mathcal C}(A)$. Assume that $A$ is strictly visualized. We are going to show that if $(A^k)_{\cdot i}=A^{k+\sigma}_{\cdot i}$ for all $i\in N_c(A)$ then $({\mathcal C}(A))^k=({\mathcal C}(A))^{k+\sigma}$. Indeed, Lemma~\ref{l:CAk} implies that $({\mathcal C}(A))^k=({\mathcal C}(A))^{k+\sigma}$ is equivalent to $(A^[1])^{\otimes k}= (A^[1])^{\otimes (k+\sigma)}$ and that all nonzero entries of these matrices are in the critical columns and rows. As $(A^k)_{\cdot i}=A^{k+\sigma}_{\cdot i}$ for all $i\in N_c(A)$ is sufficient for $(A^[1])^{\otimes k}= (A^[1])^{\otimes (k+\sigma)}$, the claim follows. \end{proof} }\fi
\subsection{Frobenius normal form}
Every matrix $A=(a_{ij})\in \R_+^{n\times n}$ can be transformed by simultaneous permutations of the rows and columns in almost linear time to a {\em Frobenius normal form}~\cite{BP,BR} \begin{equation} \left( \begin{array}{cccc} A_{11} & 0 & \ldots & 0 \\ A_{21} & A_{22} & \ldots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ A_{r1} & A_{r2} & \ldots & A_{rr} \end{array} \right) , \label{fnf} \end{equation} where $A_{11},...,A_{rr}$ are irreducible square submatrices of $A$. They correspond to the sets of nodes $N_1,\ldots,N_r$ of the strongly connected components of ${\mathcal G}(A)$. Note that in~\eqref{fnf} an edge from a node of $N_{\mu}$ to a node of $N_{\nu}$ in ${\mathcal G}(A)$ may exist only if $\mu\geq \nu.$
Generally, $A_{KL}$ denotes the submatrix of $A$ extracted from the rows with indices in $K\subseteq \{1,\ldots,n\}$ and columns with indices in $L\subseteq \{1,\ldots,n\}$, and $A_{\mu\nu}$ is a shorthand for $A_{N_{\mu}N_{\nu}}$. Accordingly, the subvector $x_{N_{\mu}}$ of $x$ with indices in $N_{\mu}$ will be written as $x_{\mu}$.
If $A$ is in the Frobenius Normal Form \eqref{fnf} then the {\em reduced graph}, denoted by $R(A)$, is the (di)graph whose nodes correspond to $N_{\mu}$, for $\mu=1,\ldots,r$, and the set of arcs is $ \{(\mu,\nu);(\exists k\in N_{\mu})(\exists \ell \in N_{\nu})a_{k\ell }>0\}. $ In max algebra and in nonnegative algebra, the nodes of $R(A)$ are {\em marked} by the corresponding eigenvalues (Perron roots), denoted by $\rho^{\oplus}_{\mu}:=\rho^{\oplus}(A_{\mu\mu})$ (max algebra), $\rho^+_{\mu}:=\rho^+(A_{\mu\mu})$ (nonnegative algebra), and by $\rho_{\mu}$ when both algebras are considered simultaneously.
By a {\em class} of $A$ we mean a node $\mu$ of the reduced graph $R(A)$. It will be convenient to attribute to class $\mu$ the node set and the edge set of ${\mathcal G}(A_{\mu\mu})$, as well as the cyclicity and other parameters, that is, we will say ``nodes of $\mu$'', ``edges of $\mu$'', ``cyclicity of $\mu$'', etc.\footnote{The sets $N_{\mu}$ are also called classes, in the literature. To avoid the confusion, we do not follow this in the present paper.}
A class $\mu$ is trivial if $A_{\mu\mu}$ is the $1\times 1$ zero matrix. Class $\mu$ {\em accesses} class $\nu$, denoted $\mu\to\nu$, if $\mu=\nu$ or if there exists a $\mu\;-\;\nu$ path in $R(A)$. A class is called {\em initial}, resp. {\em final}, if it is not accessed by, resp. if it does not access, any other class. Node $i$ of ${\mathcal G}(A)$ accesses class $\nu$, denoted by $i\to\nu$, if $i$ belongs to a class $\mu$ such that $\mu\to\nu$.
Note that simultaneous permutations of the rows and columns of $A$ are equivalent to calculating $P^{-1}AP,$ where $P$ is a permutation matrix. Such transformations do not change the eigenvalues, and the eigenvectors before and after such a transformation may only differ by the order of their components. Hence we will assume without loss of generality that $A$ is in Frobenius normal form~(\ref{fnf}). Note that a permutation bringing matrix to this form is (relatively) easy to find~\cite{BR}. We will refer to the transformation $A\mapsto P^{-1}AP$ as {\em permutational similarity}.
\subsection{Elements of the Perron-Frobenius theory} \label{ss:pfelts}
In this section we recall the spectral theory of reducible matrices in max algebra and in nonnegative linear algebra. All results are standard: the nonnegative part goes back to Frobenius~\cite{Fro-1912}, Sect.~11, and the max-algebraic counterpart is due to Gaubert~\cite{Gau:92}, Ch.~IV (also see~\cite{But:10} for other references).
A class $\nu$ of $A$ is called a {\em spectral class} of $A$ associated with eigenvalue $\rho\neq 0$, or sometimes $(A,\rho)$-spectral class for short, if \begin{equation} \label{e:speclass} \begin{split} &\rho^{\oplus}_{\nu}=\rho,\ \text{and}\ \mu\to\nu\ \text{implies}\ \rho^{\oplus}_{\mu}\leq\rho^{\oplus}_{\nu}\ \text{(max algebra)},\\ &\rho^+_{\nu}=\rho,\ \text{and}\ \mu\to\nu,\mu\neq\nu\ \text{implies}\ \rho^{+}_{\mu}<\rho^{+}_{\nu}\ \text{(nonnegative algebra)}. \end{split} \end{equation} In both algebras, note that there may be several spectral classes associated with the same eigenvalue.
In nonnegative algebra, spectral classes are called distinguished classes~\cite{Sch-86}, and there are also semi-distinguished classes associated with distinguished generalized eigenvectors of order two or more~\cite{TS-00}. However, these vectors are not contained in the core\footnote{For a polyhedral cone, the core of the cone-preserving map does not contain generalized eigenvectors of order two or more~{\cite[Corollary 4.3]{TS-94}}.}. Also, no suitable max-algebraic analogue of generalized eigenvectors is known to us.
If all classes of $A$ consist of just one element, then the nonnegative and max-algebraic Perron roots are the same. In this case, the spectral classes in nonnegative algebra are also spectral in max algebra. However, in general this is not so. In particular, for a nonnegative matrix $A$, a cycle of ${\mathcal G}(A)$ attaining the maximum cycle geometric mean $\rho^{\oplus}(A)=\lambda(A)$ need not lie in a strongly connected component corresponding to a class with spectral radius $\rho^+(A)$. This is because, if $A_1$, $A_2$ are irreducible nonnegative matrices such that $\rho^+(A_1)<\rho^+(A_2)$, then we need not have $\rho^{\oplus}(A_1)<\rho^{\oplus}(A_2)$. For example, let $A$ be the $3\times 3$ matrix of all $1$'s, and let $B(\epsilon)=(3/2,\epsilon,\epsilon)^T(3/2,\epsilon,\epsilon)$. Then $\rho^+(A)=3$, $\rho^+(B(\epsilon))=9/4+2\epsilon^2$, so $\rho^+(A)>\rho^+(B(\epsilon))$ for sufficiently small $\epsilon>0$, but $\rho^{\oplus}(B(\epsilon))=9/4>1=\rho^{\oplus}(A)$.
Denote by $\Lambda_+(A)$, resp. $\Lambda_{\oplus}(A)$, the set of {\bf nonzero} eigenvalues of $A\in\Rp^{n\times n}$ in nonnegative linear algebra, resp. in max algebra. It will be denoted by $\Lambda(A)$ when both algebras are considered simultaneously, as in the following standard description.
\begin{theorem}[{\cite[Th.~4.5.4]{But:10}}, {\cite[Th.~3.7]{Sch-86}}] \label{t:spectrum} Let $A\in\Rp^{n\times n}$. Then $\Lambda(A)=\{\rho_{\nu}\neq 0\colon \nu\ \text{is spectral}\}$.
\end{theorem}
Theorem~\ref{t:spectrum} encodes the following {\bf two} statements: \begin{equation} \label{e:spectrum2} \Lambda_{\oplus}(A)=\{\rho^{\oplus}_{\nu}\neq 0\colon \nu\ \text{is spectral}\},\quad \Lambda_+(A)=\{\rho^+_{\nu}\neq 0\colon \nu\ \text{is spectral}\}, \end{equation} where the notion of spectral class is defined in two different ways by~\eqref{e:speclass}, in two algebras.
In both algebras, for each $\rho\in\Lambda(A)$ define \begin{equation} \label{amrho} \begin{split} &A_{\rho}:=\rho^{-1} \begin{pmatrix} 0 & 0\\ 0 & A_{\mrho\mrho} \end{pmatrix},\ \text{where}\\ & M_{\rho}:=\{i\colon i\to\nu,\; \nu\ \text{is $(A,\rho)$-spectral}\}\,\enspace . \end{split} \end{equation}
By ``$\nu$ is $(A,\rho)$-spectral'' we mean that $\nu$ is a spectral class of $A$ with $\rho_{\nu}=\rho$. The next proposition, holding both in max algebra and in nonnegative algebra, allows us to reduce the case of arbitrary eigencone to the case of principal eigencone. Here we assume that $A$ is in Frobenius normal form.
\begin{proposition}[\cite{But:10,Gau:92,Sch-86}] \label{p:vamrho} For $A\in\Rp^{n\times n}$ and each $\rho\in\Lambda(A)$, we have $V(A,\rho)=V(A_{\rho},1)$, where $1$ is the principal eigenvalue of $A_{\rho}$. \end{proposition}
For a parallel description of extremals of eigencones\footnote{In nonnegative algebra, \cite[Th. 3.7]{Sch-86} immediately describes both spectral classes and eigencones associated with any eigenvalue.
However, we prefer to split the formulation, following the exposition of~\cite{But:10}. An alternative simultaneous exposition of spectral theory in both algebras can be found in~\cite{KSS}.} in both algebras see Section~\ref{ss:FV}.
In max algebra, using Proposition~\ref{p:vamrho}, we define the {\em critical graph associated with} $\rho\in\Lambda_{\oplus}(A)$ as the critical graph of $A_{\rho}$. By a {\em critical component of $A$} we mean a strongly connected component of the critical graph associated with some $\rho\in\Lambda_{\oplus}(A)$.
In max algebra, the role of spectral classes of $A$ is rather played by these critical components, which will be (in analogy with classes of Frobenius normal form) denoted by $\Tilde{\mu}$, with the node set $N_{\Tilde{\mu}}$. See Section~\ref{ss:critcomp}.
\section{Notation table and key results} \label{s:key}
The following notation table shows how various objects are denoted in nonnegative algebra, max algebra and when both algebras are considered simultaneously.
\begin{equation*} \begin{array}{cccc} & \text{Nonnegative} & \text{Max} & \text{Both}\\ \text{Sum} & \sum & \bigoplus & \sum\\ \text{Matrix power} & A^{\times t} & A^{\otimes t} & A^t\\ \text{Column span} & \spann_+(A) & \spann_{\oplus}(A) & \operatorname{span}(A)\\ \text{Perron root} & \rho^+_{\mu} & \rho^{\oplus}_{\nu} & \rho_{\mu}\\ \text{Spectrum (excl. $0$)} & \Lambda_+(A) & \Lambda_{\oplus}(A) & \Lambda(A)\\ \text{Eigencone} & V_+(A,\rho^+) & V_{\oplus}(A,\rho^{\oplus}) & V(A,\rho)\\ \text{Sum of eigencones} & V_+^{\Sigma}(A) & V_{\oplus}^{\Sigma}(A) & V^{\Sigma}(A)\\ \text{Core} & \core_+(A) & \core_{\oplus}(A) & \operatorname{core}(A)
\end{array} \end{equation*} In the case of max algebra, we also have the critical graph ${\mathcal C}(A)$ (with related concepts and notation), not used in nonnegative algebra.
The core and the sum of eigencones appearing in the table have not been formally introduced. These are the two central notions of this paper, and we now introduce them for both algebras simultaneously.
The {\em core} of a nonnegative matrix $A$ is defined as the intersection of the column spans (in other words, images) of its powers: \begin{equation} \label{def:core} \operatorname{core}(A):=\cap_{i=1}^{\infty} \operatorname{span}(A^i). \end{equation}
The ({\em Minkowski}) {\em sum of eigencones} of a nonnegative matrix $A$ is the cone consisting of all sums of vectors in all $V(A,\rho)$: \begin{equation} \label{va-def} V^{\Sigma}(A):=\sum_{\rho\in\Lambda(A)} V(A,\rho) \end{equation} If $\Lambda(A)=\emptyset$, which happens when $\rho(A)=0$, then we assume that the sum on the right-hand side is $\{0\}$.
The following notations can be seen as the ``global'' definition of cyclicity in nonnegative algebra and in max algebra.
\begin{itemize} \item[1.] Let $\sigma_{\rho}$ be the lcm of all cyclicities of spectral classes associated with $\rho\in\Lambda_+(A)$ ({\bf nonnegative algebra}), or the cyclicity of critical graph associated with $\rho\in\Lambda_{\oplus}(A)$ ({\bf max algebra}). \item[2.] Let $\sigma_{\Lambda}$ be the lcm of all $\sigma_{\rho}$ where $\rho\in\Lambda(A)$. \end{itemize} The difference between the definitions of $\sigma_{\rho}$ in max algebra and in nonnegative algebra comes from the corresponding versions of Perron-Frobenius theory. In particular, let $A\in\Rp^{n\times n}$ be an irreducible matrix. While in nonnegative algebra the eigencone associated with the Perron root of $A$ is always reduced to a single ray, the number of (appropriately normalized) extremals of the eigencone of $A$ in max algebra is equal to the number of critical components, so that there may be up to $n$ such extremals.
One of the key results of the present paper relates the core with the sum of eigencones. The nonnegative part of this result can be found in Tam-Schneider~\cite[Th.~4.2, part~(iii)]{TS-94}. \begin{maintheorem} \label{t:core} Let $A\in\Rp^{n\times n}$. Then $$\operatorname{core}(A)=\sum_{k\geq 1,\rho\in\Lambda(A)} V(A^k,\rho^k) =V^{\Sigma}(A^{\sigma_{\Lambda}}).$$ \end{maintheorem}
The main part of the proof is given in Section~\ref{s:core}, for both algebras simultaneously. However, this proof takes in ``credit'' some facts, which we will have to show. First of all, we need the equality \begin{equation} \label{e:samespectrum} \Lambda(A^k)=\{\rho^k\colon \rho\in\Lambda(A)\}. \end{equation} This simple relation between $\Lambda(A^k)$ and $\Lambda(A)$, which can be seen as a special case of~\cite[Theorem 3.6(ii)]{KSS}, will be also proved below as Corollary~\ref{c:samespectrum}.
To complete the proof of Main Theorem~\ref{t:core} we also have to study the periodic sequence of eigencones of matrix powers and their sums. On this way we obtain the following key result, both in max and nonnegative algebra.
\begin{maintheorem} \label{t:periodicity} Let $A\in\Rp^{n\times n}$. Then \begin{itemize} \item[{\rm (i)}] $\sigma_{\rho}$, for $\rho\in\Lambda(A)$, is the period of the sequence $\{V(A^k,\rho^k)\}_{k\geq 1}$, and $V(A^k,\rho^k)\subseteq V(A^{\sigma_{\rho}},\rho^{\sigma_{\rho}})$ for all $k\geq 1$; \item[{\rm (ii)}] $\sigma_{\Lambda}$ is the period of the sequence $\{V^{\Sigma}(A^k)\}_{k\geq 1}$, and $V^{\Sigma}(A^k)\subseteq V^{\Sigma}(A^{\sigma_{\Lambda}})$ for all $k\geq 1$. \end{itemize} \end{maintheorem}
Main Theorem~\ref{t:periodicity} is proved in Section~\ref{s:eigencones} as a corollary of Theorems~\ref{t:girls-major} and~\ref{t:girls-minor}, where the inclusion relations between eigencones of matrix powers are studied in detail.
Theorem~\ref{t:tam-schneider}, which gives a detailed description of extremals of both cores, can be also seen as a key result of this paper. However, it is too long to be formulated in advance.
\section{Two cores} \label{s:core}
\subsection{Basics} \label{ss:basics} In this section we investigate the core of a nonnegative matrix defined by~\eqref{def:core}. In the main argument, we consider the cases of max algebra and nonnegative algebra simultaneously.
One of the most elementary and useful properties of the intersection in~\eqref{def:core} is that, actually, \begin{equation} \label{e:monotonic} \operatorname{span}(A)\supseteq\operatorname{span}(A^2)\supseteq\operatorname{span}(A^3)\supseteq\ldots \end{equation}
Generalizing an argument of Pullman~\cite{Pul-71} we will prove that \begin{equation} \label{e:maingoal} \operatorname{core}(A)=\sum_{k\geq 1} V^{\Sigma}(A^k)=\sum_{k\geq 1,\rho\in\Lambda(A)} V(A^k,\rho^k) \end{equation} also in max algebra.
Note that the following inclusion is almost immediate.
\begin{lemma} \label{l:natural} $\sum_{k\geq 1} V^{\Sigma}(A^k)\subseteq\operatorname{core} (A)$. \end{lemma} \begin{proof} $x\in V(A^k,\rho)$ implies that $A^k x=\rho x$ and hence $x=\rho^{-t} A^{kt}x$ for all $t\geq 1$ (using the invertibility of multiplication). Hence $x\in\bigcap_{t\geq 1} \operatorname{span} A^{kt}=\bigcap_{t\geq 1} \operatorname{span}(A^t)$. \end{proof}
So it remains to show the opposite inclusion \begin{equation} \label{e:maingoal2} \operatorname{core}(A)\subseteq\sum_{k\geq 1} V^{\Sigma}(A^k). \end{equation}
Let us first treat the trivial case $\rho(A)=0$, i.e., $\Lambda(A)=\emptyset$. There are only trivial classes in the Frobenius normal form, and ${\mathcal G}(A)$ is acyclic. This implies $A^k=0$ for some $k\geq 1$. In this case $\operatorname{core}(A)=\{0\}$, the sum on the right-hand side is $\{0\}$ by convention,
so~\eqref{e:maingoal} is the trivial ``draw'' $\{0\}=\{0\}$.
\subsection{Max algebra: cases of ultimate periodicity} \label{ss:maxalg-easy}
In max algebra, unlike the nonnegative algebra, there are wide classes of matrices
for which~\eqref{e:maingoal2} and~\eqref{e:maingoal} follow almost immediately. We list some of them below.\\
${\mathcal S}_1:$ {\em Irreducible matrices}.\\ ${\mathcal S}_2:$ {\em Ultimately periodic matrices.} This is when the sequence $\{A^{\otimes k}\}_{k\geq 1}$ is ultimately periodic with a growth rate $\rho$ (in other words, when the sequence $\{(A/\rho)^{\otimes k}\}_{k\geq 1}$ is ultimately periodic). As shown by Moln\'arov\'a-Pribi\v{s}~\cite{MP-00}, this happens if and only if the Perron roots of all nontrivial classes of $A$ equal $\rho^{\oplus}(A)=\rho$. \\ ${\mathcal S}_3:$ {\em Robust matrices.} For any vector $x\in\Rp^n$ the orbit $\{A^{\otimes k}\otimes x\}_{k\geq 1}$ hits an eigenvector of $A$, meaning that $A^{\otimes T}\otimes x$ is an eigenvector of $A$ for some $T$. This implies that the whole remaining part $\{A^{\otimes k}\otimes x\}_{k\geq T}$ of the orbit (the ``tail'' of the orbit) consists of multiples of that eigenvector $A^{\otimes T}\otimes x$. The notion of robustness was introduced and studied in~\cite{BCG-09}.\\ ${\mathcal S}_4:$ {\em Orbit periodic matrices:} For any vector $x\in\Rp^n$ the orbit $\{A^{\otimes k}\otimes x\}_{k\geq 1}$ hits an eigenvector of $A^{\otimes \sigma_x}$ for some $\sigma_x$, implying that the remaining ``tail'' of the orbit $\{A^{\otimes k}\otimes x\}_{k\geq 1}$ is periodic with some growth rate. See~\cite[Section 7]{SS-11} for characterization.\\ ${\mathcal S}_5:$ {\em Column periodic matrices.} This is when for all $i$ we have $(A^{\otimes (t+\sigma_i)})_{\cdot i}=\rho_i^{\sigma_i} A^{\otimes t}_{\cdot i}$ for all large enough $t$ and some $\rho_i$ and $\sigma_i$.
Observe that ${\mathcal S}_1\subseteq{\mathcal S}_2\subseteq{\mathcal S}_4\subseteq{\mathcal S}_5$ and ${\mathcal S}_3\subseteq{\mathcal S}_4$. Indeed, ${\mathcal S}_1\subseteq{\mathcal S}_2$ is the Cyclicity Theorem~\ref{t:Cycl1}. For the inclusion ${\mathcal S}_2\subseteq{\mathcal S}_4$ observe that, if $A$ is ultimately periodic then $A^{\otimes(t+\sigma)}=\rho^{\sigma}A^{\otimes t}$ and hence $A^{\otimes (t+\sigma)}\otimes x=\rho^{\sigma} A^{\otimes t}\otimes x$ holds for all $x\in\Rp^n$ and all big enough $t$. Observe that ${\mathcal S}_3$ is a special case of ${\mathcal S}_4$, which is a special case of ${\mathcal S}_5$ since the columns of matrix powers can be considered as orbits of the unit vectors.
To see that~\eqref{e:maingoal2} holds in all these cases, note that in the column periodic case all column sequences $\{A_{\cdot i}^t\}_{t\geq 1}$ end up with periodically repeating eigenvectors of $A^{\otimes \sigma_i}$ or the zero vector, which implies that $\spann_{\oplus}(A^{\otimes t})\subseteq\bigoplus_{k\geq 1} V_{\oplus}^{\Sigma}(A^{\otimes k})\subseteq\core_{\oplus}(A)$ and hence $\spann_{\oplus}(A^{\otimes t})=\core_{\oplus}(A)$ for all large enough $t$. Thus, {\em finite stabilization of the core} occurs in all these classes. A necessary and sufficient condition for this finite stabilization is described in~\cite{BSS-inprep}.
\subsection{Core: a general argument} \label{ss:pullman} The original argument of Pullman~\cite[Section 2]{Pul-71} used the separation of a point from a closed convex cone by an open homogeneous halfspace (that contains the cone and does not contain the point).
In the case of max algebra, Nitica and Singer~\cite{NS-07I} showed that at each point $x\in\Rp^n$ there are at most $n$ maximal max-cones not containing this point. These {\em conical semispaces}, used to separate $x$ from any max cone not containing $x$, turn out to be open. Hence they can be used in the max version of Pullman's argument.
However, for the sake of a simultaneous proof we will exploit the following analytic argument instead of separation. By $B(x,\epsilon)$ we denote the intersection of the open ball centered at $x\in\Rp^n$ of radius $\epsilon$ with $\Rp^n$. In the remaining part of Section~\ref{s:core} we consider {\bf both algebras simultaneously.}
\begin{lemma} \label{l:analytic} Let $x^1,\ldots,x^m\in\Rp^n$ be nonzero and let $z\notin\operatorname{span}(x^1,\ldots,x^m)$. Then there exists $\epsilon>0$ such that $z\notin\operatorname{span}(B(x^1,\epsilon),\ldots,B(x^m,\epsilon))$. \end{lemma} \begin{proof} By contradiction assume that for each $\epsilon$ there exist points $y^i(\epsilon)\in B(x^i,\epsilon)$ and nonnegative scalars $\mu_i(\epsilon)$ such that \begin{equation} \label{e:epscomb} z=\sum_{i=1}^m \mu_i(\epsilon) y^i(\epsilon). \end{equation} Since $y^i(\epsilon)\to x^i$ as $\epsilon\to 0$ and $x^i$ are nonzero, we can assume that $y^i(\epsilon)$ are bounded from below by nonzero vectors $v^i$, and then
$z\geq \sum_{i=1}^m \mu_i(\epsilon) v^i$ for all $\epsilon$,
implying that $\mu_i(\epsilon)$ are uniformly bounded from above. By compactness we can assume that $\mu_i(\epsilon)$ converge to some $\mu_i\in\R_+$, and then~\eqref{e:epscomb} implies by continuity that $z=\sum_{i=1}^m \mu_i x^i$, a contradiction. \end{proof}
\begin{theorem}[{\cite[Theorem~2.1]{Pul-71}}] \label{t:pullman} Assume that $\{K_l\}$, for $l\geq 1$, is a sequence of cones in $\Rp^n$ such that $K_{l+1}\subseteq K_l$ for all $l$, and each of them is generated by no more than $k$ nonzero vectors. Then the intersection $K=\cap_{l=1}^{\infty}K_l$ is also generated by no more than $k$ vectors. \end{theorem} \begin{proof}
Let $K_l=\operatorname{span}(y^{l1},\ldots,y^{lk})$ (where some of the vectors
$y^{l1},\ldots,y^{lk}$ may be repeated when $K_l$ is generated by less than $k$ nonzero vectors), and consider the sequences of normalized vectors $\{y^{li}/||y^{li}||\}_{l\geq 1}$ for
$i=1,\ldots,k$, where $||u||:=\max u_i$ (or any other norm). As the set $\{u\colon ||u||=1\}$ is compact, we can find a subsequence $\{l_t\}_{t\geq 1}$ such that for $i=1,\ldots,k$, the sequence
$\{y^{l_ti}/||y^{l_ti}||\}_{t\geq 1}$ converges to a finite vector
$u^i$, which is nonzero since $||u^i||=1$. We will assume that
$||y^{l_t i}||=1$ for all $i$ and $t$.
We now show that $u^1,\ldots,u^k\in K$. Consider any $i=1,\ldots,k$. For each $s$, $y^{l_t i}\in K_s$ for all sufficiently large $t$. As $\{y^{l_ti}\}_{t\geq 1}$ converges to $u^i$ and $K_s$ is closed, we have $u^i\in K_s$. Since this is true for each $s$, we have $u^i\in\cap_{s=1}^{\infty} K_s=K$.
Thus $u^1,\ldots,u^k\in K$, and $\operatorname{span}(u^1,\ldots,u^k)\subseteq K$. We claim that also $K\subseteq \operatorname{span}(u^1,\ldots, u^k)$. Assume to the contrary that there is $z\in K$ that is not in $\operatorname{span}(u^1,\ldots, u^k)$. Then by Lemma~\ref{l:analytic} there exists $\epsilon>0$ such that $z\notin\operatorname{span}(B(u^1,\epsilon),\ldots,B(u^k,\epsilon))$. Since the sequence $\{y^{l_ti}\}_{t\geq 1}$ converges to $u^i$, we have $y^{l_ti}\in B(u^i,\epsilon)$ for $t$ large enough, and $$ \operatorname{span}(y^{l_t1},\ldots,y^{l_tk})\subseteq \operatorname{span}(B(u^1,\epsilon),\ldots,B(u^k,\epsilon)). $$ But $z$ belongs to $K_{l_t}=\operatorname{span}(y^{l_t1},\ldots,y^{l_tk})$ since it belongs to the intersection of all these cones, a contradiction. \end{proof}
Theorem~\ref{t:pullman} applies to the sequence $\{\operatorname{span}(A^t)\}_{t\geq 1}$,
so $\operatorname{core}(A)$ is generated by no more than $n$ vectors.
\begin{proposition}[{\cite[Lemma~2.3]{Pul-71}}] \label{p:surj} The mapping induced by $A$ on its core is a surjection. \end{proposition} \begin{proof} First note that $A$ does induce a mapping on its core. If $z\in\operatorname{core}(A)$ then for each $t$ there exists $x^t$ such that $z=A^tx^t$. Hence $Az=A^{t+1}x^t$, so $Az\in\cap_{t\geq 2}\operatorname{span} A^t=\operatorname{core}(A)$.
Next, let $m$ be such that $A^m$ has the greatest number of zero columns (we assume that $A$ is not nilpotent; recall that a zero column in $A^k$ remains zero in all subsequent powers). If $z=A^tx^t$ for $t\geq m+1$, we also can represent it as $A^{m+1}u^t$, where $u^t:=A^{t-m-1}x^t$. The components of $u^t$ corresponding to the nonzero columns of $A^{m+1}$ are bounded since $A^{m+1}u^t=z$. So we can assume that the sequence of subvectors of $u^{t}$ with these components converges. Then the sequence $y^t:=A^m u^t$ also converges, since the indices of nonzero columns of $A^m$ coincide with those of $A^{m+1}$, which are the indices of the converging subvectors of $u^t$. Let $y$ be the limit of $y^t$. Since $y^s=A^{s-1} x^s$ are in $\operatorname{span}(A^t)$ for all $s>t$, and since $\operatorname{span}(A^t)$ are closed, we obtain $y\in\operatorname{span}(A^t)$ for all $t$. Thus we found $y\in\operatorname{core}(A)$ satisfying $Ay=z$. \end{proof}
Theorem~\ref{t:pullman} and Proposition~\ref{p:surj} show that the core is generated by finitely many vectors in $\Rp^n$ and that the mapping induced by $A$ on its core is ``onto''.
Now we use the fact that a finitely generated cone in the nonnegative orthant (and more generally, closed cone) is generated by its extremals both in nonnegative algebra and in max algebra, see~\cite{BSS,Wag-91}.
\begin{proposition}[{\cite[Theorem 2.2]{Pul-71}}] \label{p:permute} The mapping induced by $A$ on the extremal generators of its core is a permutation (i.e., a bijection). \end{proposition} \begin{proof}
Let $\operatorname{core}(A)=\operatorname{span}(u^1,\ldots,u^k)$ where $u^1,\ldots,u^k$ are extremals of the core. Suppose that $x^j$ is a preimage of $u^j$ in the core, that is, $Ax^j=u^j$ for some $x^j\in\operatorname{core}(A)$, $j=1,\ldots,k$. Then $x^j=\sum_{i=1}^k \alpha_i u^i$ for some nonnegative coefficients $\alpha_1,\ldots,\alpha_k$, and $u^j=\sum_{i=1}^k\alpha_i A u^i$. Since $u^j$ is extremal, it follows that $u^j$ is proportional to $Au^i$ for some $i$. Thus for each $j\in\{1,\ldots,k\}$ there exists an $i\in\{1,\ldots,k\}$ such that $Au^i$ is a positive multiple of $u^j$. But since for each $i\in\{1,\ldots,k\}$ there is at most one $j$ such that $Au^i$ is a positive multiple of $u^j$, it follows that $A$ induces a bijection on the set of extremal generators of its core. \end{proof}
We are now ready to prove~\eqref{e:maingoal} and Main Theorem~\ref{t:core} taking the periodicity of the eigencone sequence (Main Theorem~\ref{t:periodicity}) ``on credit''.
\begin{proof}[Proof of Main Theorem~\ref{t:core}] Proposition~\ref{p:permute} implies that all extremals of $\operatorname{core}(A)$ are eigenvectors of $A^q$, where $q$ denotes the order of the permutation induced by $A$ on the extremals of $\operatorname{core}(A)$.
Hence $\operatorname{core}(A)$ is a subcone of the sum of all eigencones of all powers of $A$, which is the inclusion relation~\eqref{e:maingoal2}. Combining this with the reverse inclusion of Lemma~\ref{l:natural} we obtain that $\operatorname{core}(A)$ is precisely the sum of all eigencones of all powers of $A$, and using~\eqref{e:samespectrum} (proved in Section~\ref{s:sameaccess} below), we obtain the first part of the equality of Main Theorem~\ref{t:core}. The last part of the equality of Main Theorem~\ref{t:core} now follows from the periodicity of eigencones formulated in Main Theorem~\ref{t:periodicity}, or more precisely, from the weaker result of Theorem~\ref{t:girls} proved in Section~\ref{s:eigencones}. \end{proof}
\section{Spectral classes and critical components of matrix powers} \label{s:sameaccess}
This section is mainly of technical importance. It shows that the union of node sets of all spectral classes is invariant under matrix powering, and that access relations between spectral classes in all matrix powers are essentially the same. Further, the case of an arbitrary eigenvalue can be reduced to the case of the principal eigenvalue for all powers simultaneously (in both algebras). At the end of the section we consider the critical components of max-algebraic powers.
\subsection{Classes and access relations}
As in Section~\ref{s:core}, the arguments are presented in both algebras simultaneously. This is due to the fact that the edge sets of ${\mathcal G}(A^{\otimes k})$ and ${\mathcal G}(A^{\times k})$ are the same for any $k$ and that the definitions of spectral classes in both algebras are alike. Results of this section can be traced back, for the case of nonnegative algebra, to the classical work of Frobenius~\cite{Fro-1912}, see remarks on the very first page of \cite{Fro-1912} concerning the powers of an irreducible nonnegative matrix\footnote{Frobenius defines (what we could call) the cyclicity or index of imprimitivity $k$ of an irreducible $S$ as the number of eigenvalues that lie on the spectral circle. He then remarks ``If $A$ is primitive, then every power of $A$ is again primitive and a certain power and all subsequent powers are positive''. This is followed by ``If $A$ is imprimitive, then $A^m$ consists of $d$ irreducible parts where $d$ is the greatest common divisor of $m$ and $k$. Further, $A^m$ is completely reducible. The characteristic functions of the components differ only in the powers of the variable'' (which provides a converse to the preceding assertion). And then ``The matrix $A^k$ is the lowest power of $A$ whose components are all primitive''. The three quotations cover Lemma~\ref{l:sameperron} in the case of nonnegative algebra.}.
The reader is also referred to the monographs of Minc~\cite{Minc}, Ber\-man-Plem\-mons~\cite{BP}, Brua\-ldi-Ry\-ser~\cite{BR}, and we will often cite the work of Tam-Sch\-nei\-der~\cite[Section 4]{TS-94} containing all of our results in this section, in nonnegative algebra.
\begin{lemma}[cf. {\cite[Ch.~5, Ex.~6.9]{BP}}, {\cite[Lemma~4.5]{TS-94}} ] \label{l:sameperron} Let $A$ be irreducible with the unique eigenvalue $\rho$, let ${\mathcal G}(A)$ have cyclicity $\sigma$ and $k$ be a positive integer. \begin{itemize} \item[{\rm (i)}] $A^k$ is permutationally similar to a direct sum of gcd$(k,\sigma)$ irreducible blocks with eigenvalues $\rho^k$, and $A^k$ does not have eigenvalues other than $\rho^k$. \item[{\rm (ii)}] If $k$ is a multiple of $\sigma$, then the sets of indices in these blocks coincide with the cyclic classes of ${\mathcal G}(A)$. \item[{\rm (iii)}] If $\operatorname{supp}(x)$ is a cyclic class of ${\mathcal G}(A)$, then $\operatorname{supp}(Ax)$ is the previous cyclic class.
\end{itemize} \end{lemma} \begin{proof}
(i): Assuming without loss of generality $\rho=1$, let $X=\operatorname{diag}(x)$ for a positive eigenvector $x\in V(A,\rho)$ and consider $B:=X^{-1}AX$ which is stochastic (nonnegative algebra), or max-stochastic, i.e., such that $\bigoplus_{j=1}^n b_{ij}=1$ holds for all $i$ (max algebra). By Theorem~\ref{t:brualdi}, $B^k$ is permutationally similar to a direct sum of gcd$(k,\sigma)$ irreducible isolated blocks. These blocks are stochastic (or max-stochastic), hence they all have an eigenvector $(1,\ldots,1)$ associated with the unique eigenvalue $1$. If $x\in V(A^k,\Tilde{\rho})$ for some $\Tilde{\rho}$, then its subvectors corresponding to the irreducible blocks of $A^k$ are also eigenvectors of those blocks, or zero vectors. Hence $\Tilde{\rho}=1$, which is the only eigenvalue of $A^k$.
(ii): By Theorem~\ref{t:brualdi}, ${\mathcal G}(A)$ splits into gcd$(k,\sigma)=\sigma$ components, and each of them contains exactly one cyclic class of ${\mathcal G}(A)$.
(iii): Use the definition of cyclic classes and that each node has an ingoing edge. \end{proof}
\begin{lemma} \label{l:samenonzero} Both in max algebra and in nonnegative linear algebra, the trivial classes of $A^k$ are the same for all $k$.
\end{lemma} \begin{proof} In both algebras, an index belongs to a class with nonzero Perron root if and only if the associated graph contains a cycle with a nonzero weight traversing the node with that index. This property is invariant under taking matrix powers, hence the claim. \end{proof}
In both algebras, each class $\mu$ of $A$ with cyclicity $\sigma$ corresponds to an irreducible submatrix $A_{\mu\mu}$. It is easy to see that $(A^k)_{\mu\mu}=(A_{\mu\mu})^k$. Applying Lemma~\ref{l:sameperron} to $A_{\mu\mu}$ we see that $\mu$ gives rise to gcd$(k,\sigma)$ classes in $A^k$, which are said to be {\em derived} from their common {\em ancestor} $\mu$. If $\mu$ is trivial, then it gives rise to a unique trivial derived class of $A^k$, and if $\mu$ is non-trivial then all the derived classes are nontrivial as well. The classes of $A^k$ and $A^l$ derived from the common ancestor will be called {\em related}. Note that this is an equivalence relation on the set of classes of all powers of $A$. Evidently, a class of $A^k$ is derived from a class of $A$ if and only if its index set is contained in the index set of the latter class. It is also clear that each class of $A^k$ has an ancestor in $A$.
We now observe that access relations in matrix powers are ``essentially the same''. This has identical proof in max algebra and nonnegative algebra.
\begin{lemma} \label{l:sameaccess} Let $A\in\Rp^{n\times n}$. For all $k,l\geq 1$ and $\rho>0$, if an index $i\in\{1,\ldots,n\}$ accesses (resp. is accessed by) a class with Perron root $\rho^k$ in $A^k$ then $i$ accesses (resp. is accessed by) a related class with Perron root $\rho^l$ in $A^l$. \end{lemma} \begin{proof}
We deduce from Lemma~\ref{l:sameperron} and Lemma~\ref{l:samenonzero} that the index set of each class of $A^k$ with Perron root $\rho^k$ is contained in the ancestor class of $A$ with Perron root $\rho$. Then, $i$ accessing (resp. being accessed by) a class in $A^k$ implies $i$ accessing (resp. being accessed by) its ancestor in $A$. Since $\rho>0$, this ancestor class is nontrivial, so the access path can be extended to have a length divisible by $l$, by means of a path contained in the ancestor class. By Lemma~\ref{l:sameperron}, the ancestor decomposes in $A^l$ into several classes with the common Perron root $\rho^l$, and $i$ accesses (resp. is accessed by) one of them.
\end{proof}
\begin{theorem}[{\cite[Corollary 4.6]{TS-94}}] \label{t:samespectrum} Let $A\in\Rp^{n\times n}$. \begin{itemize} \item[{\rm (i)}] If a class $\mu$ is spectral in $A$, then so are the classes derived from it in $A^k$. Conversely, each spectral class of $A^k$ is derived from a spectral class of $A$. \item[{\rm (ii)}] For each class $\mu$ of $A$ with cyclicity $\sigma$, there are gcd$(k,\sigma)$ classes of $A^k$ derived from it. If $k$ is a multiple of $\sigma$ then the index sets of derived classes are the cyclic classes of $\mu$. \end{itemize} \end{theorem} \begin{proof} (i): We will prove the following equivalent statement: For each pair $\mu,\nu$ where $\mu$ is a class in $A$ and $\nu$ is a class derived from $\mu$ in $A^k$, we have that $\mu$ is non-spectral if and only if $\nu$ is non-spectral.
Observe that by Lemma~\ref{l:samenonzero}, the Perron root of $\mu$ is $0$ if and only if the Perron root of $\nu$ is $0$. In this case, both $\mu$ and $\nu$ are non-spectral (by definition). Further, let $\rho>0$ be the Perron root of $\mu$. Then, by Lemma~\ref{l:sameperron}, the Perron root of $\nu$ is $\rho^k$. Let $i$ be an index in $\nu$. It also belongs to $\mu$.
If $\mu$ is non-spectral, then $i$ is accessed in $A$ by a class with Perron root $\rho'$ such that $\rho'>\rho$ in max algebra, resp. $\rho'\geq\rho$ in nonnegative algebra. By Lemma~\ref{l:sameaccess}, there is a class of $A^k$, which accesses $i$ in $A^k$ and has Perron root $(\rho')^k$. Since we have $(\rho')^k>\rho^k$ in max algebra or resp. $(\rho')^k\geq \rho^k$ in nonnegative algebra, we obtain that $\nu$, being the class to which $i$ belongs in $A^k$, is also non-spectral.
Conversely, if $\nu$ is non-spectral, then $i$ is accessed in $A^k$ by a class $\theta$ with Perron root equal to $\Tilde{\rho}^k$ for some $\Tilde{\rho}$, and such that $\Tilde{\rho}^k>\rho^k$ in max algebra, resp. $\Tilde{\rho}^k\geq\rho^k$ in nonnegative algebra. The ancestor of $\theta$ in $A$ accesses\footnote{This can be observed immediately, or obtained by applying Lemma~\ref{l:sameaccess}.} $i$ in $A$ and has Perron root $\Tilde{\rho}$. Since we have $\Tilde{\rho}>\rho$ in max algebra or resp. $\Tilde{\rho}\geq \rho$ in nonnegative algebra, we obtain that $\mu$, being the class to which $i$ belongs in $A$, is also non-spectral. Part (i) is proved.
(ii): This part follows directly from Lemma~\ref{l:sameperron} parts (i) and (ii). \end{proof}
\begin{corollary} \label{c:samespectrum} Let $A\in\Rp^{n\times n}$ and $k\geq 1$. Then $\Lambda(A^k)=\{\rho^k\colon\rho\in\Lambda(A)\}.$ \end{corollary} \begin{proof} By Theorem~\ref{t:spectrum}, the nonzero eigenvalues of $A$ (resp. $A^k$) are precisely the Perron roots of the spectral classes of $A$ (resp. $A^k$). By Theorem~\ref{t:samespectrum}(i), if a class of $A$ is spectral, then so is any class derived from it in $A^k$. This implies
that $\Lambda(A^k)\subseteq \{\rho^k\colon \rho\in\Lambda(A)\}.$ The converse inclusion follows from the converse part of Theorem~\ref{t:samespectrum}(i). \end{proof}
Let us note yet another corollary of Theorem~\ref{t:samespectrum}. For $A\in\Rp^{n\times n}$ and $\rho\geq 0$,
let $N(A,\rho)$ be the union of index sets of all classes of $A$ with Perron root $\rho$, and $N^s(A,\rho)$ be the union of index sets of all {\bf spectral} classes of $A$ with Perron root $\rho$. Obviously, $N^s(A,\rho)\subseteq N(A,\rho)$, and both sets (as defined for arbitrary $\rho\geq 0$) are possibly empty.
\begin{corollary} \label{c:specindex} Let $A\in\Rp^{n\times n}$, $\rho\in\R_+$ and $k\geq 1$. Then \begin{itemize} \item[{\rm (i)}] $N(A^k,\rho^k)=N(A,\rho)$, \item[{\rm (ii)}] $N^s(A^k,\rho^k)=N^s(A,\rho)$. \end{itemize} \end{corollary} \begin{proof} (i): This part follows from Lemmas~\ref{l:sameperron} and~\ref{l:samenonzero}.
(ii): Inclusion $N^s(A,\rho)\subseteq N^s(A^k,\rho^k)$ follows from the direct part of Theorem~\ref{t:samespectrum}(i), and inclusion\\ $N^s(A^k,\rho^k)\subseteq N^s(A,\rho)$ follows from the converse part of Theorem~\ref{t:samespectrum}(i).
\end{proof}
For the eigencones of $A\in\Rp^{n\times n}$, the case of an arbitrary $\rho\in\Lambda(A)$ can be reduced to the case of the principal eigenvalue: $V(A,\rho)=V(A_{\rho},1)$ (Proposition~\ref{p:vamrho}). Now we extend this reduction to the case of $V(A^k,\rho^k)$, for any $k\geq 1$. As in the case of Proposition~\ref{p:vamrho}, we assume that $A$ is in Frobenius normal form.
\begin{theorem} \label{t:reduction} Let $k\geq 1$ and $\rho\in \Lambda(A)$. \begin{itemize} \item[{\rm (i)}] The set of all indices having access to the spectral classes of $A^k$ with the eigenvalue $\rho^k$ equals $M_{\rho}$, for each $k$. \item[{\rm (ii)}] $(A^k)_{M_{\rho}M_{\rho}}=\rho^k (A_{\rho})^k_{M_{\rho}M_{\rho}}$. \item[{\rm (iii)}] $V(A^k,\rho^k)=V((A_{\rho})^k,1)$.
\end{itemize} \end{theorem} \begin{proof} (i): Apply Corollary~\ref{c:specindex}~part~(ii) and Lemma~\ref{l:sameaccess}. (ii): Use that $M_{\rho}$ is initial in ${\mathcal G}(A)$. (iii): By Proposition~\ref{p:vamrho} we have (assuming that $A^k$ is in Frobenius normal form) that $V(A^k,\rho^k)=V((A_{\rho^k})^k,1)$ where, instead of~\eqref{amrho}, \begin{equation} \label{amrhok} \begin{split} & A^k_{\rho^k}:=\rho^{-k} \begin{pmatrix} 0 & 0\\ 0 & A^k_{M_{\rho}^kM_{\rho}^k} \end{pmatrix},\ \text{and}\\ & M_{\rho}^k:=\{i\colon i\to\nu,\; \nu\ \text{is $(A^k,\rho^k)$-spectral}\} \end{split} \end{equation} By part (i) $M_{\rho}^k=M_{\rho}$, hence $A_{\rho^k}^k=(A_{\rho})^k$ and the claim follows. \end{proof}
\subsection{Critical components}
\label{ss:critcomp}
In max algebra, when $A$ is assumed to be strictly visualized, each component $\Tilde{\mu}$ of ${\mathcal C}(A)$ with cyclicity $\sigma$ corresponds to an irreducible submatrix $A^{[1]}_{\Tilde{\mu}\Tilde{\mu}}$ (as in the case of classes, $A_{\Tilde{\mu}\Tilde{\mu}}$ is a shorthand for $A_{N_{\Tilde{\mu}}N_{\Tilde{\mu}}}$). Using the strict visualization and Lemma~\ref{l:CAk} we see that $(A^{\otimes k})^{[1]}_{\Tilde{\mu}\Tilde{\mu}}=(A^{[1]}_{\Tilde{\mu}\Tilde{\mu}})^{\otimes k}$. Applying Lemma~\ref{l:sameperron}(i) to $A^{[1]}_{\Tilde{\mu}\Tilde{\mu}}$ we see that $\Tilde{\mu}$ gives rise to gcd$(k,\sigma)$ critical components in $A^{\otimes k}$. As in the case of classes, these components are said to be derived from their common ancestor $\Tilde{\mu}$.
Evidently a component of ${\mathcal C}(A^{\otimes k})$ is derived from a component of ${\mathcal C}(A)$ if and only if its index set is contained in the index set of the latter component. Following this line we now formulate an analogue of Theorem~\ref{t:samespectrum} (and some other results).
\begin{theorem}[cf. {\cite[Theorem 8.2.6]{But:10}}, {\cite[Theorem 2.3]{CGB-07}}] \label{t:samecritical} Let $A\in\Rp^{n\times n}$. \begin{itemize} \item[{\rm (i)}] For each component $\Tilde{\mu}$ of ${\mathcal C}(A)$ with cyclicity $\sigma$, there are gcd$(k,\sigma)$ components of ${\mathcal C}(A^{\otimes k})$ derived from it. Conversely, each component of ${\mathcal C}(A^{\otimes k})$ is derived from a component of ${\mathcal C}(A)$. If $k$ is a multiple of $\sigma$, then index sets in the derived components are the cyclic classes of $\Tilde{\mu}$. \item[{\rm (ii)}] The sets of critical indices of $A^{\otimes k}$ for $k=1,2,\ldots$ are identical. \item[{\rm (iii)}] If $A$ is strictly visualized, $x_i\leq 1$ for all $i$ and $\operatorname{supp}(x^{[1]})$ is a cyclic class of $\Tilde{\mu}$, then $\operatorname{supp}((A\otimes x)^{[1]})$ is the previous cyclic class of $\Tilde{\mu}$. \end{itemize} \end{theorem} \begin{proof} (i),(ii): Both statements are based on the fact that ${\mathcal C}(A^{\otimes k})=({\mathcal C}(A))^k$, shown in Lemma~\ref{l:CAk}. To obtain (i), also apply Theorem~\ref{t:brualdi} to a component $\Tilde{\mu}$ of ${\mathcal C}(A)$. (iii): Use $(A\otimes x)^{[1]}=A^{[1]}\otimes x^{[1]}$, the definition of cyclic classes and the fact that each node in $\Tilde{\mu}$ has an ingoing edge. \end{proof}
\section{Describing extremals} \label{s:extremals}
The aim of this section is to describe the extremals of the core, in both algebras. To this end, we first give a parallel description of extremals of eigencones (the Frobenius-Victory theorems).
\subsection{Extremals of the eigencones} \label{ss:FV}
We now describe the principal eigencones in nonnegative linear algebra and then in max algebra. By means of Proposition~\ref{p:vamrho}, this description can be obviously extended to the general case. As in Section~\ref{ss:pfelts}, both descriptions are essentially known: see~\cite{But:10,Fro-1912,Gau:92, Sch-86}.
We emphasize that the vectors $x^{(\mu)}$ and $x^{(\Tilde{\mu})}$ appearing below are {\bf full-size}.
\begin{theorem}[Frobenius-Victory {\cite[Th.~3.7]{Sch-86}}] \label{t:FVnonneg} Let $A\in\Rp^{n\times n}$ have $\rho^+(A)=1$. \begin{itemize} \item[{\rm (i)}] Each spectral class $\mu$ with $\rho^+_{\mu}=1$ corresponds to an eigenvector $x^{(\mu)}$, whose support consists of all indices in the classes that have access to $\mu$, and all vectors $x$ of $V_+(A,1)$ with $\operatorname{supp} x=\operatorname{supp} x^{(\mu)}$ are multiples of $x^{(\mu)}$. \item[{\rm (ii)}] $V_+(A,1)$ is generated by $x^{(\mu)}$ of {\rm (i)}, for $\mu$ ranging over all spectral classes with $\rho^+_{\mu}=1$. \item[{\rm (iii)}] $x^{(\mu)}$ of {\rm (i)} are extremals of $V_+(A,1)$. {\rm (}Moreover, $x^{(\mu)}$ are linearly independent.{\rm )} \end{itemize} \end{theorem}
Note that the extremality and the {\bf usual} linear independence of $x^{(\mu)}$ (involving linear combinations with possibly negative coefficients) can be deduced from the description of supports in part (i), and from the fact that in nonnegative algebra, spectral classes associated with the same $\rho$ do not access each other. This linear independence also means that $V_+(A,1)$ is a simplicial cone. See also~\cite[Th.~4.1]{Sch-86}.
\begin{theorem}[{\cite[Th.~4.3.5]{But:10}}, {\cite[Th.~2.8]{SSB}}] \label{t:FVmaxalg} Let $A\in\Rp^{n\times n}$ have $\rho^{\oplus}(A)=1$. \begin{itemize} \item[{\rm (i)}] Each component $\Tilde{\mu}$ of ${\mathcal C}(A)$ corresponds to an eigenvector $x^{(\Tilde{\mu})}$ defined as one of the columns $A^*_{\cdot i}$ with $i\in N_{\Tilde{\mu}}$, all columns with $i\in N_{\Tilde{\mu}}$ being multiples of each other. \item[{\rm (i')}] Each component $\Tilde{\mu}$ of ${\mathcal C}(A)$ is contained in a (spectral) class $\mu$ with $\rho^{\oplus}_{\mu}=1$, and the support of each $x^{(\Tilde{\mu})}$ of {\rm (i)} consists of all indices in the classes that have access to $\mu$.
\item[{\rm (ii)}] $V_{\oplus}(A,1)$ is generated by $x^{(\Tilde{\mu})}$ of {\rm (i)}, for $\Tilde{\mu}$ ranging over all components of ${\mathcal C}(A)$. \item[{\rm (iii)}] $x^{(\Tilde{\mu})}$ of {\rm (i)} are extremals in $V_{\oplus}(A,1)$. {\rm (}Moreover, $x^{(\Tilde{\mu})}$ are strongly linearly independent in the sense of~\cite{But-03}.{\rm )} \end{itemize} \end{theorem}
To verify (i'), not explicitly stated in the references, use (i) and the path interpretation of~$A^*$.
Vectors $x^{(\Tilde{\mu})}$ of Theorem~\ref{t:FVmaxalg} are also called the {\em fundamental eigenvectors} of $A$, in max algebra. Applying a strict visualization scaling (Theorem~\ref{t:strictvis}) allows us to get further details on these fundamental eigenvectors.
\begin{proposition}[{\cite[Prop.~4.1]{SSB}}] \label{p:xmuvis} Let $A\in\Rp^{n\times n}$ be strictly visualized (in particular, $\rho^{\oplus}(A)=1$). Then \begin{itemize} \item[{\rm (i)}] For each component $\Tilde{\mu}$ of ${\mathcal C}(A)$, $x^{(\Tilde{\mu})}$ of Theorem~\ref{t:FVmaxalg} can be canonically chosen as $A^*_{\cdot i}$ for any $i\in N_{\Tilde{\mu}}$, all columns with $i\in N_{\Tilde{\mu}}$ being {\em equal} to each other. \item[{\rm (ii)}] $x^{(\Tilde{\mu})}_i\leq 1$ for all $i$. Moreover, $\operatorname{supp}(x^{(\Tilde{\mu})[1]})=N_{\Tilde{\mu}}$.
\end{itemize} \end{proposition}
\subsection{Extremals of the core} \label{ss:extremals}
Let us start with the following observation in both algebras.
\begin{proposition} \label{p:extremals} For each $k\geq 1$, the set of extremals of $V^{\Sigma}(A^k)$ is the union of the sets of extremals of $V(A^k,\rho^k)$ for $\rho\in\Lambda(A)$. \end{proposition} \begin{proof} Due to the fact that $\Lambda(A^k)=\{\rho^k\colon\rho\in\Lambda(A)\}$, we can assume without loss of generality that $k=1$.
1. As $V^{\Sigma}(A)$ is the sum of $V(A,\rho)$ for $\rho\in\Lambda(A)$, it is generated by the extremals of $V(A,\rho)$ for $\rho\in\Lambda(A)$. Hence each extremal of $V^{\Sigma}(A)$ is an extremal of $V(A,\rho)$ for some $\rho\in\Lambda(A)$.
2. Let $x\in V(A,\rho_{\mu})$, for some spectral class $\mu$, be extremal. Assume without loss of generality that $\rho_{\mu}=1$, and by contradiction that there exist vectors $y^{\kappa}$, all of them extremals of $V^{\Sigma}(A)$, such that $x=\sum_{\kappa} y^{\kappa}$. By the above, all vectors $y^{\kappa}$ are eigenvectors of $A$. If there is $y^{\kappa}$ associated with an eigenvalue $\rho_{\nu}>1$, then applying $A^t$ we obtain $x= (\rho_{\nu})^t y^{\kappa}+\ldots$, which is impossible for large enough $t$. So $\rho_{\nu}\leq 1$. With this in mind, if there is $y^{\kappa}$ associated with $\rho_{\nu}<1$, then 1) in nonnegative algebra we obtain $Ax>A\sum_{\kappa} y^{\kappa}$, a contradiction; 2) in max algebra, all nonzero entries of $A\otimes y^{\kappa}$ go below the corresponding entries of $x$ meaning that $y^{\kappa}$ is redundant. Thus we are left only with $y^{\kappa}$ associated with $\rho^{\oplus}_{\nu}=1$, which is a contradiction: an extremal $x\in V_{\oplus}(A,1)$ appears as a ``sum'' of other extremals of $V_{\oplus}(A,1)$ not proportional to $x$. \end{proof}
A vector $x\in\Rp^n$ is called {\em normalized} if $\max x_i=1$. Recall the notation $\sigma_{\rho}$ introduced in Section~\ref{s:key}.
\begin{theorem}[cf.~{\cite[Theorem~4.7]{TS-94}}] \label{t:tam-schneider} Let $A\in\Rp^{n\times n}$. \begin{itemize} \item[{\rm (i)}] The set of extremals of $\operatorname{core}(A)$ is the union of the sets of extremals of $V(A^{\sigma_{\rho}},\rho^{\sigma_{\rho}})$ for all $\rho\in\Lambda(A)$. \item[{\rm (ii)}] {\bf In nonnegative algebra}, each spectral class $\mu$ with cyclicity $\sigma_{\mu}$ corresponds to a set of distinct $\sigma_{\mu}$ normalized extremals of $\core_+(A)$, such that there exists an index in their support that belongs to $\mu$, and each index in their support has access to $\mu$.\\ {\bf In max algebra}, each critical component $\Tilde{\mu}$ with cyclicity $\sigma_{\Tilde{\mu}}$ associated with some $\rho\in\Lambda_{\oplus}(A)$ corresponds to a set of distinct $\sigma_{\Tilde{\mu}}$ normalized extremals $x$ of $\core_{\oplus}(A)$, which are (normalized) columns of $(A_{\rho}^{\sigma_{\rho}})^*$ with indices in $N_{\Tilde{\mu}}$. \item[{\rm (iii)}] Each set of extremals described in {\rm (ii)} forms a simple cycle under the action of $A$. \item[{\rm (iv)}] There are no normalized extremals other than those described in {\rm (ii)}. {\bf In nonnegative algebra,} the total number of normalized extremals equals the sum of cyclicities of all spectral classes of $A$. {\bf In max algebra,} the total number of normalized extremals equals the sum of cyclicities of all critical components of $A$.
\end{itemize} \end{theorem}
\begin{proof} (i) follows from Main Theorem~\ref{t:core} and Proposition~\ref{p:extremals}.
For the proof of (ii) and (iii) we can fix $\rho=\rho_{\mu}\in\Lambda(A)$, assume $A=A_{\rho}$ (using Theorem~\ref{t:reduction}) and $\sigma:=\sigma_{\rho}$. In max algebra, we also assume that $A$ is strictly visualized.
(ii) {\bf In nonnegative algebra,} observe that by Theorem~\ref{t:samespectrum}, each spectral class $\mu$ of $A$ gives rise to $\sigma_{\mu}$ spectral classes in $A^{\times\sigma}$, whose node sets are cyclic classes of $\mu$ (note that $\sigma_{\mu}$ divides $\sigma$). According to Frobenius-Victory Theorem~\ref{t:FVnonneg}, these classes give rise to normalized extremals of $V_+(A^{\times\sigma},1)$, and the conditions on support follow from Theorem~\ref{t:FVnonneg} and Lemma~\ref{l:sameaccess}.
(iii):
Let $x$ be an extremal described above. Then $\operatorname{supp}(x)\cap N_{\mu}$ is a cyclic class of $\mu$ and $\operatorname{supp}(Ax)\cap N_{\mu}$ is the previous cyclic class of $\mu$, by Lemma~\ref{l:sameperron} part (iii). It can be checked that all indices in $\operatorname{supp}(Ax)$ also have access to $\mu$. By Proposition~\ref{p:permute}, $Ax$ is an extremal of $\core_+(A)$, and hence an extremal of $V_+(A^{\times\sigma},1)$. Theorem~\ref{t:FVnonneg} identifies $Ax$ with the extremal associated with the previous cyclic class of $\mu$.
Vectors $x$, $Ax,\ldots,A^{\times\sigma_{\mu}-1}x$ are distinct since the intersections of their supports with $N_{\mu}$ are disjoint, so they are exactly the set of extremals associated with $\mu$. Note that $A^{\times\sigma_{\mu}}x=x$, as $\operatorname{supp}(A^{\times\sigma_{\mu}}x)\cap N_{\mu}=\operatorname{supp}(x)\cap N_{\mu}$, and both vectors are extremals of $V_+(A^{\times\sigma},1)$.
(ii): {\bf In max algebra,} observe that by Theorem~\ref{t:samecritical}(i) each component $\Tilde{\mu}$ of ${\mathcal C}(A)$ gives rise to $\sigma_{\Tilde{\mu}}$ components of ${\mathcal C}(A^{\otimes\sigma})$, whose node sets are the cyclic classes of $\Tilde{\mu}$ (note that $\sigma_{\Tilde{\mu}}$ divides $\sigma$). These components correspond to $\sigma_{\Tilde{\mu}}$ columns of $(A^{\otimes\sigma})^*$ with indices in different cyclic classes of $\Tilde{\mu}$, which are by Theorem~\ref{t:samecritical}(i) the node sets of components of ${\mathcal C}(A^{\otimes\sigma})$. By Theorem~\ref{t:FVmaxalg} these columns of $(A^{\otimes\sigma})^*$ are extremals of $V_{\oplus}(A^{\otimes\sigma},1)$, and Proposition~\ref{p:xmuvis}(ii) implies that they are normalized.
(iii):
Let $x$ be an extremal described above. By Proposition~\ref{p:xmuvis} and Theorem~\ref{t:samecritical}(i) $\operatorname{supp}(x^{[1]})$ is a cyclic class of $\Tilde{\mu}$, and by Theorem~\ref{t:samecritical}(iii) $\operatorname{supp}((A\otimes x)^{[1]})$ is the previous cyclic class of $\Tilde{\mu}$. By Proposition~\ref{p:permute}, $A\otimes x$ is an extremal of $\core_{\oplus}(A)$, and hence an extremal of $V_{\oplus}(A^{\otimes\sigma},1)$. Proposition~\ref{p:xmuvis} identifies $A\otimes x$ with the extremal associated with the previous cyclic class of $\Tilde{\mu}$.
Vectors $x$, $A\otimes x,\ldots,A^{\otimes\sigma_{\Tilde{\mu}}-1}\otimes x$ are distinct since their booleanizations $x^{[1]}$, $(A\otimes x)^{[1]},\ldots,(A^{\otimes\sigma_{\Tilde{\mu}}-1}\otimes x)^{[1]}$ are distinct,
so they are exactly the set of extremals associated with $\Tilde{\mu}$. Note that $A^{\otimes\sigma_{\Tilde{\mu}}}\otimes x=x$, as $(A^{\otimes\sigma_{\Tilde{\mu}}}\otimes x)^{[1]}=x^{[1]}$ and both vectors are extremals of $V_{\oplus}(A^{\otimes\sigma},1)$.
(iv): {\bf In both algebras}, the converse part of Theorem~\ref{t:samespectrum} (i) shows that there are no spectral classes of $A^{\sigma}$ other than the ones derived from the spectral classes of $A$. {\bf In nonnegative algebra,} this shows that there are no extremals other than described in (ii). {\bf In max algebra}, on top of that, the converse part of Theorem~\ref{t:samecritical} (i) shows that there are no components of ${\mathcal C}(A_{\rho}^{\otimes\sigma})$ other than the ones derived from the components ${\mathcal C}(A_{\rho})$, for $\rho\in\Lambda_{\oplus}(A)$, hence there are no extremals other than described in (ii). {\bf In both algebras}, it remains to count the extremals described in (ii). \end{proof}
\section{Sequence of eigencones}
\label{s:eigencones}
The main aim of this section is to investigate the periodicity of eigencones and to prove Main Theorem~\ref{t:periodicity}. Unlike in Section~\ref{s:core}, the proof of periodicity will be different for the cases of max algebra and nonnegative algebra. The periods of eigencone sequences in max algebra and in nonnegative linear algebra are also in general different, for the same nonnegative matrix (see Section~\ref{s:examples} for an example). To this end, recall the definitions of $\sigma_{\rho}$ and $\sigma_{\Lambda}$ given in Section~\ref{s:key}, which will be used below.
\subsection{Periodicity of the sequence}
\label{ss:period}
We first observe that in both algebras \begin{equation} \label{e:inclusion-eig} \begin{split} k\;\makebox{divides}\; l &\Rightarrow V(A^k,\rho^k)\subseteq V(A^l,\rho^l)\quad\forall\rho\in\Lambda(A),\\ k\;\makebox{divides}\; l &\Rightarrow V^{\Sigma}(A^k)\subseteq V^{\Sigma}(A^l). \end{split} \end{equation}
We now prove that the sequence of eigencones is periodic.
\begin{theorem} \label{t:girls} Let $A\in\Rp^{n\times n}$ and $\rho\in\Lambda(A)$. \begin{itemize} \item[{\rm (i)}] $V(A^k,\rho^k)=V(A^{k+\sigma_{\rho}},\rho^{k+\sigma_{\rho}})$, and $V(A^k,\rho^k)\subseteq V(A^{\sigma_{\rho}},\rho^{\sigma_{\rho}})$ for all $k\geq 1$. \item[{\rm (ii)}] $V^{\Sigma}(A^k)=V^{\Sigma}(A^{k+\sigma_{\Lambda}})$ and $V^{\Sigma}(A^k)\subseteq V^{\Sigma}(A^{\sigma_{\Lambda}})$ for all $k\geq 1$. \end{itemize} \end{theorem} \begin{proof} We will give two separate proofs of part (i), for the case of max algebra and the case of nonnegative algebra. Part (ii) follows from part (i).
In both algebras, we can assume without loss of generality that $\rho=1$, and using Theorem~\ref{t:reduction}, that this is the greatest eigenvalue of $A$.
{\bf In max algebra}, by Theorem~\ref{t:nacht}, columns of $A^{\otimes r}$ with indices in $N_c(A)$ are periodic for $r\geq T_c(A)$. Recall that by Corollary~\ref{TcaTcrit}, $T_c(A)$ is not less than $T({\mathcal C}(A))$, which is the greatest periodicity threshold of the strongly connected components of ${\mathcal C}(A)$. By Theorem~\ref{t:BoolCycl} part (ii), $({\mathcal C}(A))^{t\sigma}$ consists of complete graphs for $t\sigma\geq T({\mathcal C}(A))$, in particular, it contains loops $(i,i)$ for all $i\in N_c(A)$. Hence $$ a_{ii}^{\otimes(t\sigma)}=1\quad\forall i\in N_c(A),\ t\geq\lceil\frac{T_c(A)}{\sigma}\rceil, $$ and $$ a_{ki}^{\otimes (l+t\sigma)}\geq a^{\otimes l}_{ki} a_{ii}^{\otimes (t\sigma)}=a_{ki}^{\otimes l} \quad\forall i\in N_c(A),\ \forall k,l,\ \forall t\geq\lceil\frac{T_c(A)}{\sigma}\rceil, $$ or, in terms of columns of matrix powers, $$ A_{\cdot i}^{\otimes (l+t\sigma)}\geq A_{\cdot i}^{\otimes l}\quad \forall i\in N_c(A),\ \forall l,\ \forall t\geq\lceil\frac{T_c(A)}{\sigma}\rceil. $$ Multiplying this inequality repeatedly by $A^{\otimes l}$ we obtain $A_{\cdot i}^{\otimes (kl+t\sigma)}\geq A_{\cdot i}^{\otimes(kl)}$ for all $k\geq 1$, and $A_{\cdot i}^{\otimes (k(l+t\sigma))}\geq A_{\cdot i}^{\otimes(kl)}$ for all $k\geq 1$.
Hence we obtain \begin{equation} \label{e:*ineq} (A^{\otimes (l+t\sigma)})^*_{\cdot i}\geq (A^{\otimes l})^*_{\cdot i}\quad\forall i\inN_c(A),\ \forall l,\ \forall t\geq\lceil\frac{T_c(A)}{\sigma}\rceil. \end{equation} On the other hand, using the ultimate periodicity of critical columns we have \begin{equation*}
(A^{\otimes (l+t\sigma)})^*_{\cdot i}=\bigoplus\{A_{\cdot i}^{\otimes s}\colon s\equiv kl \pmod{\sigma},\ k\geq 1,\ s\geq T_c(A) \} \end{equation*} for all $l$ and all $t\sigma\geq T_c(A)$, while generally \begin{equation*} \label{e:sweepineq} (A^{\otimes l})^*_{\cdot i}\geq \bigoplus\{A_{\cdot i}^{\otimes s}\colon s\equiv kl \pmod{\sigma},\ k\geq 1,\ s\geq T_c(A) \}\ \forall l, \end{equation*} implying the reverse inequality with respect to~\eqref{e:*ineq}. It follows that \begin{equation} \label{e:*eq} (A^{\otimes (l+t\sigma)})^*_{\cdot i}= (A^{\otimes l})^*_{\cdot i}\quad\forall i\in N_c(A),\ \forall l,\ \forall t\geq\lceil\frac{T_c(A)}{\sigma}\rceil,
\end{equation} therefore $(A^{\otimes (l+\sigma)})^*_{\cdot i}=(A^{\otimes (l+t\sigma+\sigma)})^*_{\cdot i}=(A^{\otimes (l+t\sigma)})^*_{\cdot i} =(A^{\otimes l})^*_{\cdot i}$ for all critical indices $i$ and all $l$. Since $V(A^{\otimes l},1)$ is generated by the critical columns of $(A^{\otimes l})^*$, and the critical indices of $A^{\otimes l}$ are $N_c(A)$ by Theorem~\ref{t:samecritical}(ii), the periodicity $V_{\oplus}(A^{\otimes l},\rho^l)=V_{\oplus}(A^{\otimes (l+\sigma)},\rho^{l+\sigma})$ follows. Using this and~\eqref{e:inclusion-eig} we obtain $V_{\oplus}(A^{\otimes l},\rho^l)\subseteq V_{\oplus}(A^{\otimes(l\sigma)},\rho^{l\sigma})=V_{\oplus}(A^{\otimes\sigma},\rho^{\sigma})$ for each $l$ and $\rho\in\Lambda_{\oplus}(A)$.
{\bf In nonnegative algebra,} also assume that all final classes (and hence only them) have Perron root $\rho=1$. Final classes of $A^{\times l}$ are derived from the final classes of $A$; they (and no other classes) have Perron root $\rho^l$. By Theorem~\ref{t:samespectrum}(i) and~Corollary~\ref{c:div-boolean}, for any $t\geq 0$, the spectral classes of $A^{\times l}$ and $A^{\times (l+t\sigma)}$ with Perron root $1$ have the same sets of nodes, which we denote by $N_1,\ldots,N_m$ (assuming that their number is $m\geq 1$).
By the Frobenius-Victory Theorem~\ref{t:FVnonneg}, the cone $V_+(A^{\times l},1)$ is generated by $m$ extremals $x^{(1)},\ldots,x^{(m)}$ with the support condition of Theorem~\ref{t:FVnonneg}(i) from which we infer that the subvectors $x^{(\mu)}_{\mu}$ (i.e., $x^{(\mu)}_{N_{\mu}}$) are positive, while $x^{(\mu)}_{\nu}$ (i.e., $x^{(\mu)}_{N_{\nu}}$) are zero for all $\mu\neq \nu$ from $1$ to $m$, since the different spectral classes by~\eqref{e:speclass} do not access each other, in the nonnegative linear algebra. Analogously the cone $V_+(A^{\times(l+t\sigma)},1)$ is generated by $m$ eigenvectors $y^{(1)},\ldots,y^{(m)}$ such that the subvectors $y^{(\mu)}_{\mu}$ are positive, while $y^{(\mu)}_{\nu}=0$ for all $\mu\neq \nu$ from $1$ to $m$.
Assume first that $l=\sigma$. As $V_+(A^{\times\sigma},1)\subseteq V_+(A^{\times (t\sigma)},1)$, each $x^{(\mu)}$ is a nonnegative
linear combination of $y^{(1)},\ldots,y^{(m)}$, and this implies $x^{(\mu)}=y^{(\mu)}$ for all $\mu=1,\ldots,m$. Hence $V_+(A^{\times (t\sigma)},1)=V_+(A^{\times\sigma},1)$ for all $t\geq 0$.
We also obtain $V_+(A^{\times l},1)\subseteq V_+(A^{\times (\sigma l)},1)=V_+(A^{\times\sigma},1)$ for all $l$. Thus $V_+(A^{\times l},1)\subseteq V_+(A^{\times (t\sigma)},1)$, and therefore $V_+(A^{\times l},1)\subseteq V_+(A^{\times (l+t\sigma)},1)$. Now if $V_+(A^{\times l},1)$, resp. $V_+(A^{\times (l+t\sigma)},1)$ are generated by $x^{(1)},\ldots,x^{(m)}$, resp. $y^{(1)},\ldots, y^{(m)}$ described above and each $x^{(\mu)}$ is a nonnegative
linear combination of $y^{(1)},\ldots,y^{(m)}$, this again implies $x^{(\mu)}=y^{(\mu)}$ for all $\mu=1,\ldots,m$, and $V_+(A^{\times (l+t\sigma)},1)=V_+(A^{\times l},1)$ for all $t\geq 0$ and all $l$.
Using this and~\eqref{e:inclusion-eig} we obtain $V_+(A^{\times l},\rho^l)\subseteq V_+(A^{\times(l\sigma)},\rho^{l\sigma})=V_+(A^{\times\sigma},\rho^{\sigma})$ for each $l$ and $\rho\in\Lambda_+(A)$. \end{proof}
\if{ for the sum of all eigencones $V^{\Sigma}(A^k)=\sum_{\rho\in\Lambda(A)} V(A^k,\rho^k)$. Summarizing Theorems~\ref{t:girls-max} and~\ref{t:girls-nonneg} and using $\sigma_{\rho}$ and $\sigma_{\Lambda}$
we obtain the following formulation (now uniting both algebras).
\begin{theorem} \label{t:girls} Let $A\in\Rp^{n\times n}$. For all $l\geq 1$ and $\rho\in\Lambda(A)$, \begin{itemize} \item[(i)] $V(A^l,\rho^l)=V(A^{l+\sigma_{\rho}},\rho^{l+\sigma_{\rho}})$ and $V(A^l,\rho^l)\subseteq V(A^{\sigma_{\rho}},\rho^{\sigma_{\rho}})$, \item[(ii)] $V^{\Sigma}(A^l)=V^{\Sigma}(A^{l+\sigma_{\Lambda}})$ and $V^{\Sigma}(A^l)\subseteq V^{\Sigma}(A^{\sigma_{\Lambda}})$. \end{itemize} \end{theorem} }\fi
\subsection{Inclusion and divisibility} \label{ss:incl}
We now show that the inclusion relations between the eigencones of different powers of a matrix, in both algebras, strictly follow divisibility of exponents of matrix powers with respect to $\sigma_{\rho}$ and $\sigma_{\Lambda}$.
We start with a corollary of Theorem~\ref{t:girls}.
\begin{lemma} \label{l:gcd-if} Let $k,l\geq 1$ and $\rho\in\Lambda(A)$. \begin{itemize} \item[{\rm (i)}] $V(A^k,\rho^k)=V(A^{\text{gcd}(\sigma_{\rho},k)},\rho^{\text{gcd}(\sigma_{\rho},k)})$ and $V^{\Sigma}(A^k)=V^{\Sigma}(A^{\text{gcd}(\sigma_{\Lambda},k)})$. \item[{\rm (ii)}] gcd$(k,\sigma_{\rho})=$ gcd$(l,\sigma_{\rho})$ implies $V(A^k,\rho^k)=V(A^l,\rho^l)$, and gcd$(k,\sigma_{\Lambda})=$ gcd$(l,\sigma_{\Lambda})$ implies $V^{\Sigma}(A^k)=V^{\Sigma}(A^l)$. \end{itemize} \end{lemma} \begin{proof} (i): Let $\sigma:=\sigma_{\rho}$, and $s:=$gcd$(k,\sigma)$. If $s=\sigma$ then $k$ is a multiple of $\sigma$ and $V(A^k,\rho^k)=V(A^s,\rho^s)$ by Theorems~\ref{t:girls}(i). Otherwise, since $s$ divides $k$, we have $V(A^s,\rho^s)\subseteq V(A^k,\rho^k)$. In view of the periodicity (Theorem~\ref{t:girls}(i)), it suffices to find $t$ such that $V(A^k,\rho^k)\subseteq V(A^{s+t\sigma},\rho^{s+t\sigma})$. For this, observe that $s+t\sigma$ is a multiple of $s=$ gcd$(k,\sigma)$. By Lemma~\ref{l:schur} (the Frobenius coin problem), for big enough $t$ it can be expressed as $t_1 k + t_2\sigma$ where $t_1,t_2\geq 0$. Moreover $t_1\neq 0$, for otherwise we have $s=\sigma$. Then we obtain \begin{equation*} \begin{split} V(A^k,\rho^k)&\subseteq V(A^{t_1k},\rho^{t_1k})=V(A^{t_1k+t_2\sigma},\rho^{t_1 k+t_2\sigma})\\ &=V(A^{s+t\sigma},\rho^{s+t\sigma})=V(A^s,\rho^s), \end{split} \end{equation*} and the first part of the claim follows. The second part is obtained similarly, using Theorem~\ref{t:girls}(ii) instead of Theorems~\ref{t:girls}(i).
(ii) follows from (i). \end{proof}
\begin{theorem} \label{t:girls-major} Let $A\in\Rp^{n\times n}$ and $\sigma$ be either the cyclicity of a spectral class of $A$ {\bf (nonnegative algebra)} or the cyclicity of a critical component of $A$ {\bf (max algebra)}. The following are equivalent for all positive $k,l$: \begin{itemize} \item[{\rm (i)}] gcd$(k,\sigma)$ divides gcd$(l,\sigma)$ for all cyclicities $\sigma$; \item[{\rm (ii)}] gcd$(k,\sigma_{\rho})$ divides gcd$(l,\sigma_{\rho})$ for all $\rho\in\Lambda(A)$; \item[{\rm (iii)}] gcd$(k,\sigma_{\Lambda})$ divides gcd$(l,\sigma_{\Lambda})$; \item[{\rm (iv)}] $V(A^k,\rho^k)\subseteq V(A^l,\rho^l)$ for all $\rho\in\Lambda(A)$ and \item[{\rm (v)}] $V^{\Sigma}(A^k)\subseteq V^{\Sigma}(A^l)$. \end{itemize} \end{theorem} \begin{proof} (i)$\Rightarrow$(ii)$\Rightarrow$(iii) follow from elementary number theory. (ii)$\Rightarrow$ (iv) and (iii)$\Rightarrow$(v) follow from~\eqref{e:inclusion-eig} and Lemma~\ref{l:gcd-if} part (i) (which is essentially based on Theorem~\ref{t:girls}). (iv)$\Rightarrow$(v) is trivial. It only remains to show that (v)$\Rightarrow$ (i).
(v)$\Rightarrow$ (i): {\bf In both algebras}, take an extremal $x\in V(A^k,\rho^k)$. As $V^{\Sigma}(A^k)\subseteq V^{\Sigma}(A^l)$, this vector can be represented as $x=\sum_i y^i$, where $y^i$ are extremals of $V^{\Sigma}(A^l)$. Each $y^i$ is an extremal of $V(A^l,\Tilde{\rho}^l)$ for some $\Tilde{\rho}\in\Lambda(A)$ (as we will see, only the extremals with $\Tilde{\rho}=\rho$ are important). By Frobenius-Victory Theorems~\ref{t:FVnonneg} and~\ref{t:FVmaxalg} and Theorem~\ref{t:samespectrum}(i), there is a unique spectral class $\mu$ of $A$ to which all indices in $\operatorname{supp}(x)$ have access. Since $\operatorname{supp}(y^i)\subseteq\operatorname{supp}(x)$, we are restricted to the submatrix $A_{JJ}$ where $J$ is the set of all indices accessing $\mu$ in $A$. In other words, we can assume without loss of generality that $\mu$ is the only final class in $A$, hence $\rho$ is the greatest eigenvalue, and $\rho=1$. Note that $\operatorname{supp}(x)\cap N_{\mu}\neq\emptyset$.
{\bf In nonnegative algebra,} restricting the equality $x=\sum_i y^i$ to $N_{\mu}$ we obtain \begin{equation} \label{e:eqsupps1} \operatorname{supp}(x_{\mu})=\bigcup_i \operatorname{supp}(y^i_{\mu}). \end{equation} If $\operatorname{supp}(y^i_{\mu})$ is non-empty, then $y^i$ is associated with a spectral class of $A^{\times l}$ whose nodes are in $N_{\mu}$.
Theorem~\ref{t:FVnonneg}(i) implies that $\operatorname{supp}(y^i_{\mu})$ consists of all indices in a class of $A_{\mu\mu}^{\times l}$. As $x$ can be any extremal eigenvector of $A^{\times k}$ with $\operatorname{supp} x\cap N_{\mu}\neq\emptyset$, \eqref{e:eqsupps1} shows that each class of $A_{\mu\mu}^{\times k}$ (corresponding to $x$) splits into several classes of $A_{\mu\mu}^{\times l}$ (corresponding to $y^i$). By Corollary~\ref{c:div-boolean} this is only possible when gcd$(k,\sigma)$ divides gcd$(l,\sigma)$, where $\sigma$ is the cyclicity of the spectral class $\mu$.
{\bf In max algebra,} since $\rho=1$, assume without loss of generality that $A$ is strictly visualized. In this case $A$ and $x$ have all coordinates not exceeding $1$. Recall that $x^{[1]}$ is the Boolean vector defined by $x^{[1]}_i=1$ $\Leftrightarrow$ $x_i=1$. Vector $x$ corresponds to a unique critical component $\Tilde{\mu}$ of ${\mathcal C}(A)$ with the node set $N_{\Tilde{\mu}}$. Then instead of~\eqref{e:eqsupps1} we obtain \begin{equation} \label{e:eqsupps2} x^{[1]}=\bigoplus_i y^{i[1]}\quad \Rightarrow\quad \operatorname{supp}(x^{[1]}_{\Tilde{\mu}})=\bigcup_i \operatorname{supp}(y^{i[1]}_{\Tilde{\mu}}), \end{equation} where $\operatorname{supp}(x^{[1]})=\operatorname{supp}(x^{[1]}_{\Tilde{\mu}})$ by Proposition~\ref{p:xmuvis}(ii) and Theorem~\ref{t:samecritical}(i), and hence also $\operatorname{supp}(y^{i[1]})=\operatorname{supp}(y^{i[1]}_{\Tilde{\mu}})$. If $\operatorname{supp}(y^{i[1]}_{\Tilde{\mu}})$ is non-empty then also $\operatorname{supp}(y^i_{N_{\mu}})$ is non-empty so that $y^i$ is associated with the eigenvalue $1$.
\if{Lemma~\ref{l:sameperron} implies that $\operatorname{supp}(y^{i[1]}_{N_{\Tilde{\mu}}})$ comprises the set of indices in several classes of $(A^{[1]}_{N_{\Tilde{\mu}}N_{\Tilde{\mu}}})^{\otimes l}$ (as it can be argued, in only one class if $y^{i}$ is extremal). }\fi As $y^i$ is extremal, Proposition~\ref{p:xmuvis}(ii) implies that $\operatorname{supp}(y^{i[1]}_{\Tilde{\mu}})$ consists of all indices in a class of $(A^{[1]}_{\Tilde{\mu}\Tilde{\mu}})^{\otimes l}$. As $x$ can be any extremal eigenvector of $A^{\otimes k}$ with $\operatorname{supp} (x^{[1]})\cap N_{\Tilde{\mu}}\neq\emptyset$, \eqref{e:eqsupps2} shows that each class of $(A^{[1]}_{\Tilde{\mu}\Tilde{\mu}})^{\otimes k}$ splits into several classes of $(A^{[1]}_{\Tilde{\mu}\Tilde{\mu}})^{\otimes l}$. By Corollary~\ref{c:div-boolean} this is only possible when gcd$(k,\sigma)$ divides gcd$(l,\sigma)$, where $\sigma$ is the cyclicity of the critical component $\Tilde{\mu}$. \end{proof}
Let us also formulate the following version restricted to some $\rho\in\Lambda(A)$.
\begin{theorem} \label{t:girls-minor} Let $A\in\Rp^{n\times n}$, and let $\sigma$ be either the cyclicity of a spectral class {\bf (nonnegative algebra)} or the cyclicity of a critical component {\bf (max algebra)} associated with some $\rho\in\Lambda(A)$. The following are equivalent for all positive $k,l$: \begin{itemize} \item[{\rm (i)}] gcd$(k,\sigma)$ divides gcd$(l,\sigma)$ for all cyclicities $\sigma$; \item[{\rm (ii)}] gcd$(k,\sigma_{\rho})$ divides gcd$(l,\sigma_{\rho})$; \item[{\rm (iii)}] $V(A^k,\rho^k)\subseteq V(A^l,\rho^l)$. \end{itemize} \end{theorem} \begin{proof} (i)$\Rightarrow$(ii) follows from the elementary number theory, and (ii)$\Rightarrow$(iii) follows from~\eqref{e:inclusion-eig} and Lemma~\ref{l:gcd-if}(i). The proof of (iii)$\Rightarrow$(i) follows the lines of the proof of Theorem~\ref{t:girls-major} (v)$\Rightarrow$ (i), with a slight simplification that $\Tilde{\rho}=\rho$ and further, $x$ and all $y^i$ in $x=\sum_i y^i$ are associated with the same eigenvalue. \end{proof}
We are now ready to deduce Main Theorem~\ref{t:periodicity}.
\begin{proof}[Proof of Main Theorem~\ref{t:periodicity}]
We prove the first part. The inclusion $V(A^k,\rho^k)\subseteq V(A^{\sigma},\rho^{\sigma})$ was proved in Theorem~\ref{t:girls} (i), and we are left to show that $\sigma_{\rho}$ is the least $p$ such that $V(A^{k+p},\rho^{k+p})=V(A^k,\rho^k)$ for all $k\geq 1$. But taking $k=\sigma_{\rho}$ and using Theorem~\ref{t:girls-minor} (ii)$\Leftrightarrow$(iii), we obtain gcd$(\sigma_{\rho}+p,\sigma_{\rho})=\sigma_{\rho}$, implying that $\sigma_{\rho}$ divides $\sigma_{\rho}+p$, so $\sigma_{\rho}$ divides $p$. Since Theorem~\ref{t:girls} (i) also shows that $V(A^{k+\sigma_{\rho}},\rho^{k+\sigma_{\rho}})=V(A^k,\rho^k)$ for all $k\geq 1$, the result follows.
The second part can be proved similarly, using Theorem~\ref{t:girls}(ii) and Theorem~\ref{t:girls-major} (iii)$\Leftrightarrow$(v). \end{proof}
\section{Examples} \label{s:examples} We consider two examples of reducible nonnegative matrices, examining their core in max algebra and in nonnegative linear algebra.
\noindent {\em Example 1.} Take \begin{equation} A= \begin{pmatrix}
0.1206 & 0 & 0 & 0 & 0\\
0.5895 & 0.2904 & 1 & 0.8797 & 0.4253\\
0.2262 & 0.6171 & 0.3439 & 1 & 0.3127\\
0.3846 & 0.2653 & 0.5841 & 0.2607 & 1\\
0.5830 & 1 & 0.1078 & 0.5944 & 0.1788 \end{pmatrix}\,\enspace . \end{equation} $A$ has two classes with node sets $\{1\}$ and $\{2,3,4,5\}$. Both in max algebra and in nonnegative linear algebra, the only spectral class arises from $M:=\{2,3,4,5\}$. The max-algebraic Perron root of this class is $\rho^{\oplus}(A)=1$, and the critical graph consists of just one cycle $2\to 3\to 4\to 5\to 2$.
The eigencones $V_{\oplus}(A,1)$, $V_{\oplus}(A^{\otimes 2},1)$, $V_{\oplus}(A^{\otimes 3},1)$ and $V_{\oplus}(A^{\otimes 4},1)$ are generated by the last four columns of the Kleene stars $A^*$, $(A^{\otimes 2})^*$, $(A^{\otimes 3})^*$, $(A^{\otimes 4})^*$. Namely, \begin{equation*} \begin{split} V_{\oplus}(A,1)&=V_{\oplus}(A^{\otimes 3},1)=\spann_{\oplus}\{(0\ 1\ 1\ 1\ 1)\},\\ V_{\oplus}(A^{\otimes 2},1)=\spann_{\oplus} &\{(0,1,0.8797,1,0.8797),\ (0,0.8797,1,0.8797,1)\},\\ V_{\oplus}(A^{\otimes 4},1)=\spann_{\oplus} &\{(0,1,0.6807,0.7738,0.8797),\ (0,0.8797,1,0.6807,0.7738),\\
&(0,0.7738,0.8797,1,0.6807),\ (0,0.6807,0.7738,0.8797,1)\} \end{split} \end{equation*}
By Main Theorem~\ref{t:core}, $\core_{\oplus}(A)$ is equal to $V_{\oplus}(A^{\otimes 4},1)$. Computing the max-algebraic powers of $A$ we see that the sequence of submatrices $A^{\otimes t}_{MM}$ becomes periodic after $t=10$, with period $4$. In particular, \begin{equation} A^{\otimes 10}= \begin{pmatrix}
\alpha & 0 & 0 & 0 & 0\\
0.4511 & 0.7738 & 0.6807 & 1 & 0.8797\\
0.5128 & 0.8797 & 0.7738 & 0.6807 & 1\\
0.5830 & 1 & 0.8797 & 0.7738 & 0.6807\\
0.5895 & 0.6807 & 1 & 0.8797 & 0.7738 \end{pmatrix}, \end{equation} where $0<\alpha<0.0001$. Observe that the last four columns are precisely the ones that generate $V_{\oplus}(A^{\otimes 4},1)$. Moreover, if $\alpha$ was $0$ then the first column would be the following max-combination of the last four columns: $$ a^{\otimes 10}_{41}A^{\otimes 10}_{\cdot 2}\oplus a^{\otimes 10}_{51}A^{\otimes 10}_{\cdot 3}\oplus a^{\otimes 10}_{21}A^{\otimes 10}_{\cdot 4}\oplus a^{\otimes 10}_{31}A^{\otimes 10}_{\cdot 5}. $$ On the one hand, the first column of $A^{\otimes t}$ cannot be a max-combination of the last four columns for any $t>0$ since $a_{11}^{\otimes t}>0$. On the other hand, $a_{11}^{\otimes t}\to 0$ as $t\to\infty$ ensuring that the first column belongs to the core ``in the limit''.
Figure~\ref{f:petersfigure1} gives a symbolic illustration of what is going on in this example.
\begin{figure}
\caption{The spans of matrix powers (upper curve) and the periodic sequence of their eigencones (lower graph) in Example 1 (max algebra).}
\label{f:petersfigure1}
\end{figure}
\if{ \begin{figure}
\caption{The spans of matrix powers (upper curve) and the periodic sequence of their eigencones (lowergraph) in Example 1 (max algebra).}
\label{f:petersfigure1}
\end{figure} }\fi
In {\bf nonnegative algebra}, the block $A_{MM}$ with $M=\{2,3,4,5\}$ is also the only spectral block. Its Perron root is approximately $\rho^+(A)=2.2101$, and the corresponding eigencone is $$ V_+(A,\rho^+(A))=\spann_+\{(0,\ 0.5750,\ 0.5107,\ 0.4593,\ 0.4445)\}. $$ Taking the usual powers of $(A/\rho^+(A))$ we see that $$ \left(A/\rho^+(A)\right)^{\times 12}= \begin{pmatrix}
\alpha & 0 & 0 & 0 & 0\\
0.2457 & 0.2752 & 0.2711 & 0.3453 & 0.2693\\
0.2182 & 0.2444 & 0.2408 & 0.3067 & 0.2392\\
0.1963 & 0.2198 & 0.2165 & 0.2759 & 0.2151\\
0.1899 & 0.2127 & 0.2096 & 0.2670 & 0.2082 \end{pmatrix}, $$ where $0<\alpha<0.0001$, and that the first four digits of all entries in all higher powers are the same. It can be verified that the submatrix $(A/\rho^+(A))^{\times 12}_{MM}$ is, approximately, the outer product of the Perron eigenvector with itself, while the first column is also almost proportional to it.
\noindent {\em Example 2.} Take \begin{equation} A= \begin{pmatrix}
0 & 1 & 0 & 0\\
1 & 0 & 0 & 0\\
0.6718 & 0.2240 & 0.5805 & 0.1868\\
0.6951 & 0.6678 & 0.4753 & 0.3735 \end{pmatrix}\,\enspace . \end{equation}
This matrix has two classes $\mu$ and $\nu$ with index sets $\{1,2\}$ and $\{3,4\}$, and both classes are spectral, in both algebras. In max algebra $\rho^{\oplus}_{\mu}=1$ and $\rho^{\oplus}_{\nu}=a_{33}<1$. The eigencones of matrix powers associated with $\rho^{\oplus}_{\mu}=1$ are \begin{equation*} \begin{split} V_{\oplus}(A,1)&=\spann_{\oplus}\{(1,1,0.6718,0.6951)\},\\ V_{\oplus}(A^{\otimes 2},1)&= \spann_{\oplus}\{(1,0,0.3900,0.6678),\ (0,1,0.6718,0.6951)\},\\ \end{split} \end{equation*} and the eigencone associated with $\rho^{\oplus}_{\nu}$ is generated by the third column of the matrix: $$ V_{\oplus}(A,\rho^{\oplus}_{\nu})=\spann_{\oplus}\{(0,\ 0,\ 0.5805,\ 0.4753)\}. $$
By Main Theorem~\ref{t:core}, $\core_{\oplus}(A)$ is equal to the (max-algebraic) sum of $V_{\oplus}(A^{\otimes 2},1)$ and $V_{\oplus}(A,\rho^{\oplus}_{\nu})$. To this end, observe that already in the second max-algebraic power \begin{equation} A^{\otimes 2}= \begin{pmatrix} 1 & 0 & 0 & 0\\
0 & 1 & 0 & 0\\
0.3900 & 0.6718 & 0.3370 & 0.1084\\
0.6678 & 0.6951 & 0.2759 & 0.1395 \end{pmatrix} \end{equation} the first two columns are the generators of $V_{\oplus}(A^{\otimes 2},1)$. However, the last column is still not proportional to the third one which shows that $\spann_{\oplus}(A^{\otimes 2})\neq \core_{\oplus}(A)$. However, it can be checked that this happens in $\spann_{\oplus}(A^{\otimes 4})$, with the first two columns still equal to the generators of $V_{\oplus}(A^{\otimes 2},1)$, which shows that $\spann_{\oplus}(A^{\otimes 4})$ is the sum of above mentioned max cones, and hence $\spann_{\oplus}(A^{\otimes 4})=\spann_{\oplus}(A^{\otimes 5})=\ldots=\core_{\oplus}(A)$. Hence we see that $A$ is column periodic (${\mathcal S}_5$) and the core finitely stabilizes. See Figure~\ref{f:petersfigure2} for a symbolic illustration.
\begin{figure}
\caption{The spans of matrix powers (upper graph) and the periodic sequence of their eigencones (lower graph) in Example 2 (max algebra)}
\label{f:petersfigure2}
\end{figure}
\if{ \begin{figure}
\caption{The spans of matrix powers (upper curve) and the periodic sequence of their eigencones (lower graph) in Example 2 (max algebra)}
\label{f:petersfigure2}
\end{figure} }\fi
In {\bf nonnegative algebra}, $\rho^+_{\mu}=1$ and $\rho^+_{\nu}=0.7924$. Computing the eigenvectors of $A$ and $A^{\times 2}$ yields \begin{equation*} \begin{split} V_+(A,1)&=\spann_+\{(0.1326,0.1326,0.6218,0.7604)\},\\ V_+(A^{\times 2},1)&= \spann_+\{(0.2646,0,0.5815,0.7693),\ (0,0.2566,0.6391,0.7251)\},\\ \end{split} \end{equation*} and $$ V_+(A,\rho^+_{\nu})=\spann_+\{(0,\ 0,\ 0.6612,\ 0.7502)\}. $$
Here $\core_+(A)$ is equal to the ordinary (Minkowski) sum of $V_+(A^{\times 2},1)$ and $V_+(A,\rho^+_{\nu})$. To this end, it can be observed that, within the first $4$ digits, the first two columns of $A^{\times t}$ become approximately periodic after $t=50$, and the columns of powers of the normalized submatrix $A_{\nu\nu}/\rho^+_{\nu}$ approximately stabilize after $t=7$. Of course, there is no finite stabilization of the core in this case. However, the structure of the nonnegative core is similar to the max-algebraic counterpart described above.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Capacity of the range in dimension $5$} \runtitle{Capacity of the range in dimension $5$}
\author{\fnms{Bruno} \snm{Schapira}\ead[label=e1]{[email protected]}} \address{Aix-Marseille Universit\'e, CNRS, Centrale Marseille, I2M, UMR 7373, 13453 Marseille, France\\ \printead{e1}}
\runauthor{Bruno Schapira}
\begin{abstract} We prove a Central limit theorem for the capacity of the range of a symmetric random walk on $\mathbb Z^5$, under only a moment condition on the step distribution. The result is analogous to the central limit theorem for the size of the range in dimension three, obtained by Jain and Pruitt in 1971. In particular an atypical logarithmic correction appears in the scaling of the variance. The proof is based on new asymptotic estimates, which hold in any dimension $d\ge 5$, for the probability that the ranges of two independent random walks intersect. The latter are then used for computing covariances of some intersection events, at the leading order. \end{abstract}
\begin{keyword}[class=MSC]
\kwd{60F05; 60G50; 60J45}
\end{keyword}
\begin{keyword} \kwd{Random Walk} \kwd{Range} \kwd{Capacity} \kwd{Central Limit Theorem} \kwd{Intersection of random walk ranges} \end{keyword}
\end{frontmatter}
\section{Introduction}\label{sec:intro} Consider a random walk $(S_n)_{n\ge 0}$ on $\mathbb{Z}^d$, that is a process of the form $S_n=S_0+X_1+\dots + X_n$, where the $(X_i)_{i\ge 1}$ are independent and identically distributed. A general question is to understand the geometric properties of its range, that is the random set $\mathcal{R}_n:=\{S_0,\dots,S_n\}$, and more specifically to analyze its large scale limiting behavior as the time $n$ is growing. In their pioneering work, Dvoretzky and Erd\H{o}s \cite{DE51} proved a strong law of large numbers for the number of distinct sites in $\mathcal{R}_n$, in any dimension $d\ge 1$. Later a central limit theorem was obtained first by Jain and Orey \cite{JO69} in dimensions $d\ge 5$, then by Jain and Pruitt \cite{JP71} in dimension $3$ and higher, and finally by Le Gall \cite{LG86} in dimension $2$, under fairly general hypotheses on the common law of the $(X_i)_{i\ge 1}$. Furthermore, a lot of activity has been focused on analyzing the large and moderate deviations, which we will not discuss here.
More recently some papers were concerned with other functionals of the range, including its entropy \cite{BKYY}, and its boundary \cite{AS17, BKYY, BY, DGK, Ok16}. Here we will be interested in another natural way to measure the size of the range, which also captures some properties of its shape. Namely we will consider its Newtonian capacity, defined for a finite subset $A\subset \mathbb{Z}^d$, as \begin{equation}\label{cap.def} \mathrm{Cap}(A) :=\sum_{x\in A} \mathbb{P}_x[H_A^+ = \infty], \end{equation} where $\mathbb{P}_x$ is the law of the walk starting from $x$, and $H_A^+$ denotes the first return time to $A$ (see \eqref{HAHA+} below). Actually the first study of the capacity of the range goes back to the earlier work by Jain and Orey \cite{JO69}, who proved a law of large numbers in any dimension $d\ge 3$; and more precisely that almost surely, as $n\to \infty$, \begin{equation}\label{LLN.cap} \frac 1n \mathrm{Cap}(\mathcal{R}_n)\to \gamma_d, \end{equation} for some constant $\gamma_d$, which is nonzero if and only if $d\ge 5$ -- the latter observation being actually directly related to the fact that it is only in dimension $5$ and higher that two independent ranges have a positive probability not to intersect each other. However, until very recently to our knowledge there was no other work on the capacity of the range, even though the results of Lawler on the intersection of random walks incidentally gave a sharp asymptotic behavior of the mean in dimension four, see \cite{Law91}.
In a series of recent papers \cite{Chang, ASS18, ASS19}, the central limit theorem has been established for the simple random walk in any dimension $d\ge 3$, except for the case of dimension $5$, which remained unsolved so far. The main goal of this paper is to fill this gap, but in the meantime we obtain general results on the probability that the ranges of two independent walks intersect, which might be of independent interest. We furthermore obtain estimates for the covariances between such events, which is arguably one of the main novelties of our work; but we shall come back to this point a bit later.
Our hypotheses on the random walk are quite general: we only require that the distribution of the $(X_i)_{i\ge 1}$ is a symmetric and irreducible probability measure on $\mathbb{Z}^d$, which has a finite $d$-th moment. Under these hypotheses our first result is the following. \begin{theoremA}\label{theoremA} Assume $d=5$. There exists a constant $\sigma>0$, such that as $n\to \infty$, $$\operatorname{Var}(\mathrm{Cap}(\mathcal{R}_n)) \sim \sigma^2 \, n\log n.$$ \end{theoremA} We then deduce a central limit theorem. \begin{theoremB}\label{theoremB} Assume $d=5$. Then, $$\frac{\mathrm{Cap}(\mathcal{R}_n) - \gamma_5 n}{\sigma \sqrt{n\log n}}\quad \stackrel{(\mathcal L)}{\underset{n\to \infty}{\Longrightarrow}} \quad \mathcal N(0,1).$$ \end{theoremB} As already mentioned, along the proof we also obtain a precise asymptotic estimate for the probability that the ranges of two independent walks starting from far away intersect. Previously to our knowledge only the order of magnitude up to multiplicative constants had been established, see \cite{Law91}. Since our proof works the same in any dimension $d\ge 5$, we state our result in this general setting. Recall that to each random walk one can associate a norm (see below for a formal definition), which we denote here by $\mathcal J(\cdot)$ (in particular in the case of the simple random walk it coincides with the Euclidean norm). \begin{theoremC}\label{theoremC} Assume $d\ge 5$. Let $S$ and $\widetilde S$ be two independent random walks starting from the origin (with the same distribution).
There exists a constant $c>0$, such that as $\|x\|\to \infty$, $$\mathbb{P}\left[\mathcal{R}_\infty \cap (x+\widetilde \mathcal{R}_\infty)\neq \varnothing\right]\sim \frac{c}{\mathcal J(x)^{d-4}}.$$ \end{theoremC}
In fact we obtain a stronger and more general result. Indeed, first we get some control on the second order term, and show that it is $\mathcal{O} (\|x\|^{4-d-\nu})$, for some constant $\nu>0$. Moreover, we also consider some functionals of the position of one of the two walks at its hitting time of the other range. More precisely, we obtain asymptotic estimates for quantities of the form $\mathbb{E}[F(S_\tau){\text{\Large $\mathfrak 1$}}\{\tau<\infty\}]$, with $\tau$ the hitting time of the range $x+\widetilde \mathcal{R}_\infty$, for functions $F$ satisfying some regularity property, see \eqref{cond.F}. In particular, it applies to functions of the form $F(x)=1/\mathcal J(x)^\alpha$, for any $\alpha\in [0,1]$, for which we obtain that for some constants $\nu>0$, and $c>0$,
$$ \mathbb{E}\left[\frac{ {\text{\Large $\mathfrak 1$}}\{\tau<\infty\} }{1+\mathcal J(S_\tau)^\alpha}\right] = \frac{c}{\mathcal J(x)^{d-4+\alpha}} + \mathcal{O}\left(\|x\|^{4-\alpha-d- \nu}\right).$$ Moreover, the same kind of estimates is obtained when one considers rather $\tau$ as the hitting time of $x+\widetilde \mathcal{R}[0,\ell]$, with $\ell$ a finite integer. These results are then used to derive asymptotic estimates for covariances of hitting events in the following four situations: let $S$, $S^1$, $S^2$, and $S^3$, be four independent random walks on $\mathbb{Z}^5$, all starting from the origin and consider either \begin{itemize} \item[$(i)$] $A=\{\mathcal{R}_\infty^1 \cap \mathcal{R}[k,\infty)\neq \varnothing\}, \ \text{and}\ B= \{\mathcal{R}_\infty^2 \cap (S_k + \mathcal{R}_\infty^3 )\neq \varnothing\}$, \item[$(ii)$] $A=\{\mathcal{R}_\infty^1 \cap \mathcal{R}[k,\infty)\neq \varnothing\}, \text{ and } B= \{(S_k+\mathcal{R}_\infty^2) \cap \mathcal{R}[k+1,\infty)\neq \varnothing\}$, \item[$(iii)$] $A=\{\mathcal{R}_\infty^1 \cap \mathcal{R}[k,\infty)\neq \varnothing\}, \ \text{and}\ B= \{(S_k+\mathcal{R}_\infty^2) \cap \mathcal{R}[0,k-1] \neq \varnothing\}$, \item[$(iv)$] $A=\{\mathcal{R}_\infty^1 \cap \mathcal{R}[1,k] \neq \varnothing\}, \ \text{and}\ B= \{(S_k+\mathcal{R}_\infty^2) \cap \mathcal{R}[0,k-1] \neq \varnothing\}$. \end{itemize} In all these cases, we show that for some constant $c>0$, as $k\to \infty$, $$\operatorname{Cov}(A,B) \sim \frac{c}{k}. $$ Case $(i)$ is the easiest, and follows directly from Theorem C, since actually one can see that in this case both $\mathbb{P}[A\cap B]$ and $\mathbb{P}[A]\cdot \mathbb{P}[B]$ are asymptotically equivalent to a constant times the inverse of $k$. However, the other cases are more intricate, partly due to some cancellations that occur between the two terms, which, if estimated separately, are both of order $1/\sqrt{k}$ in cases $(ii)$ and $(iii)$, or even of order $1$ in case $(iv)$. 
In these cases, we rely on the extensions of Theorem C, that we just mentioned above. More precisely in case $(ii)$ we rely on the general result applied
with the functions $F(x)=1/\|x\|$, and its convolution with the distribution of $S_k$, while in cases $(iii)$ and $(iv)$ we use the extension to hitting times of finite windows of the range. We stress also that showing the positivity of the constants $c$ here is a delicate part of the proof, especially in case $(iv)$, where it relies on the following inequality:
$$\int_{0\le s \le t\le 1}\left( \mathbb{E}\left[\frac{1}{\|\beta_s-\beta_1\|^3\cdot \|\beta_t\|^3}\right] -\mathbb{E}\left[\frac{1}{\|\beta_s-\beta_1\|^3}\right] \mathbb{E}\left[\frac{1}{\|\beta_t\|^3}\right]\right) \, ds\, dt>0,$$ with $(\beta_u)_{u\ge 0}$ a standard Brownian motion in $\mathbb{R}^5$.
The paper is organized as follows. The next section is devoted to preliminaries: in particular, we fix the main notation, recall known results on the transition kernel and the Green's function, and derive some basic estimates. In Section 3 we give the plan of the proof of Theorem A, which is cut into a number of intermediate results: Propositions \ref{prop.error}--\ref{prop.phipsi.2}. Propositions \ref{prop.error}--\ref{prop.phi0} are then proved in Sections 4--6. The last one, which is also the most delicate, requires Theorem C and its extensions. Its proof is therefore postponed to Section 8, while we first prove our general results on the intersection of two independent ranges in Section 7, which is written in the general setting of random walks on $\mathbb{Z}^d$, for any $d\ge 5$, and can be read independently of the rest of the paper. Finally Section 9 is devoted to the proof of Theorem B, which is done by following a relatively well-established general scheme, based on the Lindeberg--Feller theorem for triangular arrays.
\section{Preliminaries} \subsection{Notation} We recall that we assume the law of the $(X_i)_{i\ge 1}$ to be a symmetric and irreducible probability measure\footnote{symmetric means that for all $x\in \mathbb{Z}^d$, $\mathbb{P}[X_1=x]=\mathbb{P}[X_1=-x]$, and irreducible means that for all $x$,
$\mathbb{P}[S_n=x]>0$, for some $n\ge 1$.} on $\mathbb{Z}^d$, $d\ge 5$, with a finite $d$-th moment\footnote{this means that $\mathbb{E}[\|X_1\|^d]<\infty$, with $\|\cdot \|$ the Euclidean norm.}. The walk is called aperiodic if the probability to be at the origin at time $n$ is nonzero for all $n$ large enough, and it is called bipartite if this probability is nonzero only when $n$ is even. Note that only these two cases may appear for a symmetric random walk.
Recall also that for $x\in \mathbb{Z}^d$, we denote by $\mathbb{P}_x$ the law of the walk starting from $S_0=x$. When $x=0$, we simply write it as $\mathbb{P}$. We denote its total range as $\mathcal{R}_\infty :=\{S_k\}_{k\ge 0}$, and for $0\le k\le n\le +\infty$, set $\mathcal{R}[k,n]:=\{S_k,\dots,S_n\}$.
For an integer $k\ge 2$, the law of $k$ independent random walks (with the same step distribution) starting from some $x_1,\dots, x_k\in \mathbb{Z}^5$, is denoted by $\mathbb{P}_{x_1,\dots,x_k}$, or simply by $\mathbb{P}$ when they all start from the origin.
We define \begin{equation}\label{HAHA+} H_A:=\inf\{n\ge 0\ : \ S_n\in A\},\quad \text{and} \quad H_A^+ :=\inf\{n\ge 1\ :\ S_n\in A\}, \end{equation} respectively for the hitting time and first return time to a subset $A\subset \mathbb{Z}^d$, that we abbreviate respectively as $H_x$ and $H_x^+$ when $A$ is a singleton $\{x\}$.
We let $\|x\|$ be the Euclidean norm of $x\in \mathbb{Z}^d$. If $X_1$ has covariance matrix $\Gamma= \Lambda \Lambda^t$, we define its associated norm as
$$\mathcal J^*(x) := |x\cdot \Gamma^{-1} x|^{1/2} = \|\Lambda^{-1} x\|,$$ and set $\mathcal J(x)= d^{-1/2}\mathcal J^*(x)$ (see \cite{LL} p.4 for more details).
For $a$ and $b$ some nonnegative reals, we let $a\wedge b:=\min(a,b)$ and $a\vee b:= \max(a,b)$.
We use the letters $c$ and $C$ to denote constants (which could depend on the covariance matrix of the walk), whose values might change from line to line. We also use standard notation for the comparison of functions: we write $f=\mathcal{O}(g)$, or sometimes $f\lesssim g$, if there exists a constant $C>0$, such that $f(x) \le Cg(x)$, for all $x$. Likewise, $f=o(g)$ means that $f/g \to 0$, and $f\sim g$ means that $f$ and $g$ are equivalent, that is if $|f-g| = o(f)$. Finally we write $f\asymp g$, when both $f=\mathcal{O}(g)$, and $g=\mathcal{O}(f)$.
\subsection{Transition kernel and Green's function} We denote by $p_n(x)$ the probability that a random walk starting from the origin ends up at position $x\in \mathbb{Z}^d$ after $n$ steps, that is $p_n(x):=\mathbb{P}[S_n=x]$, and note that for any $x,y\in \mathbb{Z}^d$, one has $\mathbb{P}_x[S_n=y] = p_n(y-x)$. Recall the definitions of $\Gamma$ and $\mathcal J^*$ from the previous subsection, and define \begin{equation}\label{pnbar} \overline p_n(x) := \frac {1}{(2\pi n)^{d/2} \sqrt{\det \Gamma}} \cdot e^{-\frac{\mathcal J^*(x)^2}{2n}}. \end{equation} The first tool we shall need is a local central limit theorem, roughly saying that $p_n(x)$ is well approximated by $\overline p_n(x)$, under appropriate hypotheses. Such a result has a long history; see in particular the standard books by Feller \cite{Feller} and Spitzer \cite{Spitzer}. We refer here to the more recent book of Lawler and Limic \cite{LL}, and more precisely to their Theorem 2.3.5 in the case of an
aperiodic random walk, and to (the proof of) their Theorem 2.1.3 in the case of bipartite walks, which provide the result we need under minimal hypotheses (in particular it only requires a finite fourth-moment for $\|X_1\|$). \begin{theorem}[{\bf Local Central Limit Theorem}]\label{LCLT} There exists a constant $C>0$, such that for all $n\ge 1$, and all $x\in \mathbb{Z}^d$, \begin{eqnarray*}
|p_n(x)-\overline p_n(x)| \le \frac{C}{n^{(d+2)/2}}, \end{eqnarray*} in the case of an aperiodic walk, and for bipartite walks,
$$|p_n(x)+p_{n+1}(x)-2\overline p_n(x)| \le \frac{C}{n^{(d+2)/2}}.$$ \end{theorem}
In addition, under our hypotheses (in particular assuming $\mathbb{E}[\|X_1\|^d]<\infty$), there exists a constant $C>0$, such that for any $n\ge 1$ and any $x\in \mathbb{Z}^d$ (see Proposition 2.4.6 in \cite{LL}), \begin{equation}\label{pn.largex} p_n(x)\le C\cdot \left\{ \begin{array}{ll}
n^{-d/2} & \text{if }\|x\|\le \sqrt n,\\
\|x\|^{-d} & \text{if }\|x\|>\sqrt n. \end{array} \right. \end{equation} It is also known (see the proof of Proposition 2.4.6 in \cite{LL}) that \begin{equation}\label{norm.Sn}
\mathbb{E}[\|S_n\|^d] =\mathcal{O}(n^{d/2}). \end{equation} Together with the reflection principle (see Proposition 1.6.2 in \cite{LL}), and Markov's inequality, this gives that for any $n\ge 1$ and $r\ge 1$, \begin{equation}\label{Sn.large}
\mathbb{P}\left[\max_{0\le k\le n} \|S_k\|\ge r\right] \le C \cdot \left(\frac{\sqrt n}{r}\right)^{d}. \end{equation} Now we define for $\ell \ge 0$, $G_\ell(x) := \sum_{n\ge \ell} p_n(x)$. The {\bf Green's function} is the function $G:=G_0$. A union bound gives \begin{equation}\label{Green.hit}
\mathbb{P}[x\in \mathcal{R}[\ell,\infty)] \le G_\ell(x).
\end{equation} By \eqref{pn.largex} there exists a constant $C>0$, such that for any $x\in \mathbb{Z}^d$, and $\ell \ge 0$, \begin{equation}\label{Green}
G_\ell(x) \le \frac{C}{\|x\|^{d-2} + \ell^{\frac{d-2}{2}} + 1}. \end{equation}
It follows from this bound (together with the corresponding lower bound $G(x)\ge c\|x\|^{2-d}$, which can be deduced from Theorem \ref{LCLT}), and the fact that $G$ is harmonic on $\mathbb{Z}^d\setminus\{0\}$, that the hitting probability of a ball is bounded as follows (see the proof of \cite[Proposition 6.4.2]{LL}): \begin{equation}\label{hit.ball}
\mathbb{P}_x\left[\eta_r<\infty \right] =\mathcal{O}\left(\frac{r^{d-2}}{1+\|x\|^{d-2}}\right), \quad \text{with}\quad \eta_r:=\inf\{n\ge 0\ :\ \|S_n\|\le r\}. \end{equation} We shall need as well some control on the overshoot. We state the result we need as a lemma and provide a short proof for the sake of completeness. \begin{lemma}[{\bf Overshoot Lemma}]\label{hit.ball.overshoot}
There exists a constant $C>0$, such that for all $r\ge 1$, and all $x\in \mathbb{Z}^d$, with $\|x\|\ge r$, \begin{equation*}
\mathbb{P}_x[\eta_r<\infty,\, \|S_{\eta_r}\| \le r/2] \le \frac{C}{1+\|x\|^{d-2}}. \end{equation*} \end{lemma} \begin{proof} We closely follow the proof of Lemma 5.1.9 in \cite{LL}. Note first that one can always assume that $r$ is large enough, for otherwise the result follows from \eqref{hit.ball}. Then define for $k\ge 0$,
$$Y_k:= \sum_{n= 0}^{\eta_r} {\text{\Large $\mathfrak 1$}}\{r+k \le \|S_n\|< r+(k+1)\}.$$ Let
$$g(x,k) = \mathbb{E}_x[Y_k] = \sum_{n=0}^\infty \mathbb{P}_x[r+k \le \|S_n\|\le r+k+1,\, n< \eta_r].$$ One has \begin{align*}
& \mathbb{P}_x[\eta_r<\infty, \, \|S_{\eta_r}\| \le r/2] = \sum_{n=0}^\infty \mathbb{P}_x[\eta_r=n+1, \, \|S_{\eta_r}\| \le r/2] \\ & = \sum_{n=0}^\infty \sum_{k=0}^\infty
\mathbb{P}_x[\eta_r=n+1, \, \|S_{\eta_r}\| \le r/2,\, r+k \le \|S_n\|< r+k+1]\\
& \le \sum_{k=0}^\infty \sum_{n=0}^\infty \mathbb{P}_x\left[\eta_r>n,\, r+k \le \|S_n\|\le r+k+1,\, \|S_{n+1}-S_n\| \ge \frac r2 + k\right]\\
& = \sum_{k=0}^\infty g(x,k) \mathbb{P}\left[\|X_1\| \ge \frac r2 + k \right] = \sum_{k=0}^\infty g(x,k) \sum_{\ell = k}^\infty \mathbb{P}\left[\frac r2 +\ell \le \|X_1\|< \frac r2 + \ell+1 \right]\\
& = \sum_{\ell = 0}^\infty \mathbb{P}\left[\frac r2 +\ell \le \|X_1\|< \frac r2 + \ell+1 \right]\sum_{k=0}^\ell g(x,k). \end{align*}
Now Theorem \ref{LCLT} shows that one has $\mathbb{P}_z[\|S_{\ell^2}\| \le r]\ge \rho$, for some constant $\rho>0$, uniformly in $r$ (large enough), $\ell\ge 1$, and $r\le \|z\|\le r+\ell$. It follows, exactly as in the proof of Lemma 5.1.9 from \cite{LL}, that for any $\ell\ge 1$,
$$\max_{\|z\|\le r+\ell} \sum_{0\le k< \ell} g(z,k) \le \frac{\ell^2}{\rho}.$$ Using in addition \eqref{hit.ball}, we get with the Markov property,
$$\sum_{0\le k< \ell} g(x,k) \le C \frac{(r+\ell)^{d-2}}{1+\|x\|^{d-2}} \cdot \ell^2,$$ for some constant $C>0$. As a consequence one has \begin{align*}
& \mathbb{P}_x[\eta_r<\infty, \, \|S_{\eta_r}\| \le r/2] \\
& \le \frac C{1+\|x\|^{d-2}} \sum_{\ell = 0}^\infty \mathbb{P}\left[\frac r2+ \ell \le \|X_1\|< \frac r2 +\ell + 1\right](r+\ell)^{d-2}(\ell+1) ^2\\
& \le \frac C{1+\|x\|^{d-2}} \mathbb{E}\left[\|X_1\|^{d-2}(\|X_1\| - r/2)^2{\text{\Large $\mathfrak 1$}}\{\|X_1\|\ge r/2\}\right] \le \frac{C}{1+\|x\|^{d-2}},
\end{align*}
since by hypothesis, the $d$-th moment of $X_1$ is finite. \end{proof}
\subsection{Basic tools} We prove here some elementary facts, which will be needed throughout the paper, and which are immediate consequences of the results from the previous subsection.
\begin{lemma}\label{lem.upconvolG} There exists $C>0$, such that for all $x\in \mathbb{Z}^d$, and $\ell \ge 0$,
$$ \sum_{z\in \mathbb{Z}^d} G_\ell(z) G(z-x) \le \frac{C}{\|x\|^{d-4}+\ell^{\frac{d-4}{2}} + 1}.$$ \end{lemma} \begin{proof} Assume first that $\ell =0$. Then by \eqref{Green}, \begin{align*}
\sum_{z\in \mathbb{Z}^d} G(z) G(z-x) & \lesssim \frac{1}{1+\|x\|^{d-2}} \left(\sum_{\|z\|\le 2\|x\|} \frac{1}{1+\|z\|^{d-2}} +\sum_{\|z-x\|\le \frac{\|x\|}{2}} \frac{1}{1+\|z-x\|^{d-2}} \right)\\
& \quad + \sum_{\|z\|\ge 2\|x\|}\frac{1}{1+\|z\|^{2(d-2)}} \lesssim \frac{1}{1+\|x\|^{d-4}}. \end{align*}
Assume next that $\ell\ge 1$. We distinguish two cases: if $\|x\|\le \sqrt \ell$, then by using \eqref{Green} again we deduce,
$$\sum_{z\in \mathbb{Z}^d} G_\ell(z) G(z-x) \lesssim \frac{1}{\ell^{d/2}}\cdot \sum_{\|z\|\le 2\sqrt \ell} \frac{1}{1+ \|z-x\|^{d-2}} + \sum_{\|z\|\ge 2\sqrt \ell} \frac 1{\|z\|^{2(d-2)}} \lesssim \frac{1}{ \ell^{\frac{d-4}2}}.$$
When $\|x\| >\sqrt \ell$, the result follows from case $\ell =0$, since $G_\ell(z) \le G(z)$. \end{proof}
\begin{lemma}\label{lem.sumG} One has, \begin{equation}\label{exp.Green} \sup_{x\in \mathbb{Z}^d} \, \mathbb{E}[G(S_n-x)] = \mathcal{O}\left(\frac 1{n^{\frac{d-2}{2}}}\right), \end{equation} and for any $\alpha \in [0,d)$, \begin{equation}\label{exp.Green.x}
\sup_{n\ge 0} \, \mathbb{E}\left[\frac 1{1+\|S_n-x\|^\alpha } \right] = \mathcal{O}\left(\frac 1{1+\|x\|^\alpha}\right). \end{equation} Moreover, when $d=5$, \begin{equation} \label{sumG2} \mathbb{E}\left[\Big(\sum_{n\ge k} G(S_n)\Big)^2\right] = \mathcal{O}\left(\frac 1k\right). \end{equation} \end{lemma} \begin{proof}
For \eqref{exp.Green}, we proceed similarly as in the proof of Lemma \ref{lem.upconvolG}. If $\|x\| \le \sqrt{n}$, one has using \eqref{pn.largex} and \eqref{Green}, \begin{align*}
& \mathbb{E}[G(S_n-x)] = \sum_{z\in \mathbb{Z}^d}p_n(z) G(z-x) \\
& \lesssim \frac 1{n^{d/2}} \sum_{\|z\|\le 2\sqrt n} \frac{1}{1+\|z-x\|^{d-2}} + \sum_{\|z\|>2\sqrt n} \frac {1}{\|z\|^{2d-2}} \lesssim n^{\frac{2-d}{2}}, \end{align*}
while if $\|x\|>\sqrt{n}$, we get as well
$$\mathbb{E}[G(S_n-x)]\lesssim \frac 1{n^{d/2}} \sum_{\|z\|\le \sqrt n/2} \frac{1}{\|x\|^{d-2}} + \sum_{\|z\|>\sqrt n/2} \frac {1}{\|z\|^d(1+\|z-x\|)^{d-2}} \lesssim n^{\frac{2-d}{2}}.$$ Considering now \eqref{exp.Green.x}, we write \begin{align*}
& \mathbb{E}\left[\frac 1{1+\|S_n-x\|^\alpha} \right] \le \frac{C}{1+\|x\|^\alpha} + \sum_{\|z-x\|\le \|x\|/2} \frac{p_n(z)}{1+\|z-x\|^\alpha} \\
& \stackrel{\eqref{pn.largex}}{\lesssim} \frac{1}{1+\|x\|^\alpha} + \frac{1}{1+\|x\|^d} \sum_{\|z-x\|\le \|x\|/2} \frac{1}{1+\|z-x\|^\alpha} \lesssim \frac{1}{1+\|x\|^\alpha}. \end{align*} Finally for \eqref{sumG2}, one has using the Markov property at the second line, \begin{align*} &\mathbb{E}\left[\Big(\sum_{n\ge k} G(S_n)\Big)^2\right] = \sum_{x,y} G(x)G(y)\mathbb{E}\left[\sum_{n,m\ge k}{\text{\Large $\mathfrak 1$}}\{S_n=x,S_m=y\}\right]\\ &\le 2\sum_{x,y} G(x)G(y)\sum_{n\ge k}\sum_{\ell \ge 0} p_n(x)p_\ell(y-x)= 2\sum_{x,y} G(x)G(y)G_k(x)G(y-x)\\
& \stackrel{\text{Lemma }\ref{lem.upconvolG}}{\lesssim} \sum_x \frac{1}{\|x\|^4}G_k(x) \stackrel{\eqref{Green}}{\lesssim} \frac 1k. \end{align*} \end{proof}
The next result deals with the probability that two independent ranges intersect. Although its proof is a rather straightforward consequence of the previous results, it already provides upper bounds of the right order (only off by a multiplicative constant). \begin{lemma}\label{lem.simplehit} Let $S$ and $\widetilde S$ be two independent walks starting respectively from the origin and some $x\in \mathbb{Z}^d$. Let also $\ell$ and $m$ be two given nonnegative integers (possibly infinite for $m$). Define $$\tau:=\inf\{n\ge 0\ :\ \widetilde S_n\in \mathcal{R}[\ell,\ell+m]\}.$$ Then, for any function $F:\mathbb{Z}^d\to \mathbb{R}_+$, \begin{equation}\label{lem.hit.1} \mathbb{E}_{0,x}[{\text{\Large $\mathfrak 1$}}\{\tau <\infty\}F(\widetilde S_\tau)] \le \sum_{i=\ell}^{\ell + m} \mathbb{E}[G(S_i-x)F(S_i)]. \end{equation} In particular, uniformly in $\ell$ and $m$, \begin{equation} \label{lem.hit.2}
\mathbb{P}_{0,x}[\tau<\infty] = \mathcal{O}\left(\frac{1}{1+\|x\|^{d-4} }\right). \end{equation} Moreover, uniformly in $x\in \mathbb{Z}^d$, \begin{equation}\label{lem.hit.3} \mathbb{P}_{0,x}[\tau<\infty] = \left\{ \begin{array}{ll}
\mathcal{O}\left(m\cdot \ell^{\frac{2-d}2}\right) & \text{if }m<\infty \\ \mathcal{O}\left(\ell^{\frac{4-d}2} \right) & \text{if }m=\infty. \end{array} \right. \end{equation} \end{lemma} \begin{proof} The first statement follows from \eqref{Green.hit}. Indeed using this, and the independence between $S$ and $\widetilde S$, we deduce that \begin{align*} \mathbb{E}_{0,x}[{\text{\Large $\mathfrak 1$}}\{\tau <\infty\}F(\widetilde S_\tau)] & \le \sum_{i=\ell}^{\ell + m} \mathbb{E}_{0,x}[{\text{\Large $\mathfrak 1$}}\{S_i\in \widetilde \mathcal{R}_\infty \} F(S_i)] \stackrel{\eqref{Green.hit}}{\le} \sum_{i=\ell}^{\ell +m} \mathbb{E}[G(S_i-x)F(S_i)]. \end{align*} For \eqref{lem.hit.2}, note first that it suffices to consider the case when $\ell=0$ and $m=\infty$, as otherwise the probability is just smaller. Taking now $F\equiv 1$ in \eqref{lem.hit.1}, and using Lemma \ref{lem.upconvolG} gives the result. Similarly \eqref{lem.hit.3} directly follows from \eqref{lem.hit.1} and \eqref{exp.Green}. \end{proof}
\section{Scheme of proof of Theorem A} \subsection{A last passage decomposition for the capacity of the range} We provide here a last passage decomposition for the capacity of the range, in the same fashion as the well-known decomposition for the size of the range, which goes back to the seminal paper by Dvoretzky and Erd\H{o}s \cite{DE51}, and which was also used by Jain and Pruitt \cite{JP71} for their proof of the central limit theorem. We note that Jain and Orey \cite{JO69} used as well a similar decomposition in their analysis of the capacity of the range (in fact they used instead a first passage decomposition).
So let $(S_n)_{n\ge 0}$ be some random walk starting from the origin, and set $$\varphi_k^n:= \mathbb{P}_{S_k}[H_{\mathcal{R}_n}^+=\infty\mid \mathcal{R}_n], \text{ and } Z_k^n := {\text{\Large $\mathfrak 1$}}\{S_\ell \neq S_k,\ \text{for all } \ell = k+1,\dots, n\},$$ for all $0\le k\le n$. By definition of the capacity \eqref{cap.def}, one can write by recording the sites of $\mathcal{R}_n$ according to their last visit, $$\mathrm{Cap}(\mathcal{R}_n)=\sum_{k=0}^n Z_k^n\cdot \varphi_k^n.$$ A first simplification is to remove the dependence on $n$ in each of the terms in the sum. To do this, we need some additional notation: we consider $(S_n)_{n\in \mathbb{Z}}$ a two-sided random walk starting from the origin (that is $(S_n)_{n\ge 0}$ and $(S_{-n})_{n\ge 0}$ are two independent walks starting from the origin), and denote its total range by $\overline \mathcal{R}_\infty :=\{S_n\}_{n\in \mathbb{Z}}$. Then for $k\ge 0$, let $$\varphi(k):=\mathbb{P}_{S_k}[H_{\overline \mathcal{R}_\infty}^+ = \infty \mid (S_n)_{n\in \mathbb{Z}}], \text{ and } Z(k):= {\text{\Large $\mathfrak 1$}}\{S_\ell \neq S_k,\text{ for all }\ell \ge k+1 \}.$$ We note that $\varphi(k)$ can be zero with nonzero probability, but that $\mathbb{E}[\varphi(k)]\neq 0$ (see the proof of Theorem 6.5.10 in \cite{LL}). We then define $$\mathcal C_n : = \sum_{k=0}^n Z(k)\varphi(k),\quad \text{ and }\quad W_n:=\mathrm{Cap}(\mathcal{R}_n) - \mathcal C_n.$$ We will prove in a moment the following estimate. \begin{lemma} \label{lem.Wn} One has $$\mathbb{E}[W_n^2] = \mathcal O(n).$$ \end{lemma} Given this result, Theorem A reduces to an estimate of the variance of $\mathcal C_n$. 
To this end, we first observe that $$\operatorname{Var}(\mathcal C_n) = 2 \sum_{0\le \ell < k\le n} \operatorname{Cov}( Z(\ell)\varphi(\ell), Z(k)\varphi(k)) + \mathcal{O}(n).$$ Furthermore, by translation invariance, for any $\ell < k$, $$\operatorname{Cov}( Z(\ell)\varphi(\ell), Z(k)\varphi(k)) = \operatorname{Cov}(Z(0)\varphi(0), Z(k-\ell)\varphi(k-\ell)),$$ so that in fact $$\operatorname{Var}(\mathcal C_n) = 2\sum_{\ell = 1}^n \sum_{k=1}^{\ell} \operatorname{Cov}( Z(0)\varphi(0), Z(k)\varphi(k)) + \mathcal{O}(n).$$ Thus Theorem A is a direct consequence of the following theorem. \begin{theorem}\label{prop.cov} There exists a constant $\sigma>0$, such that \begin{equation*} \operatorname{Cov}( Z(0)\varphi(0), Z(k)\varphi(k)) \sim \frac{\sigma^2}{2k}. \end{equation*} \end{theorem} This result is the core of the paper, and uses in particular Theorem C (in fact some more general statement, see Theorem \ref{thm.asymptotic}). More details about its proof will be given in the next subsection, but first we show that $W_n$ is negligible by giving the proof of Lemma \ref{lem.Wn}.
\begin{proof}[Proof of Lemma \ref{lem.Wn}] Note that $W_n=W_{n,1} + W_{n,2}$, with $$W_{n,1} = \sum_{k=0}^n (Z_k^n - Z(k))\varphi_k^n, \quad \text{and}\quad W_{n,2} = \sum_{k=0}^n(\varphi_k^n- \varphi(k)) Z(k).$$ Consider first the term $W_{n,1}$ which is easier. Observe that $Z_k^n-Z(k)$ is nonnegative and bounded by the indicator function of the event $\{S_k\in \mathcal{R}[n+1,\infty)\}$. Bounding also $\varphi_k^n$ by one, we get \begin{align*} \mathbb{E}[W_{n,1}^2] & \le \sum_{\ell = 0}^n \sum_{k=0}^n \mathbb{E}[(Z_\ell^n-Z(\ell))(Z_k^n -Z(k))] \\ &\le \sum_{\ell = 0}^n \sum_{k=0}^n \mathbb{P}\left[S_\ell \in \mathcal{R}[n+1,\infty), \, S_k\in \mathcal{R}[n+1,\infty)\right]. \end{align*} Then noting that $(S_{n+1-k}-S_{n+1})_{k\ge 0}$ and $(S_{n+1+k}-S_{n+1})_{k\ge 0}$ are two independent random walks starting from the origin, we obtain \begin{align*}
\mathbb{E}[W_{n,1}^2] & \le \sum_{\ell =1}^{n+1} \sum_{k=1}^{n+1} \mathbb{P}[H_{S_\ell} <\infty, \, H_{S_k}<\infty]\le 2 \sum_{\ell = 1}^{n+1} \sum_{k=1}^{n+1} \mathbb{P}[H_{S_\ell} \le H_{S_k} <\infty] \\ &\le 2 \sum_{1\le \ell \le k\le n+1} \mathbb{P}[H_{S_\ell} \le H_{S_k} <\infty] + \mathbb{P}[H_{S_k} \le H_{S_\ell} <\infty]. \end{align*} Using next the Markov property and \eqref{Green.hit}, we get with $S$ and $\widetilde S$ two independent random walks starting from the origin, \begin{align*}
\mathbb{E}[W_{n,1}^2] & \le 2 \sum_{1\le \ell \le k\le n+1} \mathbb{E}[G(S_\ell) G(S_k-S_\ell)] + \mathbb{E}[G(S_k) G(S_k- S_\ell)]\\ &\le 2 \sum_{\ell = 1}^{n+1} \sum_{k=0}^n \mathbb{E}[G(S_\ell)] \cdot \mathbb{E}[G(S_k)] + \mathbb{E}[G(S_\ell + \widetilde S_k) G(\widetilde S_k)]\\ &\le 4 \left(\sup_{x\in \mathbb{Z}^5} \sum_{\ell \ge 0} \mathbb{E}[G(x+S_\ell)]\right)^2 \stackrel{\eqref{exp.Green}}{=} \mathcal O(1). \end{align*} We proceed similarly with $W_{n,2}$. Observe first that for any $k\ge 0$, $$0 \le \varphi_k^n - \varphi(k) \le \mathbb{P}_{S_k}[H_{\mathcal{R}(-\infty,0]}<\infty\mid S] + \mathbb{P}_{S_k}[H_{\mathcal{R}[n,\infty)}<\infty\mid S].$$ Furthermore, for any $0\le \ell \le k\le n$, the two terms $\mathbb{P}_{S_\ell}[H_{\mathcal{R}(-\infty,0]}<\infty\mid S]$ and $\mathbb{P}_{S_k}[H_{\mathcal{R}[n,\infty)}<\infty\mid S]$ are independent. Therefore, \begin{align}\label{Wn2}
\nonumber \mathbb{E}[W_{n,2}^2] \le & \sum_{\ell = 0}^n \sum_{k=0}^n \mathbb{E}[(\varphi_\ell^n-\varphi(\ell))(\varphi_k^n-\varphi(k))] \le 2\left(\sum_{\ell = 0}^n \mathbb{P}\left[ H_{\mathcal{R}[\ell, \infty)}<\infty\right]\right)^2 \\ & + 4 \sum_{0\le \ell \le k\le n} \mathbb{P}\left[\mathcal{R}^3_\infty \cap (S_\ell + \mathcal{R}^1_\infty) \neq \varnothing, \, \mathcal{R}^3_\infty \cap (S_k+\mathcal{R}_\infty^2)\neq \varnothing \right], \end{align} where in the last term $\mathcal{R}^1_\infty$, $\mathcal{R}^2_\infty$ and $\mathcal{R}^3_\infty$ are the ranges of three (one-sided) independent walks, independent of $(S_n)_{n\ge 0}$, starting from the origin (denoting here $(S_{-n})_{n\ge 0}$ as another walk $(S^3_n)_{n\ge 0}$). Now \eqref{lem.hit.3} already shows that the first term on the right hand side of \eqref{Wn2} is $\mathcal{O}(n)$. For the second one, note that for any $0\le \ell \le k\le n$, one has \begin{align*} &\mathbb{P}\left[\mathcal{R}^3_\infty \cap (S_\ell + \mathcal{R}^1_\infty) \neq \varnothing, \, \mathcal{R}^3_\infty \cap (S_k+\mathcal{R}_\infty^2)\neq \varnothing \right]\\
\le & \
\mathbb{E}\left[|\mathcal{R}^3_\infty \cap (S_\ell + \mathcal{R}^1_\infty)| \cdot | \mathcal{R}^3_\infty \cap (S_k+\mathcal{R}_\infty^2)| \right]\\
= &\ \mathbb{E}\left[\mathbb{E}[|\mathcal{R}^3_\infty \cap (S_\ell + \mathcal{R}^1_\infty)|\mid S,\, S^3] \cdot \mathbb{E}[| \mathcal{R}^3_\infty \cap (S_k+\mathcal{R}_\infty^2)|\mid S,\, S^3] \right] \\
\stackrel{\eqref{Green.hit}}{\le} & \mathbb{E}\left[ \Big(\sum_{m\ge 0} G(S^3_m - S_\ell) \Big) \Big(\sum_{m\ge 0} G(S^3_m - S_k) \Big) \right] = \mathbb{E}\left[ \Big(\sum_{m\ge k} G(S_m - S_{k-\ell}) \Big) \Big(\sum_{m\ge k} G(S_m) \Big) \right]\\ \le & \ \mathbb{E}\left[ \Big(\sum_{m\ge \ell} G(S_m) \Big)^2\right]^{1/2} \cdot \mathbb{E}\left[ \Big(\sum_{m\ge k} G(S_m)\Big)^2\right]^{1/2} \stackrel{\eqref{sumG2}}{=} \mathcal{O}\left(\frac{1}{1+\sqrt{k\ell}}\right), \end{align*} using invariance by time reversal at the penultimate line, and Cauchy-Schwarz at the last one. This concludes the proof of the lemma. \end{proof}
\subsection{Scheme of proof of Theorem \ref{prop.cov}} We provide here some decomposition of $\varphi(0)$ and $\varphi(k)$ into a sum of terms involving intersection and non-intersection probabilities of different parts of the path $(S_n)_{n\in \mathbb{Z}}$. For this, we consider some sequence of integers $(\varepsilon_k)_{k\ge 1}$ satisfying $k>2\varepsilon_k$, for all $k\ge 3$, and whose value will be fixed later. A first step in our analysis is to reduce the influence of the random variables $Z(0)$ and $Z(k)$, which play a very minor role in the whole proof. Thus we define $$Z_0:={\text{\Large $\mathfrak 1$}}\{S_\ell\neq 0,\, \forall \ell=1,\dots,\varepsilon_k\}, \text{ and } Z_k:={\text{\Large $\mathfrak 1$}}\{S_\ell\neq S_k, \, \forall \ell =k+1,\dots,k+\varepsilon_k\}.$$ Note that this notation is slightly misleading (as in fact $Z_0$ and $Z_k$ depend on $\varepsilon_k$, but this shall hopefully not cause any confusion). One has
$$\mathbb{E}[|Z(0)- Z_0|]= \mathbb{P}[0\in \mathcal{R}[\varepsilon_k+1,\infty)] \stackrel{\eqref{Green.hit}}{\le} G_{\varepsilon_k}(0) \stackrel{\eqref{Green}}{=} \mathcal{O}(\varepsilon_k^{-3/2}),$$
and the same estimate holds for $\mathbb{E}[|Z(k)-Z_k|]$, by the Markov property. Therefore, $$\operatorname{Cov}(Z(0)\varphi(0),Z(k)\varphi(k)) = \operatorname{Cov}(Z_0\varphi(0),Z_k\varphi(k)) + \mathcal{O}(\varepsilon_k^{-3/2}).$$ Then recall that we consider a two-sided walk $(S_n)_{n\in \mathbb{Z}}$, and that $\varphi(0) = \mathbb{P}[H_{\mathcal{R}(-\infty, \infty)}^+=\infty\mid S]$. Thus one can decompose $\varphi(0)$ as follows: $$\varphi(0) =\varphi_0- \varphi_1- \varphi_2-\varphi_3 + \varphi_{1,2} +\varphi_{1,3} + \varphi_{2,3} - \varphi_{1,2,3},$$ with $$\varphi_0:=\mathbb{P}[H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty\mid S],\quad \varphi_1: = \mathbb{P}[H^+_{\mathcal{R}(-\infty,-\varepsilon_k-1]}<\infty, \, H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty \mid S], $$ $$\varphi_2 := \mathbb{P}[H^+_{\mathcal{R}[\varepsilon_k+1,k]}<\infty, H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty \mid S], \, \varphi_3 := \mathbb{P}[H^+_{\mathcal{R}[k+1,\infty)}<\infty , H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty\mid S],$$ $$\varphi_{1,2}: = \mathbb{P}[H^+_{\mathcal{R}(-\infty,-\varepsilon_k-1]}<\infty , \, H^+_{\mathcal{R}[\varepsilon_k+1,k]}<\infty , \, H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty \mid S], $$ $$\varphi_{1,3} := \mathbb{P}[H^+_{\mathcal{R}(-\infty,-\varepsilon_k-1]}<\infty , \, H^+_{\mathcal{R}[k+1,\infty)}<\infty, \, H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty \mid S],$$ $$\varphi_{2,3} := \mathbb{P}[H^+_{\mathcal{R}[\varepsilon_k+1,k]}<\infty,\, H^+_{\mathcal{R}[k+1,\infty)}<\infty, \, H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty \mid S],$$ $$\varphi_{1,2,3} := \mathbb{P}[H^+_{\mathcal{R}(-\infty, -\varepsilon_k-1]}<\infty, H^+_{\mathcal{R}[\varepsilon_k+1,k]}<\infty, H^+_{\mathcal{R}[k+1,\infty)}<\infty, H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+=\infty \mid S].$$ We decompose similarly $$\varphi(k)=\psi_0 - \psi_1 - \psi_2 - \psi_3 + \psi_{1,2} + \psi_{1,3} + \psi_{2,3} - 
\psi_{1,2,3},$$ where index $0$ refers to the event of avoiding $\mathcal{R}[k-\varepsilon_k,k+\varepsilon_k]$, index $1$ to the event of hitting $\mathcal{R}(-\infty,-1]$, index $2$ to the event of hitting $\mathcal{R}[0,k-\varepsilon_k-1]$ and index $3$ to the event of hitting $\mathcal{R}[k+\varepsilon_k+1,\infty)$ (for a walk starting from $S_k$ this time). Note that $\varphi_0$ and $\psi_0$ are independent. Then write \begin{align}\label{main.dec}
& \operatorname{Cov}(Z_0\varphi(0),Z_k\varphi(k)) = -\sum_{i=1}^3 \left(\operatorname{Cov}(Z_0\varphi_i,Z_k\psi_0)+ \operatorname{Cov}(Z_0\varphi_0,Z_k\psi_i)\right) \\ \nonumber & + \sum_{i,j=1}^3 \operatorname{Cov}(Z_0\varphi_i , Z_k\psi_j) +\sum_{1\le i <j\le 3} \left(\operatorname{Cov}(Z_0\varphi_{i,j},Z_k\psi_0) + \operatorname{Cov}(Z_0\varphi_0,Z_k\psi_{i,j})\right)+ R_{0,k}, \end{align} where $R_{0,k}$ is an error term. Our first task will be to show that it is negligible. \begin{proposition}\label{prop.error}
One has $|R_{0,k}| = \mathcal{O}\left(\varepsilon_k^{-3/2}\right)$. \end{proposition} The second step is the following. \begin{proposition}\label{prop.ij0} One has \begin{itemize} \item[(i)]
$|\operatorname{Cov}(Z_0\varphi_{1,2},Z_k\psi_0)|+|\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_{2,3})| = \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\right)$,
\item[(ii)] $|\operatorname{Cov}(Z_0\varphi_{1,3},Z_k\psi_0)| + |\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_{1,3})| = \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\cdot \log(\frac k{\varepsilon_k}) + \frac{1}{\varepsilon_k^{3/4} \sqrt k}\right)$, \item[(iii)]
$|\operatorname{Cov}(Z_0\varphi_{2,3},Z_k\psi_0)| +|\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_{1,2})|= \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\cdot \log(\frac k{\varepsilon_k})+ \frac{1}{\varepsilon_k^{3/4} \sqrt k}\right)$. \end{itemize} \end{proposition} In the same fashion as Part (i) of the previous proposition, we show: \begin{proposition}\label{prop.phipsi.1} For any $1\le i<j\le 3$,
$$|\operatorname{Cov}(Z_0\varphi_i,Z_k\psi_j)| =\mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\right), \quad |\operatorname{Cov}(Z_0\varphi_j,Z_k\psi_i)| =\mathcal{O}\left(\frac{1}{\varepsilon_k}\right).$$ \end{proposition} The next step deals with the first sum in the right-hand side of \eqref{main.dec}. \begin{proposition}\label{prop.phi0} There exists a constant $\alpha\in (0,1)$, such that $$\operatorname{Cov}(Z_0\varphi_1, Z_k\psi_0) = \operatorname{Cov}(Z_0\varphi_0,Z_k\psi_3) = 0,$$
$$|\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_0)|+ |\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_2)| = \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}} \right),$$
$$|\operatorname{Cov}(Z_0\varphi_3,Z_k\psi_0)|+ |\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_1)| = \mathcal{O}\left(\frac{ \varepsilon_k^\alpha}{k^{1+\alpha}} \right).$$ \end{proposition} At this point one can already deduce the bound $\operatorname{Var}(\mathrm{Cap}(\mathcal{R}_n))= \mathcal{O}(n \log n)$, just applying the previous propositions with say $\varepsilon_k:=\lfloor k/4\rfloor$.
In order to obtain the finer asymptotic result stated in Theorem \ref{prop.cov}, it remains to identify the leading terms in \eqref{main.dec}, which is the most delicate part. The result reads as follows. \begin{proposition}\label{prop.phipsi.2} There exists $\delta>0$, such that if $\varepsilon_k\ge k^{1-\delta}$ and $\varepsilon_k = o(k)$, then for some positive constants $(\sigma_{i,j})_{1\le i\le j\le 3}$, $$\operatorname{Cov}(Z_0\varphi_j,Z_k\psi_i)\sim \operatorname{Cov}(Z_0\varphi_{4-i},Z_k\psi_{4-j}) \sim \frac{\sigma_{i,j}}{k}.$$ \end{proposition} Note that Theorem \ref{prop.cov} is a direct consequence of \eqref{main.dec} and Propositions \ref{prop.error}--\ref{prop.phipsi.2}, which we prove now in the following sections.
\section{Proof of Proposition \ref{prop.error}} We divide the proof into two lemmas. \begin{lemma}\label{lem.123} One has $$\mathbb{E}[\varphi_{1,2,3}] = \mathcal{O}\left(\frac{1}{\varepsilon_k \sqrt k}\right),\quad \text{and}\quad \mathbb{E}[\psi_{1,2,3}] = \mathcal{O}\left(\frac{1}{\varepsilon_k\sqrt k}\right).$$ \end{lemma}
\begin{lemma}\label{lem.ijl} For any $1\le i<j\le 3$, and any $1\le \ell\le 3$, $$ \mathbb{E}[\varphi_{i,j}\psi_\ell] =\mathcal{O}\left(\varepsilon_k^{-3/2} \right), \quad \text{and} \quad \mathbb{E}[\varphi_{i,j}] \cdot \mathbb{E}[\psi_\ell] =\mathcal{O}\left(\varepsilon_k^{-3/2} \right) .$$ \end{lemma} Observe that the $(\varphi_{i,j})_{i,j}$ and $(\psi_{i,j})_{i,j}$ have the same law (up to reordering), and similarly for the $(\varphi_i)_i$ and $(\psi_i)_{i}$. Furthermore, $\varphi_{i,j}\le \varphi_i$ for any $i,j$. Therefore by definition of $R_{0,k}$ the proof of Proposition \ref{prop.error} readily follows from these two lemmas. For their proofs, we will use the following fact. \begin{lemma}\label{lem.prep.123} There exists $C>0$, such that for any $x,y\in \mathbb{Z}^5$, $0\le \ell \le m$, \begin{equation*} \sum_{i=\ell}^m \sum_{z\in \mathbb{Z}^5} p_i(z) G(z-y) p_{m-i}(z-x) \le
\frac{C}{(1+\|x\|+\sqrt m)^5} \left(\frac 1{1+\|y-x\|} + \frac{1}{1+\sqrt{\ell}+\|y\|}\right). \end{equation*} \end{lemma} \begin{proof}
Consider first the case $\|x\|\le \sqrt m$. By \eqref{pn.largex} and Lemma \ref{lem.upconvolG}, $$ \sum_{i=\ell}^{\lfloor m/2\rfloor } \sum_{z\in \mathbb{Z}^5} p_i(z) G(z-y) p_{m-i}(z-x) \lesssim \frac{1}{1+m^{5/2}} \sum_{z\in \mathbb{Z}^5} G_{\ell}(z) G(z-y)
\lesssim \frac{(1+ m)^{-5/2}}{1+\sqrt{\ell}+\|y\|}, $$ with the convention that the first sum is zero when $m<2\ell$, and $$\sum_{i=\lfloor m/2\rfloor }^m \sum_{z\in \mathbb{Z}^5} p_i(z) G(z-y) p_{m-i}(z-x) \lesssim \frac{1}{1+m^{5/2}} \sum_{z\in \mathbb{Z}^5} G(z-y) G(z-x)
\lesssim \frac{(1+m)^{-5/2}} {1+\|y-x\|}. $$
Likewise, when $\|x\|>\sqrt m$, applying again \eqref{pn.largex} and Lemma \ref{lem.upconvolG}, we get \begin{align*}
& \sum_{i=\ell}^{m} \sum_{\|z-x\| \ge \frac{\|x\|}{2}} p_i(z) G(z-y) p_{m-i}(z-x) \lesssim \frac{1}{\|x\|^5} \sum_{z\in \mathbb{Z}^5} G_{\ell}(z)G(z-y) \lesssim \frac{\|x\|^{-5}}{ 1+\sqrt{\ell}+\|y\|},\\
& \sum_{i=\ell}^{m} \sum_{\|z-x\| \le \frac{\|x\|}{2}} p_i(z) G(z-y) p_{m-i}(z-x) \lesssim \frac{1}{\|x\|^5} \sum_{z\in \mathbb{Z}^5} G(z-y)G(z-x) \lesssim \frac{\|x\|^{-5}}{1+\|y-x\|}, \end{align*} which concludes the proof of the lemma. \end{proof}
One can now give the proof of Lemma \ref{lem.123}. \begin{proof}[Proof of Lemma \ref{lem.123}] Since $\varphi_{1,2,3}$ and $\psi_{1,2,3}$ have the same law, it suffices to prove the result for $\varphi_{1,2,3}$. Let $(S_n)_{n\in \mathbb{Z}}$ and $(\widetilde S_n)_{n\ge 0}$ be two independent random walks starting from the origin. Define $$\tau_1:=\inf\{n\ge 1\, :\, \widetilde S_n \in \mathcal{R}(-\infty,-\varepsilon_k-1]\},\ \tau_2:=\inf\{n\ge 1\, :\, \widetilde S_n \in \mathcal{R}[\varepsilon_k+1,k]\},$$ and $$\tau_3:= \inf\{n\ge 1\, :\, \widetilde S_n\in \mathcal{R}[k+1,\infty)\}.$$ One has \begin{equation}\label{phi123.tauij} \mathbb{E}[\varphi_{1,2,3}] \le \sum_{i_1\neq i_2 \neq i_3} \mathbb{P}[\tau_{i_1}\le \tau_{i_2}\le \tau_{i_3}]. \end{equation} We first consider the term corresponding to $i_1=1$, $i_2=2$, and $i_3=3$. One has by the Markov property, \begin{align*}
\mathbb{P}[\tau_1\le \tau_2\le \tau_3<\infty] \stackrel{\eqref{lem.hit.2}}{\lesssim} \mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_1\le \tau_2<\infty\}}{1+\|\widetilde S_{\tau_2} - S_k\|}\right]\stackrel{\eqref{lem.hit.1}}{\lesssim} \sum_{i=\varepsilon_k}^k \mathbb{E}\left[ \frac{G(S_i-\widetilde S_{\tau_1}){\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}}{1+\| S_i - S_k\|}\right]. \end{align*} Now define $\mathcal{G}_i:=\sigma((S_j)_{j\le i})\vee \sigma((\widetilde S_n)_{n\ge 0})$, and note that $\tau_1$ is $\mathcal{G}_i$-measurable for any $i\ge 0$. Moreover, the Markov property and \eqref{pn.largex} show that
$$\mathbb{E}\left[\frac{1}{1+\|S_i - S_k\|}\mid \mathcal{G}_i\right] \lesssim \frac{1}{1+\sqrt{k-i}}.$$ Therefore, \begin{align*} & \mathbb{P}[\tau_1\le \tau_2\le \tau_3<\infty] \lesssim \sum_{i=\varepsilon_k}^k \mathbb{E}\left[{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}\cdot \frac{G(S_i-\widetilde S_{\tau_1})}{1+\sqrt{k-i}}\right]\\ & \lesssim \sum_{z\in \mathbb{Z}^5}\mathbb{P}[\tau_1<\infty, \, \widetilde S_{\tau_1}=z] \cdot\left( \sum_{i=\varepsilon_k}^{k/2} \frac{\mathbb{E}[G(S_i-z)]}{\sqrt k} + \sum_{i=k/2}^k \frac{\mathbb{E}[G(S_i-z)]}{1+\sqrt{k-i}}\right)\\ & \stackrel{\eqref{exp.Green}}{\lesssim} \frac{1}{\sqrt{k\varepsilon_k}}\cdot \mathbb{P}[\tau_1<\infty] \stackrel{\eqref{lem.hit.2}}{\lesssim}\frac 1{\varepsilon_k\sqrt k}. \end{align*} We consider next the term corresponding to $i_1=1$, $i_2=3$ and $i_3=2$, whose analysis slightly differs from the previous one. First Lemma \ref{lem.prep.123} gives \begin{align}\label{tau132}
& \mathbb{P}[\tau_1\le \tau_3\le \tau_2<\infty] =\sum_{x,y\in \mathbb{Z}^5} \mathbb{E}\left[{\text{\Large $\mathfrak 1$}}\{\tau_1\le \tau_3<\infty, \widetilde S_{\tau_3}=y, S_k=x\} \sum_{i=\varepsilon_k}^k G(S_i-y) \right]\\ \nonumber = & \sum_{x,y\in \mathbb{Z}^5} \left(\sum_{i=\varepsilon_k}^k \sum_{z\in \mathbb{Z}^5} p_i(z)G(z-y) p_{k-i}(x-z)\right) \mathbb{P}\left[\tau_1\le \tau_3<\infty, \widetilde S_{\tau_3}=y\mid S_k=x\right]\\
\nonumber \lesssim & \sum_{x\in \mathbb{Z}^5} \frac{1}{(\|x\| + \sqrt k)^5}\left(\frac{\mathbb{P}[\tau_1\le \tau_3 <\infty\mid S_k=x]}{\sqrt{\varepsilon_k}} +
\mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_1\le \tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} - x\|}\ \Big| \ S_k=x\right] \right). \end{align} We then have \begin{align*} & \mathbb{P}[\tau_1\le \tau_3<\infty\mid S_k=x] \stackrel{\eqref{lem.hit.2}}{\lesssim}
\mathbb{E}\left[ \frac { {\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}}{1+\|\widetilde S_{\tau_1}-x\|}\right] \\
&\stackrel{\eqref{lem.hit.1}}{\lesssim} \sum_{y\in \mathbb{Z}^5} \frac{G_{\varepsilon_k}(y) G(y)}{1+\|y-x\|} \stackrel{\text{Lemma }\ref{lem.upconvolG}}{\lesssim} \frac{1}{(1+\|x\|) \sqrt \varepsilon_k} + \sum_{\|y-x\|\le \frac{\|x\|}{2}}\frac{G_{\varepsilon_k}(y) G(y)}{1+\|y-x\|} . \end{align*}
Moreover, when $\|x\|\ge \sqrt {\varepsilon_k}$, one has \begin{align*}
\sum_{\|y-x\|\le \frac{\|x\|}{2}} \frac{G_{\varepsilon_k}(y) G(y)}{1+\|y-x\|} \stackrel{\eqref{Green}}{\lesssim} \frac{1}{\|x\|^6} \sum_{\|y-x\|\le \frac{\|x\|}{2}} \frac{1}{1+\|y-x\|}
\lesssim \frac{1}{\|x\|^2}, \end{align*}
while, when $\|x\|\le \sqrt {\varepsilon_k}$,
$$\sum_{\|y-x\|\le \frac{\|x\|}{2}} \frac{G_{\varepsilon_k}(y) G(y)}{1+\|y-x\|} \stackrel{\eqref{Green}}{\lesssim} (1+\|x\|) \varepsilon_k^{-3/2} \lesssim \frac {1}{\varepsilon_k}.$$ Therefore, it holds for any $x$, \begin{equation}\label{tau132.1}
\mathbb{P}[\tau_1\le \tau_3<\infty\mid S_k=x] \lesssim \frac{1}{(1+\|x\|)\sqrt \varepsilon_k}. \end{equation} Similarly, one has \begin{align}\label{tau132.2}
\nonumber & \mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_1\le \tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} - x\|}\ \Big| \ S_k=x\right] \le \mathbb{E}\left[ \sum_{y\in \mathbb{Z}^5} \frac{G(y-\widetilde S_{\tau_1}) G(y-x)}{1+\|y-x\|}{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}\right]\\
& \le \mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}}{1+\|\widetilde S_{\tau_1} - x\|^2}\right] \le \sum_{y\in \mathbb{Z}^5} \frac{G_{\varepsilon_k}(y) G(y)}{1+\|y-x\|^2}
\lesssim \frac{1}{(1+\|x\|^2)\sqrt{\varepsilon_k}}. \end{align} Injecting \eqref{tau132.1} and \eqref{tau132.2} into \eqref{tau132} finally gives $$\mathbb{P}[\tau_1\le \tau_3\le \tau_2<\infty] \lesssim \frac{1}{\varepsilon_k\sqrt k}.$$ The other terms in \eqref{phi123.tauij} are entirely similar, so this concludes the proof of the lemma. \end{proof}
For the proof of Lemma \ref{lem.ijl}, one needs some additional estimates that we state as two separate lemmas. \begin{lemma}\label{lem.prep.ijl} There exists a constant $C>0$, such that for any $x,y\in \mathbb{Z}^5$, \begin{align*}
\sum_{i=\varepsilon_k}^{k-\varepsilon_k}\sum_{z\in \mathbb{Z}^5} & \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5}\left(\frac{1}{1+\|z-x\|} + \frac 1{\sqrt{k-i}}\right) \\ & \le C\cdot \left\{ \begin{array}{ll}
\frac{1}{k^{5/2}}\left( \frac 1{1+\|x\|^2} + \frac{1}{\varepsilon_k}\right) + \frac{1}{k^{3/2}\varepsilon_k^{3/2}(1+\|y-x\|)}&\quad \text{if }\|x\|\le \sqrt k\\
\frac{1}{\|x\|^5\varepsilon_k}\left(1+\frac{k}{\sqrt{\varepsilon_k}(1+\|y-x\|)} \right) &\quad \text{if }\|x\|>\sqrt k. \end{array} \right. \end{align*} \end{lemma} \begin{proof}
We proceed similarly as for the proof of Lemma \ref{lem.prep.123}. Assume first that $\|x\|\le \sqrt k$. On one hand, using Lemma \ref{lem.upconvolG}, we get
$$\sum_{i=\varepsilon_k}^{k/2} \frac{1}{\sqrt{k-i}} \sum_{z\in \mathbb{Z}^5} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5}\lesssim \frac{1}{k^3} \sum_{z\in \mathbb{Z}^5} G_{\varepsilon_k}(z) G(z-y) \lesssim \frac{1}{k^{5/2}\sqrt{k\varepsilon_k}},$$ and, \begin{align*}
&\sum_{i=\varepsilon_k}^{k/2} \sum_{z\in \mathbb{Z}^5} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5(1+\|z-x\|)} \lesssim \frac{1}{k^{5/2}}\sum_{z\in \mathbb{Z}^5} \frac{G_{\varepsilon_k}(z)G(z-y)}{1+\|z-x\|}\\
&\lesssim \frac{1}{k^{5/2}} \left(\sum_{\|z-x\|\ge \frac{\|x\|}{2}} \frac{G_{\varepsilon_k}(z)G(z-y)}{1+\|z-x\|} + \sum_{\|z-x\|\le \frac{\|x\|}{2}} \frac{G_{\varepsilon_k}(z)G(z-y)}{1+\|z-x\|}\right) \\
& \lesssim \frac{1}{k^{5/2}} \left(\frac{1}{(1+\|x\|)\sqrt{\varepsilon_k}} + \frac{1}{1+\|x\|^2}\right) \lesssim \frac{1}{k^{5/2}} \left(\frac{1}{1+\|x\|^2}+ \frac 1{\varepsilon_k}\right). \end{align*} On the other hand, by \eqref{pn.largex} \begin{align*}
\sum_{i=k/2}^{k-\varepsilon_k} \sum_{\|z\|>2\sqrt k} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5}\left(\frac{1}{1+\|z-x\|}+ \frac{1}{\sqrt{k-i}} \right)
\lesssim \frac{1}{k^2}\sum_{\|z\|>2\sqrt k} \frac{G(z-y)}{\|z\|^5} \lesssim k^{-\frac 72}. \end{align*} Furthermore, \begin{align*}
\sum_{i=k/2}^{k-\varepsilon_k} \frac 1{\sqrt{k-i}} \sum_{\|z\|\le 2\sqrt k} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5}
\lesssim \frac{1}{k^2\varepsilon_k}\sum_{\|z\|\le 2\sqrt k} \frac{G(z-y)}{1+\|z-x\|^3} \lesssim \frac{(k^2\varepsilon_k)^{-1}}{1+\|y-x\|}, \end{align*} and \begin{align*}
\sum_{i=\frac k2}^{k-\varepsilon_k} \sum_{\|z\|\le 2\sqrt k} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5}\frac{1}{1+\|z-x\|} & \lesssim \frac{1}{k^{3/2}\varepsilon_k^{3/2}}\sum_{\|z\|\le 2\sqrt k} \frac{G(z-y)}{1+\|z-x\|^3} \\
& \lesssim \frac{1}{k^{3/2}\varepsilon_k^{3/2}} \frac{1}{1+\|y-x\|}. \end{align*}
Assume now that $\|x\|>\sqrt k$. One has on one hand, using Lemma \ref{lem.upconvolG}, \begin{align*}
\sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{\|z-x\|\ge \frac{\|x\|}2} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5}\left(\frac{1}{1+\|z-x\|}+ \frac{1}{\sqrt{k-i}} \right)
\lesssim \frac{1}{\|x\|^5 \varepsilon_k}. \end{align*} On the other hand, \begin{align*}
\sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{\|z-x\|\le \frac{\|x\|}2} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5} \frac{1}{1+\|z-x\|} & \lesssim \frac{k}{\|x\|^5 \varepsilon_k^{3/2}} \sum_{z\in \mathbb{Z}^5} \frac{G(z-y)}{1+\|z-x\|^3} \\
& \lesssim \frac{k}{\|x\|^5 \varepsilon_k^{3/2}(1+\|y-x\|)}, \end{align*} and \begin{align*}
\sum_{i=\varepsilon_k}^{k-\varepsilon_k}\frac 1{\sqrt {k-i}} \sum_{\|z-x\|\le \frac{\|x\|}2} \frac{p_i(z) G(z-y)}{(\|z-x\| + \sqrt{k-i})^5} & \lesssim \frac{\sqrt k}{\|x\|^5 \varepsilon_k} \sum_{z\in \mathbb{Z}^5} \frac{G(z-y)}{1+\|z-x\|^3} \\
& \lesssim \frac{\sqrt k}{\|x\|^5 \varepsilon_k(1+\|y-x\|)}, \end{align*} concluding the proof of the lemma. \end{proof}
\begin{lemma}\label{lem.prep.ijl2} There exists a constant $C>0$, such that for any $x,y\in \mathbb{Z}^5$, \begin{align*}
& \sum_{v\in \mathbb{Z}^5} \frac{1}{(\|v\|+\sqrt k)^5} \left(\frac{1}{1+\|x-v\|} + \frac{1}{ 1+\|x\|}\right)\frac 1{(\|x-v\|+ \sqrt{\varepsilon_k} )^5} \left(\frac{1}{1+\|y-x\|}+\frac 1{1+\|y-v\|}\right)\\ & \le C\cdot \left\{ \begin{array}{ll}
\frac{1}{k^2\varepsilon_k} \left( \frac 1{\sqrt {\varepsilon_k}} + \frac{1}{1+\|x\|} +\frac 1{1+\|y-x\|}+\frac{\sqrt{\varepsilon_k}}{(1+\|x\|) (1+\|y-x\|)}\right) & \quad \text{if }\|x\|\le \sqrt k\\
\frac{\log (\frac{\|x\|}{\sqrt{\varepsilon_k}})}{\|x\|^5\sqrt{\varepsilon_k}} \left(\frac{1}{1+\|y-x\|} + \frac 1{\sqrt k} \right) & \quad \text{if }\|x\|>\sqrt k. \end{array} \right. \end{align*} \end{lemma} \begin{proof}
Assume first that $\|x\| \le \sqrt k$. In this case it suffices to notice that on one hand, for any $\alpha\in \{3,4\}$, one has
$$\sum_{\|v\|\le 2 \sqrt k} \frac{1}{(1+\|x-v\|^\alpha)(1+\|y-v\|^{4-\alpha})} = \mathcal{O}(\sqrt k),$$ and on the other hand, for any $\alpha, \beta \in \{0,1\}$,
$$\sum_{\|v\|>2\sqrt k} \frac{1}{\|v\|^{10+\alpha} (1+\|y-v\|)^\beta} = \mathcal{O}(k^{-5/2 - \alpha - \beta}). $$
Assume next that $\|x\|>\sqrt k$. In this case it is enough to observe that
$$\sum_{\|v\|\le \frac{\sqrt k}{2}} \left(\frac{1}{1+\|x-v\|} + \frac{1}{\|x\|}\right) \left(\frac{1}{1+\|y-x\|}+\frac 1{1+\|y-v\|}\right) \lesssim \frac {k^2}{(1+\|y-x\|)},$$
$$\sum_{\|v\|\ge \frac{\sqrt k}{2}} \frac{1}{\|v\|^5 (\sqrt{\varepsilon_k}+\|x-v\|)^5} \lesssim \frac{\log (\frac{\|x\|}{\sqrt{\varepsilon_k}})}{\|x\|^5} ,$$
$$\sum_{\|v\|\ge \frac{\sqrt k}{2}} \frac{1}{\|v\|^5 (\sqrt{\varepsilon_k}+\|x-v\|)^5(1+\|y-v\|)} \lesssim \frac{\log (\frac{\|x\|}{\sqrt{\varepsilon_k}})}{\|x\|^5}\left(\frac 1{\sqrt k} + \frac 1{1+\|y-x\|}\right). $$ \end{proof}
\begin{proof}[Proof of Lemma \ref{lem.ijl}] First note that for any $\ell$, one has $\mathbb{E}[\psi_\ell] = \mathcal{O}(\varepsilon_k^{-1/2})$, by \eqref{lem.hit.3}. Using also similar arguments as in the proof of Lemma \ref{lem.123}, that we will not reproduce here, one can see that $\mathbb{E}[\varphi_{i,j}] = \mathcal{O}(\varepsilon_k^{-1})$, for any $i\neq j$. Thus only the terms of the form $\mathbb{E}[\varphi_{i,j}\psi_\ell]$ are at stake.
Let $(S_n)_{n\in \mathbb{Z}}$, $(\widetilde S_n)_{n\ge 0}$ and $(\widehat S_n)_{n\ge 0}$ be three independent walks starting from the origin. Recall the definition of $\tau_1$, $\tau_2$ and $\tau_3$ from the proof of Lemma \ref{lem.123}, and define analogously $$\widehat \tau_1:=\inf\{n\ge 1 : S_k+\widehat S_n \in \mathcal{R}(-\infty,-1]\}, \, \widehat \tau_2:=\inf\{n\ge 1 : S_k+\widehat S_n \in \mathcal{R}[0,k-\varepsilon_k-1]\},$$ and $$\widehat \tau_3:=\inf\{n\ge 1 : S_k+\widehat S_n \in \mathcal{R}[k+\varepsilon_k+1,\infty)\}.$$ When $\ell\neq i,j$, one can take advantage of the independence between the different parts of the range of $S$, at least once we condition on the value of $S_k$. This allows for instance to write $$\mathbb{E}[\varphi_{1,2}\psi_3] \le \mathbb{P}[\tau_1<\infty,\, \tau_2<\infty,\, \widehat \tau_3<\infty] =\mathbb{P}[\tau_1<\infty,\, \tau_2<\infty] \mathbb{P}[\widehat \tau_3<\infty] \lesssim \varepsilon_k^{-3/2},$$ using independence for the second equality and our previous estimates for the last one. Similarly, \begin{align*} & \mathbb{E}[\varphi_{1,3} \psi_2] \le \sum_{x\in \mathbb{Z}^5} \mathbb{P}[\tau_1<\infty, \, \tau_3<\infty \mid S_k=x] \times \mathbb{P}[\widehat \tau_2<\infty,\, S_k=x] \\
&\lesssim \sum_{x\in \mathbb{Z}^5} \frac{1}{(1+\|x\|)\sqrt \varepsilon_k}\cdot \frac 1{(1+\|x\| + \sqrt k)^5}\left(\frac{1}{1+\|x\|}+\frac 1{\sqrt{\varepsilon_k}}\right) \lesssim \frac{1}{\varepsilon_k \sqrt k}, \end{align*} using \eqref{tau132.1} and Lemma \ref{lem.prep.123} for the second inequality. The term $\mathbb{E}[\varphi_{2,3} \psi_1]$ is handled similarly. We consider now the other cases. One has \begin{equation}\label{phi233} \mathbb{E}[\varphi_{2,3} \psi_3] \le \mathbb{P}[\tau_2\le \tau_3<\infty,\, \widehat \tau_3<\infty] + \mathbb{P}[\tau_3\le \tau_2<\infty,\, \widehat \tau_3<\infty]. \end{equation} By using the Markov property at time $\tau_2$, one can write \begin{align*} & \mathbb{P}[\tau_2\le \tau_3<\infty,\, \widehat \tau_3<\infty] \\ & \le \sum_{x,y\in \mathbb{Z}^5} \mathbb{E}\left[\left(\sum_{i=0}^\infty G(S_i-y+x)\right)\left(\sum_{j=\varepsilon_k}^\infty G(S_j) \right) \right] \mathbb{P}[\tau_2<\infty, \widetilde S_{\tau_2}=y, S_k=x]. \end{align*} Then applying Lemmas \ref{lem.upconvolG} and \ref{lem.prep.123}, we get \begin{align}\label{tau33} \nonumber & \mathbb{E}\left[ \left(\sum_{i=0}^{\varepsilon_k} G(S_i-y+x)\right) \left(\sum_{j=\varepsilon_k}^\infty G(S_j) \right)\right]\\ & \nonumber = \sum_{v\in \mathbb{Z}^5} \mathbb{E}\left[\left(\sum_{i=0}^{\varepsilon_k} G(S_i-y+x) \right){\text{\Large $\mathfrak 1$}}\{S_{\varepsilon_k} = v\} \right] \mathbb{E}\left[\left(\sum_{j=0}^\infty G(S_j+v) \right)\right]\\
\nonumber & \lesssim \sum_{v\in \mathbb{Z}^5} \frac{1}{1+\|v\|}\cdot \left(\sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} p_i(z) G(z-y+x) p_{\varepsilon_k-i}(v-z) \right)\\
& \lesssim \sum_{v\in \mathbb{Z}^5} \frac{1}{1+\|v\|} \frac{1}{(\|v\|+ \sqrt{\varepsilon_k})^5} \left(\frac{1}{1+\|v-y+x\|} + \frac{1}{1+\|y-x\|}\right) \lesssim \frac{\varepsilon_k^{-1/2}}{ 1+\|y-x\|}. \end{align} Likewise, \begin{align}\label{tau33bis}
\nonumber \mathbb{E}\left[ \left(\sum_{i=\varepsilon_k}^\infty G(S_i-y+x)\right)\left(\sum_{j=\varepsilon_k}^\infty G(S_j) \right)\right] &\le \sum_{z\in \mathbb{Z}^5} G_{\varepsilon_k}(z) \left(\frac{G(z-y+x)}{1+\|z\|} + \frac{G(z)}{1+\|z-y+x\|}\right)\\
&\lesssim \frac{1}{\sqrt{\varepsilon_k} (1+\|y-x\|)}. \end{align} Recall now that by \eqref{lem.hit.3}, one has $\mathbb{P}[\tau_2<\infty] \lesssim \varepsilon_k^{-1/2}$. Moreover, from the proof of Lemma \ref{lem.123}, one can deduce that
$$\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\}}{\|\widetilde S_{\tau_2}-S_k\|}\right] \lesssim \frac{1}{\sqrt {k \varepsilon_k}}.$$ Combining all these estimates we conclude that $$\mathbb{P}[\tau_2\le \tau_3<\infty,\, \widehat \tau_3<\infty] \lesssim \frac 1{\varepsilon_k\sqrt k}.$$ We deal next with the second term in the right-hand side of \eqref{phi233}. Applying the Markov property at time $\tau_3$, and then Lemma \ref{lem.prep.123}, we obtain \begin{align}\label{tau323.start} \nonumber & \mathbb{P}[\tau_3\le \tau_2<\infty,\, \widehat \tau_3<\infty] \\ &\nonumber \le \sum_{x,y\in \mathbb{Z}^5}\left(\sum_{i=\varepsilon_k}^k \mathbb{E}[G(S_i-y){\text{\Large $\mathfrak 1$}}\{S_k=x\}]\right) \mathbb{P}[\tau_3<\infty, \widehat \tau_3<\infty, \widetilde S_{\tau_3} = y\mid S_k=x]\\
\nonumber & \lesssim \sum_{x,y\in \mathbb{Z}^5} \frac{1}{(\|x\|+\sqrt k)^5} \left(\frac{1}{1+\|y-x\|}+\frac{1}{\sqrt{\varepsilon_k}}\right) \mathbb{P}[\tau_3<\infty, \widehat \tau_3<\infty, \widetilde S_{\tau_3} = y\mid S_k=x]\\
\nonumber &\lesssim \sum_{x\in \mathbb{Z}^5} \frac{1}{(\|x\|+\sqrt k)^5}\left( \frac{\mathbb{P}[\tau_3<\infty, \widehat \tau_3<\infty\mid S_k=x]}{\sqrt{\varepsilon_k}} +
\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty, \widehat \tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} - x\|}\ \Big| \ S_k=x\right]\right)\\
& \lesssim \sum_{x\in \mathbb{Z}^5} \frac{1}{(\|x\|+\sqrt k)^5}\left( \frac{1}{\varepsilon_k(1+\|x\|)} + \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty, \widehat \tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} - x\|}\ \Big| \ S_k=x\right]\right), \end{align} using also \eqref{tau33} and \eqref{tau33bis} (with $y=0$) for the last inequality. We use now \eqref{hit.ball} and Lemma \ref{hit.ball.overshoot} to remove the denominator in the last expectation above. Define for $r\ge 0$, and $x\in \mathbb{Z}^5$,
$$\eta_r(x):=\inf\{n\ge 0\ :\ \|\widetilde S_n -x\|\le r\}.$$
On the event when $r/2\le \|\widetilde S_{\eta_r(x)} -x\| \le r$, one applies the Markov property at time $\eta_r(x)$, and we deduce from \eqref{hit.ball} and Lemma \ref{hit.ball.overshoot} that \begin{align*}
&\mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty, \, \widehat \tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} - x\|}\ \Big|\ S_k=x\right] \le \frac{\mathbb{P}[\tau_3<\infty, \, \widehat \tau_3<\infty\mid S_k=x]}{1+\|x\| } \\
&\qquad + \sum_{i=0}^{\log_2\|x\|}
\frac{\mathbb{P}\left[\tau_3<\infty, \, \widehat \tau_3<\infty,\, 2^i \le \|\widetilde S_{\tau_3} -x\| \le 2^{i+1}\mid S_k=x\right]}{2^i} \\
& \lesssim \frac{1}{\sqrt{\varepsilon_k}(1+\|x\|^2)} + \sum_{i=0}^{\log_2\|x\|} \frac{\mathbb{P}\left[\eta_{2^{i+1}}(x)\le \tau_3<\infty, \, \widehat \tau_3<\infty\mid S_k=x\right]}{2^i} \\
& \lesssim \frac{\varepsilon_k^{-1/2}}{1+\|x\|^2} + \frac{\mathbb{P}[\widehat \tau_3<\infty]}{1+\|x\|^3} + \sum_{i=0}^{\log_2\|x\|} \frac{2^{2i}}{1+\|x\|^3} \max_{\|z\|\ge 2^i} \mathbb{P}_{0,0,z}\left[H_{\mathcal{R}[\varepsilon_k,\infty)}<\infty, \widetilde H_{\mathcal{R}_\infty}<\infty \right], \end{align*} where in the last probability, $H$ and $\widetilde H$ refer to hitting times by two independent walks, independent of $S$, starting respectively from the origin and from $z$. Then it follows from \eqref{tau33} and \eqref{tau33bis} that \begin{equation}\label{remove.denominator}
\mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty, \widehat \tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} - x\|}\ \Big|\ S_k=x\right] \lesssim \frac{1}{\sqrt{\varepsilon_k}(1+\|x\|^2)}. \end{equation} Combining this with \eqref{tau323.start}, it yields that \begin{align*} \mathbb{P}[\tau_3\le \tau_2<\infty, \widehat \tau_3 <\infty] \lesssim \frac 1{\varepsilon_k\sqrt k}. \end{align*} The terms $\mathbb{E}[\varphi_{1,3} \psi_3]$ and $\mathbb{E}[\varphi_{1,3}\psi_1]$ are entirely similar, and we omit repeating the proof. Thus it only remains to consider the terms $\mathbb{E}[\varphi_{2,3} \psi_2]$ and $\mathbb{E}[\varphi_{1,2} \psi_2]$. Since they are also similar we only give the details for the former. We start again by writing \begin{equation}\label{tau232} \mathbb{E}[\varphi_{2,3} \psi_2]\le \mathbb{P}[\tau_2\le \tau_3<\infty, \, \widehat \tau_2 <\infty] + \mathbb{P}[\tau_3\le \tau_2<\infty, \, \widehat \tau_2<\infty]. \end{equation} Then one has \begin{align}\label{Sigmai} &\mathbb{P}[\tau_3\le \tau_2<\infty, \, \widehat \tau_2 <\infty] \\ \nonumber \le & \sum_{x,y\in \mathbb{Z}^5}\mathbb{E}\left[\left(\sum_{i=\varepsilon_k}^k G(S_i-y)\right) \left( \sum_{j=0}^{k-\varepsilon_k} G(S_j-x)\right) {\text{\Large $\mathfrak 1$}}\{S_k=x\} \right]
\mathbb{P}[\tau_3<\infty, \widetilde S_{\tau_3}=y\mid S_k=x]\\ \nonumber \le & \sum_{x,y\in \mathbb{Z}^5} \left(\sum_{i=\varepsilon_k}^k \sum_{j=0}^{k-\varepsilon_k}\sum_{z,w\in \mathbb{Z}^5} \mathbb{P}[S_i= z, S_j=w, S_k=x] G(z-y)G(w-x)\right) \\ \nonumber & \qquad \times \mathbb{P}[\tau_3<\infty, \widetilde S_{\tau_3}=y\mid S_k=x]. \end{align} Now for any $x,y\in \mathbb{Z}^5$, \begin{align*} & \Sigma_1(x,y):= \sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{j=\varepsilon_k}^{k-\varepsilon_k} \sum_{z,w\in \mathbb{Z}^5} \mathbb{P}[S_i= z,\, S_j=w,\, S_k=x] G(z-y)G(w-x)\\
& \le 2\sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{z\in \mathbb{Z}^5}p_i(z) G(z-y) \left(\sum_{j=i}^{k-\varepsilon_k} \sum_{w\in \mathbb{Z}^5} p_{j-i}(w-z)G(w-x) p_{k-j}(x-w) \right)\\ & = 2\sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{z\in \mathbb{Z}^5}p_i(z) G(z-y) \left(\sum_{j=\varepsilon_k}^k \sum_{w\in \mathbb{Z}^5} p_j(w) G(w) p_{k-i-j}(w+x-z) \right)\\
& \stackrel{\text{Lemma } \ref{lem.prep.123}}{\lesssim} \sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{z\in \mathbb{Z}^5} \frac{p_i(z)G(z-y)}{(\|z-x\| + \sqrt{k-i})^5} \left(\frac 1{1+\|z-x\|} + \frac{1}{\sqrt{k-i}}\right)\\
& \stackrel{\text{Lemma } \ref{lem.prep.ijl}}{\lesssim} \left\{ \begin{array}{ll}
\frac{1}{k^{5/2}}\left( \frac 1{1+\|x\|^2} + \frac{1}{\varepsilon_k}\right) + \frac{1}{k^{3/2}\varepsilon_k^{3/2}(1+\|y-x\|)} & \text{if }\|x\|\le \sqrt k\\
\frac{1}{\|x\|^5\varepsilon_k}\left(1+ \frac{k}{\sqrt{\varepsilon_k}(1+\|y-x\|)}\right) &\text{if }\|x\|>\sqrt k. \end{array} \right. \end{align*} We also have \begin{align*} & \Sigma_2(x,y) := \sum_{i=k-\varepsilon_k}^k \sum_{j=0}^{k-\varepsilon_k} \sum_{z,w\in \mathbb{Z}^5} \mathbb{P}[S_i= z, S_j=w, S_k=x] G(z-y)G(w-x)\\
=& \sum_{i=k-\varepsilon_k}^k \sum_{j=0}^{k-\varepsilon_k} \sum_{z,v,w\in \mathbb{Z}^5} \mathbb{P}[S_j= w, S_{k-\varepsilon_k} = v, S_i=z, S_k=x] G(z-y)G(w-x)\\
=& \sum_{v\in \mathbb{Z}^5} \left(\sum_{j=0}^{k-\varepsilon_k} \sum_{w\in \mathbb{Z}^5} p_j(w) p_{k-\varepsilon_k-j}(v-w) G(w-x)\right) \left(\sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} p_i(z-v) p_{\varepsilon_k-i}(x-z)G(z-y)\right), \end{align*} and applying then Lemmas \ref{lem.prep.123} and \ref{lem.prep.ijl2}, gives \begin{align*} & \Sigma_2(x,y) \\
\lesssim & \sum_{v\in \mathbb{Z}^5} \frac{1}{(\|v\|+\sqrt k)^5}\left(\frac{1}{1+\|x-v\|} + \frac{1}{ 1+\|x\|}\right)\frac 1{(\|x-v\|+ \sqrt{\varepsilon_k} )^5} \left(\frac{1}{1+\|y-x\|}+\frac 1{1+\|y-v\|}\right)\\
\lesssim & \left\{ \begin{array}{ll}
\frac{1}{k^2\varepsilon_k} \left( \frac 1{\sqrt {\varepsilon_k}} + \frac{1}{1+\|x\|} +\frac 1{1+\|y-x\|}+\frac{\sqrt{\varepsilon_k}}{(1+\|x\|) (1+\|y-x\|)}\right) & \quad \text{if }\|x\|\le \sqrt k\\
\frac{\log (\frac{\|x\|}{\sqrt{\varepsilon_k}})}{\|x\|^5\sqrt{\varepsilon_k}} \left(\frac{1}{1+\|y-x\|} + \frac 1{\sqrt k} \right) &\quad \text{if }\|x\|>\sqrt k. \end{array} \right. \end{align*} Likewise, by reversing time, one has \begin{align*} & \Sigma_3(x,y):= \sum_{i=\varepsilon_k}^k \sum_{j=0}^{\varepsilon_k} \sum_{z,w\in \mathbb{Z}^5} \mathbb{P}[S_i= z, S_j=w, S_k=x] G(z-y)G(w-x)\\
=& \sum_{i=0}^{k-\varepsilon_k} \sum_{j=k-\varepsilon_k}^k \sum_{z,v,w\in \mathbb{Z}^5} \mathbb{P}[S_i = z-x, S_{k-\varepsilon_k} = v-x, S_j= w-x, S_k=-x] G(z-y)G(w-x)\\
=& \sum_{v\in \mathbb{Z}^5} \left(\sum_{i=0}^{k-\varepsilon_k} \sum_{z\in \mathbb{Z}^5} p_i(z-x) p_{k-\varepsilon_k-i}(v-z) G(z-y)\right) \left(\sum_{j=0}^{\varepsilon_k} \sum_{w\in \mathbb{Z}^5} p_j(w-v) p_{\varepsilon_k-j}(w)G(w-x)\right) \\
\lesssim & \sum_{v\in \mathbb{Z}^5} \frac{1}{(\|v-x\|+\sqrt k)^5}\left(\frac{1}{1+\|y-v\|} + \frac{1}{ 1+\|y-x\|}\right)\frac 1{(\|v\|+ \sqrt{\varepsilon_k} )^5} \left(\frac{1}{1+\|x\|}+\frac 1{1+\|x-v\|}\right), \end{align*} and then a similar argument as in the proof of Lemma \ref{lem.prep.ijl2} gives the same bound for $\Sigma_3(x,y)$ as for $\Sigma_2(x,y)$. Now recall that \eqref{Sigmai} yields $$\mathbb{P}[\tau_3\le \tau_2<\infty, \widehat \tau_2 <\infty] \le \sum_{x,y\in \mathbb{Z}^5} \left(\Sigma_1(x,y) + \Sigma_2(x,y) + \Sigma_3(x,y)\right) \mathbb{P}[\tau_3<\infty, \widetilde S_{\tau_3}=y\mid S_k=x]. $$ Recall also that by \eqref{lem.hit.2},
$$\mathbb{P}[\tau_3<\infty\mid S_k=x] \lesssim \frac{1}{1+\|x\|},$$ and \begin{align*}
\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty\}}{1+\|\widetilde S_{\tau_3} -x \| }\, \Big|\, S_k=x\right]
\le \sum_{y\in \mathbb{Z}^5} \frac{G(y) G(y-x)}{1+\|y-x\|}
\lesssim \frac{1}{1+\|x\|^2}. \end{align*}
Furthermore, for any $\alpha\in\{1,2,3\}$, and any $\beta\ge 6$,
$$\sum_{\|x\|\le \sqrt k} \frac{1}{1+\|x\|^\alpha} \lesssim k^{\frac{5-\alpha}{2}}, \quad \sum_{\|x\|\ge \sqrt k} \frac {\log (\frac{\|x\|}{\sqrt{\varepsilon_k}})} {\|x\|^{\beta}} \le \sum_{\|x\|\ge \sqrt{\varepsilon_k}} \frac {\log (\frac{\|x\|}{\sqrt{\varepsilon_k}})} {\|x\|^{\beta}}\lesssim \varepsilon_k^{\frac{5-\beta}{2}}.$$
Putting all these pieces together we conclude that
$$\mathbb{P}[\tau_3\le \tau_2<\infty, \, \widehat \tau_2 <\infty] \lesssim \varepsilon_k^{-3/2}. $$ We deal now with the other term in \eqref{tau232}. As previously, we first write using the Markov property, and then using \eqref{lem.hit.1} and Lemma \ref{lem.upconvolG}, \begin{align*}
\mathbb{P}[\tau_2\le \tau_3<\infty, \, \widehat \tau_2 <\infty] \le \mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_2<\infty, \, \widehat \tau_2<\infty\}}{1+\|\widetilde S_{\tau_2} - S_k\|}\right]. \end{align*} Then using \eqref{hit.ball} and Lemma \ref{hit.ball.overshoot} one can handle the denominator in the last expectation, the same way as for \eqref{remove.denominator}, and we conclude similarly that \begin{align*} \mathbb{P}[\tau_2\le \tau_3<\infty, \, \widehat \tau_2 <\infty] \lesssim \varepsilon_k^{-3/2}. \end{align*} This finishes the proof of Lemma \ref{lem.ijl}. \end{proof}
\section{Proof of Propositions \ref{prop.ij0} and \ref{prop.phipsi.1}} For the proof of these propositions we shall need the following estimate. \begin{lemma}\label{lem.prep.0ij} One has for all $x,y\in \mathbb{Z}^5$, \begin{align*} \sum_{i=k-\varepsilon_k}^k & \mathbb{E}\left[G(S_i-y) {\text{\Large $\mathfrak 1$}}\{S_k=x\}\right] \\
& \lesssim \varepsilon_k \left(\frac{\log (2+\frac{\|y-x\|}{\sqrt{\varepsilon_k}})}{(\|x\|+\sqrt k)^5(\|y-x\| + \sqrt{\varepsilon_k})^3}
+ \frac{\log (2+\frac{\|y\|}{\sqrt{k}})}{(\|x\|+\sqrt{\varepsilon_k})^5(\|y\| + \sqrt{k})^3}\right). \end{align*} \end{lemma} \begin{proof} One has using \eqref{pn.largex} and \eqref{Green}, \begin{align*} &\sum_{i=k-\varepsilon_k}^k \mathbb{E}\left[G(S_i-y){\text{\Large $\mathfrak 1$}}\{S_k=x\}\right] = \sum_{i=k-\varepsilon_k}^k \sum_{z\in \mathbb{Z}^5} p_i(z) G(z-y) p_{k-i}(x-z)\\
& \lesssim \sum_{z\in \mathbb{Z}^5} \frac{\varepsilon_k}{(\|z\| + \sqrt k)^5(1+\|z-y\|^3) (\|x-z\| +\sqrt{\varepsilon_k})^5} \\
& \lesssim \frac{1}{\varepsilon_k^{3/2}(\|x\|+\sqrt k)^5} \sum_{\|z-x\|\le \sqrt{\varepsilon_k}}\frac 1{1+\|z-y\|^3} \\
& \quad + \frac{\varepsilon_k}{(\|x\|+\sqrt k)^5} \sum_{\sqrt{\varepsilon_k}\le \|z-x\|\le \frac{\|x\|}{2}} \frac{1}{(1+\|z-y\|^3)(1+\|z-x\|^5)} \\
&\quad + \frac{\varepsilon_k}{(\|x\|+\sqrt {\varepsilon_k})^5} \sum_{\|z-x\|\ge \frac{\|x\|}{2}}\frac{1}{(\|z\|+\sqrt k)^5(1+\|z-y\|^3)}. \end{align*} Then it suffices to observe that
$$ \sum_{\|z-x\|\le \sqrt{\varepsilon_k}}\frac 1{1+\|z-y\|^3} \lesssim \frac{\varepsilon_k^{5/2}}{(\|y-x\| + \sqrt{\varepsilon_k})^3},$$
$$ \sum_{\sqrt{\varepsilon_k}\le \|z-x\|\le \frac{\|x\|}{2}} \frac{1}{(1+\|z-y\|^3)(1+\|z-x\|^5)} \lesssim
\frac{\log(2+\frac{\|y-x\|}{\sqrt{\varepsilon_k}})}{(\|y-x\| + \sqrt{\varepsilon_k})^3}, $$
$$ \sum_{z\in \mathbb{Z}^5}\frac{1}{(\|z\|+\sqrt k)^5(1+\|z-y\|^3)} \lesssim \frac{\log (2+\frac{\|y\|}{\sqrt{k}})}{(\|y\| + \sqrt{k})^3}. $$ \end{proof}
\begin{proof}[Proof of Proposition \ref{prop.ij0} (i)] This part is the easiest: it suffices to observe that $\varphi_{1,2}$ is a sum of one term which is independent of $Z_k\psi_0$ and another one, whose expectation is negligible. To be more precise, define $$\varphi_{1,2}^0 := \mathbb{P}\left[H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+ = \infty,\, H^+_{\mathcal{R}(-\infty,-\varepsilon_k-1]}<\infty, \, H^+_{\mathcal{R}[\varepsilon_k+1,k-\varepsilon_k-1]}<\infty \mid S\right],$$ and note that $Z_0\varphi_{1,2}^0$ is independent of $Z_k\psi_0$. It follows that
$$|\operatorname{Cov}(Z_0\varphi_{1,2},Z_k\psi_0)| =|\operatorname{Cov}(Z_0(\varphi_{1,2}-\varphi_{1,2}^0),Z_k\psi_0)|\le \mathbb{P}\left[\tau_1<\infty, \, \tau_*<\infty\right],$$ with $\tau_1$ and $\tau_*$ the hitting times respectively of $\mathcal{R}(-\infty,-\varepsilon_k]$ and $\mathcal{R}[k-\varepsilon_k,k]$ by another walk $\widetilde S$ starting from the origin, independent of $S$. Now, using \eqref{pn.largex}, we get \begin{align*} \mathbb{P}[\tau_1\le \tau_*<\infty] & \le \mathbb{E}\left[{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty \} \left(\sum_{i=k-\varepsilon_k}^k G(S_i-\widetilde S_{\tau_1})\right)\right] \\ & \le \sum_{y\in \mathbb{Z}^5} \left(\sum_{z\in \mathbb{Z}^5} \sum_{i=k-\varepsilon_k}^k p_i(z) G(z-y) \right)\mathbb{P}[\tau_1<\infty, \, \widetilde S_{\tau_1} = y]\\ & \lesssim \frac{\varepsilon_k}{k^{3/2}} \, \mathbb{P}[\tau_1<\infty] \stackrel{\eqref{lem.hit.3}}{\lesssim} \frac{\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} Likewise, using now Lemma \ref{lem.upconvolG}, \begin{align*} \mathbb{P}[\tau_*\le \tau_1<\infty] & \le \mathbb{E}\left[{\text{\Large $\mathfrak 1$}}\{\tau_*<\infty \} \left(\sum_{i=\varepsilon_k}^\infty G(S_{-i}-\widetilde S_{\tau_*})\right)\right] \\ & \le \sum_{y\in \mathbb{Z}^5} \left(\sum_{z\in \mathbb{Z}^5} G_{\varepsilon_k}(z) G(z-y) \right)\mathbb{P}[\tau_*<\infty, \, \widetilde S_{\tau_*} = y]\\ & \lesssim \frac{1}{\sqrt{\varepsilon_k}}\, \mathbb{P}[\tau_*<\infty] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}, \end{align*} and the first part of (i) follows. But since $Z_0$ and $Z_k$ have played no role here, the same computation gives the result for the covariance between $Z_0\varphi_0$ and $Z_k \psi_{2,3}$ as well. \end{proof}
\begin{proof}[Proof of Proposition \ref{prop.ij0} (ii)-(iii)] These parts are more involved. Since they are entirely similar, we only prove (iii), and as for (i) we only give the details for the covariance between $Z_0 \varphi_{2,3}$ and $Z_k\psi_0$, since $Z_0$ and $Z_k$ will not play any role here. We define similarly as in the proof of (i), $$\varphi_{2,3}^0 := \mathbb{P}\left[H_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}^+ = \infty,\, H^+_{\mathcal{R}[\varepsilon_k,k-\varepsilon_k]}<\infty, \, H^+_{\mathcal{R}[k+\varepsilon_k,\infty)}<\infty \mid S\right],$$ but observe that this time, the term $\varphi_{2,3}^0$ is no more independent of $\psi_0$. This entails some additional difficulty, on which we shall come back later, but first we show that one can indeed replace $\varphi_{2,3}$ by $\varphi_{2,3}^0$ in the computation of the covariance. For this, denote respectively by $\tau_2$, $\tau_3$, $\tau_*$ and $\tau_{**}$ the hitting times of $\mathcal{R}[\varepsilon_k,k]$, $\mathcal{R}[k,\infty)$, $\mathcal{R}[k-\varepsilon_k,k]$, and $\mathcal{R}[k,k+\varepsilon_k]$ by $\widetilde S$. One has \begin{align*}
\mathbb{E}[|\varphi_{2,3} - \varphi_{2,3}^0|]& \le \mathbb{P}[\tau_2<\infty, \, \tau_{**}<\infty] +\mathbb{P}[\tau_3<\infty, \, \tau_*<\infty]. \end{align*} Using \eqref{pn.largex}, \eqref{lem.hit.1} and Lemma \ref{lem.upconvolG}, we get \begin{align*}
\mathbb{P}[\tau_*\le \tau_3<\infty] & \le \mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\tau_*<\infty\}}{1+\|\widetilde S_{\tau_*} - S_k\|}\right] \le \sum_{i=k-\varepsilon_k}^k \mathbb{E}\left[ \frac{G(S_i)}{1+\|S_i-S_k\|}\right] \\ &\lesssim \sum_{i=k-\varepsilon_k}^k \mathbb{E}\left[ \frac{G(S_i)}{1+\sqrt{k-i} }\right] \lesssim \sum_{z\in \mathbb{Z}^5} \sum_{i=k-\varepsilon_k}^k \frac{p_i(z) G(z)}{1+\sqrt{k-i}}\\
& \lesssim \sqrt{\varepsilon_k} \sum_{z\in \mathbb{Z}^5}\frac 1{(\|z\| + \sqrt k)^5} G(z) \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} Next, applying Lemma \ref{lem.prep.0ij}, we get \begin{align*} & \mathbb{P}[\tau_3\le \tau_*<\infty] \\ & \le \sum_{x,y\in \mathbb{Z}^5} \mathbb{E}\left[\left(\sum_{i=k-\varepsilon_k}^k G(S_i-y)\right){\text{\Large $\mathfrak 1$}}\{S_k=x\}\right] \mathbb{P}[\tau_3<\infty, \widetilde S_{\tau_3} = y\mid S_k=x]\\
& \lesssim \varepsilon_k \sum_{x\in \mathbb{Z}^5} \left( \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty\}\log (2+\frac{\|\widetilde S_{\tau_3}-x\|}{\sqrt{\varepsilon_k}})}{(\|x\|+\sqrt k)^5(\sqrt{\varepsilon_k}+\|\widetilde S_{\tau_3} - x\|)^3}\, \Big|\, S_k=x \right] \right. \\
& \qquad \left. + \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty\}\log(2+\frac{\|\widetilde S_{\tau_3}\|}{\sqrt k})}{(\|x\|+\sqrt{\varepsilon_k})^5(\sqrt{k}+\|\widetilde S_{\tau_3}\|)^3}\, \Big|\, S_k=x \right] \right). \end{align*} Moreover, \begin{align*}
\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty\}\log (2+\frac{\|\widetilde S_{\tau_3}-x\|}{\sqrt{\varepsilon_k}})}{(\sqrt{\varepsilon_k}+\|\widetilde S_{\tau_3} - x\|)^3}\, \Big|\, S_k=x \right] & \stackrel{\eqref{lem.hit.1}}{\le} \sum_{y\in \mathbb{Z}^5} \frac{G(y) G(y-x)
\log (2+\frac{\|y-x\|}{\sqrt{\varepsilon_k}})}{ (\sqrt{\varepsilon_k}+\|y - x\|)^3} \\
& \lesssim \frac{1}{\sqrt{\varepsilon_k}(1+\|x\|)^3}, \end{align*} and \begin{align*}
\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_3<\infty\}\log(2+\frac{\|\widetilde S_{\tau_3}\|}{\sqrt k})}{(\sqrt{k}+\|\widetilde S_{\tau_3}\|)^3}\, \Big|\, S_k=x \right] & \stackrel{\eqref{lem.hit.1}}{\le} \sum_{y\in \mathbb{Z}^5} \frac{G(y) G(y-x)\log(2+\frac{\|y\|}{\sqrt k})}{ (\sqrt{k}+\|y \|)^3} \\
& \lesssim \frac{1}{\sqrt{k}(1+\|x\|)(\sqrt k + \|x\|)^2}. \end{align*} Furthermore, it holds
$$\sum_{x\in \mathbb{Z}^5} \frac{1}{(\|x\|+\sqrt k)^5(1+\|x\|)^3} \lesssim \frac{1}{k^{3/2}},$$
$$\sum_{x\in \mathbb{Z}^5} \frac{1}{(\|x\|+\sqrt{\varepsilon_k})^5(1+\|x\|)(\sqrt k + \|x\|)^2} \lesssim \frac{1}{\sqrt{k\varepsilon_k}},$$ which altogether proves that $$ \mathbb{P}[\tau_3\le \tau_*<\infty] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}.$$ Likewise, \begin{align*}
\mathbb{P}[\tau_2\le \tau_{**}<\infty] \le \sum_{x,y\in \mathbb{Z}^5} \mathbb{E}\left[\sum_{i=0}^{\varepsilon_k} G(S_i-y+x) \right] \mathbb{P}[\tau_2<\infty, \widetilde S_{\tau_2} = y, S_k=x], \end{align*} and using \eqref{Green}, we get \begin{align*} & \mathbb{E}\left[\sum_{i=0}^{\varepsilon_k} G(S_i-y+x) \right] = \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} p_i(z) G(z-y+x)\\
& \lesssim \sum_{\|z\|\le \sqrt{\varepsilon_k}} G(z) G(z-y+x) + \varepsilon_k\, \sum_{\|z\|\ge \sqrt{\varepsilon_k}} \frac{G(z-y+x)}{\|z\|^5} \\
&\lesssim \frac{\varepsilon_k}{(\|y-x\| +\sqrt{\varepsilon_k})^2(1+\|y-x\|)} + \varepsilon_k \, \frac{\log\left(2+\frac{\|y-x\|}{\sqrt{\varepsilon_k}}\right) }{(\|y-x\| + \sqrt{\varepsilon_k})^3}\\
& \lesssim \varepsilon_k \, \frac{\log\left(2+\frac{\|y-x\|}{\sqrt{\varepsilon_k}}\right) }{(\|y-x\| + \sqrt{\varepsilon_k})^2(1+\|y-x\|)}. \end{align*} Therefore, using the Markov property, \begin{align*}
&\mathbb{P}[\tau_2\le \tau_{**}<\infty] \lesssim \varepsilon_k \cdot \mathbb{E}\left[ \frac{\log\left(2+\frac{\|\widetilde S_{\tau_2}-S_k\|}{\sqrt{\varepsilon_k}}\right) \cdot {\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\}}{(\|\widetilde S_{\tau_2}-S_k\| + \sqrt{\varepsilon_k})^2(1+\|\widetilde S_{\tau_2} - S_k\|)} \right]\\
&\lesssim \varepsilon_k \sum_{i=\varepsilon_k}^k \mathbb{E}[G(S_i)] \cdot \mathbb{E}\left[\frac{\log\left(2+\frac{\|S_{k-i}\|}{\sqrt{\varepsilon_k}}\right) }{(\|S_{k-i}\| + \sqrt{\varepsilon_k})^2(1+\| S_{k-i}\|)} \right]. \end{align*} Furthermore, using \eqref{pn.largex} we obtain after straightforward computations,
$$\mathbb{E}\left[\frac{\log\left(2+\frac{\|S_{k-i}\|}{\sqrt{\varepsilon_k}}\right) }{(\|S_{k-i}\| + \sqrt{\varepsilon_k})^2(1+\| S_{k-i}\|)} \right] \lesssim \frac{\log\left(2+\frac{k-i}{\varepsilon_k}\right) }{\sqrt{k-i}(\varepsilon_k + k-i)},$$ and using in addition \eqref{exp.Green}, we conclude that $$\mathbb{P}[\tau_2\le \tau_{**}<\infty] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}\cdot \log(\frac k{\varepsilon_k}).$$ Similarly, using Lemma \ref{lem.prep.123} we get \begin{align*} &\mathbb{P}[\tau_{**} \le \tau_2<\infty] \\
& = \sum_{x,y\in \mathbb{Z}^5} \mathbb{P}[\tau_{**}<\infty,\, \widetilde S_{\tau_{**}} = y \mid S_k=x] \cdot \mathbb{E}\left[\sum_{i=\varepsilon_k}^k G(S_i-y) {\text{\Large $\mathfrak 1$}}\{S_k=x\}\right] \\
& \lesssim \sum_{x\in\mathbb{Z}^5} \frac{1}{(\|x\| + \sqrt{k})^5} \left(\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_{**}<\infty\}}{ 1+\|\widetilde S_{\tau_{**}}-x\|}\, \Big|\, S_k=x\right] + \frac{\mathbb{P}[\tau_{**}<\infty\mid S_k=x]}{\sqrt{\varepsilon_k}}\right). \end{align*} Moreover, one has \begin{align*}
\mathbb{P}[\tau_{**}<\infty\mid S_k=x] &\le \sum_{i=0}^{\varepsilon_k} \mathbb{E}[G(S_i+x)]\lesssim \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} \frac{1}{(1+\|z\| + \sqrt i)^5(1+\|z+x\|^3)} \\
&\lesssim \sum_{\|z\| \le \sqrt{\varepsilon_k}} \frac{1}{(1+\|z\|^3)(1+\|z+x\|^3)} + \sum_{\|z\|\ge \sqrt{\varepsilon_k}} \frac{\varepsilon_k}{\|z\|^5(1+\|z+x\|^3)}\\
&\lesssim \frac{\varepsilon_k \log(2+\frac{\|x\|}{\sqrt{\varepsilon_k}})}{(\sqrt{\varepsilon_k}+\|x\|)^2(1+\|x\|)}, \end{align*} and likewise \begin{align*}
\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_{**}<\infty\}}{ 1+\|\widetilde S_{\tau_{**}}-x\|}\, \Big|\, S_k=x\right] &\le \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} \frac{1}{(1+\|z\| + \sqrt i)^5(1+\|z-x\|^3)(1+\|z\|)} \\
&\lesssim \sum_{\|z\| \le \sqrt{\varepsilon_k}} \frac{1}{(1+\|z\|^4)(1+\|z-x\|^3)} + \sum_{\|z\|\ge \sqrt{\varepsilon_k}} \frac{\varepsilon_k}{\|z\|^6(1+\|z-x\|^3)}\\
&\lesssim \frac{\sqrt{\varepsilon_k} }{(\|x\|+\sqrt{\varepsilon_k})(1+\|x\|^2)}. \end{align*} Then it follows as above that $$\mathbb{P}[\tau_{**} \le \tau_2<\infty] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}\cdot \log(\frac k{\varepsilon_k}).$$ In other words we have proved that
$$\mathbb{E}[|\varphi_{2,3} - \varphi_{2,3}^0|]\lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}\cdot \log(\frac k{\varepsilon_k}).$$ We then have to deal with the fact that $Z_0\varphi_{2,3}^0$ is not really independent of $Z_k\psi_0$. Therefore, we introduce the new random variables $$\widetilde Z_k := {\text{\Large $\mathfrak 1$}}\{S_i \neq S_k \ \forall i=k+1,\dots,k+\varepsilon'_k\}, \ \widetilde \psi_0:=\mathbb{P}_{S_k}\left[H^+_{\mathcal{R}[k-\varepsilon'_k,k+\varepsilon'_k]}=\infty\mid S\right],$$ where $(\varepsilon'_k)_{k\ge 0}$ is another sequence of integers, whose value will be fixed later. For the moment we only assume that it satisfies $\varepsilon'_k\le \varepsilon_k/4$, for all $k$. One has by \eqref{Green} and \eqref{lem.hit.3}, \begin{equation}\label{tildepsi0}
\mathbb{E}[|Z_k\psi_0 - \widetilde Z_k\widetilde \psi_0|] \lesssim \frac{1}{\sqrt{\varepsilon'_k}}. \end{equation} Furthermore, for any $y\in \mathbb{Z}^5$, \begin{align}\label{cov.230} & \mathbb{E}\left[\varphi_{2,3}^0 \mid S_{k+\varepsilon_k} -S_{k-\varepsilon_k}= y\right] = \sum_{x\in \mathbb{Z}^5} \mathbb{E}\left[\varphi_{2,3}^0 {\text{\Large $\mathfrak 1$}}\{S_{k-\varepsilon_k}=x\}\mid S_{k+\varepsilon_k} -S_{k-\varepsilon_k}= y\right]\\
\nonumber & \le \sum_{x\in \mathbb{Z}^5}\mathbb{P}\left[\widetilde \mathcal{R}_\infty \cap \mathcal{R}[\varepsilon_k,k-\varepsilon_k]\neq \varnothing,\, \widetilde \mathcal{R}_\infty \cap (x+y+\widehat \mathcal{R}_\infty)\neq \varnothing,\, S_{k-\varepsilon_k}=x\right], \end{align} where in the last probability, $\widetilde \mathcal{R}_\infty$ and $\widehat \mathcal{R}_\infty$ are the ranges of two independent walks, independent of $S$, starting from the origin. Now $x$ and $y$ being fixed, define $$\tau_1:=\inf\{n\ge 0 : \widetilde S_n\in \mathcal{R}[\varepsilon_k,k-\varepsilon_k]\}, \ \tau_2:= \inf\{n\ge 0 : \widetilde S_n\in (x+y + \widehat \mathcal{R}_\infty)\}.$$ Applying \eqref{lem.hit.1} and the Markov property we get \begin{align*}
&\mathbb{P}[\tau_1\le \tau_2<\infty,\, S_{k-\varepsilon_k} = x] \le \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty,\, S_{k-\varepsilon_k}=x\}}{1+\|\widetilde S_{\tau_1} - (x+y)\|}\right] \\
& \le \sum_{i=\varepsilon_k}^{k-\varepsilon_k} \sum_{z\in \mathbb{Z}^5}\frac{p_i(z) G(z) p_{k-\varepsilon_k-i}(x-z)}{1+\|z-(x+y)\|} \\
&\lesssim \frac{1}{(\|x\|+\sqrt k)^5}\left(\frac{1}{\sqrt{\varepsilon_k} (1+\|x+y\|)} + \frac{1}{1+\|x\|^2}\right), \end{align*} using also similar computations as in the proof of Lemma \ref{lem.prep.123} for the last inequality. It follows that, with an implicit constant independent of $y$, $$\sum_{x\in \mathbb{Z}^5} \mathbb{P}[\tau_1\le \tau_2<\infty,\, S_{k-\varepsilon_k} = x] \lesssim \frac{1}{\sqrt{k\varepsilon_k}}.$$ On the other hand, by Lemmas \ref{lem.prep.123} and \ref{lem.simplehit}, \begin{align*}
\mathbb{P}[\tau_2\le \tau_1<\infty,\, S_{k-\varepsilon_k} = x] & \lesssim \frac{1}{( \| x \| +\sqrt k)^5} \left(\mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\} }{1+\|\widetilde S_{\tau_2} - x\|}\right] + \frac{\mathbb{P}[\tau_2<\infty]}{\sqrt{\varepsilon_k} }\right) \\
& \lesssim \frac{1}{(\|x\|+\sqrt k)^5}\left(\frac{1}{\sqrt{\varepsilon_k} (1+\|x+y\|)} + \frac{1}{1+\|x\|^2}\right), \end{align*} and it follows as well that $$\sum_{x\in \mathbb{Z}^5} \mathbb{P}[\tau_2\le \tau_1<\infty, S_{k-\varepsilon_k} = x] \lesssim \frac{1}{\sqrt{k\varepsilon_k}}.$$ Coming back to \eqref{cov.230}, we deduce that \begin{equation}\label{phi230.cond}
\mathbb{E}\left[\varphi_{2,3}^0 \mid S_{k+\varepsilon_k} -S_{k-\varepsilon_k}= y\right] \lesssim \frac{1}{\sqrt{k\varepsilon_k}},
\end{equation}
with an implicit constant independent of $y$. Together with \eqref{tildepsi0}, this gives \begin{align*}
& \mathbb{E}\left[\varphi_{2,3}^0|Z_k\psi_0-\widetilde Z_k\widetilde \psi_0|\right] \\
& = \sum_{y\in \mathbb{Z}^5} \mathbb{E}\left[\varphi_{2,3}^0 \mid S_{k+\varepsilon_k} -S_{k-\varepsilon_k}= y\right]\cdot \mathbb{E}\left[|Z_k\psi_0-\widetilde Z_k\widetilde \psi_0|{\text{\Large $\mathfrak 1$}}\{S_{k+\varepsilon_k}-S_{k-\varepsilon_k} = y\}\right] \\ &\lesssim \frac{1}{\sqrt{k\varepsilon_k\varepsilon'_k}}. \end{align*} Thus at this point we have shown that $$ \operatorname{Cov}(Z_0\varphi_{2,3},Z_k\psi_0) = \operatorname{Cov}(Z_0\varphi_{2,3}^0,\widetilde Z_k\widetilde \psi_0) +\mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\cdot \log(\frac k{\varepsilon_k}) + \frac{1}{\sqrt{k\varepsilon_k\varepsilon'_k}}\right).$$ Note next that \begin{align*}
\operatorname{Cov}(Z_0\varphi_{2,3}^0,\widetilde Z_k\widetilde \psi_0) & = \sum_{y,z\in \mathbb{Z}^5} \mathbb{E}\left[Z_0\varphi_{2,3}^0 \mid S_{k+\varepsilon_k} - S_{k-\varepsilon'_k} =y\right] \\
& \times \mathbb{E}\left[\widetilde Z_k \widetilde \psi_0{\text{\Large $\mathfrak 1$}}\{S_{k+\varepsilon'_k}-S_{k-\varepsilon'_k}=z\}\right] \left(p_{\varepsilon_k-\varepsilon'_k}(y-z) - p_{\varepsilon_k+\varepsilon'_k}(y)\right). \end{align*} Moreover, one can show exactly as \eqref{phi230.cond} that uniformly in $y$, $$\mathbb{E}\left[\varphi_{2,3}^0 \mid S_{k+\varepsilon_k} - S_{k-\varepsilon'_k} =y\right] \lesssim \frac{1}{\sqrt{k\varepsilon_k}}.$$ Therefore by using also \eqref{Sn.large} and Theorem \ref{LCLT}, we see that \begin{align*}
& |\operatorname{Cov}(Z_0\varphi_{2,3}^0,\widetilde Z_k\widetilde \psi_0) | \\
& \lesssim \frac{1}{\sqrt{k\varepsilon_k}} \sum_{\|y\| \le \varepsilon_k^{\frac 6{10}}} \sum_{\|z\|\le \varepsilon_k^{\frac 1{10}}\cdot \sqrt{\varepsilon'_k} } p_{2\varepsilon'_k}(z)\, |\overline p_{\varepsilon_k-\varepsilon'_k}(y-z) - \overline p_{\varepsilon_k+\varepsilon'_k}(y)| + \frac 1{\varepsilon_k \sqrt{k} }. \end{align*} Now straightforward computations show that for $y$ and $z$ as in the two sums above, one has for some constant $c>0$,
$$|\overline p_{\varepsilon_k-\varepsilon'_k}(y-z) - \overline p_{\varepsilon_k+\varepsilon'_k}(y)| \lesssim \left(\frac{\|z\|}{\sqrt{\varepsilon_k}} + \frac{\varepsilon'_k}{\varepsilon_k}\right)\overline p_{\varepsilon_k-\varepsilon'_k}(cy),$$ at least when $\varepsilon'_k\le \sqrt{\varepsilon_k}$, as will be assumed in a moment.
Using also that $\sum_z \|z\|p_{2\varepsilon'_k}(z) \lesssim \sqrt{\varepsilon'_k}$, we deduce that
$$ |\operatorname{Cov}(Z_0\varphi_{2,3}^0,\widetilde Z_k\widetilde \psi_0) | = \mathcal{O}\left(\frac{\sqrt{\varepsilon'_k}}{\varepsilon_k \sqrt k}\right).$$ This concludes the proof as we choose $\varepsilon'_k = \lfloor \sqrt{\varepsilon_k} \rfloor$. \end{proof}
We can now quickly give the proof of Proposition \ref{prop.phipsi.1}.
\begin{proof}[Proof of Proposition \ref{prop.phipsi.1}] \underline{Case $1\le i<j\le 3$.} First note that $Z_0\varphi_1$ and $Z_k\psi_3$ are independent, so only the cases $i=1$ and $j=2$, or $i=2$ and $j=3$ are at stake. Let us only consider the case $i=2$ and $j=3$, since the other one is entirely similar. Define, in the same fashion as in the proof of Proposition \ref{prop.ij0}, $$\varphi_2^0:= \mathbb{P}\left[H^+_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}=\infty,\, H^+_{\mathcal{R}[\varepsilon_k+1,k-\varepsilon_k]}<\infty\mid S\right]. $$ One has by using independence and translation invariance,
$$\mathbb{E}[|\varphi_2-\varphi_2^0| \psi_3] \le \mathbb{P}[H_{\mathcal{R}[k-\varepsilon_k,k]}<\infty]\cdot \mathbb{P}[H_{\mathcal{R}[\varepsilon_k,\infty)}<\infty] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}},$$ which entails $$\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_3) = \operatorname{Cov}(Z_0\varphi_2^0,Z_k\psi_3) + \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\right) \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}},$$ since $Z_0\varphi_2^0$ and $Z_k\psi_3$ are independent.
\underline{Case $1\le j\le i\le 3$}. Here one can use entirely similar arguments as those from the proof of Lemma \ref{lem.ijl}, and we therefore omit the details. \end{proof}
\section{Proof of Proposition \ref{prop.phi0}} We need to estimate here the covariances $\operatorname{Cov}(Z_0\varphi_i, Z_k\psi_0)$ and $\operatorname{Cov}(Z_0\varphi_0, Z_k\psi_{4-i})$, for all $1\le i \le 3$.
\underline{Case $i=1$.} It suffices to observe that $Z_0\varphi_1$ and $Z_k\psi_0$ are independent, as are $Z_0\varphi_0$ and $Z_k\psi_3$. Thus their covariances are equal to zero.
\underline{Case $i=2$.} We first consider the covariance between $Z_0\varphi_2$ and $Z_k\psi_0$, which is easier to handle. Define $$\widetilde \varphi_2:=\mathbb{P}\left[H^+_{\mathcal{R}[-\varepsilon_k,k-\varepsilon_k-1]}=\infty,\, H^+_{\mathcal{R}[k-\varepsilon_k,k]}<\infty\mid S\right],$$ and note that $Z_0(\varphi_2 - \widetilde \varphi_2)$ is independent of $Z_k\psi_0$. Therefore $$\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_0) = \operatorname{Cov}(Z_0\widetilde \varphi_2, Z_k\psi_0).$$ Then we decompose $\psi_0$ as $\psi_0=\psi_0^1-\psi_0^2$, where $$\psi_0^1:=\mathbb{P}_{S_k}[H^+_{\mathcal{R}[k,k+\varepsilon_k]}=\infty\mid S],\ \psi_0^2:=\mathbb{P}_{S_k}[H^+_{\mathcal{R}[k,k+\varepsilon_k]}=\infty, H^+_{\mathcal{R}[k-\varepsilon_k,k-1]}<\infty \mid S].$$ Using now that $Z_k\psi_0^1$ is independent of $Z_0\widetilde \varphi_2$ we get $$\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_0) =- \operatorname{Cov}(Z_0\widetilde \varphi_2, Z_k\psi_0^2).$$ Let $(\widetilde S_n)_{n\ge 0}$ and $(\widehat S_n)_{n\ge 0}$ be two independent walks starting from the origin, and define $$\tau_1:=\inf \{n\ge 0 : S_{k-n}\in \widetilde \mathcal{R}[1,\infty)\},\ \tau_2:=\inf\{n\ge 0 : S_{k-n}\in (S_k + \widehat \mathcal{R}[1,\infty))\}.$$ We decompose \begin{align*} & \operatorname{Cov}(Z_0\widetilde \varphi_2, Z_k\psi_0^2)\\ & = \mathbb{E}\left[Z_0\widetilde \varphi_2Z_k\psi_0^2{\text{\Large $\mathfrak 1$}}\{\tau_1\le \tau_2\}\right] + \mathbb{E}\left[Z_0\widetilde \varphi_2Z_k\psi_0^2{\text{\Large $\mathfrak 1$}}\{\tau_1> \tau_2\}\right] - \mathbb{E}[Z_0\widetilde \varphi_2] \mathbb{E}[Z_k\psi_0^2]. \end{align*} We bound the first term on the right-hand side simply by the probability of the event $\{\tau_1\le \tau_2\le \varepsilon_k\}$, which we treat later, and for the difference between the last two terms, we use that
$$\left| {\text{\Large $\mathfrak 1$}}\{\tau_2<\tau_1\le \varepsilon_k\} - \sum_{i=0}^{\varepsilon_k} {\text{\Large $\mathfrak 1$}}\left\{\tau_2=i,\, H^+_{\mathcal{R}[k-\varepsilon_k,k-i-1]}<\infty\right\}\right| \le {\text{\Large $\mathfrak 1$}}\{\tau_1\le \tau_2\le \varepsilon_k\}.$$ Using also that the event $\{\tau_2=i\}$ is independent of $(S_n)_{n\le k-i}$, we deduce that \begin{align*}
& |\operatorname{Cov}(Z_0\widetilde \varphi_2, Z_k\psi_0^2)| \\
&\le 2\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] + \sum_{i=0}^{\varepsilon_k} \mathbb{P}[\tau_2=i] \left|\mathbb{P}\left[H^+_{\mathcal{R}[k-\varepsilon_k,k-i]}<\infty \right] - \mathbb{P}\left[H^+_{\mathcal{R}[k-\varepsilon_k,k]}<\infty \right] \right|\\
& \le 2\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] + \sum_{i=0}^{\varepsilon_k} \mathbb{P}[\tau_2=i] \cdot \mathbb{P}\left[H^+_{\mathcal{R}[k-i,k]}<\infty \right] \\
& \stackrel{\eqref{lem.hit.3}}{\le} 2\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] + \frac{C}{k^{3/2}}\sum_{i=0}^{\varepsilon_k} i \mathbb{P}[\tau_2=i] \\
& \le 2\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] + \frac{C}{k^{3/2}}\sum_{i=0}^{\varepsilon_k} \mathbb{P}[\tau_2\ge i]\\
& \stackrel{\eqref{lem.hit.3}}{\le} 2\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] + \frac{C}{k^{3/2}}\sum_{i=0}^{\varepsilon_k} \frac{1}{\sqrt i} \le 2\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] + \frac{C\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} Then it amounts to bound the probability of $\tau_1$ being smaller than $\tau_2$: \begin{align*} &\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] =\sum_{x,y\in \mathbb{Z}^5} \sum_{i=0}^{\varepsilon_k} \mathbb{P}\left[\tau_1=i, i\le \tau_2\le \varepsilon_k, S_k=x,\, S_{k-i} = x+y\right]\\
\le & \sum_{x,y\in \mathbb{Z}^5} \sum_{i=0}^{\varepsilon_k} \mathbb{P}\left[\tau_1=i, S_{k-i}=x+y, (x+\widehat \mathcal{R}_\infty) \cap \mathcal{R}[k-\varepsilon_k,k-i]\neq \varnothing, S_k=x\right]\\ \le & \sum_{x,y\in \mathbb{Z}^5} \sum_{i=0}^{\varepsilon_k} \mathbb{P}\left[\widetilde \mathcal{R}_\infty \cap (x+\mathcal{R}[0,i-1])=\varnothing, S_i=y,\, x+y\in \widetilde \mathcal{R}_\infty\right]\\ & \qquad \times \mathbb{P}\left[ \widehat \mathcal{R}_\infty \cap (y+ \mathcal{R}[0,\varepsilon_k-i])\neq \varnothing, S_{k-i}=-x-y\right], \end{align*} using invariance by time reversal of $S$, and where we stress the fact that in the first probability in the last line, $\mathcal{R}$ and $\widetilde \mathcal{R}$ are two independent ranges starting from the origin. Now the last probability can be bounded using \eqref{Green.hit} and Lemma \ref{lem.prep.123}, which give \begin{align*}
&\mathbb{P}\left[ \widehat \mathcal{R}_\infty \cap (y+ \mathcal{R}[0,\varepsilon_k-i])\neq \varnothing, S_{k-i}=-x-y\right] \le \sum_{j=0}^{\varepsilon_k-i} \mathbb{E}\left[G(S_j+y){\text{\Large $\mathfrak 1$}}\{S_{k-i} = -x-y\}\right]\\ =& \sum_{j=0}^{\varepsilon_k-i} \sum_{z\in \mathbb{Z}^5} p_j(z) G(z+y)p_{k-i-j}(z+x+y) =\sum_{j=k-\varepsilon_k}^{k-i} \sum_{z\in \mathbb{Z}^5} p_j(z) G(z-x)p_{k- i-j}(z-x-y) \\
\lesssim & \frac{1}{(\|x+y\|+\sqrt k)^5}\left(\frac 1{1+\|y\|} + \frac 1{\sqrt k + \|x\|}\right). \end{align*} It follows that \begin{align*}
\mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] \lesssim \sum_{x,y\in \mathbb{Z}^5} \sum_{i=0}^{\varepsilon_k} \frac{G(x+y) p_i(y)}{(\|x+y\| + \sqrt k)^5}\left(\frac 1{1+\|y\|} + \frac 1{\sqrt k+\|x\|}\right), \end{align*} and then standard computations show that \begin{equation}\label{tau12eps} \mathbb{P}[\tau_1\le \tau_2\le \varepsilon_k] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}. \end{equation} Taking all these estimates together proves that $$ \operatorname{Cov}(Z_0\varphi_2,Z_k\psi_0) \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}.$$ We consider now the covariance between $Z_0\varphi_0$ and $Z_k\psi_2$. Here a new problem arises due to the random variable $Z_0$, which does not play the same role as $Z_k$, but one can use similar arguments. In particular the previous proof gives $$\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_2) = -\operatorname{Cov}((1-Z_0)\varphi_0,Z_k\psi_2) + \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\right).$$ Then we decompose as well $\varphi_0=\varphi_0^1 - \varphi_0^2$, with $$\varphi_0^1:=\mathbb{P}[H^+_{\mathcal{R}[k-\varepsilon_k,k]}=\infty\mid S],\ \varphi_0^2:=\mathbb{P}[H^+_{\mathcal{R}[k-\varepsilon_k,k]}=\infty, H^+_{\mathcal{R}[k+1,k+\varepsilon_k]}<\infty \mid S].$$ Using independence we get $$\operatorname{Cov}((1-Z_0)\varphi_0^1,Z_k\psi_2) = \mathbb{E}[\varphi_0^1]\cdot \operatorname{Cov}((1-Z_0),Z_k\psi_2).$$ Then we define in the same fashion as above, $$\widetilde \tau_0:= \inf\{n\ge 1 : S_n=0\}, \ \widetilde \tau_2:= \inf\{n\ge 0 : S_n\in (S_k+\widehat \mathcal{R}[1,\infty))\},$$ with $\widehat \mathcal{R}$ the range of an independent walk starting from the origin. Recall that by definition $1-Z_0 = {\text{\Large $\mathfrak 1$}}\{\widetilde \tau_0 \le \varepsilon_k\}$. 
Thus one can write $$\operatorname{Cov}((1-Z_0),Z_k\psi_2) = \mathbb{E}[Z_k\psi_2 {\text{\Large $\mathfrak 1$}}\{\widetilde \tau_2 \le \widetilde \tau_0\le \varepsilon_k\}] + \mathbb{E}[Z_k\psi_2 {\text{\Large $\mathfrak 1$}}\{\widetilde \tau_0 < \widetilde \tau_2\}] - \mathbb{P}[\widetilde \tau_0\le \varepsilon_k] \mathbb{E}[Z_k\psi_2].$$ On one hand, using \eqref{Green.hit}, the Markov property, and \eqref{exp.Green}, \begin{align*} & \mathbb{E}[Z_k\psi_2 {\text{\Large $\mathfrak 1$}}\{\widetilde \tau_2 \le \widetilde \tau_0\le \varepsilon_k\}] \le \mathbb{P}[\widetilde \tau_2 \le \widetilde \tau_0 \le \varepsilon_k]\le \sum_{y\in \mathbb{Z}^5} \mathbb{P}[\widetilde \tau_2\le \varepsilon_k,\, S_{\widetilde \tau_2} =y] \cdot G(y)\\ & \le \sum_{i=0}^{\varepsilon_k} \mathbb{E}\left[G(S_i-S_k)G(S_i)\right] \le \sum_{i=0}^{\varepsilon_k} \mathbb{E}[G(S_{k-i})]\cdot \mathbb{E}[G(S_i)] \lesssim \frac{1}{k^{3/2}} \sum_{i=0}^{\varepsilon_k} \frac{1}{1+i^{3/2}} \lesssim \frac{1}{k^{3/2}}. \end{align*} On the other hand, similarly as above, \begin{align}\label{tau20eps} \nonumber & \mathbb{E}[Z_k\psi_2 {\text{\Large $\mathfrak 1$}}\{\widetilde \tau_0 < \widetilde \tau_2\}] - \mathbb{P}[\widetilde \tau_0\le \varepsilon_k]\cdot \mathbb{E}[Z_k\psi_2] \\ \nonumber & \le \mathbb{P}[\widetilde \tau_2 \le \widetilde \tau_0 \le \varepsilon_k] + \sum_{i=1}^{\varepsilon_k}\mathbb{P}[\widetilde \tau_0=i] \left(\mathbb{P}\left[(S_k+\widehat \mathcal{R}[1,\infty))\cap \mathcal{R}[i+1,\varepsilon_k]\neq \varnothing\right] - \mathbb{P}[\widetilde \tau_2\le \varepsilon_k]\right)\\ \nonumber &\lesssim \frac{1}{k^{3/2}} + \sum_{i=1}^{\varepsilon_k}\mathbb{P}[\widetilde \tau_0=i] \mathbb{P}[\widetilde \tau_2\le i] \stackrel{\eqref{lem.hit.3}}{\lesssim} \frac{1}{k^{3/2}} + \frac{1}{k^{3/2}}\sum_{i=1}^{\varepsilon_k} i \mathbb{P}[\widetilde \tau_0=i] \\ & \lesssim \frac{1}{k^{3/2}} + \frac{1}{k^{3/2}}\sum_{i=1}^{\varepsilon_k} \mathbb{P}[\widetilde \tau_0\ge i] \stackrel{\eqref{Green.hit}, 
\eqref{Green}}{\lesssim} \frac{1}{k^{3/2}}+ \frac{1}{k^{3/2}}\sum_{i=1}^{\varepsilon_k} \frac{1}{1+i^{3/2}} \lesssim \frac{1}{k^{3/2}}. \end{align} In other terms, we have already shown that
$$|\operatorname{Cov}((1-Z_0)\varphi_0^1,Z_k\psi_2) | \lesssim \frac{1}{k^{3/2}}.$$ The case when $\varphi_0^1$ is replaced by $\varphi_0^2$ is entirely similar. Indeed, we define $$\widetilde \tau_1:=\inf\{n\ge 0 : S_n\in \widetilde \mathcal{R}[1,\infty)\},$$ with $\widetilde \mathcal{R}$ the range of a random walk starting from the origin, independent of $S$ and $\widehat \mathcal{R}$. Then we set $\widetilde \tau_{0,1} := \max(\widetilde \tau_0,\widetilde \tau_1)$, and exactly as for \eqref{tau12eps} and \eqref{tau20eps}, one has \begin{equation*} \mathbb{P}[\widetilde \tau_2\le \widetilde \tau_{0,1} \le \varepsilon_k] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}, \end{equation*} and \begin{align*} & \mathbb{E} \left[(1-Z_0) \varphi_0^2 Z_k\psi_2 {\text{\Large $\mathfrak 1$}}\{ \widetilde \tau_{0,1}<\widetilde \tau_2 \} \right] - \mathbb{E}[(1-Z_0) \varphi_0^2]\cdot \mathbb{E}[ Z_k\psi_2] \\ & \le \mathbb{P}[\widetilde \tau_2\le \widetilde \tau_{0,1} \le \varepsilon_k] +\sum_{i=0}^{\varepsilon_k} \mathbb{P}[\widetilde \tau_{0,1}= i] \cdot \mathbb{P}[\widetilde \tau_2\le i] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} Altogether, this gives
$$|\operatorname{Cov}(Z_0\varphi_0,Z_k\psi_2)|\lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}.$$
\underline{Case $i=3$.} We only need to treat the case of the covariance between $Z_0\varphi_3$ and $Z_k\psi_0$, as the other one is entirely similar here. Define $$\widetilde \varphi_3:=\mathbb{P}\left[H^+_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]\cup \mathcal{R}[k+\varepsilon_k+1,\infty)}=\infty,\, H^+_{\mathcal{R}[k,k+\varepsilon_k]}<\infty\mid S\right].$$ The proof of the case $i=2$, already shows that
$$|\operatorname{Cov}(Z_0\widetilde \varphi_3,Z_k\psi_0)|\lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}.$$ Define next $$h_3:=\varphi_3-\widetilde \varphi_3=\mathbb{P}\left[H^+_{\mathcal{R}[-\varepsilon_k,\varepsilon_k]}=\infty,\, H^+_{\mathcal{R}[k+\varepsilon_k+1,\infty)}<\infty\mid S\right].$$ Assume for a moment that $\varepsilon_k\ge k^{\frac{9}{20}}$. We will see later another argument when this condition is not satisfied. Then define $\varepsilon_k':= \lfloor \varepsilon_k^{10/9}/k^{1/9}\rfloor$, and note that one has $\varepsilon'_k\le \varepsilon_k$. Write $\psi_0=\psi'_0+h_0$, with $$\psi_0':= \mathbb{P}\left[H^+_{\mathcal{R}[k-\varepsilon'_k+1,k+\varepsilon'_k-1]}=\infty \mid S\right], $$ and $$h_0:= \mathbb{P}\left[H^+_{\mathcal{R}[k-\varepsilon'_k+1,k+\varepsilon'_k-1]}=\infty, \, H^+_{\mathcal{R}[k-\varepsilon_k,k-\varepsilon'_k]\cup\mathcal{R}[k+\varepsilon'_k,k+\varepsilon_k] } <\infty \mid S\right]. $$ Define also $$Z'_k:={\text{\Large $\mathfrak 1$}}\{S_\ell\neq S_k,\text{ for all }\ell=k+1,\dots,k+\varepsilon'_k-1\}.$$ One has $$\operatorname{Cov}(Z_0h_3,Z_k\psi_0) = \operatorname{Cov}(Z_0h_3,Z'_k\psi'_0) + \operatorname{Cov}(Z_0h_3,Z'_kh_0) +\operatorname{Cov}(Z_0h_3,(Z_k-Z'_k)\psi_0) .$$ For the last of the three terms, one can simply notice that, using the Markov property at the first return time to $S_k$ (for the walk $S$), and then \eqref{Green.hit}, \eqref{Green}, and \eqref{lem.hit.3}, we get \begin{align*} \mathbb{E}[h_3(Z_k-Z'_k)] & \le \mathbb{E}[Z_k-Z'_k] \times \mathbb{P}[\widetilde \mathcal{R}_\infty \cap \mathcal{R}\left[k,\infty)\neq \varnothing\right] \\ & \lesssim \frac{1}{(\varepsilon'_k)^{3/2}\sqrt k} \lesssim \frac 1{\varepsilon_k^{5/3}k^{1/3} }\lesssim \frac{1}{k^{\frac{13}{12}}}, \end{align*} using our hypothesis on $\varepsilon_k$ for the last inequality. As a consequence, it also holds
$$|\operatorname{Cov}(Z_0h_3,(Z_k-Z'_k)\psi_0)| \lesssim k^{-\frac{13}{12}}. $$ Next we write \begin{equation}\label{cov.i3} \operatorname{Cov}(Z_0h_3,Z'_kh_0) = \sum_{x,y\in \mathbb{Z}^5} (p_{k-2\varepsilon_k}(x-y) - p_k(x)) H_1(y)H_2(x), \end{equation} where $$H_1(y) := \mathbb{E}\left[Z'_kh_0 {\text{\Large $\mathfrak 1$}}\{S_{k+\varepsilon_k} - S_{k-\varepsilon_k} = y\}\right], \ H_2(x) := \mathbb{E}\left[Z_0h_3 \mid S_{k+\varepsilon_k} - S_{\varepsilon_k} = x\right].$$ Define $r_k:=(k/\varepsilon'_k)^{1/8}$. By using symmetry and translation invariance, \begin{align*}
&\sum_{\|y\|\ge \sqrt{\varepsilon_k} r_k} H_1(y) \le \mathbb{P}\left[H_{\mathcal{R}[-\varepsilon_k,-\varepsilon'_k]\cup\mathcal{R}[\varepsilon'_k,\varepsilon_k]}<\infty, \, \|S_{\varepsilon_k} - S_{-\varepsilon_k}\|\ge \sqrt{\varepsilon_k} r_k\right]\\
&\le 2 \mathbb{P}\left[H_{\mathcal{R}[\varepsilon'_k,\varepsilon_k]}<\infty, \, \|S_{\varepsilon_k} \|\ge \sqrt{\varepsilon_k} \frac{r_k}{2}\right] + 2\mathbb{P}\left[H_{\mathcal{R}[\varepsilon'_k,\varepsilon_k]}<\infty, \, \|S_{-\varepsilon_k} \|\ge \sqrt{\varepsilon_k} \frac{r_k}{2} \right] \\
& \stackrel{\eqref{lem.hit.3}, \, \eqref{Sn.large}}{\le} 2 \mathbb{P}\left[H_{\mathcal{R}[\varepsilon'_k,\varepsilon_k]}<\infty, \, \|S_{\varepsilon_k} \|\ge \sqrt{\varepsilon_k} \frac{r_k}2\right] + \frac{C}{\sqrt{\varepsilon'_k} r_k^5}. \end{align*} Considering the first probability on the right-hand side, define $\tau$ as the first hitting time (for $S$), after time $\varepsilon'_k$, of another independent walk $\widetilde S$ (starting from the origin). One has \begin{align*}
& \mathbb{P}\left[H_{\mathcal{R}[\varepsilon'_k,\varepsilon_k]}<\infty, \, \|S_{\varepsilon_k} \|\ge \sqrt{\varepsilon_k} \frac{r_k}2\right] \\
& \le \mathbb{P}[\|S_\tau\| \ge \sqrt{\varepsilon_k} \frac{r_k}4,\, \tau\le \varepsilon_k] + \mathbb{P}[\|S_{\varepsilon_k} - S_\tau\| \ge \sqrt{\varepsilon_k} \frac{r_k}4,\, \tau\le \varepsilon_k] . \end{align*} Using then the Markov property at time $\tau$, we deduce with \eqref{lem.hit.3} and \eqref{Sn.large},
$$\mathbb{P}[\|S_{\varepsilon_k} - S_\tau\| \ge \sqrt{\varepsilon_k} \frac{r_k}4,\, \tau\le \varepsilon_k] \lesssim \frac{1}{\sqrt{\varepsilon'_k} r_k^5}.$$ Likewise, using the Markov property at the first time when the walk exits the ball of radius $\sqrt{\varepsilon_k} r_k/4$, and applying then \eqref{Sn.large} and \eqref{lem.hit.2}, we get as well
$$ \mathbb{P}[\|S_\tau\| \ge \sqrt{\varepsilon_k} \frac{r_k}4,\, \tau\le \varepsilon_k] \lesssim \frac{1}{\sqrt{\varepsilon_k} r_k^6}.$$ Furthermore, for any $y$, one has
$$\sum_{x\in \mathbb{Z}^5} p_{k-2\varepsilon_k}(x-y) H_2(x) \stackrel{\eqref{pn.largex}, \eqref{lem.hit.2}}{\lesssim} \sum_{x\in \mathbb{Z}^5} \frac{1}{(1+\|x+y\|)(\|x\|+\sqrt{k})^5} \lesssim \frac{1}{\sqrt k},$$ with an implicit constant, which is uniform in $y$ (and the same holds with $p_k(x)$ instead of $p_{k-2\varepsilon_k}(x-y)$).
Similarly, define $r'_k:=(k/\varepsilon'_k)^{\frac 1{10}}$. One has for any $y$, with $\|y\|\le \sqrt{\varepsilon_k}r_k$,
$$\sum_{\|x\|\ge \sqrt{k}r_k'} p_{k-2\varepsilon_k}(x-y) H_2(x) \stackrel{\eqref{Sn.large}, \eqref{lem.hit.2}}{\lesssim} \frac{1}{\sqrt{k}(r'_k)^6}.$$ Therefore coming back to \eqref{cov.i3}, and using that by \eqref{lem.hit.2}, $\sum_y H_1(y)\lesssim 1/\sqrt{\varepsilon'_k}$, we get \begin{align*} & \operatorname{Cov}(Z_0h_3,Z'_kh_0) \\
& = \sum_{\|x\|\le \sqrt{k}r'_k} \sum_{\|y\|\le \sqrt{\varepsilon_k}r_k} (p_{k-2\varepsilon_k}(x-y) - p_k(x)) H_1(y)H_2(x) + \mathcal{O}\left(\frac{1}{\sqrt{k\varepsilon'_k}(r'_k)^6} + \frac{1}{\sqrt{k\varepsilon'_k} r_k^5}\right)\\
& = \sum_{\|x\|\le \sqrt{k}r'_k} \sum_{\|y\|\le \sqrt{\varepsilon_k}r_k} (p_{k-2\varepsilon_k}(x-y) - p_k(x)) H_1(y)H_2(x) + \mathcal{O}\left(\frac{(\varepsilon'_k)^{\frac{1}{10} } }{k^{\frac{11}{10}}}\right). \end{align*} Now we use the fact $H_1(y) = H_1(-y)$. Thus the last sum is equal to half of the following: \begin{align*}
&\sum_{\|x\|\le \sqrt{k}r'_k} \sum_{\|y\|\le \sqrt{\varepsilon_k}r_k} (p_{k-2\varepsilon_k}(x-y) + p_{k-2\varepsilon_k}(x+y) - 2p_k(x)) H_1(y)H_2(x) \\
&\stackrel{\text{Theorem }\ref{LCLT},\eqref{lem.hit.2}}{\le} \sum_{\|x\|\le \sqrt{k} r'_k} \sum_{\|y\|\le \sqrt{\varepsilon_k}r_k} (\overline p_{k-2\varepsilon_k}(x-y) + \overline p_{k-2\varepsilon_k}(x+y) - 2\overline p_k(x)) H_1(y)H_2(x) \\ & \qquad + \mathcal{O}\left(\frac{(r'_k)^4}{k^{3/2}\sqrt{\varepsilon'_k}} \right), \end{align*} (with an additional factor $2$ in front in case of a bipartite walk). Note that the error term above is $\mathcal{O}(k^{-11/10})$, by definition of $r'_k$. Moreover, straightforward computations show that for any $x$ and $y$ as in the sum above,
$$|\overline p_{k-2\varepsilon_k}(x-y) + \overline p_{k-2\varepsilon_k}(x+y) - 2\overline p_k(x)| \lesssim \left(\frac{\|y\|^2+\varepsilon_k}{k} \right) \overline p_k(cx). $$ In addition one has (with the notation as above for $\tau$), \begin{align*}
&\sum_{y\in \mathbb{Z}^5} \|y\|^2\, H_1(y) \le 2 \mathbb{E}\left[\|S_{\varepsilon_k} - S_{-\varepsilon_k}\|^2 {\text{\Large $\mathfrak 1$}}\{\tau\le \varepsilon_k\}\right] \\
& \le 4 \mathbb{E}[\|S_{\varepsilon_k}\|^2] \mathbb{P}[\tau\le \varepsilon_k] + 4 \mathbb{E}\left[\|S_{\varepsilon_k}\|^2 {\text{\Large $\mathfrak 1$}}\{\tau\le \varepsilon_k\}\right] \\
& \stackrel{\eqref{Sn.large}, \eqref{lem.hit.3}}{\lesssim} \frac{ \varepsilon_k}{\sqrt{\varepsilon'_k}} + \mathbb{E}\left[\|S_{\tau}\|^2 {\text{\Large $\mathfrak 1$}}\{\tau\le \varepsilon_k\}\right] + \mathbb{E}\left[\|S_{\varepsilon_k}-S_{\tau}\|^2 {\text{\Large $\mathfrak 1$}}\{\tau\le \varepsilon_k\}\right] \\
& \stackrel{\eqref{Sn.large}, \eqref{lem.hit.3}}{\lesssim} \frac{ \varepsilon_k}{\sqrt{\varepsilon'_k}} + \sum_{r\ge \sqrt{\varepsilon_k}} r \mathbb{P}\left[\|S_{\tau}\|\ge r, \, \tau\le \varepsilon_k\right] \stackrel{\eqref{Sn.large}, \eqref{lem.hit.2}}{\lesssim} \frac{ \varepsilon_k}{\sqrt{\varepsilon'_k}}, \end{align*} using also the Markov property in the last two inequalities (at time $\tau$ for the first one, and at the exit time of the ball of radius $r$ for the second one). Altogether, this gives
$$|\operatorname{Cov}(Z_0h_3,Z'_kh_0)| \lesssim \frac{\varepsilon_k }{k^{3/2}\sqrt{\varepsilon'_k}} + \frac{(\varepsilon'_k)^{\frac{1}{10} } }{k^{\frac{11}{10}}} \lesssim \frac{(\varepsilon_k)^{\frac{1}{9} } }{k^{ \frac{10}{9} }}. $$ In other words, for any sequence $(\varepsilon_k)_{k\ge 1}$, such that $\varepsilon_k\ge k^{9/20}$, one has $$\operatorname{Cov}(Z_0h_3,Z_k\psi_0) = \operatorname{Cov}(Z_0h_3,Z'_k\psi'_0) + \mathcal{O}\left(\frac{(\varepsilon_k)^{\frac{1}{9} } }{k^{ \frac{10}{9} }} + \frac{1}{k^{\frac{13}{12}} }\right).$$ One can then iterate the argument with the sequence $(\varepsilon'_k)$ in place of $(\varepsilon_k)$, and (after at most a logarithmic number of steps), we are left to consider a sequence $(\varepsilon_k)$, satisfying $\varepsilon_k\le k^{9/20}$. In this case, we use similar arguments as above. Define $\widetilde H_1(y)$ as $H_1(y)$, but with $Z_k\psi_0$ instead of $Z'_kh_0$ in the expectation, and choose $r_k:= \sqrt{k/\varepsilon_k}$, and $r'_k=k^{\frac 1{10}}$. Then we obtain exactly as above, \begin{align*} &\operatorname{Cov}(Z_0h_3,Z_k\psi_0) \\
& = \sum_{\|x\|\le \sqrt k r'_k} \sum_{\|y\|\le \sqrt{k}} ( p_{k-2\varepsilon_k}(x-y) - p_k(x)) \widetilde H_1(y) H_2(x) + \mathcal{O}\left(\frac{1}{r_k^5\sqrt k} + \frac 1{(r'_k)^6 \sqrt k}\right)\\
& = \sum_{\|x\|\le \sqrt k r'_k} \sum_{\|y\|\le \sqrt{k}} ( \overline p_{k-2\varepsilon_k}(x-y) - \overline p_k(x)) \widetilde H_1(y) H_2(x) + \mathcal{O}\left(\frac{1}{k^{\frac {11}{10}}} \right)\\ & \lesssim \frac{\varepsilon_k}{k^{3/2}} + \frac{1}{k^{\frac {11}{10}}} \lesssim \frac{1}{k^{\frac {21}{20}}}, \end{align*} which concludes the proof of the proposition.
\section{Intersection of two random walks and proof of Theorem C} \label{sec.thmC} In this section we prove a general result, which will be needed for proving Proposition \ref{prop.phipsi.2}, and which also gives Theorem C as a corollary. First we introduce some general condition for a function $F:\mathbb{Z}^d\to \mathbb{R}$, namely: \begin{equation}\label{cond.F} \begin{array}{c} \text{there exists a constant $C_F>0$, such that }\\
|F(y) - F(x)|\le C_F\, \frac{\|y-x\|}{1+\|y\|} \cdot |F(x)|, \quad \text{for all }x,y\in \mathbb{Z}^d. \end{array} \end{equation} Note that any function satisfying \eqref{cond.F} is automatically bounded. Observe also that this condition is satisfied by functions which are equivalent to $c/\mathcal J(x)^\alpha$, for some constants $\alpha\in [0,1]$, and $c>0$.
On the other hand, it is not satisfied by functions which are $o(1/\| x\|)$, as $\|x\|\to \infty$.
However, this is fine, since the only two cases that will be of interest for us here are when either $F$ is constant, or when $F(x)$ is of order $1/\|x\|$. Now for a general function $F:\mathbb{Z}^d\to \mathbb{R}$, we define for $r>0$,
$$\overline F(r) := \sup_{r\le \|x\|\le r+1} |F(x)|.$$ Then, set $$I_F(r):= \frac {\log (2+r)}{r^{d-2}}\int_0^r s\cdot \overline F(s)\, ds + \int_r^\infty \frac{\overline F(s)\log(2+s)}{s^{d-3} }\, ds,$$ and, with $\chi_d(r):= 1+(\log (2+r)){\text{\Large $\mathfrak 1$}}_{\{d=5\}}$, $$ J_F(r):= \frac{\chi_d(r)}{r^{d-2}} \int_0^r \overline F(s)\, ds + \int_r^\infty \frac{\overline F(s)\chi_d(s)}{s^{d-2}}\, ds.$$
\begin{theorem}\label{thm.asymptotic} Let $(S_n)_{n\ge 0}$ and $(\widetilde S_n)_{n\ge 0}$ be two independent random walks on $\mathbb{Z}^d$, $d\ge 5$, starting respectively from the origin and some $x\in \mathbb{Z}^d$. Let $\ell \in \mathbb{N}\cup \{\infty\}$, and define $$\tau:=\inf\{n\ge 0\, : \, \widetilde S_n \in \mathcal{R}[0,\ell] \}.$$ There exists $\nu\in (0,1)$, such that for any $F:\mathbb{Z}^d\to \mathbb{R}$, satisfying \eqref{cond.F}, \begin{align}\label{thm.asymp.formula} \mathbb{E}_{0,x}\left[F(\widetilde S_\tau) {\text{\Large $\mathfrak 1$}}\{\tau<\infty\}\right] = &\ \frac {\gamma_d}{\kappa}\cdot \mathbb{E}\left[\sum_{i=0}^\ell G(S_i-x)F(S_i)\right] \\
\nonumber & + \mathcal{O}\left(\frac{I_F(\|x\|)}{(\ell\wedge \|x\|)^\nu} + (\ell\wedge \|x\|)^\nu J_F(\|x\|)\right), \end{align} where $\gamma_d$ is as in \eqref{LLN.cap}, and $\kappa$ is some positive constant given by $$\kappa:=\mathbb{E}\left[\Big(\sum_{n\in \mathbb{Z}} G(S_n)\Big)\cdot \mathbb{P}\left[H^+_{\overline \mathcal{R}_\infty}=+\infty \mid \overline \mathcal{R}_\infty \right]\cdot {\text{\Large $\mathfrak 1$}}\{S_n\neq 0,\, \forall n\ge 1\}\right],$$ with $(S_n)_{n\in\mathbb{Z}}$ a two-sided walk starting from the origin and $\overline \mathcal{R}_\infty := \{S_n\}_{n\in \mathbb{Z}}$. \end{theorem}
\begin{remark}\emph{Note that when $F(x) \sim c/\mathcal J(x)^{\alpha}$, for some constants $\alpha \in [0,1]$ and $c>0$, then $I_F(r)$ and $J_F(r)$ are respectively of order $1/r^{d-4+ \alpha}$, and $1/r^{d-3+\alpha}$ (up to logarithmic factors), while one could show that
$$\mathbb{E}\left[\sum_{i=0}^\ell G(S_i-x)F(S_i)\right] \sim \frac{c'}{\mathcal J(x)^{d-4+\alpha}}, \quad \text{as }\|x\|\to \infty\text{ and }\ell/\|x\|^2\to \infty,$$ for some other constant $c'>0$ (see below for a proof at least when $\ell = \infty$ and $\alpha=0$). Therefore in these cases Theorem \ref{thm.asymptotic} provides a true equivalent for the term on the left-hand side of \eqref{thm.asymp.formula}. } \end{remark}
\begin{remark} \emph{This theorem strengthens Theorem C in two aspects: on one hand it allows to consider functionals of the position of one of the two walks at its hitting time of the other path, and on the other hand it also allows to consider only a finite time horizon for one of the two walks (not mentioning the fact that it gives additionally some bound on the error term). Both these aspects will be needed later (the first one in the proof of Lemma \ref{lem.var.2}, and the second one in the proofs of Lemmas \ref{lem.var.3} and \ref{lem.var.4}). } \end{remark}
Given this result one obtains Theorem C as a corollary. To see this, we first recall an asymptotic result on the Green's function:
in any dimension $d\ge 5$, under our hypotheses on $\mu$, there exists a constant $c_d>0$, such that as $\|x\|\to \infty$, \begin{equation}\label{Green.asymp}
G(x)= \frac{c_d}{\mathcal J(x)^{d-2}} + \mathcal{O}(\|x\|^{1-d}). \end{equation} This result is proved in \cite{Uchiyama98} under only the hypothesis that $X_1$ has a finite $(d-1)$-th moment (we refer also to Theorem 4.3.5 in \cite{LL}, for a proof under the stronger hypothesis that $X_1$ has a finite $(d+1)$-th moment). One also needs the following elementary fact: \begin{lemma}\label{Green.convolution}
There exists a positive constant $c$, such that as $\|x\|\to \infty$,
$$\sum_{y\in \mathbb{Z}^d\setminus\{0,x\}} \frac{1}{\mathcal J(y)^{d-2}\cdot \mathcal J(y-x)^{d-2}} = \frac{c}{\mathcal J(x)^{d-4}} + \mathcal{O}\left(\frac 1{\|x\|^{d-3}}\right).$$ \end{lemma} \begin{proof} The proof follows by first an approximation by an integral, and then a change of variables. More precisely, letting $u:=x/\mathcal J(x)$, one has \begin{align*} & \sum_{y\in \mathbb{Z}^d\setminus\{0,x\} } \frac{1}{\mathcal J(y)^{d-2} \mathcal J(y-x)^{d-2}} = \ \int_{\mathbb{R}^d} \frac{1}{\mathcal J(y)^{d-2} \mathcal J(y-x)^{d-2}} \, dy +
\mathcal{O}(\|x\|^{3-d}) \\
& = \frac 1{\mathcal J(x)^{d-4}} \int_{\mathbb{R}^d}\frac{1}{\mathcal J(y)^{d-2} \mathcal J(y-u)^{d-2} }\, dy + \mathcal{O}(\|x\|^{3-d}), \end{align*} and it suffices to observe that by rotational invariance, the last integral is independent of $x$. \end{proof}
\begin{proof}[Proof of Theorem C] The result follows from Theorem \ref{thm.asymptotic}, by taking $F\equiv 1$ and $\ell = \infty$, and then by using \eqref{Green.asymp} together with Lemma \ref{Green.convolution}. \end{proof}
It remains now to prove Theorem \ref{thm.asymptotic}. For this, we need some technical estimates that we gather in Lemma \ref{lem.thm.asymptotic} below. Since we believe this is not the most interesting part, we defer its proof to the end of this section.
\begin{lemma}\label{lem.thm.asymptotic} Assume that $F$ satisfies \eqref{cond.F}. Then \begin{enumerate} \item There exists a constant $C>0$, such that for any $x\in \mathbb{Z}^d$, \begin{equation}\label{lem.thm.asymp.1}
\sum_{i=0}^\infty \mathbb{E}\left[\left(\sum_{j=0}^\infty G(S_j-S_i)\frac{\|S_j-S_i\|}{1+\|S_j\|}\right) \cdot |F(S_i)| G(S_i-x)\right] \le C J_F(\|x\|). \end{equation} \item There exists $C>0$, such that for any $R>0$, and any $x\in \mathbb{Z}^d$, \begin{equation}\label{lem.thm.asymp.2}
\sum_{i=0}^\infty \mathbb{E}\left[\left(\sum_{|j-i|\ge R}G(S_j-S_i)\right) |F(S_i)| G(S_i-x)\right] \le \frac{C}{R^{\frac{d-4}2}}\cdot I_F(\|x\|), \end{equation} \begin{equation}\label{lem.thm.asymp.2bis}
\sum_{i=0}^\infty \mathbb{E}\left[\left(\sum_{|j-i|\ge R}G(S_j-S_i) |F(S_j)|\right) G(S_i-x)\right] \le \frac{C}{R^{\frac{d-4}2} }\cdot I_F(\|x\|) . \end{equation} \end{enumerate} \end{lemma}
One also needs some standard results from (discrete) potential theory. If $\Lambda$ is a nonempty finite subset of $\mathbb{Z}^d$, containing the origin, we define
$$\text{rad}(\Lambda):=1+\sup_{x\in \Lambda} \|x\|,$$ and also consider for $x\in \Lambda$, $$e_\Lambda(x):=\mathbb{P}_x[H_\Lambda^+=\infty], \quad \text{and} \quad \overline e_\Lambda(x):=\frac{e_\Lambda(x)}{\mathrm{Cap}(\Lambda)}.$$ The measure $\overline e_\Lambda$ is sometimes called the harmonic measure of $\Lambda$ from infinity, due to the next result. \begin{lemma}\label{lem.potential}
There exists a constant $C>0$, such that for any finite subset $\Lambda\subseteq \mathbb{Z}^d$ containing the origin, and any $y\in \mathbb{Z}^d$, with $\| y\|>2\text{rad}(\Lambda)$, \begin{eqnarray}\label{cap.hitting}
\mathbb{P}_y[H_\Lambda<\infty] \le C\cdot \frac{\mathrm{Cap}(\Lambda)}{1+\|y\|^{d-2}}. \end{eqnarray} Furthermore, for any $x\in \Lambda$, and any $y\in \mathbb{Z}^d$, \begin{eqnarray}\label{harm.hit}
\Big| \mathbb{P}_y[S_{H_\Lambda}=x\mid H_\Lambda<\infty] - \overline e_\Lambda(x)\Big| \le C\cdot \frac{\text{rad}(\Lambda)}{1+\|y\|}. \end{eqnarray} \end{lemma} This lemma is proved in \cite{LL} for finite range random walks. The proof extends to our setting, but a little care is needed, so we shall give some details at the end of this section. Assuming this, one can now give the proof of our main result.
\begin{proof}[Proof of Theorem \ref{thm.asymptotic}] The proof consists in computing the quantity \begin{equation}\label{eq.A} A:= \mathbb{E}_{0,x}\left[\sum_{i=0}^\ell \sum_{j=0}^\infty {\text{\Large $\mathfrak 1$}}\{S_i = \widetilde S_j\}F(S_i)\right], \end{equation} in two different ways\footnote{This idea goes back to the seminal paper of Erd\H{o}s and Taylor \cite{ET60}, even though it was not used properly there and was corrected only a few years later by Lawler, see \cite{Law91}.}. On one hand, by integrating with respect to the law of $\widetilde S$ first, we obtain \begin{equation}\label{A.first} A= \mathbb{E}\left[\sum_{i=0}^\ell G(S_i-x)F(S_i)\right]. \end{equation} On the other hand, the double sum in \eqref{eq.A} is nonzero only when $\tau$ is finite. Therefore, using also the Markov property at time $\tau$, we get \begin{align*} A &= \mathbb{E}_{0,x}\left[\left(\sum_{i=0}^\ell \sum_{j=0}^\infty {\text{\Large $\mathfrak 1$}}\{S_i = \widetilde S_j\}F(S_i)\right) {\text{\Large $\mathfrak 1$}}\{\tau<\infty\} \right]\\ & = \sum_{i=0}^\ell \mathbb{E}_{0,x}\left[ \left( \sum_{j=0}^\ell G(S_j-S_i) F(S_j)\right)Z_i^\ell \cdot {\text{\Large $\mathfrak 1$}}\{\tau<\infty, \widetilde S_\tau = S_i\} \right], \end{align*} where we recall that $Z_i^\ell = {\text{\Large $\mathfrak 1$}}\{S_j \neq S_i,\, \forall j=i+1,\dots,\ell\}$. The computation of this last expression is divided into a few steps.
\underline{Step 1.} Set $$B:= \sum_{i=0}^\ell \mathbb{E}_{0,x}\left[ \left( \sum_{j=0}^\ell G(S_j-S_i) \right)F(S_i)Z_i^\ell \cdot {\text{\Large $\mathfrak 1$}}\{\tau<\infty, \widetilde S_\tau = S_i\} \right],$$ and note that, \begin{align*}
& |A-B| \stackrel{\eqref{cond.F}}{\le} C_F\, \sum_{i=0}^\ell \mathbb{E}_{0,x}\left[ \left( \sum_{j=0}^\ell G(S_j-S_i)\frac{\|S_j-S_i\|}{(1+\|S_j\|)} \right)|F(S_i)| {\text{\Large $\mathfrak 1$}}\{S_i\in \widetilde \mathcal{R}_\infty\} \right]\\
&\stackrel{\eqref{Green.hit}}{\le} C_F\, \sum_{i=0}^\ell \mathbb{E}\left[ \left( \sum_{j=0}^\ell G(S_j-S_i)\frac{\|S_j-S_i\|}{(1+\|S_j\|)} \right)|F(S_i)| G(S_i-x)\right] \stackrel{\eqref{lem.thm.asymp.1}}{=} \mathcal{O}\left(J_F(\|x\|) \right). \end{align*}
\underline{Step 2.} Consider now some positive integer $R$, and define $$D_R:= \sum_{i=0}^{\ell} \mathbb{E}_{0,x}\left[ \mathcal G_{i,R,\ell} F(S_i)Z_i^\ell \cdot {\text{\Large $\mathfrak 1$}}\{\tau<\infty, \widetilde S_\tau = S_i\} \right],$$ with $\mathcal G_{i,R,\ell}:= \sum_{j=(i-R)\vee 0}^{(i+R)\wedge \ell} G(S_j-S_i)$. One has
$$|B-D_R| \stackrel{\eqref{Green.hit}}{\le} \sum_{i=0}^{\ell} \mathbb{E}\left[ \left(\sum_{|j-i|>R} G(S_j-S_i)\right) |F(S_i)|G(S_i-x)\right]
\stackrel{\eqref{lem.thm.asymp.2}}{\lesssim} \frac{ I_F(\|x\|)}{R^{\frac{d-4}2}}.$$
\underline{Step 3.} Let $R$ be an integer larger than $2$, and such that $\ell\wedge \|x\|^2 \ge R^6$. Let $M:=\lfloor \ell / R^5\rfloor -1$, and define for $0\le m\le M$, $$I_m:=\{mR^5+R^3,\dots, (m+1)R^5-R^3\}, \text{ and } J_m:=\{mR^5,\dots, (m+1)R^5-1\}.$$ Define further $$E_R := \sum_{m=0}^M \sum_{i\in I_m} \mathbb{E}_{0,x}\left[ \mathcal G_{i,R} F(S_i)Z_i^\ell \cdot {\text{\Large $\mathfrak 1$}}\{\tau<\infty, \widetilde S_\tau = S_i\} \right],$$ with $\mathcal G_{i,R} := \sum_{j=i-R}^{i+R} G(S_j-S_i)$. One has, bounding $\mathcal G_{i,R}$ by $(2R+1)G(0)$, \begin{align*}
|D_R - E_R| & \le (2R+1) G(0)\\
& \times \left\{ \sum_{m=0}^M \sum_{i\in J_m\setminus I_m} \mathbb{E} \left[|F(S_i)| G(S_i-x) \right] + \sum_{i=(M+1) R^5}^\ell \mathbb{E} \left[|F(S_i)| G(S_i-x) \right] \right\}, \end{align*} with the convention that the last sum is zero when $\ell$ is infinite. Using $\ell \ge R^6$, we get \begin{align*}
&\sum_{i=(M+1) R^5}^\ell \mathbb{E} \left[|F(S_i)| G(S_i-x) \right] \le \sum_{z\in \mathbb{Z}^d} |F(z)| G(z-x) \sum_{i=(M+1)R^5}^{(M+2)R^5} p_i(z) \\
&\stackrel{\eqref{pn.largex},\, \eqref{Green}}{\lesssim} \frac{R^5}{\ell} \sum_{z\in \mathbb{Z}^d} \frac{|F(z)|}{(1+ \|z-x\|^{d-2})(1+\|z\|^{d-2})} \lesssim \frac{R^5}{ \ell} \cdot I_F(\|x\|). \end{align*}
Likewise, since $\|x\|^2\ge R^6$, \begin{align}\label{final.step3}
\nonumber & \sum_{m=0}^M \sum_{i\in J_m\setminus I_m} \mathbb{E} \left[|F(S_i)| G(S_i-x) \right] \le \sum_{z\in \mathbb{Z}^d} \frac{|F(z)|}{1+\|z-x\|^{d-2}} \sum_{m=0}^M \sum_{i\in J_m\setminus I_m} p_i(z)\\
\nonumber &\stackrel{\eqref{Green}}{\lesssim} \frac{1}{1+\|x\|^{d-2}} \sum_{\|z\|^2\le R^5} \frac{1}{1+\|z\|^{d-2}} \\
\nonumber & \qquad + \sum_{\|z\|^2 \ge R^5} \frac{|F(z)|}{1+\|z-x\|^{d-2}} \sum_{m=0}^M \sum_{i\in J_m\setminus I_m} \left(\frac{{\text{\Large $\mathfrak 1$}}\{i\le \|z\|^2\}}{1+\|z\|^d} + \frac{{\text{\Large $\mathfrak 1$}}\{i\ge \|z\|^2\}}{i^{d/2}}\right) \\
& \lesssim \frac{R^5}{1+\|x\|^{d-2}} + \frac{1}{R^2} \cdot I_F(\|x\|), \end{align} using for the last inequality that the proportion of indices $i$ which are not in one of the $I_m$'s is of order $1/R^2$.
\underline{Step 4.} For $0\le m \le M+1$, set $$\mathcal{R}^{(m)}:=\mathcal{R}[mR^5,(m+1)R^5-1], \quad \text{and}\quad \tau_m:= \inf\{ n \ge 0 \, :\, \widetilde S_n \in \mathcal{R}^{(m)}\}.$$ Then let $$F_R := \sum_{m=0}^M \sum_{i\in I_m} \mathbb{E}_{0,x}\left[ \mathcal G_{i,R} F(S_i)Z_i^\ell \cdot {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty, \widetilde S_{\tau_m} = S_i\} \right].$$ Since by definition $\tau\le \tau_m$, for any $m$, one has for any $i\in I_m$, \begin{align*}
& |\mathbb{P}_{0,x}[\tau<\infty, \widetilde S_{\tau} = S_i\mid S] - \mathbb{P}_{0,x}[\tau_m<\infty, \widetilde S_{\tau_m} = S_i\mid S] | \\
& \le \mathbb{P}_{0,x}[\tau<\tau_m<\infty, \widetilde S_{\tau_m}=S_i\mid S]
\le \sum_{j\notin J_m} \mathbb{P}_{0,x}[\tau<\tau_m<\infty, \widetilde S_{\tau} = S_j, \widetilde S_{\tau_m} = S_i\mid S]\\ &\stackrel{\eqref{Green.hit}}{\le} \sum_{j\notin J_m} G(S_j-x) G(S_i-S_j). \end{align*} Therefore, bounding again $\mathcal G_{i,R}$ by $(2R+1)G(0)$, we get \begin{align*}
|E_R-F_R| & \lesssim R \, \sum_{m=0}^M \sum_{i\in I_m} \mathbb{E}\left[\left(\sum_{j\notin J_m} G(S_i-S_j) G(S_j-x)\right)\cdot |F(S_i)| \right]\\
&\lesssim R \, \sum_{i=0}^\infty \mathbb{E}\left[\left(\sum_{j\, :\, |j-i|\ge R^3} G(S_i-S_j) G(S_j-x)\right)\cdot |F(S_i)| \right] \\
& \stackrel{\eqref{lem.thm.asymp.2bis}}{\lesssim} \frac{1}{ R^{3\frac{d-4}{2}-1}} \cdot I_F(\|x\|)\lesssim \frac{1}{\sqrt R} \cdot I_F(\|x\|). \end{align*}
\underline{Step 5.} For $m\ge 0$ and $i\in I_m$, define $$e_i^m := \mathbb{P}_{S_i} \left[H_{\mathcal{R}^{(m)}}^+=\infty\mid S\right], \quad \text{and}\quad \overline e_i^m:= \frac{e_i^m}{\mathrm{Cap}(\mathcal{R}^{(m)}) }.$$ Then let $$H_R: = \sum_{m=0}^M \sum_{i\in I_m} \mathbb{E}_{0,x}\left[ \mathcal G_{i,R}F(S_i)Z_i^\ell \overline e_i^m \cdot {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty\} \right].$$ Applying \eqref{harm.hit} to the sets $\Lambda_m:=\mathcal{R}^{(m)}-S_{i_m}$, we get for any $m\ge 0$, and any $i\in I_m$, \begin{eqnarray}\label{harm.application}
\left| \mathbb{P}_{0,x}[\widetilde S_{\tau_m} = S_i\mid \tau_m<\infty, S] - \overline e_i^m \right| \le C\, \frac{\text{rad}(\Lambda_m)}{1+\|x-S_{i_m}\|}. \end{eqnarray} By \eqref{cap.hitting}, it also holds \begin{align}\label{hit.cap.application}
\nonumber \mathbb{P}_{0,x}[\tau_m<\infty \mid S ] & \le \frac{ CR^5}{1+\|x-S_{i_m}\|^{d-2}} + {\text{\Large $\mathfrak 1$}}\{\|x-S_{i_m} \| \le 2\text{rad}(\Lambda_m)\}\\
& \lesssim \frac{ R^5+\text{rad}(\Lambda_m)^{d-2}}{1+\|x-S_{i_m}\|^{d-2}}, \end{align}
using that $\mathrm{Cap}(\Lambda_m)\le |\Lambda_m| \le R^5$. Note also that by \eqref{norm.Sn} and Doob's $L^p$-inequality (see Theorem 4.3.3 in \cite{Dur}), one has for any $1< p\le d$, \begin{equation}\label{Doob} \mathbb{E}[\text{rad}(\Lambda_m)^p] = \mathcal{O}(R^{\frac{5p}{2}}). \end{equation} Therefore, \begin{align*}
& |F_R - H_R| \stackrel{\eqref{harm.application}}{\lesssim} R
\sum_{m=0}^M \sum_{i\in I_m} \mathbb{E}_{0,x}\left[ \frac{|F(S_i)|\cdot \text{rad}(\Lambda_m)}{1+\|x-S_{i_m}\|} {\text{\Large $\mathfrak 1$}}\{ \tau_m<\infty\}\right] \\ &\stackrel{\eqref{cond.F}}{\lesssim} R^6
\sum_{m=0}^M \mathbb{E}_{0,x}\left[ \frac{|F(S_{i_m})|\cdot \text{rad}(\Lambda_m)^2}{1+\|x-S_{i_m}\|} {\text{\Large $\mathfrak 1$}}\{ \tau_m<\infty\}\right] \\
& \stackrel{\eqref{hit.cap.application}, \eqref{Doob}}{\lesssim} R^{6+\frac{5d}{2}} \sum_{m=0}^M \mathbb{E}\left[ \frac{|F(S_{i_m})|}{1+\|x-S_{i_m}\|^{d-1}}\right] \lesssim R^{6+\frac{5d}{2}}\, \sum_{z\in \mathbb{Z}^d} \frac{|F(z)|G(z)}{1+\|x-z\|^{d-1}} \\
& \stackrel{\eqref{Green}}{\lesssim} \frac{R^{6+\frac{5d}{2}}}{1+\|x\|} \cdot I_F(\|x\|). \end{align*}
\underline{Step 6.} Let $$K_R:= \sum_{m=0}^M \sum_{i\in I_m} \mathbb{E} \left[ \mathcal G_{i,R} Z_i^\ell \overline e_i^m\right] \cdot \mathbb{E}\left[F(S_{i_m}) {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty\} \right].$$ One has, using the Markov property and a similar argument as in the previous step, \begin{align*}
& |K_R-H_R| \stackrel{\eqref{cond.F}}{\lesssim}
R \sum_{m=0}^M \sum_{i\in I_m} \mathbb{E}_{0,x}\left[\frac{|F(S_{i_m})|\cdot (1+\|S_i-S_{i_m}\|^2)}{1+\|S_{i_m}\|}\cdot {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty\} \right] \\
& \stackrel{\eqref{hit.cap.application}, \eqref{Sn.large}}{\lesssim} R^{6+\frac{5d}{2}} \sum_{m=0}^M \mathbb{E}\left[ \frac{|F(S_{i_m})|}{(1+\|S_{i_m}\|)(1+\|x-S_{i_m}\|^{d-2})}\right] \lesssim R^{6+\frac{5d}{2}}\cdot J_F(\|x\|). \end{align*}
\underline{Step 7.} Finally we define $$\widetilde A:= \frac{\kappa}{\gamma_d} \cdot \mathbb{E}_{0,x}\left[F(\widetilde S_\tau) {\text{\Large $\mathfrak 1$}}\{\tau<\infty\} \right].$$ We recall that one has (see Lemmas 2.1 and 2.2 in \cite{AS19}), \begin{eqnarray}\label{easy.variance} \mathbb{E}\left[\left(\mathrm{Cap}(\mathcal{R}_n) - \gamma_d n\right)^2\right] = \mathcal{O}(n(\log n)^2). \end{eqnarray} It also holds for any nonempty subset $\Lambda\subseteq \mathbb{Z}^d$, \begin{eqnarray}\label{min.cap}
\mathrm{Cap}(\Lambda) \ge c|\Lambda|^{1-\frac 2d}\ge c|\Lambda|^{3/5}, \end{eqnarray}
using $d\ge 5$ for the second inequality (while the first inequality follows from \cite[Proposition 6.5.5]{LL} applied to the constant function equal to $c/|\Lambda|^{2/d}$, with $c>0$ small enough). As a consequence, for any $m\ge 0$ and any $i\in I_m$, \begin{align*}
& \left|\mathbb{E} \left[ \mathcal G_{i,R} Z_i^\ell \overline e_i^m\right] - \frac{\mathbb{E} \left[ \mathcal G_{i,R} Z_i^\ell e_i^m\right]}{\gamma_dR^5} \right| \lesssim
\frac{1}{R^4} \mathbb{E}\left[ \frac {|\mathrm{Cap}(\mathcal{R}^{(m)}) - \gamma_dR^5|}{\mathrm{Cap}(\mathcal{R}^{(m)})}\right] \\
\stackrel{\eqref{easy.variance}}{\lesssim} & \frac{\log R}{R^{3/2}} \mathbb{E}\left[\frac 1{\mathrm{Cap}(\mathcal{R}^{(m)})^2}\right]^{1/2}
\stackrel{\eqref{min.cap}}{\lesssim} \frac{\log R}{R^{3/2}} \left(\frac{\mathbb{P}[\mathrm{Cap}(\mathcal{R}^{(m)}) \le \gamma_d R^5/2]}{R^6} + \frac{1}{R^{10}}\right)^{1/2} \\
\stackrel{\eqref{easy.variance}}{\lesssim} & \frac{\log R}{R^{3/2}} \left(\frac{(\log R)^2}{R^{11}} + \frac{1}{R^{10}}\right)^{1/2} \lesssim \frac{1}{R^6}. \end{align*} Next, recall that $Z(i)={\text{\Large $\mathfrak 1$}}\{S_j\neq S_i,\, \forall j>i\}$, and note that
$$|\mathbb{E} \left[ \mathcal G_{i,R} Z_i^\ell e_i^m\right] - \mathbb{E} \left[ \mathcal G_{i,R} Z(i) e_i^m\right]| \stackrel{\eqref{Green.hit},\, \eqref{Green}}{\lesssim} \frac{1}{R^{7/2}}.$$ Moreover, letting $e_i:=\mathbb{P}_{S_i}[H^+_{\overline \mathcal{R}_\infty} = \infty\mid \overline \mathcal{R}_\infty]$ (where we recall $\overline \mathcal{R}_\infty$ is the range of a two-sided random walk), one has
$$ |\mathbb{E} \left[ \mathcal G_{i,R} Z(i) e_i^m\right] - \mathbb{E} \left[ \mathcal G_{i,R} Z(i) e_i \right]| \stackrel{\eqref{lem.hit.3}}{\lesssim} \frac{1}{\sqrt R}, $$
$$| \mathbb{E} \left[ \mathcal G_{i,R} Z(i) e_i \right] - \kappa| \le 2\, \mathbb{E}\left[\sum_{j>R} G(S_j)\right] \stackrel{\eqref{exp.Green}}{\lesssim}\frac 1{\sqrt R}.$$ Altogether this gives for any $m\ge 0$ and any $i\in I_m$,
$$\left|\mathbb{E} \left[ \mathcal G_{i,R} Z_i^\ell \overline e_i^m\right] - \frac{\kappa}{\gamma_dR^5}\right|\lesssim \frac{1}{R^{5+\frac 12}},$$ and thus for any $m\ge 0$,
$$\left| \left(\sum_{i\in I_m} \mathbb{E} \left[ \mathcal G_{i,R} Z_i^\ell \overline e_i^m\right] \right) - \frac{\kappa}{\gamma_d}\right|\lesssim \frac{1}{\sqrt R}.$$ Now, a similar argument as in Step 6 shows that
$$\sum_{m=0}^M \left|\mathbb{E}_{0,x}\left[F(S_{i_m}) {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty\} \right] - \mathbb{E}_{0,x}\left[F(\widetilde S_{\tau_m}) {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty\} \right]\right| \lesssim R^{\frac{5d}2} J_F(\|x\|). $$ Furthermore, using that \begin{align*} F(\widetilde S_\tau){\text{\Large $\mathfrak 1$}}\{\tau<\infty\} & = \sum_{m=0}^{M+1} F(\widetilde S_{\tau_m}){\text{\Large $\mathfrak 1$}}\{\tau=\tau_m<\infty\}\\ & =\sum_{m=0}^{M+1} F(\widetilde S_{\tau_m})({\text{\Large $\mathfrak 1$}} \{\tau_m<\infty\} - {\text{\Large $\mathfrak 1$}}\{\tau<\tau_m<\infty\}), \end{align*} (with the convention that the term corresponding to index $M+1$ is zero when $\ell =\infty$) we get, \begin{align*}
& \left| \sum_{m=0}^M \mathbb{E}_{0,x}\left[F(\widetilde S_{\tau_m}) {\text{\Large $\mathfrak 1$}}\{\tau_m<\infty\} \right] - \mathbb{E}_{0,x}\left[F(\widetilde S_{\tau}) {\text{\Large $\mathfrak 1$}}\{\tau<\infty\} \right]\right| \\
& \lesssim \mathbb{P}_{0,x}[\tau_{M+1}<\infty] + \sum_{m=0}^M \mathbb{E}_{0,x}\left[|F(\widetilde S_{\tau_m})| {\text{\Large $\mathfrak 1$}}\{\tau<\tau_m<\infty\}\right]. \end{align*} Using \eqref{hit.cap.application}, \eqref{Doob} and \eqref{exp.Green.x}, we get
$$\mathbb{P}_{0,x}[\tau_{M+1}<\infty] \lesssim \frac{R^{\frac{5(d-2)}2}}{1+\|x\|^{d-2} }.$$ On the other hand, for any $m\ge 0$, \begin{align*}
&\mathbb{E}\left[|F(\widetilde S_{\tau_m})| {\text{\Large $\mathfrak 1$}}\{\tau<\tau_m<\infty\}\right] \le \sum_{j\in J_m} \sum_{i\notin J_m} \mathbb{E}\left[|F(S_j)| G(S_i-S_j)G(S_i-x)\right]\\
& \le \sum_{j\in I_m} \sum_{|j-i|>R^3} \mathbb{E}\left[|F(S_j)| G(S_i-S_j)G(S_i-x)\right] \\
& \qquad + \sum_{j\in J_m\setminus I_m} \sum_{i\notin J_m} \mathbb{E}\left[|F(S_j)|G(S_i-S_j)G(S_i-x)\right]. \end{align*} The first sum is handled as in Step 4. Namely, \begin{align*}
& \sum_{m=0}^M \sum_{j\in I_m} \sum_{|j-i|>R^3} \mathbb{E}\left[|F(S_j)| G(S_i-S_j)G(S_i-x)\right] \\
& \le \sum_{j\ge 0} \sum_{|j-i|>R^3} \mathbb{E}\left[|F(S_j)| G(S_i-S_j)G(S_i-x)\right] \stackrel{\eqref{lem.thm.asymp.2bis}}{\lesssim} \frac{I_F(\|x\|)}{R^{3/2}} . \end{align*} Similarly, defining $\widetilde J_m:=\{mR^5,\dots, mR^5+R\} \cup \{(m+1)R^5-R,\dots,(m+1)R^5-1\}$, one has, \begin{align*}
&\sum_{m=0}^M \sum_{j\in J_m\setminus I_m} \sum_{i\notin J_m} \mathbb{E}\left[|F(S_j)|G(S_i-S_j)G(S_i-x)\right] \\
& \le \sum_{m=0}^M \sum_{j\in J_m\setminus I_m} \sum_{|i-j|>R} \mathbb{E}\left[|F(S_j)|G(S_i-S_j)G(S_i-x)\right] \\
& \quad +\sum_{m=0}^M \sum_{j\in J_m\setminus I_m} \sum_{i\notin J_m,\, |i-j|\le R} \mathbb{E}\left[|F(S_j)|G(S_i-S_j)G(S_i-x)\right] \\
& \stackrel{\eqref{lem.thm.asymp.2bis}, \eqref{cond.F}}{\lesssim} \frac{I_F(\|x\|)}{\sqrt R} + \sum_{m=0}^M \sum_{j\in J_m\setminus I_m} \sum_{i\notin J_m,\, |i-j|\le R} \mathbb{E}\left[|F(S_i)|G(S_i-x)\right]\\
& \lesssim \frac{ I_F(\|x\|)}{\sqrt R} + R \sum_{m= 0}^M \sum_{i \in \widetilde J_m} \mathbb{E}\left[|F(S_i)|G(S_i-x)\right] \lesssim \frac{I_F(\|x\|)}{\sqrt R} + \frac{R^5}{1+\|x\|^{d-2}}, \end{align*} using for the last inequality the same argument as in \eqref{final.step3}. Note also that
$$\mathbb{E}[|F(\widetilde S_\tau)|{\text{\Large $\mathfrak 1$}}\{\tau<\infty\}] \stackrel{\eqref{lem.hit.1}}{\le} \sum_{i\ge 0} \mathbb{E}[|F(S_i)|G(S_i-x)]\lesssim I_F(\|x\|). $$ Therefore, putting all pieces together yields
$$|K_R - \widetilde A| \lesssim \frac{I_F(\|x\|)}{\sqrt{R}} + R^{\frac{5d}2}\cdot J_F(\| x\|) + \frac{R^{\frac{5(d-2)}{2}}}{1+\|x\|^{d-2}}.$$
\underline{Step 8.}
Altogether the previous steps show that for any $R$ large enough, any $\ell \ge 1$, and any $x\in\mathbb{Z}^d$, satisfying $\ell\wedge \|x\|^2 \ge R^6$,
$$|A-\widetilde A| \lesssim \left(\frac{1}{\sqrt{R}} + \frac{R^{6+\frac{5d}{2}}}{1+\|x\|}\right) \cdot I_F(\|x\|) + \frac{R^{\frac{5(d-2)}{2}}}{1+\|x\|^{d-2}} + R^{6+\frac{5d}{2}}\cdot J_F(\|x\|). $$
The proof of the theorem follows by taking for $R$ a sufficiently small power of $\|x\|\wedge \ell$, and observing that
for any function $F$ satisfying \eqref{cond.F}, one has $\liminf_{\|z\|\to \infty} \|z\|\cdot |F(z)|>0$, and thus also $I_F(\|x\|) \ge \frac{c}{1+\|x\|^{d-3}}$. \end{proof}
It now remains to give the proofs of Lemmas \ref{lem.thm.asymptotic} and \ref{lem.potential}.
\begin{proof}[Proof of Lemma \ref{lem.thm.asymptotic}] We start with the proof of \eqref{lem.thm.asymp.1}. Recall the definition of $\chi_d$ given just above Theorem \ref{thm.asymptotic}. One has for any $i\ge 0$, \begin{align*}
&\mathbb{E}\left[\sum_{j= i+1}^\infty G(S_j-S_i) \frac{\|S_j-S_i\|}{1+\|S_j\|} \mid S_i \right] \stackrel{\eqref{Green}}{\lesssim}
\mathbb{E}\left[\sum_{j= i+1}^\infty \frac{1}{(1+\|S_j - S_i\|^{d-3})(1+\|S_j\|)} \mid S_i \right] \\
& \lesssim \sum_{z\in \mathbb{Z}^d} G(z) \frac{1}{(1+\|z\|^{d-3})(1+\|S_i+z\|)}\stackrel{\eqref{Green}}{\lesssim} \frac{\chi_d(\|S_i\|)}{1+\|S_i\|}, \end{align*} and moreover, \begin{align}\label{lem.FG}
\nonumber &\sum_{i=0}^\infty \mathbb{E}\left[\frac{|F(S_i)|\chi_d(\|S_i\|)}{1+\|S_i\|}G(S_i-x)\right] = \sum_{z\in \mathbb{Z}^d} G(z) \frac{|F(z)|\chi_d(\|z\|)}{1+\|z\|} G(z-x) \\
\nonumber \stackrel{\eqref{Green}}{\lesssim}& \frac{ \chi_d(\|x\|)}{1+\|x\|^{d-2}} \sum_{\|z\|\le \frac{\|x\|}{2}} \frac{|F(z)|}{1+\|z\|^{d-1}} + \sum_{\|z\|\ge \frac{\|x\|}{2}} \frac{|F(z)|\chi_d(\|z\|)}{1+\|z\|^{2d-3}} \\
\nonumber & \qquad + \frac{\chi_d(\|x\|)}{1+\|x\|^{d-1}} \sum_{\|z-x\|\le \frac{\|x\|}{2}} \frac{|F(z)|}{1+\|z-x\|^{d-2}} \\
\stackrel{\eqref{cond.F}}{\lesssim} & J_F(\|x\|/2) + \frac{|F(x)|\chi_d(\|x\|)}{1+\|x\|^{d-3}}\lesssim J_F(\|x\|), \end{align} where the last inequality follows from the fact that by \eqref{cond.F},
$$ \int_{\|x\|/2}^{\|x\|} \frac{\overline F(s)\chi_d(s)}{s^{d-2}} \, ds\, \asymp\, \frac{|F(x)|\chi_d(\|x\|)}{1+\|x\|^{d-3}}\, \asymp\, \frac{\chi_d(\|x\|)}{1+\|x\|^{d-2}} \int_{\|x\|/2}^{\|x\|} \overline F(s)\, ds.$$ Thus
$$\sum_{i=0}^\infty \sum_{j=i+1}^\infty \mathbb{E}\left[G(S_j-S_i) \frac{\|S_j-S_i\|}{1+\|S_j\|} |F(S_i)| G(S_i-x) \right] = \mathcal{O}(J_F(\|x\|)).$$ On the other hand, for any $j\ge 0$, \begin{align}\label{lem.FG.2}
\nonumber &\mathbb{E}\left[\sum_{i= j+1}^\infty G(S_j-S_i) \|S_j-S_i\| \cdot |F(S_i)|G(S_i-x) \mid S_j \right] \\
\nonumber \stackrel{\eqref{Green}}{\lesssim} &
\sum_{i=j+1}^\infty \mathbb{E}\left[ \frac{|F(S_i)|G(S_i-x)}{1+\|S_j - S_i\|^{d-3}} \mid S_j \right]
\stackrel{\eqref{Green}}{\lesssim} \sum_{z\in \mathbb{Z}^d} \frac{|F(S_j+z)| G(S_j + z-x)}{1+\|z\|^{2d-5}}\\ \nonumber \stackrel{\eqref{cond.F}, \eqref{Green}}{\lesssim} & \sum_{z\in \mathbb{Z}^d}
\frac{|F(S_j)|}{(1+\|z\|^{2d-5})(1+\|S_j+z-x\|^{d-2})} + \frac{1}{1+\|S_j\|^{2d-5}}\sum_{\|u\|\le \|S_j\|}\frac{|F(u)| }{1+\|u-x\|^{d-2}} \\
\nonumber \stackrel{\eqref{cond.F}}{\lesssim} & \frac{|F(S_j)|\chi_d(\|S_j-x\|)}{1+\|S_j-x\|^{d-2}} + \frac{{\text{\Large $\mathfrak 1$}}\{\|S_j\|\le \|x\|/2\}\cdot |F(S_j)|}{(1+\|x\|^{d-2})(1+\|S_j\|^{d-5})} \\
\nonumber & \qquad + \frac{{\text{\Large $\mathfrak 1$}}\{\|S_j\|\ge \|x\|/2\}}{1+\|S_j\|^{2d-5}}\left(|F(x)|(1+\| x\|^2) + |F(S_j)|(1+\|S_j\|^2)\right) \\
\lesssim & \frac{|F(S_j)|\chi_d(\|S_j-x\|)}{1+\|S_j-x\|^{d-2}} + \frac{{\text{\Large $\mathfrak 1$}}\{\|S_j\|\le \|x\|\} |F(S_j)|}{1+\|x\|^{d-2}} + \frac{{\text{\Large $\mathfrak 1$}}\{\|S_j\|\ge \|x\| \} |F(S_j)|}{1+\|S_j\|^{d-2}}, \end{align}
where for the last two inequalities we used that by \eqref{cond.F}, if $\|u\|\le \|v\|$, then $|F(u)|\lesssim | F(v)| (1+\|v\|)/(1+\|u\|)$, and also that $d\ge 5$ for the last one. Moreover, for any $r\ge 0$
$$\sum_{j=0}^\infty \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\|S_j\|\le r\} \cdot |F(S_j)| }{1+\|S_j\|} \right] = \sum_{\|z\| \le r} \frac{G(z)|F(z)|}{1+\|z\|} \stackrel{\eqref{Green}}{=}\mathcal{O} \left(\int_0^{r} \overline F(s)\, ds\right),$$
$$\sum_{j=0}^\infty \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\|S_j\|\ge r\} \cdot |F(S_j)| }{1+\|S_j\|^{d-1}} \right] = \sum_{\|z\| \ge r} \frac{G(z)|F(z)|}{1+\|z\|^{d-1}} \stackrel{\eqref{Green}}{=}\mathcal{O} \left(\int_{r}^\infty \frac{\overline F(s)}{s^{d-2}}\, ds\right).$$ Using also similar computations as in \eqref{lem.FG} to handle the first term in \eqref{lem.FG.2}, we conclude that
$$\sum_{j=0}^\infty \sum_{i=j+1}^\infty \mathbb{E}\left[G(S_j-S_i) \frac{\|S_j-S_i\|}{1+\|S_j\|} |F(S_i)| G(S_i-x) \right] = \mathcal{O}(J_F(\|x\|)),$$ which finishes the proof of \eqref{lem.thm.asymp.1}.
We then move to the proof of \eqref{lem.thm.asymp.2}. First note that for any $i\ge 0$, $$\mathbb{E}\left[\sum_{j\ge i+R} G(S_j-S_i)\mid S_i \right] = \mathbb{E}\left[\sum_{j\ge R} G(S_j) \right] \stackrel{\eqref{exp.Green}}{=} \mathcal{O}\left(R^{\frac{4-d}{2}}\right),$$ and furthermore, \begin{equation}\label{lem.FG.3}
\sum_{i=0}^\infty \mathbb{E}[|F(S_i)|G(S_i-x)] = \sum_{z\in \mathbb{Z}^d} |F(z)| G(z-x)G(z) \stackrel{ \eqref{cond.F},\, \eqref{Green}}{=} \mathcal{O}(I_F(\|x\|)), \end{equation} which together give the desired upper bound for the sum on the set $\{0\le i\le j-R\}$. On the other hand, for any $j\ge 0$, we get as for \eqref{lem.FG.2}, \begin{align*}
&\mathbb{E}\left[\sum_{i\ge j+R} G(S_j-S_i)|F(S_i)|G(S_i-x) \mid S_j \right] = \sum_{z\in \mathbb{Z}^d} G(z)|F(S_j+z)| G(S_j+z-x) G_R(z) \\
& \stackrel{\eqref{Green}}{\lesssim} \frac{1}{R^{\frac{d-4}{2}}} \cdot \sum_{z\in \mathbb{Z}^d} \frac{|F(S_j+z)|}{ (1+\|z\|^d)(1+\|S_j+z-x\|^{d-2})}\\
& \stackrel{ \eqref{cond.F}}{\lesssim} \frac{1}{R^{\frac{d-4}{2}}} \left\{ \sum_{z\in \mathbb{Z}^d} \frac{|F(S_j)|}{ (1+\|z\|^d)(1+\|S_j+z-x\|^{d-2})} + \frac 1{1+\|S_j\|^d} \sum_{\|u\|\le \|S_j\|} \frac{|F(u)|}{1+\|u-x\|^{d-2}}\right\}\\
&\lesssim \frac{1}{R^{\frac{d-4}{2}}} \left\{ \frac{|F(S_j)|\log (2+\|S_j-x\|)}{1+\|S_j-x\|^{d-2}} + \frac{|F(S_j)|}{1+\|x\|^{d-2}+\|S_j\|^{d-2}} \right\}. \end{align*} Then similar computation as above, see e.g. \eqref{lem.FG.3}, give \begin{equation}\label{FSj}
\sum_{j\ge 0} \mathbb{E}\left[\frac{|F(S_j)|\log (2+\|S_j-x\|)}{1+\|S_j-x\|^{d-2}}\right] = \mathcal{O}(I_F(\|x\|)), \end{equation} \begin{equation*}
\sum_{j\ge 0} \mathbb{E}\left[\frac{|F(S_j)|}{1+\|x\|^{d-2} + \|S_j\|^{d-2}}\right] = \mathcal{O}(I_F(\|x\|)), \end{equation*} which altogether proves \eqref{lem.thm.asymp.2}.
The proof of \eqref{lem.thm.asymp.2bis} is entirely similar: on one hand, for any $i\ge 0$, \begin{align*}
& \mathbb{E}\left[\sum_{j= i+R}^\infty G(S_j-S_i) |F(S_j)| \mid S_i \right] \stackrel{\eqref{cond.F}}{\lesssim} \mathbb{E}\left[\sum_{j= i+R}^\infty G(S_j-S_i) \frac{\|S_j-S_i\|}{1+\|S_j\|} \mid S_i \right] |F(S_i)| \\
& \lesssim \sum_{z\in \mathbb{Z}^d} G_R(z) \frac{|F(S_i)|}{(1+\|z\|^{d-3})(1+\|S_i+z\|)} \\
& \lesssim \sum_{z\in \mathbb{Z}^d} \frac{|F(S_i)|}{(R^{\frac{d-2}{2}} + \|z\|^{d-2})(1+\|z\|^{d-3})(1+\|S_i+z\|)} \lesssim \frac{|F(S_i)|}{R^{\frac{d-4}{2}}},
\end{align*} and together with \eqref{FSj}, this yields
$$\sum_{i=0}^\infty \sum_{j=i+R}^\infty \mathbb{E}\left[G(S_j-S_i) |F(S_j)| G(S_i-x) \right]\lesssim \frac{I_F(\|x\|)}{R^{\frac{d-4}{2}} }.$$ On the other hand, for any $j\ge 0$, using \eqref{Green}, \begin{align*}
\mathbb{E}\left[\sum_{i\ge j+R} G(S_j-S_i)G(S_i-x) \mid S_j \right] \lesssim \sum_{z\in \mathbb{Z}^d} \frac{G(S_j + z-x)}{R^{\frac{d-4}{2}}(1+\|z\|^d)}
\lesssim \frac{\log (2+ \|S_j-x\|)}{R^{\frac{d-4}{2}}(1+\|S_j-x\|^{d-2})}, \end{align*} and we conclude the proof of \eqref{lem.thm.asymp.2bis} using \eqref{FSj} again. \end{proof}
\begin{proof}[Proof of Lemma \ref{lem.potential}] The first statement follows directly from \eqref{Green.asymp} and the last-exit decomposition (see Proposition 4.6.4 (c) in \cite{LL}): $$\mathbb{P}_y[H_\Lambda<\infty] = \sum_{x\in \Lambda} G(y-x) e_\Lambda(x).$$
Indeed if $\|y\|>2 \text{rad}(\Lambda)$, using \eqref{Green} we get $G(y-x)\le C\|y\|^{2-d}$, for some constant $C>0$ independent of $x\in \Lambda$, which indeed gives \eqref{cap.hitting}, since by definition $\sum_{x\in \Lambda} e_\Lambda(x) = \mathrm{Cap}(\Lambda)$.
The second statement is more involved. Note that one can always assume $\mathcal J(y)>C\text{rad}(\Lambda)$, for some constant $C>0$, for otherwise the result is trivial. We use similar notation as in \cite{LL}. In particular $G_A(x,y)$ denotes the Green's function restricted to a subset $A\subseteq \mathbb{Z}^d$, that is the expected number of visits to $y$ before exiting $A$ for a random walk starting from $x$, and $H_A(x,y)=\mathbb{P}_x[S_{H_{A^c}}=y]$. We also let $\mathcal{C}_n$ denote the (discrete) ball of radius $n$ for the norm $\mathcal J(\cdot)$. Then exactly as in \cite{LL} (see Lemma 6.3.3 and Proposition 6.3.5 thereof), one can see using \eqref{Green.asymp} that for all $n\ge 1$, \begin{equation}\label{GA}
\left| G_{\mathcal{C}_{n}}(x,w) - G_{\mathcal{C}_{n}}(0,w) \right| \le C \frac{\|x\|}{1+\|w\|} \, G_{\mathcal{C}_{n}}(0,w), \end{equation} for all $x\in \mathcal{C}_{n/4}$, and all $w$ satisfying $2\mathcal J(x) \le \mathcal J(w) \le n/2$. One can then derive an analogous estimate for the (discrete) derivative of $H_{\mathcal{C}_n}$. Define $A_n=\mathcal{C}_n\setminus \mathcal{C}_{n/2}$, and $\rho = H^+_{A_n^c}$. By the last-exit decomposition (see \cite[Lemma 6.3.6]{LL}), one has for $x\in \mathcal{C}_{n/8}$ and $z\notin \mathcal{C}_n$, \begin{align*}
\nonumber & |H_{\mathcal{C}_n}(x,z) - H_{\mathcal{C}_n}(0,z)|\le \sum_{w\in \mathcal{C}_{n/2}}|G_{\mathcal{C}_n}(x,w) - G_{\mathcal{C}_n}(0,w)|\cdot \mathbb{P}_w[S_\rho = z]\\
\nonumber & \stackrel{\eqref{GA}, \eqref{Green}}{\lesssim} \frac{\|x\|}{n}\cdot H_{\mathcal{C}_n}(0,z) + \sum_{2\mathcal J(x)\le \mathcal J(w)\le \frac n4} \frac{\|x\|}{\|w\|^{d-1}} \mathbb{P}_w[S_\rho = z] \\
& \qquad + \sum_{\mathcal J(w) \le 2\mathcal J(x)} \left(\frac 1{1+\|w-x\|^{d-2}} + \frac 1{1+\|w\|^{d-2}}\right)\mathbb{P}_w[S_\rho = z]. \end{align*} Now, observe that for any $y\notin \mathcal{C}_n$, any $w\in \mathcal{C}_{n/4}$, and any $A\subseteq \mathbb{Z}^d$, \begin{align*}
\sum_{z\notin \mathcal{C}_n} G_{A} (y,z) \mathbb{P}_w[S_\rho = z] \lesssim \sum_{z\notin \mathcal{C}_n} \mathbb{P}_w[S_\rho = z] \lesssim \mathbb{P}_w[\mathcal J(S_1)>\frac n2] \lesssim \mathbb{P}[\mathcal J(X_1)> \frac n4] \lesssim n^{-d}, \end{align*} using that by hypothesis $\mathcal J(X_1)$ has a finite $d$-th moment. It follows from the last two displays that \begin{align}\label{potentiel.3}
\sum_{z\notin \mathcal{C}_n} G_{A} (y,z) H_{\mathcal{C}_n}(x,z) = \left(\sum_{z\notin \mathcal{C}_n} G_{A} (y,z) H_{\mathcal{C}_n}(0,z)\right)\left(1+\mathcal{O}\Big(\frac{\|x\|}{n}\Big)\right)+ \mathcal{O}\left( \frac{\|x\|}{n^{d-1}} \right). \end{align}
Now let $\Lambda$ be some finite subset of $\mathbb{Z}^d$ containing the origin, and let $m:=\sup\{\mathcal J(u)\, :\, \|u\|\le 2\text{rad}(\Lambda)\}$. Note that $m=\mathcal{O}(\text{rad}(\Lambda))$, and thus one can assume $\mathcal J(y)>16m$. Set $n:=\mathcal J(y)-1$. Using again the last-exit decomposition and symmetry of the step distribution, we get for any $x\in \Lambda$, \begin{align}\label{potentiel.4} \mathbb{P}_y[S_{H_\Lambda} =x,\, H_\Lambda<\infty] = \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z) \mathbb{P}_x[S_{\tau_n} = z,\, \tau_n<H_\Lambda^+], \end{align} with $\tau_n:= H_{\mathcal{C}_n^c}$. We then write, using the Markov property, \begin{align}\label{potentiel.5} \nonumber \mathbb{P}_x[S_{\tau_n} = z,\, \tau_n<H_\Lambda^+]& =\sum_{x'\in \mathcal{C}_{n/8}\setminus \mathcal{C}_m}\mathbb{P}_x[\tau_m<H_\Lambda^+,\, S_{\tau_m} =x']\cdot \mathbb{P}_{x'}[S_{\tau_n} = z,\, \tau_n<H_\Lambda^+] \\ & \qquad + \mathbb{P}_x\left[\mathcal J(S_{\tau_m}) >\frac n8,\, S_{\tau_n}=z\right], \end{align} with $\tau_m:=H_{\mathcal{C}_m^c}$. Concerning the last term we note that \begin{align}\label{potentiel.5.bis} \nonumber & \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z) \mathbb{P}_x\left[\mathcal J(S_{\tau_m}) >\frac n8,\, S_{\tau_n}=z\right] \\ \nonumber & \stackrel{\eqref{Green.hit}}{\le} \sum_{z\notin \mathcal{C}_n} G(z-y) \left\{\mathbb{P}_x[S_{\tau_m}=z] + \sum_{u\in \mathcal{C}_n\setminus \mathcal{C}_{n/8}} \mathbb{P}_x[S_{\tau_m} =u]G(z-u)\right\}\\
\nonumber & \stackrel{\text{Lemma }\ref{lem.upconvolG}}{\lesssim} \sum_{z\notin \mathcal{C}_n} G(z-y) \mathbb{P}_x[S_{\tau_m}=z] + \sum_{u\in \mathcal{C}_n\setminus \mathcal{C}_{n/8}} \frac{\mathbb{P}_x[S_{\tau_m} =u]}{\|y-u\|^{d-4}} \\ \nonumber & \lesssim \mathbb{P}_x[\mathcal J(S_{\tau_m})>n/8] \lesssim \sum_{u\in \mathcal{C}_m} G_{\mathcal{C}_m}(x,u) \mathbb{P}[\mathcal J(X_1)>\frac n8 - m] \\ & \stackrel{\eqref{Green}}{=}\mathcal{O}\left(\frac{m^2}{n^d}\right) = \mathcal{O}\left(\frac{m}{n^{d-1}}\right), \end{align} applying once more the last-exit decomposition at the penultimate line, and the hypothesis that $\mathcal J(X_1)$ has a finite $d$-th moment at the end. We handle next the sum in the right-hand side of \eqref{potentiel.5}. First note that \eqref{potentiel.3} gives for any $x'\in \mathcal{C}_{n/8}$, \begin{align}\label{potentiel.6} \nonumber \sum_{z\notin \mathcal{C}_n} & G_{\Lambda^c}(y,z)\mathbb{P}_{x'}[S_{\tau_n} = z] = \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(x', z)\\
& = \left(\sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{\|x'\|}n\Big)\right) + \mathcal{O}\left( \frac{\|x'\|}{n^{d-1}} \right). \end{align} Observe then two facts. On one hand, by the last exit-decomposition and symmetry of the step distribution, \begin{equation}\label{pot.1} \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z) \le \sum_{z\notin \mathcal{C}_n} G_{\mathbb{Z}^d\setminus \{0\}}(y,z)H_{\mathcal{C}_n}(0, z) = \mathbb{P}[H_y<\infty] \stackrel{\eqref{Green.hit}, \eqref{Green}}{\lesssim} n^{2-d}, \end{equation} and on the other hand by Proposition 4.6.2 in \cite{LL}, \begin{align}\label{pot.2} & \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z) H_{\mathcal{C}_n}(0, z) \\ \nonumber & = \sum_{z\notin \mathcal{C}_n} G_{\mathbb{Z}^d\setminus \{0\}}(y,z)H_{\mathcal{C}_n}(0, z) + \sum_{z\notin \mathcal{C}_n} \left(G_{\Lambda^c}(y,z) - G_{\mathbb{Z}^d\setminus \{0\}}(y,z)\right) H_{\mathcal{C}_n}(0, z) \\ \nonumber & \ge \mathbb{P}[H_y<\infty] - \mathcal{O}\left(\mathbb{P}_y[H_\Lambda<\infty]\sum_{z\notin \mathcal{C}_n} G(z) H_{\mathcal{C}_n}(0, z)\right) \\ \nonumber & \stackrel{\eqref{hit.ball}}{\ge} \mathbb{P}[H_y<\infty] - \mathcal{O}\left(n^{2-d}\sum_{z\notin \mathcal{C}_n} G(z)^2\right)
\stackrel{\eqref{Green.asymp}}{\ge} \frac{c}{n^{d-2}}. \end{align} This last fact, combined with \eqref{potentiel.6} gives therefore, for any $x'\in \mathcal{C}_{n/8}$, \begin{align}\label{potentiel.6.bis}
\sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)\mathbb{P}_{x'}[S_{\tau_n} = z] = \left(\sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{\|x'\|}n\Big)\right). \end{align} By the Markov property, we get as well \begin{align*}
\sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)\mathbb{P}_{x'}[S_{\tau_n} = z\mid H_\Lambda<\tau_n ] = \left(\sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{m}n\Big)\right), \end{align*} since by definition $\Lambda\subseteq \mathcal{C}_m\subset \mathcal{C}_{n/8}$, and thus \begin{align*}
\sum_{z\notin \mathcal{C}_n} & G_{\Lambda^c}(y,z)\mathbb{P}_{x'}[S_{\tau_n} = z,\, H_\Lambda<\tau_n ] \\
& = \mathbb{P}_{x'}[H_\Lambda<\tau_n]\left( \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{m}n\Big)\right). \end{align*} Subtracting this from \eqref{potentiel.6.bis}, we get for $x'\in \mathcal{C}_{n/8}\setminus \mathcal{C}_m$, \begin{align*}
\sum_{z\notin \mathcal{C}_n} & G_{\Lambda^c}(y,z)\mathbb{P}_{x'}[S_{\tau_n} = z,\, \tau_n<H_\Lambda ] \\
& = \mathbb{P}_{x'}[\tau_n<H_\Lambda]\left( \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{\|x'\|}n\Big)\right), \end{align*} since by \eqref{hit.ball}, one has $\mathbb{P}_{x'}[\tau_n<H_\Lambda]>c$, for some constant $c>0$, for any $x'\notin \mathcal{C}_m$ (note that the optional stopping theorem gives in fact
$\mathbb{P}_{x'}[H_\Lambda<\infty] \le G(x') / \inf_{\|u\|\le \text{rad}(\Lambda)} G(u)$, and thus by using \eqref{Green.asymp}, one can ensure $\mathbb{P}_{x'}[H_\Lambda<\infty]\le 1-c$, by taking $\|x'\|$ large enough, which is always possible). Combining this with \eqref{potentiel.4}, \eqref{potentiel.5} and \eqref{potentiel.5.bis}, and using as well \eqref{pot.1} and \eqref{pot.2}, we get \begin{align*} & \mathbb{P}_y[S_{H_\Lambda} =x,\, H_\Lambda<\infty] = \mathbb{P}_x[\tau_n<H_\Lambda]\left( \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right) \\
& \qquad + \mathcal{O}\left(\frac 1{n^{d-1}}\sum_{x'\in\mathcal{C}_{n/8} \setminus \mathcal{C}_m} \mathbb{P}_x[S_{\tau_m}=x'] \cdot \|x'\| \right) + \mathcal{O}\left(\frac{m}{n^{d-1}}\right)\\
\stackrel{\eqref{hit.ball}}{=} & e_\Lambda(x)\left( \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{m}{n}\Big)\right)+ \mathcal{O}\left(\frac 1{n^{d-1}}\sum_{r=2m}^{n/8} \frac{m^2}{r^{d-1}} \right) + \mathcal{O}\left(\frac{m}{n^{d-1}}\right)\\
= & \ e_\Lambda(x)\left( \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{m}{n}\Big)\right), \end{align*} using the same argument as in \eqref{potentiel.5.bis} for bounding $\mathbb{P}_{x'}[\mathcal J(S_{\tau_m})\ge r]$, when $r\ge 2m$. Summing over $x\in \Lambda$ gives \begin{align*} \mathbb{P}_y[H_\Lambda<\infty]= \mathrm{Cap}(\Lambda) \left( \sum_{z\notin \mathcal{C}_n} G_{\Lambda^c}(y,z)H_{\mathcal{C}_n}(0, z)\right)\left(1+\mathcal{O}\Big(\frac{m}{n}\Big)\right), \end{align*} and the proof of the lemma follows from the last two displays. \end{proof}
\section{Proof of Proposition \ref{prop.phipsi.2}} The proof is divided into four steps, corresponding to the next four lemmas.
\begin{lemma} \label{lem.var.1} Assume that $\varepsilon_k\to \infty$, and $\varepsilon_k/k\to 0$. There exists a constant $\sigma_{1,3}>0$, such that $$\operatorname{Cov}(Z_0\varphi_3, Z_k\psi_1) \sim \frac{\sigma_{1,3}}{k}.$$ \end{lemma}
\begin{lemma} \label{lem.var.2} There exist positive constants $\delta$ and $\sigma_{1,1}$, such that when $\varepsilon_k\ge k^{1-\delta}$, and $\varepsilon_k/k\to 0$, $$ \operatorname{Cov}(Z_0\varphi_1,Z_k\psi_1) \sim \operatorname{Cov}(Z_0\varphi_3, Z_k\psi_3) \sim \frac{\sigma_{1,1}}{k}.$$ \end{lemma}
\begin{lemma}\label{lem.var.3} There exist positive constants $\delta$ and $\sigma_{1,2}$, such that when $\varepsilon_k\ge k^{1-\delta}$, and $\varepsilon_k/k\to 0$, $$ \operatorname{Cov}(Z_0\varphi_2,Z_k\psi_1) \sim \operatorname{Cov}(Z_0\varphi_3, Z_k\psi_2) \sim \frac{\sigma_{1,2}}{k}.$$ \end{lemma}
\begin{lemma}\label{lem.var.4} There exist positive constants $\delta$ and $\sigma_{2,2}$, such that when $\varepsilon_k\ge k^{1-\delta}$, and $\varepsilon_k/k\to 0$, $$ \operatorname{Cov}(Z_0\varphi_2,Z_k\psi_2) \sim \frac{\sigma_{2,2}}{k}.$$ \end{lemma}
\subsection{Proof of Lemma \ref{lem.var.1}} We assume now to simplify notation that the distribution $\mu$ is aperiodic, but it should be clear from the proof that the case of a bipartite walk could be handled similarly.
The first step is to show that \begin{equation}\label{var.1.1}
\operatorname{Cov}(Z_0\varphi_3, Z_k \psi_1) = \rho^2 \left\{\sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x^2 - \left(\sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x\right)^2\right\} + o\left(\frac 1k\right),
\end{equation} where $\rho$ and $\varphi_x$ are defined respectively as \begin{eqnarray}\label{def.rho} \rho:= \mathbb{E}\left[ \mathbb{P}\left[H^+_{\overline \mathcal{R}_\infty } = \infty \mid (S_n)_{n\in \mathbb{Z}} \right] \cdot {\text{\Large $\mathfrak 1$}}\{S_\ell \neq 0,\, \forall \ell \ge 1\}\right] , \end{eqnarray} and $$\varphi_x : = \mathbb{P}_{0,x} [\mathcal{R}_\infty \cap \widetilde \mathcal{R}_\infty \neq \varnothing].$$ To see this, one needs to dissociate $Z_0$ and $Z_k$, as well as the events of avoiding $\mathcal{R}[-\varepsilon_k,\varepsilon_k]$ and $\mathcal{R}[k-\varepsilon_k,k+\varepsilon_k]$ by two independent walks starting respectively from the origin and from $S_k$, which are local events (in the sense that they only concern small parts of the different paths), from the events of hitting $\mathcal{R}[k+1,\infty)$ and $\mathcal{R}(-\infty,-1]$ by these two walks, which involve different parts of the trajectories.
To be more precise, consider $(S_n^1)_{n\ge 0}$ and $(S_n^2)_{n\ge 0}$, two independent random walks starting from the origin, and independent of $(S_n)_{n\in \mathbb{Z}}$. Then define $$\tau_1:= \inf\{n\ge \varepsilon_k : S_n^1 \in \mathcal{R}[k+\varepsilon_k,\infty) \}, \ \tau_2:= \inf \{n\ge \varepsilon_k : S_k + S_n^2 \in \mathcal{R}(-\infty,-\varepsilon_k]\}.$$ We first consider the term $\mathbb{E}[Z_0\varphi_3]$. Let $$\tau_{0,1}:=\inf \left\{n\ge \varepsilon_k \, :\, S_n^1 \in \mathcal{R}[-\varepsilon_k,\varepsilon_k]\right\},$$ and $$\Delta_{0,3}:= \mathbb{E}\left[Z_0\cdot {\text{\Large $\mathfrak 1$}}\{\mathcal{R}^1[1,\varepsilon_k]\cap \mathcal{R}[-\varepsilon_k,\varepsilon_k] =\varnothing\}\cdot {\text{\Large $\mathfrak 1$}}\{ \tau_1 <\infty\}\right].$$ One has, \begin{align*}
\left| \mathbb{E}[Z_0\varphi_3] - \Delta_{0,3}\right| &\le \mathbb{P}\left[\tau_{0,1}<\infty,\, \tau_1<\infty \right] + \mathbb{P}\left[ \mathcal{R}^1[0,\varepsilon_k] \cap \mathcal{R}[k,\infty)\neq \varnothing \right] \\ & \qquad +\mathbb{P}[\mathcal{R}^1_\infty \cap \mathcal{R}[k,k+\varepsilon_k]\neq \varnothing] \\ & \stackrel{\eqref{lem.hit.3}}{\le} \mathbb{P}[\tau_1\le \tau_{0,1}<\infty] + \mathbb{P}[\tau_{0,1}\le \tau_1<\infty ] + \mathcal{O}\left(\frac{\varepsilon_k}{k^{3/2}} \right). \end{align*} Next, conditioning on $\mathcal{R}[-\varepsilon_k,\varepsilon_k]$ and using the Markov property at time $\tau_{0,1}$, we get with $X=S_{\varepsilon_k} - S^1_{\tau_{0,1}}$, \begin{align*} \mathbb{P}[\tau_{0,1}\le \tau_1<\infty ] &\le \mathbb{E}\left[\mathbb{P}_{0,X}[\mathcal{R}[k,\infty)\cap \widetilde \mathcal{R}_\infty \neq \varnothing] \cdot {\text{\Large $\mathfrak 1$}}\{\tau_{0,1}<\infty\}\right] \\ & \stackrel{\eqref{lem.hit.3}}{=} \mathcal{O}\left(\frac{\mathbb{P}[\tau_{0,1}<\infty] }{\sqrt{k}}\right) \stackrel{\eqref{lem.hit.3}}{=} \mathcal{O}\left(\frac 1{\sqrt {k \varepsilon_k}} \right). \end{align*} Likewise, using the Markov property at time $\tau_1$, we get \begin{align*} & \mathbb{P}[\tau_1\le \tau_{0,1}<\infty ] \stackrel{\eqref{lem.hit.1}}{\le} \mathbb{E}\left[\left(\sum_{j=-\varepsilon_k}^{\varepsilon_k} G(S_j- S_{\tau_1}^1)\right) {\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}\right] \\ & \stackrel{\eqref{lem.hit.1}}{\le}\sum_{i=k+\varepsilon_k}^\infty \sum_{j=-\varepsilon_k}^{\varepsilon_k} \mathbb{E}\left[G(S_j- S_i)G(S_i-S^1_{\varepsilon_k})\right]\\ & \le (2\varepsilon_k+1) \sup_{x\in \mathbb{Z}^5} \sum_{i=k}^\infty \mathbb{E}\left[G(S_i)G(S_i-x)\right] \\ & \le (2\varepsilon_k+1) \sup_{x\in \mathbb{Z}^5} \sum_{z\in \mathbb{Z}^5} G(z) G(z-x) G_k(z) \stackrel{\eqref{Green}, \, \text{Lemma }\ref{lem.upconvolG}}{=}\mathcal{O}\left(\frac{\varepsilon_k}{k^{3/2}}\right). 
\end{align*} Now define for any $y_1,y_2\in \mathbb{Z}^5$, \begin{equation}\label{Hy1y2} H(y_1,y_2):= \mathbb{E}\left[Z_0 {\text{\Large $\mathfrak 1$}}\{\mathcal{R}^1[1,\varepsilon_k]\cap \mathcal{R}[-\varepsilon_k,\varepsilon_k] =\varnothing, S_{\varepsilon_k }= y_1, S^1_{\varepsilon_k} = y_2 \}\right]. \end{equation} One has by the Markov property \begin{align*} \Delta_{0,3} =\sum_{x\in \mathbb{Z}^5} \sum_{y_1,y_2\in \mathbb{Z}^5} H(y_1,y_2) p_k(x+y_2-y_1) \varphi_x. \end{align*}
Observe that typically $\|y_1\|$ and $\|y_2\|$ are much smaller than $\|x\|$, and thus $p_k(x+y_2-y_1)$ should be also typically close to $p_k(x)$. To make this precise, consider $(\chi_k)_{k\ge 1}$ some sequence of positive integers, such that $\varepsilon_k \chi_k^3 \le k$, for all $k\ge 1$, and $\chi_k\to \infty$, as $k\to \infty$. One has using Cauchy-Schwarz at the third line, \begin{align*}
& \sum_{\|x\|^2\le k/\chi_k} \sum_{y_1,y_2\in \mathbb{Z}^5} H(y_1,y_2) p_k(x+y_2-y_1)\varphi_{x} \\
& \le \sum_{\|x\|^2\le k/\chi_k } \sum_{y_2\in \mathbb{Z}^5} p_{\varepsilon_k}(y_2) p_{k+\varepsilon_k}(x) \varphi_{x-y_2}
\stackrel{\eqref{lem.hit.2}}{\lesssim} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|S_{k+\varepsilon_k}\|^2 \le k/\chi_k\}}{1+\|S_{k+\varepsilon_k} - S^1_{\varepsilon_k}\|}\right]\\
& \lesssim \mathbb{E}\left[\frac 1{1+\|S_{k+2\varepsilon_k}\|^2}\right]^{1/2}\cdot \mathbb{P}\left[\|S_{k+\varepsilon_k}\|^2\le k/\chi_k\right]^{1/2}\stackrel{\eqref{pn.largex}}{\lesssim} \frac 1{\sqrt{k}\cdot \chi_k^{5/4}}. \end{align*} Likewise, using just \eqref{Sn.large} at the end instead of \eqref{pn.largex}, we get
$$\sum_{\|x\|^2 \ge k\chi_k} \sum_{y_1,y_2\in \mathbb{Z}^5} H(y_1,y_2) p_k(x+y_2-y_1)\varphi_{x} \lesssim \frac 1{\sqrt{k}\cdot \chi_k^{5/4}},$$
and one can handle the sums on the sets $\{\|y_1\|^2\ge \varepsilon_k\chi_k\}$ and $\{\|y_2\|^2\ge \varepsilon_k\chi_k\}$ similarly. Therefore, it holds
$$\Delta_{0,3} =\sum_{k/\chi_k \le \|x\|^2 \le k \chi_k } \sum_{\|y_1\|^2\le \varepsilon_k \chi_k} \sum_{\|y_2\|^2\le \varepsilon_k \chi_k} H(y_1,y_2) p_k(x+y_2-y_1) \varphi_x + \mathcal{O}\left(\frac 1{\sqrt{k}\cdot \chi_k^{5/4}} \right).$$ Moreover, Theorem \ref{LCLT} shows that for any $x,y_1,y_2$ as in the three sums above, one has
$$|p_k(x+y_2-y_1)-p_k(x)| = \mathcal{O}\left(\frac{\sqrt{\varepsilon_k} \cdot \chi_k}{\sqrt k}\cdot p_k(x) + \frac 1{k^{7/2}}\right).$$ Note also that by \eqref{lem.hit.2}, one has \begin{equation}\label{sum.pkxphix} \sum_{x,y_1,y_2\in \mathbb{Z}^5} H(y_1,y_2)p_k(x) \varphi_x\le \sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x = \mathcal{O}\left(\frac 1{\sqrt{k}}\right). \end{equation}
Using as well that $\sqrt{\varepsilon_k}\chi_k \le \sqrt {k/\chi_k}$, and $\sum_{\|x\|^2 \le k\chi_k} \varphi_x = \mathcal{O}(k^2\chi_k^2)$, we get \begin{align*} \Delta_{0,3} =\rho_k \sum_{x\in \mathbb{Z}^5 } p_k(x) \varphi_x + \mathcal{O}\left(\frac{1}{\sqrt{k\cdot \chi_k}} + \frac{\chi_k^2}{k^{3/2}}\right), \end{align*} with $$\rho_k:= \sum_{y_1,y_2\in \mathbb{Z}^5} H(y_1,y_2) = \mathbb{E}\left[Z_0\cdot {\text{\Large $\mathfrak 1$}}\{\mathcal{R}^1[1,\varepsilon_k] \cap \mathcal{R}[-\varepsilon_k,\varepsilon_k] = \varnothing\}\right].$$ Note furthermore that one can always take $\chi_k$ such that $\chi_k=o( \sqrt k)$, and that by \eqref{Green.hit}, \eqref{Green} and \eqref{lem.hit.3}, one has
$|\rho_k - \rho| \lesssim \varepsilon_k^{-1/2}$. This gives \begin{eqnarray}\label{Z0phi3.final} \mathbb{E}[Z_0\varphi_3] = \rho \sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x + o\left(\frac{1}{\sqrt{k}}\right). \end{eqnarray} By symmetry the same estimate holds for $\mathbb{E}[Z_k\psi_1]$, and thus using again \eqref{sum.pkxphix}, it entails $$\mathbb{E}[Z_0\varphi_3] \cdot \mathbb{E}[Z_k\psi_1] = \rho^2\left( \sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x\right)^2 + o\left(\frac{1}{k}\right).$$ The estimate of $\mathbb{E}[Z_0\varphi_3Z_k\psi_1]$ is done along the same line, but is a bit more involved. Indeed, let \begin{align*} \Delta_{1,3}:= \mathbb{E}\left[\right. &Z_0Z_k {\text{\Large $\mathfrak 1$}}\{\mathcal{R}^1[1,\varepsilon_k]\cap \mathcal{R}[-\varepsilon_k,\varepsilon_k] =\varnothing\} \\ & \left. \times {\text{\Large $\mathfrak 1$}}\{(S_k+\mathcal{R}^2[1,\varepsilon_k])\cap \mathcal{R}[k-\varepsilon_k,k+\varepsilon_k] =\varnothing, \tau_1 <\infty, \tau_2<\infty\}\right]. \end{align*} The difference between $\mathbb{E}[Z_0\varphi_3Z_k\psi_1]$ and $\Delta_{1,3}$ can be controlled roughly as above, but one needs additionally to handle the probability of $\tau_2$ being finite. Namely one has using symmetry, \begin{align}\label{Delta13}
& |\mathbb{E}[Z_0\varphi_3Z_k\psi_1] - \Delta_{1,3} | \le 2\left(\mathbb{P}\left[\tau_{0,1}<\infty,\, \tau_1<\infty, \, \overline \tau_2<\infty \right] \right. \\
\nonumber + & \mathbb{P}\left[ \mathcal{R}^1[0,\varepsilon_k] \cap \mathcal{R}[k,\infty)\neq \varnothing,\, \overline \tau_2<\infty \right] + \left. \mathbb{P}[\mathcal{R}^1_\infty \cap \mathcal{R}[k,k+\varepsilon_k]\neq \varnothing, \, \overline \tau_2<\infty]\right), \end{align} with $$\overline \tau_2:= \inf\{n\ge 0 \, : \, S_k+S^2_n \in \mathcal{R}(-\infty,0]\}.$$ The last term in \eqref{Delta13} is handled as follows: \begin{align*} &\mathbb{P}\left[\mathcal{R}^1_\infty \cap \mathcal{R}[k,k+\varepsilon_k]\neq \varnothing, \overline \tau_2<\infty\right] =\sum_{x\in \mathbb{Z}^5} \mathbb{P}[\mathcal{R}^1_\infty \cap \mathcal{R}\left[k,k+\varepsilon_k]\neq \varnothing, \overline \tau_2<\infty, S_k=x\right] \\ & \stackrel{\eqref{lem.hit.1}}{\le} \sum_{x\in \mathbb{Z}^5} p_k(x)\varphi_x \sum_{i=0}^{\varepsilon_k} \mathbb{E}[G(S_i+x)]
\stackrel{\eqref{Green}, \eqref{lem.hit.2}, \eqref{exp.Green.x}}{\lesssim} \varepsilon_k \sum_{x\in \mathbb{Z}^5} \frac{p_k(x)}{1+\|x\|^4} \stackrel{\eqref{pn.largex}}{\lesssim} \frac{\varepsilon_k}{k^2}. \end{align*} The same arguments give as well $$\mathbb{P}\left[ \mathcal{R}^1[0,\varepsilon_k] \cap \mathcal{R}[k,\infty)\neq \varnothing,\, \overline \tau_2<\infty \right] \lesssim \frac{\varepsilon_k}{k^2},$$ $$\mathbb{P}\left[\tau_{0,1}<\infty,\, \tau_1<\infty, \, \overline \tau_2<\infty \right] = \mathbb{P}\left[\tau_{0,1}<\infty,\, \tau_1<\infty, \, \tau_2<\infty \right] + \mathcal{O}\left(\frac{\varepsilon_k}{k^2}\right).$$ Then we can write, \begin{align*} &\mathbb{P}\left[\tau_{0,1}\le \tau_1<\infty, \tau_2<\infty \right] = \mathbb{E} \left[ \mathbb{P}_{0,S_{k+\varepsilon_k}-S_{\tau_{0,1}}}[\mathcal{R}_\infty \cap \widetilde \mathcal{R}_\infty \neq \varnothing] {\text{\Large $\mathfrak 1$}}\{\tau_{0,1}<\infty, \tau_2<\infty\}\right] \\
& \stackrel{\eqref{lem.hit.1}, \eqref{lem.hit.2}}{\lesssim} \sum_{i=-\varepsilon_k}^{\varepsilon_k} \mathbb{E}\left[\frac {1}{1+\|S_{k+\varepsilon_k}-S_i\|}\cdot \frac{G(S_i-S^1_{\varepsilon_k})}{1+\|S_k-S_{-\varepsilon_k}\|}\right]\\
& \stackrel{\eqref{exp.Green}}{\lesssim}\frac{1}{\varepsilon_k^{3/2}} \sum_{i=-\varepsilon_k}^{\varepsilon_k} \mathbb{E}\left[\frac {1}{1+\|S_k-S_i\|}\cdot \frac{1}{1+\|S_k-S_{-\varepsilon_k}\|}\right] \\
& \lesssim \frac{1}{\sqrt{\varepsilon_k}} \max_{k-\varepsilon_k\le j \le k+\varepsilon_k}\, \sup_{u\in\mathbb{Z}^d}\, \mathbb{E}\left[\frac {1}{1+\|S_j\|}\cdot \frac{1}{1+\|S_j+u\|}\right] \lesssim \frac{1}{k\sqrt{\varepsilon_k}}, \end{align*} where the last inequality follows from straightforward computations, using \eqref{pn.largex}. On the other hand, \begin{align*} &\mathbb{P}\left[\tau_1\le \tau_{0,1}<\infty, \tau_2<\infty \right] \stackrel{\eqref{lem.hit.1}, \eqref{lem.hit.2}}{\lesssim} \sum_{i=k+\varepsilon_k}^\infty \sum_{j=-\varepsilon_k}^{\varepsilon_k}
\mathbb{E}\left[\frac{G(S_j-S_i)G(S_i-S_{\varepsilon_k}^1)}{1+\|S_k-S_{-\varepsilon_k}\|}\right] \\ & \stackrel{\eqref{Green}, \eqref{exp.Green.x}}{\lesssim} \sum_{j=-\varepsilon_k}^{\varepsilon_k} \sum_{i=k+\varepsilon_k}^\infty
\mathbb{E}\left[\frac{G(S_j-S_i)}{(1+\|S_i\|^3)(1+\|S_k-S_{-\varepsilon_k}\|)}\right] \\
& \lesssim \sum_{j=-\varepsilon_k}^{\varepsilon_k}\sum_{z\in \mathbb{Z}^d} G_{\varepsilon_k}(z) \mathbb{E}\left[\frac{G(z+ S_k-S_j)}{(1+\|z+S_k\|^3)(1+\|S_k-S_{-\varepsilon_k}\|)}\right]. \end{align*} Note now that for $x,y\in \mathbb{Z}^5$, by \eqref{Green} and Lemma \ref{lem.upconvolG}, \begin{align*}
\sum_{z\in \mathbb{Z}^d} \frac{G_{\varepsilon_k}(z)}{(1+\|z-x\|^3)(1+\|z-y\|^3)} \lesssim \frac{1}{1+\|x\|^3} \left(\frac 1{\sqrt{\varepsilon_k}} + \frac{1}{1+\|y-x\|}\right) . \end{align*} It follows that \begin{align*} &\mathbb{P}\left[\tau_1\le \tau_{0,1}<\infty, \tau_2<\infty \right] \lesssim \sum_{j=-\varepsilon_k}^{\varepsilon_k}
\mathbb{E}\left[\frac{1}{(1+\|S_k\|^3)(1+\|S_k-S_{-\varepsilon_k}\|)}\left(\frac 1{\sqrt{\varepsilon_k}} + \frac 1{1+\|S_j\|}\right) \right]\\
& \stackrel{\eqref{exp.Green.x}}{\lesssim} \mathbb{E}\left[\frac {\sqrt{\varepsilon_k}}{1+\|S_k\|^4}\right] + \sum_{j=-\varepsilon_k}^0 \mathbb{E}\left[\frac 1{(1+\|S_k\|^3)(1+\|S_k-S_j\|)(1+\|S_j\|)}\right] \\
& \qquad + \sum_{j=1}^{\varepsilon_k} \mathbb{E}\left[\frac 1{(1+\|S_k\|^4)(1+\|S_j\|)}\right]\\
& \lesssim \frac{1}{k^2} \left(\sqrt{\varepsilon_k} + \sum_{j=-\varepsilon_k}^{\varepsilon_k} \mathbb{E}\left[\frac 1{1+\|S_j\|}\right] \right)\lesssim \frac{\sqrt{\varepsilon_k}}{k^2}, \end{align*} using for the third inequality that by \eqref{pn.largex}, it holds uniformly in $x\in \mathbb{Z}^5$ and $j\le \varepsilon_k$,
$$\mathbb{E}\left[\frac 1{1+\|S_k-S_j+x\|^4}\right] \lesssim k^{-2}, \quad \mathbb{E}\left[\frac 1{(1+\|S_k\|^3) (1+\|S_k+x\|)}\right] \lesssim k^{-2}.$$ Now we are left with computing $\Delta_{1,3}$. This step is essentially the same as above, so we do not give all the details. We first define for $y_1,y_2,y_3 \in \mathbb{Z}^5$, $$H(y_1,y_2,y_3):= \mathbb{E}\left [Z_0 {\text{\Large $\mathfrak 1$}}\{\mathcal{R}^1[1,\varepsilon_k]\cap \mathcal{R}[-\varepsilon_k,\varepsilon_k] =\varnothing, S_{\varepsilon_k} =y_1, S^1_{\varepsilon_k} = y_2, S_{-\varepsilon_k} = y_3\}\right],$$ and note that $$\Delta_{1,3}= \sum_{\substack{y_1,y_2,y_3\in \mathbb{Z}^5 \\ z_1,z_2,z_3\in \mathbb{Z}^5 \\ x\in \mathbb{Z}^5}} H(y_1,y_2,y_3) H(z_1,z_2,z_3) p_{k-2\varepsilon_k}(x-y_1+z_3) \varphi_{x+z_1-y_2} \varphi_{x + z_2-y_3}.$$
Observe here that by Theorem C, $\varphi_{x+z_1-y_2}$ is equivalent to $\varphi_x$, when $\|z_1\|$ and $\|y_2\|$ are small when compared to $\|x\|$, and similarly for $\varphi_{x+z_2-y_3}$. Thus using similar arguments as above, and in particular that by \eqref{pn.largex} and \eqref{lem.hit.2}, \begin{equation}\label{sum.pkxphix.2} \sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x^2 = \mathcal{O}\left(\frac 1k\right), \end{equation} we obtain $$\Delta_{1,3} = \rho^2 \sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x^2 + o\left(\frac 1k\right).$$ Putting all pieces together gives \eqref{var.1.1}. Using in addition \eqref{pn.largex}, \eqref{lem.hit.2} and Theorem \ref{LCLT}, we deduce that $$\operatorname{Cov}(Z_0\varphi_3, Z_k \psi_1) = \rho^2 \left\{\sum_{x\in \mathbb{Z}^5} \overline p_k(x) \varphi_x^2 - \left(\sum_{x\in \mathbb{Z}^5} \overline p_k(x) \varphi_x\right)^2\right\} + o\left(\frac 1k\right).$$ Then Theorem C, together with \eqref{sum.pkxphix} and \eqref{sum.pkxphix.2} show that $$\operatorname{Cov}(Z_0\varphi_3, Z_k \psi_1) = \sigma \left\{\sum_{x\in \mathbb{Z}^5} \frac{\overline p_k(x)}{1+\mathcal J(x)^2} - \left(\sum_{x\in \mathbb{Z}^5} \frac{\overline p_k(x)}{1+\mathcal J(x)} \right)^2\right\} +o\left(\frac 1k\right), $$ for some constant $\sigma>0$. Finally an approximation of the series with an integral and a change of variables gives, with $c_0:=(2\pi)^{-5/2} (\det \Gamma)^{-1/2}$, \begin{align*}
\operatorname{Cov}(Z_0\varphi_3, Z_k \psi_1) & = \frac{\sigma c_0}{k} \left\{\int_{\mathbb{R}^5} \frac{e^{- 5 \mathcal J(x)^2/2}}{\mathcal J(x)^2} \, dx - c_0\left(\int_{\mathbb{R}^5} \frac{e^{- 5 \mathcal J(x)^2/2}}{\mathcal J(x)} \, dx \right)^2\right\} +o\left(\frac 1k\right). \end{align*} The last step of the proof is to observe that the difference between the two terms in the curly bracket is indeed a positive real number. This follows simply from the Cauchy--Schwarz inequality, once we observe that $c_0\int_{\mathbb{R}^5} e^{-5 \mathcal J(x)^2/2} \, dx = 1$, which itself can be deduced for instance from the fact that $1= \sum_{x\in \mathbb{Z}^5} p_k(x) \sim c_0\int_{\mathbb{R}^5} e^{- 5 \mathcal J(x)^2/2} \, dx$, by the above arguments. This concludes the proof of Lemma \ref{lem.var.1}.
$\square$
\subsection{Proof of Lemma \ref{lem.var.2}} Let us concentrate on the term $\operatorname{Cov}(Z_0\varphi_3,Z_k\psi_3)$, the estimate of $\operatorname{Cov}(Z_0\varphi_1,Z_k\psi_1)$ being entirely similar. We also assume to simplify notation that the walk is aperiodic.
We consider as in the proof of the previous lemma $(S_n^1)_{n\ge 0}$ and $(S_n^2)_{n\ge 0}$ two independent random walks starting from the origin, independent of $(S_n)_{n\in \mathbb{Z}}$, and define this time $$\tau_1:= \inf\{n\ge k+\varepsilon_k : S_n \in \mathcal{R}^1[\varepsilon_k,\infty) \}, \ \tau_2:= \inf \{n\ge k+\varepsilon_k : S_n \in S_k+\mathcal{R}^2[\sqrt \varepsilon_k,\infty)\}.$$ Define as well $$\overline \tau_1:= \inf\{n\ge k+\varepsilon_k : S_n \in \mathcal{R}^1_\infty \}, \ \overline \tau_2:= \inf \{n\ge k+\varepsilon_k : S_n \in S_k+\mathcal{R}^2_\infty\}.$$ \underline{Step $1$.} Our first task is to show that \begin{equation}\label{cov.33.first} \operatorname{Cov}(Z_0\varphi_3,Z_k\psi_3) = \rho^2 \cdot \operatorname{Cov}\left({\text{\Large $\mathfrak 1$}}\{\overline \tau_1<\infty\} ,\, {\text{\Large $\mathfrak 1$}}\{\overline \tau_2<\infty\}\right) + o\left(\frac 1k\right), \end{equation} with $\rho$ as defined in \eqref{def.rho}. This step is essentially the same as in the proof of Lemma \ref{lem.var.1}, but with some additional technical difficulties, so let us give some details. First, the proof of Lemma \ref{lem.var.1} shows that (using the same notation), $$\mathbb{E}[Z_0\varphi_3] = \Delta_{0,3} + \mathcal{O}\left(\frac{1}{\sqrt{k\varepsilon_k}} + \frac{\varepsilon_k}{k^{3/2}}\right),$$ and that for any sequence $(\chi_k)_{k\ge 1}$ going to infinity with $\varepsilon_k \chi_k^{2+\frac 14} \le k$,
$$\Delta_{0,3} = \sum_{k/\chi_k \le \|x\|^2 \le k \chi_k } \sum_{\substack{\|y_1\|^2\le \varepsilon_k \chi_k \\ \|y_2\|^2\le \varepsilon_k \chi_k}} H(y_1,y_2) p_k(x+y_2-y_1) \varphi_x + \mathcal{O}\left(\frac 1{\sqrt{k}\cdot \chi_k^{5/4}} \right).$$ Observe moreover, that by symmetry $H(y_1,y_2) = H(-y_1,-y_2)$, and that by Theorem \ref{LCLT}, for any $x$, $y_1$, and $y_2$ as above, for some constant $c>0$,
$$\left|p_k(x+y_2-y_1) + p_k(x+y_1-y_2) - p_k(x) \right| = \mathcal{O}\left(\frac{\varepsilon_k\chi_k}{k} \overline p_k(cx) + \frac 1{k^{7/2}}\right),$$ It follows that one can improve the bound \eqref{Z0phi3.final} into \begin{align}\label{Z0phi3.bis} \nonumber \mathbb{E}[Z_0\varphi_3] & = \rho \sum_{x\in \mathbb{Z}^5} p_k(x) \varphi_x + \mathcal{O}\left(\frac{\varepsilon_k\chi_k}{k^{3/2}} + \frac{\chi_k^2}{k^{3/2}} +\frac 1{\sqrt{k}\cdot \chi_k^{5/4}} + \frac{1}{\sqrt{k\varepsilon_k}} + \frac{\varepsilon_k}{k^{3/2}} \right)\\ & = \rho \, \mathbb{P}[\overline\tau_1<\infty] + \mathcal{O}\left(\frac{\varepsilon_k\chi_k}{k^{3/2}} + \frac{\chi_k^2}{k^{3/2}} +\frac 1{\sqrt{k}\cdot \chi_k^{5/4}} + \frac{1}{\sqrt{k\varepsilon_k}} + \frac{\varepsilon_k}{k^{3/2}} \right). \end{align} Since by \eqref{lem.hit.3} one has $$\mathbb{E}[Z_k\psi_3] \le \mathbb{E}[\psi_3] = \mathcal{O}\left(\frac 1{\sqrt{\varepsilon_k}}\right),$$ this yields by taking $\chi_k^{2+1/4} := k/\varepsilon_k$, and $\varepsilon_k\ge k^{2/3}$ (but still $\varepsilon_k= o(k)$), \begin{align}\label{Z03.1} \mathbb{E}[Z_0\varphi_3] \cdot \mathbb{E}[Z_k\psi_3] = \rho\, \mathbb{P}[\overline\tau_1<\infty]\cdot \mathbb{E}[Z_k\psi_3] + o\left(\frac 1k \right). \end{align} We next seek an analogous estimate for $\mathbb{E}[Z_k\psi_3]$. Define $Z'_k:=1\{S_{k+i}\neq S_k,\, \forall i=1,\dots,\varepsilon_k^{3/4}\}$, and $$\Delta_0:= \mathbb{E}\left[Z'_k\cdot {\text{\Large $\mathfrak 1$}}\left\{\mathcal{R}[k-\varepsilon_k,k+\varepsilon_k^{3/4}] \cap (S_k+\mathcal{R}^2[1,\sqrt{\varepsilon_k}])=\varnothing, \, \tau_2<\infty \right\}\right].$$ Note that (with $\mathcal{R}$ and $\widetilde \mathcal{R}$ two independent walks), \begin{align*}
\left| \mathbb{E}[Z_k\psi_3] - \Delta_0\right| & \le \mathbb{P}\left[0\in \mathcal{R}[\varepsilon_k^{3/4},\varepsilon_k]\right] + \mathbb{P}\left[\widetilde \mathcal{R}[0,\sqrt {\varepsilon_k}]\cap \mathcal{R}[\varepsilon_k,\infty)\neq \varnothing\right] \\ & + \mathbb{P}\left[\widetilde \mathcal{R}_\infty\cap \mathcal{R}[\varepsilon_k^{3/4},\varepsilon_k] \neq \varnothing, \widetilde \mathcal{R}_\infty\cap \mathcal{R}[\varepsilon_k,\infty) \neq \varnothing \right] \\
& + \mathbb{P}\left[\widetilde \mathcal{R}[\sqrt{\varepsilon_k},\infty) \cap \mathcal{R}[-\varepsilon_k,\varepsilon_k]\neq \varnothing, \widetilde \mathcal{R}[\sqrt{\varepsilon_k},\infty) \cap \mathcal{R}[\varepsilon_k,\infty)\neq \varnothing \right]. \end{align*} Moreover, \begin{equation}\label{ZkZ'k} \mathbb{P}\left[0\in \mathcal{R}[\varepsilon_k^{3/4},\varepsilon_k]\right] \stackrel{\eqref{Green.hit}, \eqref{exp.Green}}{\lesssim} \varepsilon_k^{-9/8},\quad \mathbb{P}\left[\widetilde \mathcal{R}[0,\sqrt {\varepsilon_k}]\cap \mathcal{R}[\varepsilon_k,\infty)\neq \varnothing\right] \stackrel{\eqref{lem.hit.3}}{\lesssim} \varepsilon_k^{- 1}. \end{equation} Using also the same computation as in the proof of Lemma \ref{lem.123}, we get \begin{equation*} \mathbb{P}\left[\widetilde \mathcal{R}_\infty\cap \mathcal{R}[\varepsilon_k^{3/4},\varepsilon_k] \neq \varnothing,\, \widetilde \mathcal{R}_\infty\cap \mathcal{R}[\varepsilon_k,\infty) \neq \varnothing \right] \lesssim \varepsilon_k^{-\frac 38 - \frac 12}, \end{equation*} \begin{equation} \label{tau01tau2}
\mathbb{P}\left[\widetilde \mathcal{R}[\sqrt{\varepsilon_k},\infty) \cap \mathcal{R}[-\varepsilon_k,\varepsilon_k]\neq \varnothing, \widetilde \mathcal{R}[\sqrt{\varepsilon_k},\infty) \cap \mathcal{R}[\varepsilon_k,\infty)\neq \varnothing \right] \lesssim \varepsilon_k^{-\frac 14 - \frac 12}. \end{equation} As a consequence \begin{align}\label{Zk3.1} \mathbb{E}[Z_k\psi_3] = \Delta_0 + \mathcal{O}\left(\varepsilon_k^{-3/4}\right). \end{align} Introduce now \begin{align*} \widetilde H(y_1,y_2) := \mathbb{E}\left[Z'_k\cdot \right. & {\text{\Large $\mathfrak 1$}}\{\mathcal{R}[k-\varepsilon_k,k+\varepsilon_k^{3/4}]\cap (S_k+ \mathcal{R}^2[1,\sqrt{\varepsilon_k}])=\varnothing\} \\ & \times \left. {\text{\Large $\mathfrak 1$}}\{S_{k+\varepsilon_k^{3/4}}-S_k = y_1, S^2_{\sqrt{\varepsilon_k}} = y_2\}\right], \end{align*} and note that $$\Delta_0 = \sum_{x\in \mathbb{Z}^d} \sum_{y_1,y_2\in \mathbb{Z}^d} \widetilde H(y_1,y_2) p_{\varepsilon_k-\varepsilon_k^{3/4}}(x+y_2-y_1) \varphi_x.$$ Let $\chi_k:= \varepsilon_k^{1/8}$. As above, we can see that \begin{align*}
\Delta_0 & = \sum_{\substack{\varepsilon_k/\chi_k\le \|x\|^2\le \varepsilon_k\chi_k \\ \|y_1\|^2\le \varepsilon_k^{3/4}\chi_k \\ \|y_2\|^2 \le \sqrt{\varepsilon_k}\chi_k}} \widetilde H(y_1,y_2) p_{\varepsilon_k-\varepsilon_k^{3/4}}(x+y_2-y_1) \varphi_x + \mathcal{O}\left(\frac{1}{\sqrt{\varepsilon_k} \chi_k^{5/4}}\right)\\
&= \left(\sum_{y_1,y_2\in \mathbb{Z}^d} \widetilde H(y_1,y_2)\right) \left(\sum_{x\in \mathbb{Z}^d} p_{\varepsilon_k}(x)\varphi_x\right) + \mathcal{O}\left( \frac{\chi_k}{\varepsilon_k^{3/4}} + \frac{\chi_k^2}{\varepsilon_k^{3/2}} + \frac{1}{\sqrt{\varepsilon_k} \chi_k^{5/4}}\right) \\
& = \rho\cdot \mathbb{P}[\overline \tau_2<\infty] + \mathcal{O}(\varepsilon_k^{-5/8}). \end{align*} Then by taking $\varepsilon_k\ge k^{5/6}$, and recalling \eqref{Z03.1} and \eqref{Zk3.1}, we obtain \begin{align}\label{Z03.2} \mathbb{E}[Z_0\varphi_3] \cdot \mathbb{E}[Z_k\psi_3] = \rho^2\cdot \mathbb{P}[\overline\tau_1<\infty]\cdot \mathbb{P}[\overline \tau_2<\infty]+ o\left(\frac 1k \right). \end{align} Finally, let \begin{align*} \Delta_{3,3}:= & \mathbb{E} [Z_0Z'_k {\text{\Large $\mathfrak 1$}}\{\mathcal{R}^1[1,\varepsilon_k]\cap \mathcal{R}[-\varepsilon_k,\varepsilon_k] =\varnothing\}\\ & \times {\text{\Large $\mathfrak 1$}}\{ (S_k+\mathcal{R}^2[1,\sqrt{\varepsilon_k}])\cap \mathcal{R}[k-\varepsilon_k^{\frac 34},k+\varepsilon_k^{\frac 34}] =\varnothing, \tau_1 <\infty, \tau_2<\infty \}]. \end{align*} It amounts to estimate the difference between $\Delta_{3,3}$ and $\mathbb{E}[Z_0Z_k\varphi_3\psi_3]$. Define $$\widetilde \tau_1:=\inf\{n\ge k+\varepsilon_k : S_n\in \mathcal{R}^1[0,\varepsilon_k]\}, \ \widetilde \tau_2:=\inf\{n\ge k+\varepsilon_k : S_n\in S_k+\mathcal{R}^2[0,\sqrt{\varepsilon_k}]\}.$$ Observe first that \begin{align}\label{tilde1bar2}
\nonumber & \mathbb{P}[\widetilde \tau_1\le \overline \tau_2<\infty] \stackrel{\eqref{lem.hit.2}}{\lesssim} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\widetilde \tau_1<\infty\}}{1+\|S_{\widetilde \tau_1}-S_k\|} \right]
\stackrel{\eqref{lem.hit.1}}{\lesssim} \sum_{i=0}^{\varepsilon_k} \mathbb{E}\left[\frac{G(S_i^1-S_{k+\varepsilon_k})}{1+\|S_i^1-S_k\|} \right]\\
\nonumber & \lesssim \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} p_i(z) \mathbb{E}\left[\frac{G(z-S_{k+\varepsilon_k})}{1+\|z-S_k\|} \right]\stackrel{\eqref{pn.largex}}{\lesssim}
\sum_{z\in \mathbb{Z}^5} \frac {\sqrt{\varepsilon_k}}{1+\|z\|^4}\, \mathbb{E}\left[\frac{G(z-S_{k+\varepsilon_k})}{1+\|z-S_k\|} \right] \\
&\stackrel{\eqref{Green}}{\lesssim} \mathbb{E}\left[\frac{\sqrt{\varepsilon_k}}{(1+\|S_{k+\varepsilon_k}\|^2)(1+\|S_k\|)} \right]
\stackrel{\eqref{exp.Green.x}}{\lesssim} \mathbb{E}\left[\frac{\sqrt{\varepsilon_k}}{1+\|S_k\|^3} \right] \stackrel{\eqref{exp.Green}}{\lesssim} \frac{\sqrt{\varepsilon_k}}{k^{3/2}}, \end{align} and likewise, \begin{align*} & \mathbb{P}[\overline \tau_1\le \widetilde \tau_2<\infty] \stackrel{\eqref{lem.hit.1}}{\le} \sum_{j\ge 0}\sum_{i=0}^{\varepsilon_k}\mathbb{E}\left[G(S_k + S_i^2 - S_j^1) G(S_j^1- S_{k+\varepsilon_k})\right] \\ & = \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5}\mathbb{E}\left[G(z) G(S_k + S_i^2 - z) G(z- S_{k+\varepsilon_k})\right] \\
& \le C\sum_{i=0}^{\varepsilon_k} \mathbb{E}\left[\frac 1{1+\|S_k + S_i^2\|^3}\left(\frac 1{1+\|S_{k+\varepsilon_k}\|} +\frac 1{1+ \|S_{k+\varepsilon_k}-S_k-S_i^2\|}\right)\right]\\
& \stackrel{\eqref{exp.Green},\, \eqref{exp.Green.x}}{\le} C\mathbb{E}\left[\frac {\varepsilon_k}{1+\|S_k\|^4}\right] +C \mathbb{E}\left[\frac {\sqrt{\varepsilon_k}}{1+\|S_k\|^3} \right]= \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\right). \end{align*} Additionally, it follows directly from \eqref{lem.hit.3} that $$\mathbb{P}[\overline \tau_2 \le \widetilde \tau_1<\infty] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}, \quad \text{and} \quad \mathbb{P}[\widetilde \tau_2 \le \overline \tau_1<\infty] \lesssim \frac{1}{\varepsilon_k \sqrt k},$$ which altogether yields
$$|\mathbb{P}[\overline \tau_1<\infty,\, \overline \tau_2<\infty] - \mathbb{P}[\tau_1<\infty,\, \tau_2<\infty]| \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}} + \frac{1}{\varepsilon_k \sqrt k}.$$ Similar computations give also \begin{equation}\label{tau1tau2} \mathbb{P}[\overline \tau_1<\infty, \, \overline \tau_2<\infty] \lesssim \frac 1{\sqrt{k \varepsilon_k}}. \end{equation} Next, using \eqref{ZkZ'k} and the Markov property, we get
$$\mathbb{E}[|Z_k-Z'_k|{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}] \lesssim \frac 1{\varepsilon_k^{9/8}\sqrt k}.$$ Thus, for $\varepsilon_k \ge k^{5/6}$, \begin{align*}
\left|\mathbb{E}[Z_0Z_k\varphi_3\psi_3] - \Delta_{3,3}\right| & \le \mathbb{P}[\tau_{0,1}<\infty, \tau_1<\infty, \tau_2<\infty] +\mathbb{P}[\tau_{0,2}<\infty, \tau_1<\infty, \tau_2<\infty] \\ & \qquad + \mathbb{P}[\widetilde \tau_{0,2}<\infty, \tau_1<\infty, \tau_2<\infty] + o\left(\frac 1k\right), \end{align*} where $\tau_{0,1}$ is as defined in the proof of Lemma \ref{lem.var.1}, $$\tau_{0,2} : =\inf\{n\ge \sqrt{\varepsilon_k} : S_k + S_n^2 \in \mathcal{R}[k-\varepsilon_k,k+\varepsilon_k]\},$$ and $$\widetilde \tau_{0,2} : =\inf\{n\le \sqrt{\varepsilon_k} : S_k + S_n^2 \in \mathcal{R}[k-\varepsilon_k,k-\varepsilon_k^{3/4}]\cup \mathcal{R}[k+\varepsilon_k^{3/4},k+\varepsilon_k]\}.$$ Applying \eqref{lem.hit.3} twice already shows that $$\mathbb{P}[\widetilde \tau_{0,2}<\infty,\, \tau_1<\infty] \lesssim \frac{1}{\sqrt k} \cdot \mathbb{P}[\widetilde \tau_{0,2}<\infty] \lesssim \frac{1}{\sqrt{k}\varepsilon_k^{5/8}} = o\left(\frac 1k\right). $$ Then, notice that \eqref{tilde1bar2} entails $$\mathbb{P}[\mathcal{R}[k+\varepsilon_k,\infty) \cap \mathcal{R}^1[0,\tau_{0,1}]\neq \varnothing, \, S^1_{\tau_{0,1}} \in \mathcal{R}[-\varepsilon_k,0]] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}.$$ On the other hand, \begin{align*} &\mathbb{P}[\mathcal{R}[k+\varepsilon_k,\infty) \cap \mathcal{R}^1[0,\tau_{0,1}]\neq \varnothing, S^1_{\tau_{0,1}} \in \mathcal{R}[0,\varepsilon_k]] \\
\stackrel{\eqref{lem.hit.1}}{\le} & \sum_{i=0}^{\varepsilon_k} \sum_{j=k+\varepsilon_k}^\infty \mathbb{E}[G(S_i-S_{k+j})G(S_{k+j} - S_k)] = \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} \mathbb{E}[G(S_i-S_k + z)G(z)G_{\varepsilon_k}(z)] \\ \stackrel{\eqref{exp.Green}}{\lesssim} & \frac{\varepsilon_k}{k^{3/2}} \sum_{z\in \mathbb{Z}^5} G(z)G_{\varepsilon_k}(z)\stackrel{\text{Lemma }\ref{lem.upconvolG}}{\lesssim} \frac{\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} By \eqref{lem.hit.1} and \eqref{exp.Green}, one has with $\widetilde \mathcal{R}_\infty$ an independent copy of $\mathcal{R}_\infty$, \begin{align*} & \mathbb{P}[\tau_{0,1}<\infty, \tau_2<\infty, \mathcal{R}[k+\varepsilon_k,\infty) \cap \mathcal{R}^1[\tau_{0,1},\infty)\neq \varnothing ] \\ \lesssim & \frac{1}{\sqrt{\varepsilon_k}} \max_{-\varepsilon_k\le i\le \varepsilon_k}\mathbb{P}[\tau_2<\infty,\, \mathcal{R}[k+\varepsilon_k,\infty) \cap (S_i+ \widetilde \mathcal{R}_\infty) \neq \varnothing ] \lesssim \frac{1}{\varepsilon_k\sqrt{k}}, \end{align*} where the last equality follows from \eqref{tau1tau2}. Thus $$\mathbb{P}[\tau_{0,1}<\infty, \tau_1<\infty, \tau_2<\infty] =o\left(\frac 1k\right).$$ In a similar fashion, one has $$\mathbb{P}[\tau_{0,2}<\infty, \tau_2\le \tau_1<\infty] \stackrel{\eqref{lem.hit.3}}{\lesssim} \frac{1}{\sqrt k} \mathbb{P}[\tau_{0,2}<\infty, \tau_2<\infty] \stackrel{\eqref{tau01tau2}}{\lesssim} \frac{1}{\varepsilon_k^{3/4}\sqrt{k}},$$ as well as, \begin{align*} & \mathbb{P}\left[\tau_{0,2}<\infty, \, \tau_1\le \tau_2<\infty,\, S_{\tau_2}\in (S_k+\mathcal{R}^2[0, \tau_{0,2}])\right] \\
\stackrel{\eqref{lem.hit.1}}{\le} & \sum_{i=k-\varepsilon_k}^{k+\varepsilon_k} \sum_{j\ge 0} \sum_{\ell \ge 0} \mathbb{E}[G(S_i-\widetilde S_j - S^1_\ell) G(\widetilde S_j + S^1_\ell-S_k) G(S^1_\ell - S_{k+\varepsilon_k})] \\ \le & \sum_{i=k-\varepsilon_k}^{k+\varepsilon_k} \sum_{\ell \ge 0} \sum_{z\in\mathbb{Z}^5} \mathbb{E}[G(z) G(S_i- S^1_\ell - z) G(z + S^1_\ell-S_k) G(S^1_\ell - S_{k+\varepsilon_k})]\\
\stackrel{\text{Lemma }\ref{lem.upconvolG}}{\lesssim} & \sum_{i=k-\varepsilon_k}^{k+\varepsilon_k} \sum_{\ell \ge 0}
\mathbb{E}\left[\frac{G(S^1_\ell - S_{k+\varepsilon_k})}{1+\|S^1_\ell - S_k\|^3} \left(\frac 1{1+\|S_\ell^1- S_i\|} + \frac 1{1+\|S_i-S_k\|}\right)\right]\\
\stackrel{\eqref{exp.Green}, \eqref{exp.Green.x}}{\lesssim} & \sum_{i=0}^{\varepsilon_k} \sum_{\ell \ge 0}
\left\{\mathbb{E}\left[\frac{\varepsilon_k^{-3/2}}{1+\|S^1_\ell - S_k\|^3} \left(\frac 1{1+\|S_\ell^1- S_{k-i}\|} + \frac 1{1+\|S_{k-i}-S_k\|}\right)\right] \right. \\
+& \left. \mathbb{E}\left[\frac{1}{(1+\|S^1_\ell - S_{k+i}\|^3)(1+\|S^1_\ell - S_k\|^3)} \left(\frac 1{1+\|S_\ell^1- S_{k+i}\|} + \frac 1{1+\|S_{k+i}-S_k\|}\right)\right] \right\}\\
\stackrel{\eqref{pn.largex}, \eqref{exp.Green.x}}{\lesssim} & \sum_{i=0}^{\varepsilon_k} \sum_{\ell \ge 0}
\left\{\mathbb{E}\left[\frac{\varepsilon_k^{-3/2}}{1+\|S^1_\ell - S_{k-i}\|^3} \left(\frac 1{1+\|S_\ell^1- S_{k-i}\|} + \frac 1{1+\sqrt{i}}\right)\right] \right. \\
& \qquad + \left. \mathbb{E}\left[\frac{(1+i)^{-1/2}}{1+\|S^1_\ell - S_k\|^6} \right] \right\} \\
\lesssim &\ \frac {\sqrt{\varepsilon_k}}{k^{3/2}} , \end{align*} and \begin{align*} & \mathbb{P}\left[\tau_{0,2}<\infty, \, \tau_1\le \tau_2<\infty,\, S_{\tau_2}\in (S_k+\mathcal{R}^2[\tau_{0,2},\infty))\right] \\
\stackrel{\eqref{Green.hit}}{\le} & \sum_{i=-\varepsilon_k}^{\varepsilon_k} \mathbb{E}\left[G(S_{k+i}-S_k - S^2_{\sqrt{\varepsilon_k}}) {\text{\Large $\mathfrak 1$}}\{\tau_1<\infty,\, \mathcal{R}[\tau_1,\infty) \cap (S_{k+i}+ \widetilde \mathcal{R}_\infty) \neq \varnothing\}\right]\\
\stackrel{\eqref{lem.hit.2}}{\lesssim} & \sum_{i=-\varepsilon_k}^{\varepsilon_k} \mathbb{E}\left[G(S_{k+i}-S_k - S^2_{\sqrt{\varepsilon_k}})
\frac{{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}}{1+ \|S_{\tau_1} - S_{k+i}\|} \right] \\
\stackrel{\eqref{lem.hit.1}}{\lesssim} & \sum_{i=-\varepsilon_k}^{\varepsilon_k}\sum_{j\ge k+\varepsilon_k} \mathbb{E}\left[\frac{G(S_{k+i}-S_k - S^2_{\sqrt{\varepsilon_k}}) G(S_j)}{1+ \|S_j - S_{k+i}\|} \right] \\
\lesssim & \sum_{i=0}^{\varepsilon_k} \sum_{z\in \mathbb{Z}^5} \left\{\mathbb{E}\left[\frac{G(S_{k-i}-S_k - S^2_{\sqrt{\varepsilon_k}}) G(S_k+z)G(z)}{1+ \|z + S_k - S_{k-i}\|} \right] \right. \\
& \qquad \left. + \mathbb{E}\left[\frac{G(S_{k+i}-S_k - S^2_{\sqrt{\varepsilon_k}}) G(S_{k+i}+z)G(z)}{1+ \|z\|} \right] \right\}\\
\lesssim & \sum_{i=0}^{\varepsilon_k} \left\{\mathbb{E}\left[\frac{G(S_{k-i}-S_k - S^2_{\sqrt{\varepsilon_k}}) }{1+ \|S_k\|^2} \right]
+ \mathbb{E}\left[\frac{G(S_{k+i}-S_k - S^2_{\sqrt{\varepsilon_k}}) }{1+ \|S_{k+i}\|^2} \right] \right\}\\
\stackrel{\eqref{exp.Green}, \eqref{exp.Green.x}}{\lesssim} & \frac{1}{\varepsilon_k^{3/4}}\sum_{i=0}^{\sqrt{\varepsilon_k}} \mathbb{E}\left[\frac{1}{1+ \|S_k\|^2} +\frac{1}{1+ \|S_{k+i}\|^2}\right] + \sum_{i = \sqrt{\varepsilon_k}}^{\varepsilon_k} \mathbb{E}\left[\frac{G(S_{k-i}-S_k)}{1+ \|S_k\|^2} +\frac{G(S_{k+i}-S_k)}{1+ \|S_{k+i}\|^2}\right]\\
\stackrel{\eqref{pn.largex}, \eqref{exp.Green}}{\lesssim} & \frac{1}{\varepsilon_k^{1/4}k} + \sum_{i = \sqrt{\varepsilon_k}}^{\varepsilon_k} \frac 1{i^{3/2}}\cdot \mathbb{E}\left[\frac{1}{1+ \|S_{k-i}\|^2} +\frac{1}{1+ \|S_k\|^2}\right] \lesssim \frac 1{\varepsilon_k^{1/4} k}. \end{align*} Thus at this point we have shown that \begin{eqnarray}\label{approx.Delta3}
\left|\mathbb{E}[Z_0Z_k\varphi_3\psi_3] - \Delta_{3,3}\right| = o\left(\frac 1k\right).
\end{eqnarray} Now define \begin{align*} \widetilde H(z_1,z_2,z_3) := \mathbb{P}\left[0\notin \mathcal{R}[1,\varepsilon_k^{3/4}], \right. & \widetilde \mathcal{R}[1,\sqrt{\varepsilon_k}]\cap \mathcal{R}[-\varepsilon_k^{ 3/4},\varepsilon_k^{ 3/4}]=\varnothing, \\
& \left. S_{\varepsilon_k^{3/4}}=z_1, S_{-\varepsilon_k^{3/4}}=z_3, \widetilde S_{\sqrt{\varepsilon_k}} = z_2\right],
\end{align*} and recall also the definition of $H(y_1,y_2)$ given in \eqref{Hy1y2}. One has $$\Delta_{3,3} = \sum H(y_1,y_2) \widetilde H(z_1,z_2,z_3) p_{k-\varepsilon_k - \varepsilon_k^{3/4}}(x - y_1+y_2+z_3-z_2) p_{\varepsilon_k- \varepsilon_k^{3/4}}(u-z_1+z_2) \varphi_{x,u},$$ where the sum runs over all $x,u,y_1,y_2,z_1,z_2,z_3\in \mathbb{Z}^5$, and \begin{align*} \varphi_{x,u} := \mathbb{P}[\overline \tau_1<\infty,\, \overline \tau_2<\infty\mid S_k = x,\, S_{k+\varepsilon_k} = x+u]. \end{align*} Note that the same argument as for \eqref{tau1tau2} gives also \begin{eqnarray}\label{phixu}
\varphi_{x,u} \lesssim \frac 1{1+\|u\|}\left(\frac 1{1+\|x+u\|} +\frac 1{1+\|x\|}\right). \end{eqnarray} Using this it is possible to see that in the expression of $\Delta_{3,3}$ given just above, one can restrict the sum to typical values of the parameters. Indeed, consider for instance the sum on atypically large values of $x$. More precisely, take $\chi_k$, such that $\varepsilon_k \chi_k^{2+1/4} =k$, and note that by \eqref{phixu}, \begin{align*}
& \sum_{\substack{\|x\|^2\ge k\chi_k \\ u,y_1,y_2,z_1,z_2,z_3}} H(y_1,y_2) \widetilde H(z_1,z_2,z_3) p_{k-\varepsilon_k - \varepsilon_k^{3/4}}(x - y_1+y_2+z_3-z_2) p_{\varepsilon_k- \varepsilon_k^{3/4}}(u-z_1+z_2) \varphi_{x,u}\\
& \le \mathbb{P}\left[ \|S_k-S_{\varepsilon_k}^1\|\ge \sqrt{k\chi_k}, \tau_1<\infty, \tau_2<\infty\right]\le \mathbb{P}\left[ \|S_k-S_{\varepsilon_k}^1\|\ge \sqrt{k\chi_k}, \tau_1<\infty, \overline \tau_2<\infty\right] \\
& \lesssim \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|S_k-S_{\varepsilon_k}^1\|\ge \sqrt{k\chi_k}\}}{1+\|S_{k+\varepsilon_k}-S_k\|} \left(\frac {1}{1+\|S_k-S^1_{\varepsilon_k}\|} + \frac 1{1+\|S_{k+\varepsilon_k}-S^1_{\varepsilon_k}\|}\right)\right] \\ & \lesssim \frac{1}{\chi_k^{5/4}\sqrt{k\varepsilon_k} }, \end{align*} where the last equality follows by applying Cauchy-Schwarz inequality and \eqref{Sn.large}. The other cases are entirely similar. Thus $\Delta_{3,3}$ is well approximated by the sums on typical values of the parameters (similarly as for $\Delta_0$ for instance), and then we can deduce with Theorem \ref{LCLT} and \eqref{phixu} that $$\Delta_{3,3} = \rho^2\cdot \mathbb{P}[\overline \tau_1<\infty,\, \overline \tau_2<\infty]+ o\left(\frac 1k\right).$$ Together with \eqref{approx.Delta3} and \eqref{Z03.2} this proves \eqref{cov.33.first}.
\underline{Step $2$.} For a (possibly random) time $T$, set $$\overline \tau_1\circ T := \inf \{n\ge T\vee \varepsilon_k : S_n \in \mathcal{R}^1_\infty\},\ \overline \tau_2\circ T := \inf \{n\ge T\vee \varepsilon_k : S_n \in (S_k+\mathcal{R}^2_\infty)\}. $$ Observe that \begin{equation}\label{main.tau.1} \mathbb{P}[\overline \tau_1\le \overline \tau_2<\infty] = \mathbb{P}[\overline \tau_1\le \overline \tau_2\circ \overline \tau_1<\infty] - \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2\le \overline \tau_2\circ \overline \tau_1\circ \overline \tau_2<\infty], \end{equation} and symmetrically, \begin{equation}\label{main.tau.2} \mathbb{P}[\overline \tau_2\le \overline \tau_1<\infty] = \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2<\infty] - \mathbb{P}[\overline \tau_1\le\overline \tau_2\circ \overline \tau_1\le \overline \tau_1\circ \overline \tau_2\circ \overline \tau_1<\infty]. \end{equation} Our aim here is to show that the two error terms appearing in \eqref{main.tau.1} and \eqref{main.tau.2} are negligible. Applying repeatedly \eqref{lem.hit.1} gives \begin{align*} E_1& := \mathbb{P}[\overline \tau_1\le\overline \tau_2\circ \overline \tau_1\le \overline \tau_1\circ \overline \tau_2\circ \overline \tau_1<\infty] \\ &\lesssim \sum_{j\ge 0} \sum_{\ell \ge 0} \sum_{m\ge 0} \mathbb{E}\left[ G(S_j^1 - S_k - S_\ell^2) G(S_k + S_\ell^2 - S_m^1) G(S_m^1 - S_{k+\varepsilon_k})\right]\\ & \stackrel{\eqref{exp.Green.x}}{\lesssim} \sum_{j\ge 0} \sum_{\ell \ge 0} \sum_{m\ge 0} \mathbb{E}\left[ G(S_j^1 - S_k - S_\ell^2) G(S_k + S_\ell^2 - S_m^1) G(S_m^1 - S_k)\right]\\ & \lesssim \sum_{j\ge 0} \sum_{m\ge 0} \sum_{z\in \mathbb{Z}^5} G(z) \mathbb{E}\left[ G(S_j^1 - S_k - z) G(S_k + z - S_m^1) G(S_m^1 - S_k)\right]. \end{align*}
$$\sum_{z\in \mathbb{Z}^5}G(z-x) G(z-y) G(z) \lesssim \frac 1{1+\|x\|^3} \left(\frac 1{1+\|y\|} + \frac {1}{1+ \|y-x\|} \right).$$ Thus, distinguishing also the two cases $j\le m$ and $m\le j$, we obtain \begin{align*}
E_1& \lesssim \sum_{j\ge 0} \sum_{m\ge 0}\mathbb{E}\left[\frac {G(S_m^1 - S_k)}{1+\|S_j^1-S_k\|^3} \left( \frac 1{1+\|S_m^1-S_k\|} + \frac 1{1+\|S_m^1- S_j^1\|}\right) \right] \\
& \lesssim \sum_{j\ge 0} \sum_{z\in \mathbb{Z}^5} G(z)\left\{ \mathbb{E}\left[\frac {G(z+S_j^1 - S_k)}{1+\|S_j^1-S_k\|^3} \left( \frac 1{1+\|z+S_j^1-S_k\|} + \frac 1{1+\|z\|}\right) \right] \right. \\
& \qquad \left. + \mathbb{E}\left[\frac {G(S_j^1 - S_k)}{1+\|z+S_j^1-S_k\|^3} \left( \frac 1{1+\|S_j^1-S_k\|} + \frac 1{1+\|z\|}\right) \right]\right\}\\
& \lesssim \sum_{j\ge 0} \mathbb{E}\left[\frac {1}{1+\|S_j^1-S_k\|^5} \right] \lesssim \mathbb{E}\left[\frac {\log (1+\|S_k\|)}{1+\|S_k\|^3}\right] \lesssim \frac{\log k}{k^{3/2}}. \end{align*} Similarly, \begin{align*} & \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2\le \overline \tau_2\circ \overline \tau_1\circ \overline \tau_2<\infty] \\ &\lesssim \sum_{j\ge 0} \sum_{\ell \ge 0} \sum_{m\ge 0} \mathbb{E}\left[ G(S_j^2 + S_k - S_\ell^1) G(S_\ell^1 - S_k- S_m^2) G(S_m^2 + S_k - S_{k+\varepsilon_k})\right]\\
& \stackrel{\eqref{exp.Green}, \eqref{exp.Green.x}}{\lesssim} \frac {1}{\sqrt{\varepsilon_k}} \sum_{j\ge 0} \sum_{\ell \ge 0} \sum_{m\ge 0}\mathbb{E}\left[ \frac{G(S_j^2 + S_k - S_\ell^1) G(S_\ell^1 - S_k- S_m^2) }{1+\|S_m^2\|^2} \right]\\
&\lesssim \frac{1}{\sqrt{\varepsilon_k}} \sum_{j\ge 0} \sum_{m\ge 0}\mathbb{E}\left[\frac 1{(1+\|S_m^2\|^2)(1+\|S_j^2+S_k\|^3)} \left( \frac 1{1+\|S_m^2+S_k\|} + \frac 1{1+\|S_m^2- S_j^2\|}\right) \right] \\
&\lesssim \frac{1}{\sqrt{\varepsilon_k}} \sum_{j\ge 0}\mathbb{E}\left[\frac 1{(1+\|S_j^2\|)(1+\|S_j^2+S_k\|^3)} +\frac 1{(1+\|S_j^2\|^2)(1+\|S_j^2+S_k\|^2)} \right] \\
&\lesssim \frac{1}{\sqrt{\varepsilon_k}}\cdot \mathbb{E}\left[\frac {\log (1+\|S_k\|)}{1+\|S_k\|^2}\right] \lesssim \frac{\log k}{k \sqrt{\varepsilon_k}}. \end{align*}
\underline{Step $3$.} We now come to the estimate of the two main terms in \eqref{main.tau.1} and \eqref{main.tau.2}. In fact it will be convenient to replace $\overline \tau_1$ in the first one by $$\widehat \tau_1:= \inf \{n\ge k : S_n\in \mathcal{R}_\infty^1\}.$$ The error made by doing this is bounded as follows: by shifting the origin to $S_k$, and using symmetry of the step distribution, we can write \begin{align*}
& \left| \mathbb{P}[\overline \tau_1\le \overline \tau_2\circ \overline \tau_1<\infty] - \mathbb{P}[\widehat \tau_1\le \overline \tau_2\circ \widehat \tau_1<\infty]\right| \le \mathbb{P}\left[\mathcal{R}_\infty^1\cap \mathcal{R}[k,k+\varepsilon_k] \neq \varnothing, \overline \tau_2<\infty\right] \\ & \stackrel{\eqref{Green.hit}}{\le} \mathbb{E}\left[\left(\sum_{i=0}^{\varepsilon_k} G(S_i- \widetilde S_k)\right) \left(\sum_{j=\varepsilon_k}^\infty G(S_j)\right)\right] \\ & = \mathbb{E}\left[\left(\sum_{i=0}^{\varepsilon_k} G(S_i- \widetilde S_k)\right) \left(\sum_{z\in \mathbb{Z}^5} G(z) G(z+S_{\varepsilon_k})\right)\right] \\
&\stackrel{\text{Lemma }\ref{lem.upconvolG}}{\lesssim} \sum_{i=0}^{\varepsilon_k} \mathbb{E}\left[ \frac{G(S_i- \widetilde S_k)}{1+\|S_{\varepsilon_k}\|}\right] \stackrel{\eqref{exp.Green}}{\lesssim} \frac {\varepsilon_k}{k^{3/2}}\cdot \mathbb{E}\left[ \frac 1{1+\|S_{\varepsilon_k}\|}\right] \lesssim \frac{\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} Moreover, using Theorem C, the Markov property and symmetry of the step distribution, we get for some constant $c>0$, \begin{align*} & \mathbb{P}[\widehat \tau_1\le \overline \tau_2\circ \widehat \tau_1<\infty]
= c \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\widehat \tau_1<\infty\}}{1+\mathcal{J}(S_{\widehat \tau_1} - S_k) }\right] + o\left(\frac 1k\right) \\
& = c \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\widehat \tau_1<\infty\}}{1+\mathcal{J}(S_{\widehat \tau_1}) }\right] + o\left(\frac 1k\right)
= c \sum_{x\in \mathbb{Z}^5}p_k(x) \, \mathbb{E}_{0,x} \left[F(S_\tau) {\text{\Large $\mathfrak 1$}}\{\tau<\infty\}\right] + o\left(\frac 1k\right), \end{align*} with $\tau$ the hitting time of two independent walks starting respectively from the origin and from $x$, and $F(z) := 1/(1+ \mathcal{J}(z))$. Note that the bound $o(1/k)$ on the error term in the last display comes from the fact that
$$\mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\widehat \tau_1<\infty\}}{1+\mathcal{J}(S_{\widehat \tau_1})} \right] \stackrel{\eqref{lem.hit.1}}{\lesssim} \sum_{j\ge 0} \mathbb{E}\left[\frac{G(\widetilde S_j-S_k)}{1+\|\widetilde S_j\|}\right] \lesssim \sum_{z\in \mathbb{Z}^5}
\mathbb{E}\left[\frac{G(z)G(z-S_k)}{1+\| z\|}\right] \lesssim \frac 1k. $$ Then by applying Theorem \ref{thm.asymptotic}, we get \begin{equation}\label{main.tau.1.2}
\mathbb{P}[\widehat \tau_1\le \overline \tau_2\circ \widehat \tau_1<\infty] = c_0 \sum_{x\in \mathbb{Z}^5} p_k(x) \sum_{z\in \mathbb{Z}^5} \frac{G(z)G(z-x)}{1+\mathcal J(z)} + o\left(\frac 1k\right), \end{equation} for some constant $c_0>0$. Likewise, by Theorem \ref{thm.asymptotic} one has for some constant $\nu\in (0,1)$, \begin{align*} \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2<\infty] & = c\, \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\overline \tau_2<\infty\} }{1+ \mathcal{J}(S_{\overline \tau_2})}\right] + \mathcal{O}\left(\mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\overline \tau_2<\infty\} }{1+ \mathcal{J}(S_{\overline \tau_2})^{1+\nu}} \right] \right). \end{align*} Furthermore, \begin{align*}
& \mathbb{E}\left[ \frac{{\text{\Large $\mathfrak 1$}}\{\overline \tau_2<\infty\} }{1+ \mathcal{J}(S_{\overline \tau_2})^{1+\nu}} \right] \lesssim \sum_{j\ge 0} \mathbb{E}\left[ \frac {G(S_j^2+S_k-S_{k+\varepsilon_k}) }{1+ \| S_j^2+S_k\|^{1+\nu}} \right] \\
& \stackrel{\eqref{exp.Green}, \eqref{exp.Green.x}}{\lesssim} \frac{1}{\sqrt{\varepsilon_k}}\sum_{j\ge 0} \mathbb{E}\left[ \frac {1 }{(1+\|S_j^2\|^2)(1+ \| S_j^2+S_k\|^{1+\nu})} \right] \\
& \lesssim \frac{1}{\sqrt{\varepsilon_k}} \mathbb{E}\left[\frac{\log (1+\|S_k\|)}{1+\|S_k\|^{1+\nu}}\right] \lesssim \frac {\log k}{ k^{(1+ \nu)/2}\sqrt{\varepsilon_k}}.
\end{align*} Therefore, taking $\varepsilon_k\ge k^{1-\nu/2}$, we get \begin{align}\label{main.tau.2.1}
\nonumber \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2<\infty] & = c\, \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\overline \tau_2<\infty\} }{1+ \mathcal{J}(S_{\overline \tau_2})}\right] + o\left(\frac 1k\right) \\ \nonumber & = c \sum_{u\in \mathbb{Z}^5} p_{\varepsilon_k}(u) \mathbb{E}_{0,u} \left[\frac{{\text{\Large $\mathfrak 1$}}\{\tau<\infty\} }{1+ \mathcal{J}(S_\tau - S_k)}\right] + o\left(\frac 1k\right) \\
& = c \sum_{u\in \mathbb{Z}^5} p_{\varepsilon_k}(u) \mathbb{E}_{0,u} \left[\widetilde F(S_\tau) {\text{\Large $\mathfrak 1$}}\{\tau<\infty\} \right] + o\left(\frac 1k\right), \end{align} with $\tau$ the hitting time of two independent walks starting respectively from the origin and from $u$, and $$\widetilde F(z):= \mathbb{E}\left[\frac 1{1+ \mathcal{J}(z-S_k)} \right]. $$ We claim that this function $\widetilde F$ satisfies \eqref{cond.F}, for some constant $C_{\widetilde F}$ which is independent of $k$. Indeed, first notice that
$$\widetilde F(z) \asymp \frac 1{1+\|z\| + \sqrt{k}},\quad \text{and}\quad \mathbb{E}\left[ \frac 1{1+\mathcal{J}(z-S_k)^2}\right] \asymp \frac 1{1+\|z\|^2 + k},$$ which can be seen by using Theorem \ref{LCLT}. Moreover, by the triangle inequality and the Cauchy--Schwarz inequality, \begin{align*}
|\widetilde F(y) - \widetilde F(z) | & \lesssim \mathbb{E}\left[\frac {\|y-z\|}{(1+\|y-S_k\|)(1+\|z-S_k\|)}\right] \\
& \lesssim \|y-z\| \, \mathbb{E}\left[\frac 1{1+\|y-S_k\|^2}\right]^{\frac 12} \mathbb{E}\left[\frac 1{1+\|z-S_k\|^2}\right]^{\frac 12} \\
& \lesssim \frac{\|y-z\|}{(1+\|y\|+\sqrt k)(1+\|z\| +\sqrt{k})} \lesssim \frac{\|y-z\|}{1+\|y\|}\cdot \widetilde F(z), \end{align*} which is the desired condition \eqref{cond.F}. Therefore, coming back to \eqref{main.tau.2.1} and applying Theorem \ref{thm.asymptotic} once more gives, \begin{align}\label{main.tau.2.1.bis} \nonumber \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2<\infty] & = c_0\sum_{u\in \mathbb{Z}^5} p_{\varepsilon_k}(u) \sum_{z\in \mathbb{Z}^5} G(z)G(z-u)\widetilde F(z) + o\left(\frac 1k\right) \\
& = c_0\sum_{u\in \mathbb{Z}^5} \sum_{x\in \mathbb{Z}^5}p_{\varepsilon_k}(u) p_k(x) \sum_{z\in \mathbb{Z}^5} \frac{G(z)G(z-u)}{1+\mathcal J(z-x)} + o\left(\frac 1k\right). \end{align} Similarly, one has \begin{align}\label{main.tau.product} \nonumber &\mathbb{P}[\overline \tau_1<\infty] \cdot \mathbb{P}[\overline \tau_2<\infty] =\mathbb{P}[\widehat \tau_1<\infty] \cdot \mathbb{P}[\overline \tau_2<\infty] + \mathcal{O}\left(\frac{\sqrt{\varepsilon_k}}{k^{3/2}}\right) \\ & = c_0 \sum_{u\in \mathbb{Z}^5} \sum_{x\in \mathbb{Z}^5} p_{\varepsilon_k}(u)p_k(x) \sum_{z\in \mathbb{Z}^5} \frac{G(z)G(z-u)}{1+\mathcal J(x)} + o\left(\frac 1k\right). \end{align} Note in particular that the constant $c_0$ that appears here is the same as in \eqref{main.tau.1.2} and \eqref{main.tau.2.1.bis}.
\underline{Step $4$.} We claim now that when one takes the difference between the two expressions in \eqref{main.tau.2.1.bis} and \eqref{main.tau.product}, one can remove the parameter $u$ from the factor $G(z-u)$ (and then absorb the sum over $u$). Indeed, note that for any $z$ with $\mathcal{J}(z)\le \mathcal{J}(x)/2$, one has
$$\left| \frac 1{1+\mathcal{J}(z+x)} + \frac 1{1+\mathcal{J}(z-x)} - \frac 2{1+\mathcal{J}(x)}\right| \lesssim \frac{\|z\|^2}{1+\|x\|^3}.$$ It follows that, for any $\chi_k\ge 2$, \begin{align*}
& \sum_{\substack{u,x,z\in \mathbb{Z}^5 \\ \mathcal{J}(z) \le \frac{\mathcal{J}(x)}{\chi_k}}} p_{\varepsilon_k}(u) p_k(x) G(z) G(z-u) \left|\frac 1{1+\mathcal{J}(z-x)}+\frac 1{1+\mathcal{J}(z+x)} - \frac 2{1+\mathcal{J}(x)}\right| \\
&\lesssim \sum_{x\in \mathbb{Z}^5} \frac{p_k(x)}{1+\|x\|^3} \sum_{\mathcal{J}(z) \le \mathcal{J}(x)/\chi_k } \frac{\mathbb{E}[G(z-S_{\varepsilon_k})]}{1+\|z\|} \stackrel{\eqref{exp.Green.x}}{\lesssim} \frac{1}{k\chi_k}. \end{align*} In the same way, for any $z$ with $\mathcal{J}(z) \ge 2\mathcal{J}(u)$, one has
$$|G(z-u) -G(z)| \lesssim \frac{\|u\|}{1+\|z\|^4},$$
$$\left|\frac 1{1+\mathcal{J}(z-x)} - \frac 1{1+\mathcal{J}(x)}\right| \lesssim \frac{\|z\|}{(1+\|x\|)(1+\|z-x\|)}.$$ Therefore, for any $\chi_k\ge 2$, \begin{align*}
& \sum_{\substack{u,x,z\in \mathbb{Z}^5 \\ \mathcal{J}(z) \ge (\mathcal{J}(u)\chi_k)\vee \frac{\mathcal{J}(x)}{\chi_k}}} p_{\varepsilon_k}(u) p_k(x) G(z) |G(z-u)-G(z)| \left|\frac 1{1+\mathcal{J}(z-x)} - \frac 1{1+\mathcal{J}(x)}\right| \\
&\lesssim \sqrt{\varepsilon_k} \sum_{x\in \mathbb{Z}^5} \frac{p_k(x)}{1+\|x\|} \sum_{\mathcal{J}(z) \ge \mathcal{J}(x)/\chi_k } \frac{1}{\|z\|^6(1+\|z-x\|)}
\stackrel{\eqref{exp.Green.x}}{\lesssim} \frac{\chi_k^2\sqrt{\varepsilon_k}}{k^{3/2}}. \end{align*} On the other hand by taking $\chi_k = (k/\varepsilon_k)^{1/6}$, we get using \eqref{pn.largex} and \eqref{Sn.large}, \begin{align*}
\sum_{\substack{u,x,z\in \mathbb{Z}^5 \\ \mathcal{J}(u)\ge \sqrt{\varepsilon_k}\chi_k }} p_{\varepsilon_k}(u) p_k(x) G(z) G(z-u) \left(\frac 1{1+\mathcal{J}(z-x)} + \frac 1{1+\mathcal{J}(x)}\right) \lesssim \frac{1}{\chi_k^5\sqrt{k\varepsilon_k}} = o \left(\frac 1k\right), \end{align*} \begin{align*} & \sum_{\substack{u,x,z\in \mathbb{Z}^5 \\ \mathcal{J}(x)\le \sqrt{k}/\chi_k }} p_{\varepsilon_k}(u) p_k(x) G(z) G(z-u) \left(\frac 1{1+\mathcal{J}(z-x)} + \frac 1{1+\mathcal{J}(x)}\right) = o \left(\frac 1k\right). \end{align*} As a consequence, since $\mathcal{J}(u)\le \sqrt{\varepsilon_k} \chi_k$ together with $\mathcal{J}(x)\ge \sqrt{k}/\chi_k$ implies $\mathcal{J}(u)\le \mathcal{J}(x)/\chi_k$, with our choice of $\chi_k$, we get as desired (using also the symmetry of the step distribution) that \begin{align}\label{main.tau.combined.1} & \mathbb{P}[\overline \tau_2\le \overline \tau_1\circ \overline \tau_2<\infty] - \mathbb{P}[\overline \tau_1<\infty] \cdot \mathbb{P}[\overline \tau_2<\infty] \\ \nonumber & = c_0 \sum_{x,z\in \mathbb{Z}^5} p_k(x) G(z)^2 \left(\frac 1{1+\mathcal{J}(z-x)} - \frac{1}{1+\mathcal J(x)}\right) + o \left(\frac 1k\right)\\ \nonumber &= \frac{c_0}{2} \sum_{x,z\in \mathbb{Z}^5} p_k(x) G(z)^2 \left(\frac 1{1+\mathcal{J}(z-x)} +\frac 1{1+\mathcal{J}(z+x)}- \frac{2}{1+\mathcal J(x)}\right) + o \left(\frac 1k\right). \end{align}
\underline{Step 5.} The previous steps show that $$\operatorname{Cov}\left(\{\overline \tau_1<\infty\} , \{\overline \tau_2<\infty\}\right) = c_0 \sum_{x,z\in \mathbb{Z}^5} p_k(x) \left(\frac{G(z)G(z-x)}{1+\mathcal{J}(z)} + \frac{G(z)^2}{1+\mathcal{J}(z-x)} - \frac{G(z)^2}{1+\mathcal{J}(x)}\right).$$ Now by approximating the series with an integral (recall \eqref{Green.asymp}), and doing a change of variables, we get with $u:=x/\mathcal{J}(x)$ and $v:=\Lambda^{-1} u$, and for some constant $c>0$ (that might change from line to line), \begin{align}\label{last.lem.integral} \nonumber & \sum_{z\in \mathbb{Z}^5}\left(\frac{G(z)G(z-x)}{1+\mathcal{J}(z)} + \frac{G(z)^2}{1+\mathcal{J}(z-x)} - \frac{G(z)^2}{1+\mathcal{J}(x)}\right) \\ \nonumber & \sim c \int_{\mathbb{R}^5} \left\{\frac{1}{\mathcal{J}(z)^4\cdot \mathcal{J}(z-x)^3} + \frac{1}{\mathcal{J}(z)^6} \left(\frac 1{\mathcal{J}(z-x)} -\frac 1{\mathcal{J}(x)}\right)\right\}\, dz \\ \nonumber & = \frac{c}{\mathcal{J}(x)^2} \int_{\mathbb{R}^5}\left\{\frac{1}{\mathcal{J}(z)^4\cdot \mathcal{J}(z-u)^3} + \frac{1}{\mathcal{J}(z)^6} \left(\frac 1{\mathcal{J}(z-u)} -1\right)\right\}\, dz \\
& = \frac{c}{\mathcal{J}(x)^2} \int_{\mathbb{R}^5} \left\{\frac{1}{\|z\|^4\cdot \|z-v\|^3} + \frac{1}{\|z\|^6}\left(\frac 1{\|z-v\|} -1\right)\right\} \, dz. \end{align} Note that the last integral is convergent and independent of $v$ (and thus of $x$ as well) by rotational invariance. Therefore, since $\sum_{x\in \mathbb{Z}^5} p_k(x) / \mathcal{J}(x)^2\sim \sigma/k$, for some constant $\sigma>0$ (for instance by applying Theorem \ref{LCLT}), it only remains to show that the integral above is positive.
To see this, we use that the map $z\mapsto \|z\|^{-3}$ is harmonic outside the origin, and thus satisfies the mean value property on $\mathbb{R}^5\setminus \{0\}$. In particular, using also the rotational invariance, this shows (with $\mathcal{B}_1$ the unit Euclidean ball and $\partial \mathcal{B}_1$ the unit sphere), \begin{align}\label{last.lem2.a}
\int_{\mathcal{B}_1^c} \frac{1}{\|z\|^4\cdot \|z-v\|^3}\, dz &= \frac 1{|\partial \mathcal{B}_1|} \int_{\partial \mathcal{B}_1} \, dv \int_{\mathcal{B}_1^c} \frac{1}{\|z\|^4\cdot \|z-v\|^3}\, dz\\
\nonumber & = \int_{\mathcal{B}_1^c} \frac 1{\|z\|^7} \, dz = c_1 \int_1^\infty \frac 1{r^3}\, dr = \frac {c_1}2, \end{align} for some constant $c_1>0$. Likewise, \begin{equation}\label{last.lem2.b}
\int_{\mathcal{B}_1} \frac{1}{\|z\|^4\cdot \|z-v\|^3}\, dz = \frac{c_1}{|\partial \mathcal{B}_1|} \int_0^1 \, dr \int_{\partial \mathcal{B}_1} \frac{du}{\|ru - v\|^3} = c_1, \end{equation} with the same constant $c_1$ as in the previous display. On the other hand \begin{equation}\label{last.lem2.c}
\int_{\mathcal{B}_1^c} \frac 1{\|z\|^6}\, dz = c_1 \int_1^\infty \frac 1{r^2} \, dr = c_1. \end{equation} Furthermore, using again the rotational invariance, \begin{align}\label{last.lem.2}
& \int_{\mathcal{B}_1} \frac{1}{\|z\|^6}\left(\frac 1{\|z-v\|} -1\right) \, dz = \int_{\mathcal{B}_1} \frac{1}{\|z\|^6}\left(\frac 1{2\|z-v\|} + \frac 1{2\|z+v\|} -1\right) \, dz \\
\nonumber & = \frac{c_1}{|\partial \mathcal{B}_1|} \int_0^1 \frac {dr}{r^2} \int_{\partial \mathcal{B}_1}\left(\frac 1{2\|v-ru\|} + \frac 1{2\|v+ru\|} -1\right)\, du. \end{align} Now we claim that for any $u,v\in \partial \mathcal{B}_1$, and any $r\in (0,1)$, \begin{equation}\label{claim.geom}
\frac 12\left(\frac 1{\|v-ru\|} + \frac 1{\|v+ru\|}\right) \ge \frac{1}{\sqrt{1+r^2}}. \end{equation} Before we prove this claim, let us see how we can conclude the proof. It suffices to notice that, if $f(s) = (1+s^2)^{-1/2}$, then $f'(s) \ge - s$, for all $s\in (0,1)$, and thus \begin{equation}\label{lower.sqrt} \frac 1{\sqrt{1+r^2}} - 1 = f(r) - f(0) \ge - \int_0^r s\, ds \ge -r^2/2. \end{equation} Inserting this and \eqref{claim.geom} in \eqref{last.lem.2} gives
$$ \int_{\mathcal{B}_1} \frac{1}{\|z\|^6}\left(\frac 1{\|z-v\|} -1\right) \, dz \ge -\frac {c_1}{2}. $$ Together with \eqref{last.lem2.a}, \eqref{last.lem2.b}, \eqref{last.lem2.c}, this shows that the integral in \eqref{last.lem.integral} is indeed positive. Thus all that remains is to prove the claim \eqref{claim.geom}. Since the origin, $v$, $v+ru$, and $v-ru$ all lie in a common two-dimensional plane, one can always work in the complex plane, and assume for simplicity that $v= 1$, and $u=e^{i\theta}$, for some $\theta\in [0,\pi/2]$. In this case, the claim is equivalent to showing that $$\frac 12 \left( \frac 1{\sqrt{1+ r^2 + 2r\cos \theta} }+ \frac 1{\sqrt{1+ r^2 - 2r\cos \theta}}\right) \ge \frac 1{\sqrt{1+r^2}},$$ which is easily obtained using that the left-hand side is a decreasing function of $\theta$. This concludes the proof of Lemma \ref{lem.var.2}.
$\square$
\begin{remark}\emph{Note that the estimate of the covariance mentioned in the introduction, in case $(ii)$, can now be done as well. Indeed, denoting by $$\widehat \tau_2:= \inf\{n\ge k+1\, :\, S_n\in S_k + \mathcal{R}_\infty^2\},$$ it only remains to show that
$$\left|\mathbb{P}[\widehat \tau_2\le k+\varepsilon_k, \, \overline \tau_1<\infty ] - \mathbb{P}[\widehat \tau_2\le k+ \varepsilon_k] \cdot \mathbb{P}[\overline \tau_1<\infty]\right| = o\left(\frac 1k\right).$$ Using similar estimates as above we get, with $\chi_k = (k/\varepsilon_k)^{4/5}$, \begin{align*}
& \left|\mathbb{P}[\widehat \tau_2\le k+ \varepsilon_k, \overline \tau_1<\infty ] - \mathbb{P}[\widehat \tau_2\le k+ \varepsilon_k] \cdot \mathbb{P}[\overline \tau_1<\infty]\right| \\
\stackrel{\eqref{Sn.large}}{=} & \left|\mathbb{P}[\widehat \tau_2\le k+ \varepsilon_k, \|S_{\widehat \tau_2} - S_k\|\le \sqrt{\varepsilon_k\chi_k}, \overline \tau_1<\infty ] - \mathbb{P}[\widehat \tau_2\le k+ \varepsilon_k] \mathbb{P}[\overline \tau_1<\infty]\right| + \mathcal{O}\left(\frac 1{\sqrt k \chi_k^{\frac 52}}\right)\\
=& \sum_{\substack{x\in \mathbb{Z}^5 \\ \|y\|\le \sqrt{\varepsilon_k \chi_k}} } \left|\frac{p_k(x+y) + p_k(x-y)}{2}-p_k(x)\right| \mathbb{P}[\widehat \tau_2\le k+\varepsilon_k, S_{\widehat \tau_2} -S_k= y] \varphi_x + \mathcal{O}\left(\frac 1{\sqrt k \chi_k^{\frac 52}}\right)\\
\lesssim & \frac 1{k^{\frac 32}} \mathbb{E}\left[\|S_{\widehat \tau_2}-S_k\|^2 {\text{\Large $\mathfrak 1$}}\{\|S_{\widehat \tau_2}-S_k\|\le \sqrt{\varepsilon_k\chi_k} \}\right] + \frac 1{\sqrt k \chi_k^{\frac 52}}\lesssim \frac 1{\sqrt k \chi_k^{\frac 52}}+ \frac {\sqrt{\varepsilon_k\chi_k}}{k^{\frac 32}}, \end{align*}
using that by \eqref{lem.hit.2} and the Markov property, one has $\mathbb{P}[\|S_{\widehat \tau_2}-S_k\|\ge t] \lesssim \frac 1t $.
} \end{remark}
\subsection{Proof of Lemma \ref{lem.var.3}} We consider only the case of $\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_1)$, the other one being entirely similar. Define $$\tau_1:= \inf\{n\ge 0 : S_n^1\in \mathcal{R}[\varepsilon_k,k]\},\ \tau_2:=\inf\{n\ge 0 : S_k+S_n^2\in \mathcal{R}(-\infty, 0] \},$$ with $S^1$ and $S^2$ two independent walks, independent of $S$. The first step is to see that $$\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_1)= \rho^2\cdot \operatorname{Cov}({\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\},{\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\}) +o\left(\frac 1k\right),$$ with $\rho$ as in \eqref{def.rho}. Since the proof of this fact has exactly the same flavor as in the two previous lemmas, we omit the details and directly move to the next step.
Let $\eta\in (0,1/2)$ be some fixed constant (which will be sent to zero later). Notice first that \begin{align}\label{eta.tau1}
& \nonumber \mathbb{P}\left[S^1_{\tau_1} \in \mathcal{R}[(1-\eta) k,k],\, \tau_2<\infty\right] \stackrel{\eqref{Green.hit}, \eqref{lem.hit.2}}{\lesssim} \sum_{i=\lfloor (1-\eta)k\rfloor }^k
\mathbb{E}\left[\frac{G(S_i)}{1+\|S_k\|}\right] \\ & \stackrel{\eqref{exp.Green}}{\lesssim} \sum_{i=\lfloor (1-\eta)k\rfloor }^k \frac{\mathbb{E}\left[G(S_i)\right] }{1+\sqrt{k-i}}
\stackrel{\eqref{exp.Green}}{\lesssim}\frac{\sqrt{\eta}}{k}. \end{align} Next, fix another constant $\delta\in (0,1/4)$ (which will be soon chosen small enough). Then let $N: = \lfloor (1-\eta)k/\varepsilon_k^{1-\delta}\rfloor$, and for $i=1,\dots,N$, define $$\tau_1^i:= \inf\{n\ge 0 \, :\, S_n^1 \in \mathcal{R}[k_i,k_{i+1}]\},\quad \text{with}\quad k_i:= \varepsilon_k + i\lfloor \varepsilon_k^{1-\delta}\rfloor .$$ We claim that with sufficiently high probability, at most one of these hitting times is finite. Indeed, for $i\le N$, set $I_i := \{k_i,\dots,k_{i+1}\}$, and notice that \begin{align*} &\sum_{1\le i< j\le N} \mathbb{P}[\tau_1^i <\infty, \, \tau_1^j<\infty,\, \tau_2<\infty] \\
\le & \sum_{1\le i< j\le N} \left(\mathbb{P}[\tau_1^i \le \tau_1^j<\infty,\, \tau_2<\infty] + \mathbb{P}[\tau_1^j \le \tau_1^i<\infty,\, \tau_2<\infty]\right) \\
\stackrel{\eqref{lem.hit.1}, \eqref{lem.hit.2}}{\lesssim} & \sum_{\substack{i=1,\dots,N, j\neq i \\ \ell \in I_i, m\in I_j}} \mathbb{E}\left[\frac{G(S_\ell - S_m) G(S_m)}{1+\|S_k\|} \right] \lesssim \frac{1}{\sqrt{k}} \sum_{\substack{i=1,\dots,N, j\neq i \\ \ell \in I_i, m\in I_j}} \mathbb{E}\left[G(S_\ell - S_m) G(S_m)\right]\\
\stackrel{\eqref{exp.Green}, \eqref{exp.Green.x}}{\lesssim} & \frac{1}{\sqrt{k}} \sum_{\substack{i=1,\dots, N, j\neq i \\ \ell \in I_i, m\in I_j}} \frac{1}{(1+ |m-\ell |^{3/2}) (m\wedge \ell)^{3/2}} \lesssim \frac {N\varepsilon_k^{(1-\delta)/2}}{\varepsilon_k^{3/2}\sqrt k} = o\left(\frac 1k\right), \end{align*} where the last equality follows by assuming $\varepsilon_k\ge k^{1-c}$, with $c>0$ small enough. Therefore, as claimed $$\mathbb{P}[\tau_1<\infty,\, \tau_2<\infty ] = \sum_{i=1}^{N} \mathbb{P}[\tau_1^i <\infty, \, \tau_2<\infty] + o\left(\frac 1k\right), $$ and one can show as well that, $$\mathbb{P}[\tau_1<\infty]\cdot \mathbb{P}[ \tau_2<\infty ] = \sum_{i=1}^{N-2} \mathbb{P}[\tau_1^i <\infty] \cdot \mathbb{P}[\tau_2<\infty] +o\left(\frac 1k\right).$$ Next, observe that for any $i\le N$, using H\"older's inequality at the third line, \begin{align*}
&\mathbb{P}\left[\tau_1^i <\infty, \tau_2<\infty, \|S_{k_{i+1}} - S_{k_i}\|^2\ge \varepsilon_k^{1-\delta/2}\right] \stackrel{\eqref{Green.hit}, \eqref{lem.hit.2}}{\lesssim} \sum_{j=k_i}^{k_{i+1}} \mathbb{E}\left[ \frac{G(S_j){\text{\Large $\mathfrak 1$}}\{\|S_{k_{i+1}} - S_{k_i}\|^2\ge \varepsilon_k^{1-\delta/2}\} }{1+\|S_k\|}\right]\\
& \stackrel{\eqref{exp.Green}}{\lesssim} \frac {1}{\sqrt k} \sum_{j=k_i}^{k_{i+1}} \mathbb{E}\left[ G(S_j){\text{\Large $\mathfrak 1$}}\{\|S_{k_{i+1}} - S_{k_i}\|^2\ge \varepsilon_k^{1-\delta/2}\} \right]\\
& \lesssim \frac {1}{\sqrt k}\left( \sum_{j=k_i}^{k_{i+1}} \mathbb{E}\left[\frac 1{1+\|S_j\|^4}\right]^{3/4}\right) \cdot \mathbb{P}\left[\|S_{k_{i+1}} - S_{k_i}\|^2\ge \varepsilon_k^{1-\delta/2}\right]^{1/4} \\ & \stackrel{\eqref{Sn.large}}{\lesssim} \frac {\varepsilon_k^{1-\delta}}{ k_i^{3/2}\sqrt{k}} \cdot \frac 1{\varepsilon_k^{5\delta/16}}= o\left(\frac 1{Nk}\right), \end{align*} by choosing again $\varepsilon_k\ge k^{1-c}$, with $c$ small enough. Similarly, one has using Cauchy-Schwarz, \begin{align*}
& \mathbb{P}\left[\tau_1^i <\infty, \tau_2<\infty, \|S_k - S_{k_{i+1}}\|^2\ge k \varepsilon_k^{\delta/2}\right] \lesssim \sum_{j=k_i}^{k_{i+1}} \mathbb{E}\left[ \frac{G(S_j){\text{\Large $\mathfrak 1$}}\{\|S_k - S_{k_{i+1}}\|^2 \ge k \varepsilon_k^{\delta/2}\} }{1+\|S_k\|}\right]\\
& \lesssim \frac{1}{\varepsilon_k^{5\delta/8}}\sum_{j=k_i}^{k_{i+1}} \mathbb{E}\left[ G(S_j) \mathbb{E}\left[\frac 1{1+\|S_k\|^2} \mid S_j \right]^{1/2}\right]\lesssim \frac {\varepsilon_k^{1-\delta}}{ k_i^{3/2}\sqrt{k}} \cdot \frac 1{\varepsilon_k^{5\delta/8}}= o\left(\frac 1{Nk}\right). \end{align*} As a consequence, using also Theorem \ref{LCLT}, one has for $i\le N$, and with $\ell := k_{i+1}-k_i$,
\begin{align*} & \mathbb{P}[\tau_1^i <\infty, \, \tau_2<\infty] \\
& =\sum_{x\in \mathbb{Z}^5} \sum_{\substack{ \|z\|^2 \le k \varepsilon_k^{\delta/2} \\ \|y\|^2 \le \varepsilon_k^{1-\delta/2} }} p_{k_i}(x)\mathbb{P}_{0,x}\left[ \mathcal{R}_\infty \cap \widetilde{\mathcal{R}}[0,\ell] \neq\varnothing, \widetilde S_{\ell} = y\right] p_{k-k_{i+1}}(z-y) \varphi_{x+z} + o\left(\frac 1{Nk}\right)\\
& = \sum_{x\in \mathbb{Z}^5} \sum_{\substack{ \|z\|^2 \le k \varepsilon_k^{\delta/2} \\ \|y\|^2 \le \varepsilon_k^{1-\delta/2} }} p_{k_i}(x) \mathbb{P}_{0,x} \left[ \mathcal{R}_\infty \cap \widetilde{\mathcal{R}}[0,\ell] \neq\varnothing, \widetilde S_\ell = y\right] p_{k-k_i}(z) \varphi_{x+z} + o\left(\frac 1{Nk}\right)\\ & = \sum_{x,z\in \mathbb{Z}^5} p_{k_i}(x) \mathbb{P}_{0,x} \left[ \mathcal{R}_\infty \cap \widetilde{\mathcal{R}}[0,\ell] \neq\varnothing \right] p_{k-k_i}(z) \varphi_{x+z} + o\left(\frac 1{Nk}\right).
\end{align*} Moreover, Theorem \ref{thm.asymptotic} yields for any nonzero $x\in \mathbb{Z}^5$, and some $\nu>0$,
\begin{align}\label{RtildeRell}
\mathbb{P}_{0,x} \left[ \mathcal{R}_\infty \cap \widetilde{\mathcal{R}}[0,\ell] \neq\varnothing \right] = \frac{\gamma_5}{\kappa}\cdot \mathbb{E}\left[ \sum_{j=0}^{\ell} G(x+\widetilde S_j) \right] + \mathcal{O}\left(\frac {\log(1+ \|x\|)}{\|x\| (\|x\|\wedge \ell)^{\nu}}\right). \end{align} Note also that for any $\varepsilon \in [0,1]$,
\begin{align*}
\sum_{x,z\in \mathbb{Z}^5} \frac{p_{k_i}(x)}{1+\|x\|^{1+\varepsilon}} p_{k-k_i}(z) \varphi_{x+z} = \mathbb{E}\left[\frac 1{(1+\|S_{k_i}\|^{1+\varepsilon})(1+\|S_k\|)}\right]
\lesssim \frac 1{\sqrt{k_i}^{1+\varepsilon} \sqrt k},
\end{align*}
and thus
$$\sum_{i=1}^N \sum_{x,z\in \mathbb{Z}^5} \frac{p_{k_i}(x)}{1+\|x\|^{1+\varepsilon}} p_{k-k_i}(z) \varphi_{x+z} = \mathcal{O}\left(\frac 1{\ell k^{\varepsilon}}\right).$$
In particular, the error term in \eqref{RtildeRell} can be neglected, if we take for instance $\delta=\nu/2$, and $\varepsilon_k\ge k^{1-c}$, with $c$ small enough. It now remains to estimate the other term in \eqref{RtildeRell}. By \eqref{pn.largex}, for any $x\in \mathbb{Z}^5$ and $j\ge 0$, \begin{equation*}
\mathbb{E}[G(x+S_j)] = G_j(x) = G(x)-\mathcal O\left(\frac{j}{1+ \|x\|^d}\right). \end{equation*} As will become clear, the error term can be neglected here. Furthermore, similar computations as above show that for any $j\in \{k_i,\dots,k_{i+1}\}$, $$\sum_{x,z\in \mathbb{Z}^5} p_{k_i}(x) G(x) p_{k-k_i}(z) \varphi_{x+z} = \sum_{x,z\in \mathbb{Z}^5} p_j(x) G(x) p_{k-j}(z) \varphi_{x+z}+ o\left(\frac 1{Nk}\right).$$ Altogether, and applying once more Theorem \ref{thm.asymptotic}, this gives for some $c_0>0$,
\begin{equation}\label{cov.3.sum}
\sum_{i=1}^N \mathbb{P}[\tau_1^i <\infty, \tau_2<\infty] = \sum_{j=\varepsilon_k}^{\lfloor (1-\eta) k\rfloor} \mathbb{E}[G(S_j)\varphi_{S_k}]+ o\left(\frac 1k\right) =c_0 \sum_{j=\varepsilon_k}^{\lfloor (1-\eta) k\rfloor} \mathbb{E}\left[\frac{G(S_j)}{1+\mathcal{J}(S_k)}\right]+ o\left(\frac 1k\right). \end{equation} We treat the first terms of the sum separately. Concerning the other ones, notice that by \eqref{Green.asymp} and Donsker's invariance principle, one has \begin{align*}
\sum_{j=\lfloor \eta k\rfloor}^{\lfloor (1-\eta)k\rfloor} \mathbb{E}\left[\frac{G(S_j)}{1+\mathcal{J}(S_k)}\right] & = \frac 1k \int_\eta^{1-\eta} \mathbb{E}\left[\frac{G(\Lambda \beta_s)}{\mathcal{J}(\Lambda \beta_1)}\right] \, ds+ o\left(\frac 1k\right) \\
& = \frac {c_5}k \int_\eta^{1-\eta} \mathbb{E}\left[\frac{1}{\|\beta_s\|^3 \cdot \|\beta_1\|}\right] \, ds+ o\left(\frac 1k\right), \end{align*} with $(\beta_s)_{s\ge 0}$ a standard Brownian motion, and $c_5>0$ the constant that appears in \eqref{Green.asymp}. In the same way, one has \begin{align*} \sum_{i=1}^N \mathbb{P}[\tau_1^i <\infty] \cdot \mathbb{P}[\tau_2<\infty] & = c_0 \sum_{j=\varepsilon_k}^{\lfloor \eta k\rfloor} \mathbb{E}[G(S_j)]\mathbb{E}\left[\frac{1}{1+\mathcal{J}(S_k)}\right] \\
& \quad + \frac{c_0c_5}{k} \int_\eta^{1-\eta}\mathbb{E}\left[\frac{1}{\|\beta_s\|^3}\right] \mathbb{E}\left[ \frac 1{\|\beta_1\|}\right] \, ds+ o\left(\frac 1k\right), \end{align*} with the same constant $c_0$, as in \eqref{cov.3.sum}. We next handle the sum of the first terms in \eqref{cov.3.sum} and show that its difference with the sum from the previous display is negligible. Indeed, observe already that with $\chi_k := k/(\eta\varepsilon_k)$,
$$\sum_{j=\varepsilon_k}^{\lfloor \eta k\rfloor} \mathbb{E}\left[\frac{G(S_j){\text{\Large $\mathfrak 1$}}\{\|S_j\|\ge \eta^{1/4} \sqrt k\}}{1+\mathcal{J}(S_k)}\right]+ \mathbb{E}\left[\frac{G(S_j){\text{\Large $\mathfrak 1$}}\{\|S_k\|\ge \sqrt{k\chi_k}\}}{1+\mathcal{J}(S_k)}\right] \lesssim \frac{\eta^{1/4}}{k}. $$ Thus one has, using Theorem \ref{LCLT}, \begin{align}\label{eta.tau1.bis}
&\sum_{j=\varepsilon_k}^{\lfloor \eta k\rfloor}\left| \mathbb{E}\left[\frac{G(S_j)}{1+\mathcal{J}(S_k)}\right] - \mathbb{E}[G(S_j)]\cdot \mathbb{E}\left[\frac{1}{1+\mathcal{J}(S_k)}\right]\right| \\
\nonumber & \lesssim \sum_{j=\varepsilon_k}^{\lfloor \eta k\rfloor} \sum_{\underset{\|x\|\le \eta^{1/4}\sqrt{k}}{\|z\|\le \sqrt{k\chi_k}} } \frac{p_j(x)G(x)}{1+\|z\|} \left|\overline p_{k-j}(z-x) +\overline p_{k-j}(z+x) -2 \overline p_k(z)\right| + \frac {\eta^{1/4}}k \lesssim \frac {\eta^{1/4}}k. \end{align} Define now for $s\in (0,1]$,
$$ H_s: = \mathbb{E}\left[\frac 1{\|\beta_s\|^3 \|\beta_1\|}\right] - \mathbb{E}\left[\frac 1{\|\beta_s\|^3}\right] \cdot \mathbb{E}\left[\frac 1{\|\beta_1\|}\right].$$ Let $f_s(\cdot)$ be the density of $\beta_s$ and notice that as $s\to 0$, \begin{align*}
& H_s=\int_{\mathbb{R}^5} \int_{\mathbb{R}^5} \frac { f_s(x) f_{1-s}(y)}{\|x\|^3 \|x+y\|} \, dx\, dy - \int_{\mathbb{R}^5} \int_{\mathbb{R}^5} \frac { f_s(x) f_1(y)}{\|x\|^3 \|y\|} \, dx\, dy \\
& = \frac {1}{s^{3/2}} \int_{\mathbb{R}^5} \int_{\mathbb{R}^5} \frac{f_1(x)f_1(y)}{\|x\|^3} \left(
\frac {1}{\|y\sqrt{1-s} +x \sqrt s \| } -\frac {1}{\|y\|}\right) \, dx\, dy\\
& = \frac {1}{s^{3/2}} \int_{\mathbb{R}^5} \int_{\mathbb{R}^5} \frac{f_1(x)f_1(y)}{\|x\|^3\|y\|}
\left\{\left(\frac 12 + \frac{\|x\|^2}{2\|y\|^2} + \frac{\langle x,y\rangle^2}{\|y\|^4} \right)s + \mathcal{O}(s^{3/2}) \right\}\, dx\, dy =\frac c{\sqrt{s}} + \mathcal{O}(1), \end{align*} with $c>0$. Thus the map $s\mapsto H_s$ is integrable at $0$, and since it is also continuous on $(0,1]$, its integral on this interval is well defined. Since $\eta$ can be taken arbitrarily small in \eqref{eta.tau1} and \eqref{eta.tau1.bis}, in order to finish the proof it just remains to show that the integral of $H_s$ on $(0,1]$ is positive.
To this end, note first that $\widetilde \beta_{1-s}:= \beta_1 - \beta_s$ is independent of $\beta_s$. We use then \eqref{claim.geom}, which implies,
with $q=\mathbb{E}[1/\|\beta_1\|^3]$, \begin{align*}
& \mathbb{E}\left[\frac 1{\|\beta_s\|^3 \|\beta_1\|}\right] = \mathbb{E}\left[\frac 1{\|\beta_s\|^3 \|\beta_s + \widetilde \beta_{1-s}\|}\right]
\ge \mathbb{E}\left[\frac 1{\|\beta_s\|^3 \sqrt{\|\beta_s\|^2 + \|\widetilde \beta_{1-s}\|^2}}\right]\\
& = \frac{(5q)^2}{s^{3/2}} \int_0^\infty \int_0^\infty \frac{r e^{-\frac 52 r^2} u^4 e^{-\frac 52u^2}}{\sqrt{sr^2 + (1-s)u^2}} \, dr\, du\\
& = \frac{q^2}{5s^{3/2}} \int_0^\infty \int_0^\infty \frac{r e^{-\frac{r^2}2} u^4 e^{-\frac{u^2}2}}{\sqrt{sr^2 + (1-s)u^2}} \, dr\, du.
\end{align*}
We split the double integral in two parts, one on the set $\{sr^2\le (1-s)u^2\}$, and the other one on the complementary set $\{sr^2\ge (1-s)u^2\}$.
Call respectively $I_s^1$ and $I_s^2$ the integrals on these two sets. For $I_s^1$, \eqref{lower.sqrt} gives
\begin{align*}
I_s^1 & \ge \frac 1{\sqrt{1-s}} \int_0^\infty u^3 e^{-\frac{u^2}2} \int_0^{\sqrt{\frac{1-s}{s}}u} r e^{-\frac{r^2}2}\, dr \, du\\ & \qquad - \frac{s}{2(1-s)^{3/2}} \int_0^\infty u e^{-\frac{u^2}2} \int_0^{\sqrt{\frac{1-s}{s}}u} r^3 e^{-\frac{r^2}2}\, dr \, du\\ & = \frac{2(1-s^2)}{\sqrt{1-s}} + \frac{s^2}{\sqrt{1-s}} - \frac{s}{\sqrt{1-s}}= \frac{2-s - s^2}{\sqrt{1-s}}.
\end{align*} For $I_s^2$ we simply use the rough bound: $$I_s^2 \ge \frac 1{\sqrt{2s}} \int_0^\infty \int_0^\infty e^{-\frac{r^2}2} u^4 e^{-\frac{u^2}2}{\text{\Large $\mathfrak 1$}}\{sr^2\ge (1-s)u^2\} \, dr \, du, $$ which entails \begin{align*} & \int_0^1 \frac{I_s^2}{s^{3/2}}\, ds \ge \frac 1{\sqrt 2} \int_0^\infty \int_0^\infty e^{-\frac{r^2}2} u^4 e^{-\frac{u^2}2} \left(\int_{\frac{u^2}{u^2+r^2}}^1 \frac 1{s^2}\, ds\right)\, dr \, du \\ & = \frac 1{\sqrt 2} \left(\int_0^\infty r^2 e^{-\frac{r^2}2} \, dr\right)^2= \frac 1{\sqrt 2} \left(\int_0^\infty e^{-\frac{r^2}2}\, dr\right)^2 =\frac{\pi}{2\sqrt 2}>1, \end{align*} where for the last inequality we use $\sqrt{2}<3/2$. Note now that
$$\mathbb{E}\left[\frac 1{\|\beta_s\|^3}\right] \cdot \mathbb{E}\left[\frac 1 {\|\beta_1\|}\right] = \frac {2q^2}{5s^{3/2}} , $$
and \begin{align*} & \int_0^1 \frac{I_s^1 - 2}{s^{3/2}}\, ds \ge \int_0^1 s^{-3/2} \left\{(2- s- s^2)(1+\frac s2 + \frac{3s^2}{8}) - 2\right\} \, ds \\ & = - \int_0^1 (\frac 34 \sqrt s + \frac 78 s^{3/2} +\frac {3}{8} s^{5/2} ) \, ds = - (\frac 12 + \frac 7{20} + \frac{3}{28}) = - \frac{134}{140} > -1. \end{align*} Altogether, this shows that the integral of $H_s$ on $(0,1]$ is indeed positive, as desired. This concludes the proof of the lemma.
$\square$
\begin{remark}\emph{The value of $H_1$ can be computed explicitly, and one can check that it is positive. Similarly, by computing the leading order term in $I_s^2$, we could show that $H_s$ is also positive in a neighborhood of the origin, but it would be interesting to know whether $H_s$ is positive for all $s\in (0,1)$. } \end{remark}
\subsection{Proof of Lemma \ref{lem.var.4}} We define here $$\tau_1:= \inf\{n\ge 0 : S_n^1\in \mathcal{R}[\varepsilon_k,k-\varepsilon_k]\},\ \tau_2:=\inf\{n\ge 0 : S_k+S_n^2\in \mathcal{R}[\varepsilon_k,k-\varepsilon_k]\},$$ with $S^1$ and $S^2$ two independent walks, independent of $S$. As in the previous lemma, we omit the details of the fact that $$\operatorname{Cov}(Z_0\varphi_2,Z_k\psi_2)= \rho^2\cdot \operatorname{Cov}({\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\},{\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\}) +o\left(\frac 1k\right).$$ Then we define $N:=\lfloor (k-3\varepsilon_k)/\varepsilon_k\rfloor$ and let $(\tau_1^i)_{i=1,\dots,N}$ be as in the proof of Lemma \ref{lem.var.3}. Define also $(\tau_2^i)_{i=1,\dots,N}$ analogously. Similarly as before one can see that \begin{eqnarray}\label{tau1i.tau2j} \mathbb{P}[\tau_1<\infty, \tau_2<\infty ]= \sum_{i=1}^{N} \sum_{j=1}^{N} \mathbb{P}[\tau_1^i <\infty, \tau_2^j <\infty ] + o\left(\frac 1k\right). \end{eqnarray}
Note also that for any $i$ and $j$, with $|i-j| \le 1$, by \eqref{Green.hit} and \eqref{exp.Green}, $$\mathbb{P}[\tau_1^i<\infty, \, \tau_2^j<\infty] = \mathcal{O}\left(\frac {\varepsilon_k^{2(1-\delta)}}{k_i^{3/2} (k-k_i)^{3/2} }\right), $$
so that in \eqref{tau1i.tau2j}, one can consider only the sum on the indices $i$ and $j$ satisfying $|i-j|\ge 2$. Furthermore, when $i<j$, the events $\{\tau_1^i<\infty\}$ and $\{\tau_2^j<\infty\}$ are independent. Thus altogether this gives \begin{align*}
\operatorname{Cov}( &{\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}, {\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\}) \\
& = \sum_{i = 1}^{N-2} \sum_{j=i+2}^N \left( \mathbb{P}[ \tau_1^j <\infty, \tau_2^i<\infty] - \mathbb{P}[\tau_1^j<\infty] \mathbb{P}[\tau_2^i<\infty] \right) + o\left(\frac 1k\right). \end{align*} Then by following carefully the same steps as in the proof of the previous lemma we arrive at $$\operatorname{Cov}({\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}, \, {\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\}) = \frac {c}{k} \int_0^1 \widetilde H_t\, dt + o\left(\frac 1k\right), $$ with $c>0$ some positive constant and,
$$\widetilde H_t := \int_0^t \left(\mathbb{E}\left[\frac 1{\|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right] - \mathbb{E}\left[\frac 1{\|\beta_s-\beta_1\|^3}\right] \cdot\mathbb{E}\left[\frac 1{ \|\beta_t\|^3} \right] \right) \, ds,$$ at least provided we show first that $\widetilde H_t$ is well defined and that its integral over $[0,1]$ is convergent.
However, observe that for any $t\in (0,1)$, one has with $q=\mathbb{E}[\|\beta_1\|^{-3}]$,
$$\int_0^t \mathbb{E}\left[\frac 1{\|\beta_s-\beta_1\|^3}\right] \cdot\mathbb{E}\left[\frac 1{ \|\beta_t\|^3} \right] \, ds =\frac{q^2}{t^{3/2}} \int_0^t \frac 1{(1-s)^{3/2}}\, ds = \frac{2q^2(1-\sqrt{1-t})}{t^{3/2}\sqrt{1-t}},$$ and therefore this part is integrable on $[0,1]$. This implies in fact that the other part in the definition of $\widetilde H_t$ is also well defined and integrable, since we already know that $\operatorname{Cov}({\text{\Large $\mathfrak 1$}}\{\tau_1<\infty\}, \, {\text{\Large $\mathfrak 1$}}\{\tau_2<\infty\})=\mathcal{O}(1/k)$. Thus it only remains to show that the integral of $\widetilde H_t$ on $[0,1]$ is positive. To this end, we write $\beta_t = \beta_s + \gamma_{t-s}$, and $\beta_1 = \beta_s + \gamma_{t-s} + \delta_{1-t}$, with $(\gamma_u)_{u\ge 0}$ and $(\delta_u)_{u\ge 0}$
two independent Brownian motions, independent of $\beta$. Furthermore, using that the map $z\mapsto 1/\|z\|^3$ is harmonic outside the origin, we can compute: \begin{align*}
& I_1:= \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\|\beta_s\|\ge \|\gamma_{t-s}\|\ge \|\delta_{1-t}\| \} }{\|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right] = \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\|\beta_s\|\ge \|\gamma_{t-s}\|\ge \|\delta_{1-t}\| \} }{\|\gamma_{t-s} + \delta_{1-t}\|^3 \cdot \|\beta_s\|^3} \right] \\
= & \frac{5q}{s^{3/2}} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|\gamma_{t-s}\|\ge \|\delta_{1-t}\| \} }{\|\gamma_{t-s} + \delta_{1-t}\|^3} \int_{\frac{\|\gamma_{t-s}\|}{\sqrt s}}^\infty re^{-\frac 52 r^2} \, dr\right] = \frac{q}{s^{3/2}} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|\gamma_{t-s}\|\ge \|\delta_{1-t}\| \}}{\|\gamma_{t-s} + \delta_{1-t}\|^3} e^{-\frac 5{2s} \|\gamma_{t-s}\|^2} \right] \\
=& \frac{q}{s^{3/2}} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|\gamma_{t-s}\|\ge \|\delta_{1-t}\| \}}{\|\gamma_{t-s}\|^3} e^{-\frac 5{2s} \|\gamma_{t-s}\|^2} \right] = \frac{5q^2}{s^{3/2}(t-s)^{3/2}} \mathbb{E}\left[\int_{\frac {\|\delta_{1-t}\|}{\sqrt{t-s}}}^\infty r e^{-\frac 52 r^2 (1+ \frac {t-s}{s})}\, dr \right] \\
=& \frac{q^2}{\sqrt{s}(t-s)^{3/2}t}\mathbb{E}\left[ e^{- \frac{\|\delta_{1-t}\|^2t}{s(t-s)}}\right] = \frac{5q^3}{\sqrt{s}(t-s)^{3/2}t} \int_0^\infty r^4 e^{-\frac 52r^2(1+ \frac{t(1-t)}{s(t-s)})}\, dr = \frac{q^2s^2(t-s)}{t\, \Delta^{5/2}}, \end{align*} with $$\Delta := t(1-t) + s(t-s) = (1-t)(t-s) + s(1-s).$$ Likewise, \begin{align*}
I_2&:= \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\|\beta_s\|\ge\|\gamma_{t-s}\|,\, \|\delta_{1-t}\| \ge \|\gamma_{t-s}\| \} }{\|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right] = \frac{q}{s^{3/2}} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|\gamma_{t-s}\|\le \|\delta_{1-t}\| \}}{\|\gamma_{t-s} + \delta_{1-t}\|^3} e^{-\frac 5{2s} \|\gamma_{t-s}\|^2} \right] \\
=& \frac{q}{s^{3/2}} \mathbb{E}\left[\frac{{\text{\Large $\mathfrak 1$}}\{\|\gamma_{t-s}\|\le \|\delta_{1-t}\| \}}{\|\delta_{1-t}\|^3} e^{-\frac 5{2s} \|\gamma_{t-s}\|^2} \right]
= \frac{5q^2}{s^{3/2}(1-t)^{3/2}} \mathbb{E}\left[ e^{-\frac 5{2s} \|\gamma_{t-s}\|^2} \int_{\frac {\|\gamma_{t-s}\|}{\sqrt{1-t}}}^\infty r e^{-\frac 52 r^2}\, dr \right] \\
=& \frac{q^2}{s^{3/2}(1-t)^{3/2}} \mathbb{E}\left[ e^{-\frac 5{2} \|\gamma_{t-s}\|^2(\frac 1s + \frac {1}{1-t})}\right] = \frac{q^2s(1-t)}{\Delta^{5/2}}. \end{align*} Define as well \begin{align*}
I_3 & := \mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{\|\beta_s\|\le \|\gamma_{t-s}\|\le \|\delta_{1-t}\| \} }{\|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right], \end{align*} \begin{align*}
I_4:=\mathbb{E}\left[\frac {{\text{\Large $\mathfrak 1$}}\{ \|\delta_{1-t}\|\le\|\beta_s\|\le \|\gamma_{t-s}\| \} }{\|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right], \ I_5:= \mathbb{E}\left[\frac { {\text{\Large $\mathfrak 1$}}\{\|\beta_s\|\le \|\delta_{1-t}\|\le \|\gamma_{t-s}\| \} }{ \|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right]. \end{align*} Note that by symmetry one has $$\int_{0\le s\le t \le 1} I_1 \, ds \, dt = \int_{0\le s\le t \le 1} I_3 \, ds \, dt,\text{ and } \int_{0\le s\le t \le 1} I_4 \, ds \, dt = \int_{0\le s\le t \le 1} I_5 \, ds \, dt. $$ Observe also that, $$I_1+I_2 = \frac{q^2s}{t \Delta^{3/2}}. $$ Moreover, using symmetry again, we can see that $$\int_0^t \frac {s-t/2}{ \Delta^{3/2}} \, ds = 0,$$ and thus $$\int_0^t (I_1+I_2)\, ds = \frac {q^2}{2} \int_0^t \frac{1}{ \Delta^{3/2}}\, ds. $$ Likewise, \begin{align*} & \int_{0\le s\le t\le 1} I_1\, ds \, dt = \int_{0\le s\le t\le 1} \frac{q^2s(t-s)^2}{t\Delta^{5/2}}\, ds \, dt = \frac 12\int_{0\le s\le t\le 1} \frac{q^2s(t-s)}{\Delta^{5/2}}\, ds \, dt \\
=& \int_{0\le s\le t\le 1} \frac{q^2(1-t)(t-s)}{2\Delta^{5/2}}\, ds \, dt = \int_{0\le s\le t\le 1} \frac {q^2t(1-t)}{4\Delta^{5/2}}\, ds\, dt = \int_{0\le s\le t\le 1} \frac {q^2}{6\Delta^{3/2}}\, ds\, dt. \end{align*} It follows that $$\int_{0\le s\le t\le 1} (I_1+I_2+I_3) \, ds\, dt = \frac {2q^2}3 \int_{0\le s\le t\le 1} \Delta^{-3/2}\, ds\, dt.$$ We consider now the term $I_4$, which is a bit more complicated to compute, thus we only give a lower bound on a suitable interval. To be more precise, we first define for $r\ge 0$ and $\lambda\ge 0$, $$F(r):=\int_0^r s^4e^{-5s^2/2}\, ds,\quad \text{and}\quad F_2(\lambda, r):=\int_0^r F(\lambda s) s^4e^{-5s^2/2}\, ds,$$ and then we write, \begin{align*}
& I_4= \mathbb{E}\left[ \frac { {\text{\Large $\mathfrak 1$}}\{ \|\delta_{1-t}\|\le\|\beta_s\|\le \|\gamma_{t-s}\| \} }{ \|\gamma_{t-s}\|^6} \right]
= 5q\cdot \mathbb{E}\left[\frac { {\text{\Large $\mathfrak 1$}}\{ \|\beta_s\|\le \|\gamma_{t-s}\| \} }{ \|\gamma_{t-s}\|^6} F\left(\frac{\|\beta_s\|}{\sqrt{1-t}}\right)\right]\\
=& \mathbb{E}\left[\frac {(5q)^2}{ \|\gamma_{t-s}\|^6} F_2\left(\frac{\sqrt s}{\sqrt{1-t}},\frac{\|\gamma_{t-s}\|}{\sqrt s}\right)\right] = \frac{(5q)^3}{(t-s)^3} \int_0^\infty \frac{e^{-\frac{5r^2}{2}}}{r^2} F_2\left(\frac{\sqrt s}{\sqrt{1-t}},r\frac{\sqrt{t-s}}{\sqrt s}\right)\, dr \\
=& \frac{(5q)^3}{(t-s)^3} \left\{ \frac{\sqrt{t-s}}{\sqrt s} \int_0^\infty F\left(r\frac{\sqrt{t-s}}{\sqrt{1-t}}\right) r^3e^{-\frac{5r^2}{2}}\, dr -5\int_0^\infty F_2\left(\frac{\sqrt s}{\sqrt{1-t}},r\frac{\sqrt{t-s}}{\sqrt s}\right) e^{-\frac{5r^2}{2}}\, dr \right\}\\
\ge & \frac{(5q)^3}{(t-s)^3} \left\{ \frac{(t-s)^{\frac 32}}{s^{3/2}} \int_0^\infty F\left( r\frac{\sqrt{t-s}}{\sqrt {1-t}}\right) r^3 e^{-\frac {5r^2t}{2s}}\, dr +\frac{(2s-t)\sqrt{t-s}}{s^{3/2}} \int_0^\infty F\left(r\frac{\sqrt{t-s}}{\sqrt{1-t}}\right) r^3e^{-\frac {5r^2}{2}}\, dr \right\}, \end{align*} using that $$F_2(\lambda,r)\le \frac 15 r^3F(\lambda r)(1-e^{-5r^2/2}).$$ Therefore, if $t/2\le s\le t$, \begin{align*} I_4& \ge \frac{(5q)^3}{[s(t-s)]^{3/2}} \int_0^\infty r^3 F\left( r\frac{\sqrt{t-s}}{\sqrt {1-t}}\right) e^{-\frac {5r^2t}{2s}}\, dr \\ & = \frac{(5q)^3\sqrt s}{t^2(t-s)^{3/2}} \int_0^\infty r^3 F\left( r\frac{\sqrt{s(t-s)}}{\sqrt {t(1-t)}}\right) e^{-5r^2/2}\, dr\\ & \ge \frac{2\cdot 5^2q^3\sqrt s}{t^2(t-s)^{3/2}}\int_0^\infty F\left( r\frac{\sqrt{s(t-s)}}{\sqrt {t(1-t)}}\right) re^{-\frac{5r^2}{2}}\, dr =\frac{2\cdot 5q^3s^3(t-s)}{t^2[t(1-t)]^{5/2}}\int_0^\infty r^4 e^{-\frac{5r^2\Delta}{2t(1-t)}}\, dr\\ & = \frac{2 q^2s^3(t-s)}{t^2\Delta^{5/2}}\ge \frac{q^2s(t-s)}{2\Delta^{5/2}}, \end{align*} and as a consequence, \begin{align*} \int_{0\le s\le t\le 1} I_4\, ds\, dt & \ge \int_{t/2\le s\le t\le 1}I_4\, ds\, dt \ge \frac {q^2}2 \int_{t/2\le s\le t\le 1}\, \frac{s(t-s)}{\Delta^{5/2}}\, ds\, dt \\ &=\frac {q^2}4 \int_{0\le s\le t\le 1}\frac{s(t-s)}{\Delta^{5/2}}\, ds\, dt = \frac {q^2}{12} \int_{0\le s\le t\le 1}\Delta^{-3/2}\, ds\, dt. \end{align*} Putting all these estimates together yields
$$\int_{0\le s\le t\le 1} \mathbb{E}\left[\frac 1{\|\beta_s-\beta_1\|^3 \cdot \|\beta_t\|^3} \right] \, ds\, dt = \sum_{k=1}^5 \int_{0\le s\le t\le 1} I_k\, ds\, dt \ge \frac 56 \int_{0\le s\le t\le 1} \Delta^{-3/2}\, ds\,dt.$$ Thus it just remains to show that \begin{eqnarray}\label{finalgoal} \int_{0\le s\le t\le 1} \Delta^{-3/2}\, ds\,dt \ge \frac 65 \int_{0\le s\le t\le 1} \widetilde \Delta^{-3/2}\, ds\,dt, \end{eqnarray} where $\widetilde \Delta := t(1-s)$. Note that $\Delta = \widetilde \Delta +(t-s)^2$. Recall also that for any $\alpha \in \mathbb{R}$, and any $x\in (-1,1)$, \begin{eqnarray}\label{DL1+x} (1+x)^\alpha = 1+ \sum_{i\ge 1}\frac{\alpha (\alpha-1)\dots(\alpha-i+1)}{i!} x^i. \end{eqnarray} Thus $$\frac 1{\Delta^{3/2}} = \frac 1{\widetilde \Delta^{3/2}} \left(1+\sum_{k\ge 1} \frac{(3/2)(5/2)\dots (k+1/2)}{k!} \cdot \frac{(t-s)^{2k}}{\widetilde \Delta^k}\right).$$ One needs now to compute the coefficients $C_k$ defined by $$C_k := \frac{(3/2)(5/2)\dots (k+1/2)}{k!} \int_{0\le s\le t\le 1} \frac{(t-s)^{2k}}{\widetilde \Delta^{k+3/2}}\, ds\,dt.$$ We claim that one has for any $k\ge 0$, \begin{eqnarray}\label{Ckformula} C_k= \frac{2^{2k+2}}{2k+1}(-1)^k \Sigma_k, \end{eqnarray} with $\Sigma_0=1$, and for $k\ge 1$, $$\Sigma_k = 1+ \sum_{i=1}^{2k}(-1)^i \frac{(k+1/2)(k-1/2)\dots(k-i +3/2)}{i!}.$$ We will prove this formula in a moment, but let us conclude the proof of the lemma first, assuming it is true. Straightforward computations show by \eqref{Ckformula} that $$C_0 = 4,\quad C_1= \frac 23, \quad \text{and}\quad C_2= \frac {3}{10},$$ and $C_0+C_1+C_2\ge 6C_0/5$, gives \eqref{finalgoal} as wanted.
So let us prove \eqref{Ckformula} now. Note that one can assume $k\ge 1$, as the result for $k=0$ is immediate. By \eqref{DL1+x}, one has $$(1-s)^{-k-3/2}= 1+ \sum_{i\ge 1} \frac{(k+3/2)(k+5/2)\dots (k+ i +1/2)}{i!} s^i.$$ Thus by integrating by parts, we get $$\int_0^t \frac{(t-s)^{2k}}{(1-s)^{k+3/2}} \, ds = (2k)! \sum_{i\ge 0} \frac{(k+3/2)\dots(k+i+1/2)}{(2k + i +1)!}\cdot t^{2k+i+1},$$ and then $$\int_0^1 \int_0^t \frac{(t-s)^{2k}}{t^{k+3/2}(1-s)^{k+3/2}} \, ds\, dt = (2k)! \sum_{i\ge 0} \frac{(k+3/2)\dots(k+i-1/2)}{(2k + i +1)!}.$$ As a consequence, \begin{align*} C_k& = \frac{(2k)!}{k!} \sum_{i\ge 0} \frac{(3/2)(5/2)\dots(k+i-1/2)}{(2k+i+1)!} \\
& = \frac{(2k)!}{(k+1/2)(k-1/2)\dots(3/2)(1/2)^2\cdot k!} \sum_{i\ge 0} \frac{|(k+1/2)(k-1/2)\dots(-k-i+1/2)|}{(2k + i +1)!} \\
& = \frac{2^{2k+2}}{2k+1} \sum_{i\ge 0} \frac{|(k+1/2)(k-1/2)\dots(-k-i+1/2)|}{(2k + i +1)!}, \end{align*} and it just remains to observe that the last sum is indeed equal to $\Sigma_k$. The latter is obtained by taking the limit as $t$ goes to $1$ in the formula \eqref{DL1+x} for $(1-t)^{k+1/2}$. This concludes the proof of Lemma \ref{lem.var.4}.
$\square$
\begin{remark}
\emph{It would be interesting to show that the covariance between $1/\|\beta_s-\beta_1\|^3$ and $1/\|\beta_t\|^3$ itself is positive for all $0\le s\le t\le 1$, and not just its integral, as we have just shown. } \end{remark}
\section{Proof of Theorem B} The proof of Theorem B is based on the Lindeberg-Feller theorem for triangular arrays, that we recall for convenience (see Theorem 3.4.5 in \cite{Dur}): \begin{theorem}[Lindeberg-Feller]\label{thm:lind} For each $n$ let $(X_{n,i}: \, 1\leq i\leq n)$ be a collection of independent random variables with zero mean. Suppose that the following two conditions are satisfied \newline {\rm{(i)}} $\sum_{i=1}^{n}\mathbb{E}[X_{n,i}^2] \to \sigma^2>0$ as $n\to \infty$, and \newline
{\rm{(ii)}} $\sum_{i=1}^{n}\mathbb{E}\left[(X_{n,i})^2{\text{\Large $\mathfrak 1$}}\{|X_{n,i}|>\varepsilon\}\right] \to 0$, as $n\to \infty$, for all $\varepsilon>0$. \newline Then, $S_n=X_{n,1}+\ldots + X_{n,n} \Longrightarrow \mathcal{N}(0,\sigma^2)$, as $n\to \infty$. \end{theorem} In order to apply this result, one needs three ingredients. The first one is an asymptotic estimate for the variance of the capacity of the range, which is given by our Theorem A. The second ingredient is a decomposition of the capacity of two sets as a sum of the capacities of the two sets minus some error term, in the spirit of the inclusion-exclusion formula for the cardinality of a set, which allows to decompose the capacity of the range up to time $n$ into a sum of independent pieces having the law of the capacity of the range up to a smaller time index, and finally the last ingredient is a sufficiently good bound on the centered fourth moment.
This strategy has been already employed successfully for the capacity of the range in dimension six and more in \cite{ASS18} (and for the size of the range as well, see \cite{JO69, JP71}). In this case the asymptotic of the variance followed simply from a sub-additivity argument, but the last two ingredients are entirely similar in dimension $5$ and in higher dimension. In particular one has the following decomposition (see Proposition 1.6 in \cite{ASS19}): for any two subsets $A,B\subset \mathbb{Z}^d$, $d\ge 3$, \begin{eqnarray}\label{cap.decomp} \mathrm{Cap}(A\cup B) = \mathrm{Cap}(A) + \mathrm{Cap}(B) - \chi(A,B), \end{eqnarray} where $\chi(A,B)$ is some error term. Its precise expression is not so important here. All one needs to know is that
$$|\chi(A,B)| \le 3\sum_{x\in A}\sum_{y\in B} G(x,y),$$ so that by \cite[Lemma 3.2]{ASS18}, if $\mathcal{R}_n$ and $\widetilde{\mathcal{R}}_n$ are the ranges of two independent walks in $\mathbb{Z}^5$, then \begin{eqnarray}\label{bound.chin} \mathbb{E}[\chi(\mathcal{R}_n,\widetilde{\mathcal{R}}_n)^4] = \mathcal{O}(n^2). \end{eqnarray} We note that the result is shown for the simple random walk only in \cite{ASS18}, but the proof applies as well to our setting (in particular Lemma 3.1 thereof also follows from \eqref{exp.Green}). Now as noticed already by Le Gall in his paper \cite{LG86} (see his remark (iii) p.503), a good bound on the centered fourth moment follows from \eqref{cap.decomp} and \eqref{bound.chin}, and the triangle inequality in $L^4$. More precisely in dimension $5$, one obtains (see for instance the proof of Lemma 4.2 in \cite{ASS18} for some more details): \begin{eqnarray}\label{cap.fourth} \mathbb{E}\left[\left(\mathrm{Cap}(\mathcal{R}_n)-\mathbb{E}[\mathrm{Cap}(\mathcal{R}_n)]\right)^4\right] = \mathcal{O}(n^2(\log n)^4). \end{eqnarray} Actually we would even obtain the slightly better bound $\mathcal{O}(n^2(\log n)^2)$, using our new bound on the variance $\operatorname{Var}(\mathrm{Cap}(\mathcal{R}_n))=\mathcal{O}(n\log n)$, but this is not needed here. Using next a dyadic decomposition of $n$, one can write with $T:=\lfloor n/(\log n)^4\rfloor$, \begin{eqnarray}\label{cpRn}
\mathrm{Cap}(\mathcal{R}_n) = \sum_{i=0}^{\lfloor n/T\rfloor} \mathrm{Cap}(\mathcal{R}^{(i)}_T) - R_n,
\end{eqnarray} where the $(\mathcal{R}^{(i)}_T)_{i=0,\dots,n/T}$ are independent pieces of the range of length either $T$ or $T+1$, and $$ R_n= \sum_{\ell =1}^L \sum_{i=0}^{2^{\ell-1}} \chi(\mathcal{R}^{(2i)}_{n/2^\ell},\mathcal{R}^{(2i+1)}_{n/2^\ell}), $$ is a triangular array of error terms (with $L=\log_2(\log n)^4$). Then it follows from \eqref{bound.chin}, that \begin{align*} \operatorname{Var}(R_n) &\le L \sum_{\ell=1}^L \operatorname{Var}\left(\sum_{i=1}^{2^{\ell-1}} \chi(\mathcal{R}^{(2i)}_{n/2^\ell},\mathcal{R}^{(2i+1)}_{n/2^\ell})\right)\le L\sum_{\ell=1}^L \sum_{i=1}^{2^{\ell-1}} \operatorname{Var}\left(\chi(\mathcal{R}^{(2i)}_{n/2^\ell},\mathcal{R}^{(2i+1)}_{n/2^\ell})\right)\\ & = \mathcal{O}(L^2 n)=\mathcal{O}(n(\log \log n)^2). \end{align*} In particular $(R_n-\mathbb{E}[R_n])/\sqrt{n\log n}$ converges in probability to $0$. Thus one is just led to show the convergence in law of the remaining sum in \eqref{cpRn}. For this, one can apply Theorem \ref{thm:lind}, with $$X_{n,i}:=\frac{\mathrm{Cap}(\mathcal{R}^{(i)}_T)-\mathbb{E}\left[\mathrm{Cap}(\mathcal{R}^{(i)}_T)\right]}{\sqrt{n\log n}}.$$ Indeed, Condition (i) of the theorem follows from Theorem A, and Condition (ii) follows from \eqref{cap.fourth} and Markov's inequality (more details can be found in \cite{ASS18}). This concludes the proof of Theorem B.
$\square$
\section*{Acknowledgments} We thank Fran{\c c}oise P\`ene for enlightening discussions at an early stage of this project, and Pierre Tarrago for Reference \cite{Uchiyama98}. We also warmly thank Amine Asselah and Perla Sousi for our many discussions related to the subject of this work, which grew out of it. The author was supported by the ANR SWIWS (ANR-17-CE40-0032) and MALIN (ANR-16-CE93-0003).
\end{document} |
\begin{document}
\title{Homomorphisms of planar $(m, n)$-colored-mixed graphs to planar targets}
\begin{abstract} An $(m, n)$-colored-mixed graph $G=(V, A_1, A_2,\cdots, A_m, E_1, E_2,\cdots, E_n)$ is a graph having $m$ colors of arcs and $n$ colors of edges. We do not allow two arcs or edges to have the same endpoints. A homomorphism from an $(m,n)$-colored-mixed graph $G$ to another $(m, n)$-colored-mixed graph $H$ is a morphism $\varphi:V(G)\rightarrow V(H)$ such that each edge (resp. arc) of $G$ is mapped to an edge (resp. arc) of $H$ of the same color (and orientation). An $(m,n)$-colored-mixed graph $T$ is said to be $P_g^{(m, n)}$-universal if every graph in $P_g^{(m, n)}$ (the planar $(m, n)$-colored-mixed graphs with girth at least $g$) admits a homomorphism to $T$.
We show that planar $P_g^{(m, n)}$-universal graphs do not exist for $2m+n\ge3$ (and any value of $g$) and find a minimal (in the number of vertices) planar $P_g^{(m, n)}$-universal graph in the other cases. \end{abstract}
\section{Introduction}
The concept of homomorphisms of $(m, n)$-colored-mixed graphs was introduced by J. Ne\v{s}et\v{r}il and A. Raspaud~\cite{MNCM} in order to generalize homomorphisms of $k$-edge-colored graphs and oriented graphs.
An \emph{$(m, n)$-colored-mixed graph} $G=(V, A_1, A_2,\cdots, A_m, E_1, E_2,\cdots, E_n)$ is a graph having $m$ colors of arcs and $n$ colors of edges. We do not allow two arcs or edges to have the same endpoints. The case $m=0$ and $n=1$ corresponds to simple graphs, $m=1$ and $n=0$ to oriented graphs and $m=0$ and $n=k$ to $k$-edge-colored graphs. For the case $m=0$ and $n = 2$ ($2$-edge-colored graphs) we refer to the two types of edges as \emph{blue} and \emph{red} edges.
A \emph{homomorphism} from an $(m, n)$-colored-mixed graph $G$ to another $(m, n)$-colored-mixed graph $H$ is a mapping $\varphi:V(G) \rightarrow V(H)$ such that every edge (resp. arc) of $G$ is mapped to an edge (resp. arc) of $H$ of the same color (and orientation). If $G$ admits a homomorphism to $H$, we say that $G$ is \emph{$H$-colorable} since this homomorphism can be seen as a coloring of the vertices of $G$ using the vertices of $H$ as colors. The edges and arcs of $H$ (and their colors) give us the rules that this coloring must follow. Given a class of graphs $\mathcal{C}$, a graph $H$ is \emph{$\mathcal{C}$-universal} if every graph $G \in \mathcal{C}$ is $H$-colorable. The class $P_g^{(m, n)}$ contains every planar $(m, n)$-colored-mixed graph with girth at least $g$.
In this paper, we consider some planar $P_g^{(m, n)}$-universal graphs with $k$ vertices. They are depicted in Figures~\ref{fig:t_oriented} and~\ref{fig:t_2edge}. The known results about this topic are as follows.
\begin{theorem}\label{thm:known}{\ } \begin{enumerate} \item $K_4$ is a planar $P^{(0,1)}_3$-universal graph. This is the four color theorem. \item $K_3$ is a planar $P^{(0,1)}_4$-universal graph. This is Grötzsch's Theorem \cite{grotzsch}. \item $\overrightarrow{C_6^2}$ is a planar $P_{16}^{(1,0)}$-universal graph~\cite{P10}. \end{enumerate} \end{theorem}
Our first result shows that, in addition to the case of $(0,1)$-graphs covered by Theorems~\ref{thm:known}.1 and~\ref{thm:known}.2, our topic is actually restricted to the cases of oriented graphs (i.e., $(m,n)=(1,0)$) and 2-edge-colored graphs (i.e., $(m,n)=(0,2)$).
\begin{theorem}\label{thm:Pmn} For every $g\ge3$, there exists no planar $P_g^{(m,n)}$-universal graph if $2m+n\ge3$. \end{theorem}
As Theorems~\ref{thm:known}.1 and~\ref{thm:known}.2 show for $(0,1)$-graphs, there might exist a trade-off between minimizing the girth $g$ and the number of vertices of the universal graph, for a fixed pair $(m,n)$. For oriented graphs, Theorem~\ref{thm:known}.3 tries to minimize the girth. For oriented graphs and 2-edge-colored graphs, we choose instead to minimize the number of vertices of the universal graph.
\begin{theorem}\label{thm:positive}{\ } \begin{enumerate} \item $\overrightarrow{T_5}$ is a planar $P_{28}^{(1,0)}$-universal graph on 5 vertices. \item $T_6$ is a planar $P_{22}^{(0, 2)}$-universal graph on 6 vertices. \end{enumerate} \end{theorem}
The following result shows that Theorem~\ref{thm:positive} is optimal in terms of the number of vertices of the universal graph.
\begin{theorem}\label{thm:negative}{\ } \begin{enumerate} \item For every $g\ge3$, there exists an oriented bipartite cactus graph (i.e., $K_4^-$ minor-free graph) with girth at least $g$ and oriented chromatic number at least 5. \item For every $g\ge3$, there exists a 2-edge-colored bipartite outerplanar graph (i.e., $(K_4^-,K_{2,3})$ minor-free graph) with girth at least $g$ that does not map to a planar graph with at most 5 vertices. \end{enumerate} \end{theorem}
Most probably, Theorem~\ref{thm:positive} is not optimal in terms of girth. The following constructions give lower bounds on the girth.
\begin{theorem}\label{thm:ce}{\ } \begin{enumerate} \item There exists an oriented bipartite 2-outerplanar graph with girth $14$ that does not map to $\overrightarrow{T_5}$. \item There exists a 2-edge-colored planar graph with girth $11$ that does not map to $T_6$. \item There exists a 2-edge-colored bipartite planar graph with girth $10$ that does not map to $T_6$. \end{enumerate} \end{theorem}
\begin{figure}
\caption{The $P_{28}^{(1,0)}$-universal graph $\overrightarrow{T_5}$.}
\label{fig:t_oriented}
\caption{The $P_{22}^{(0,2)}$-universal graph $T_6$.}
\label{fig:t_2edge}
\end{figure}
Next, we obtain the following complexity dichotomies:
\begin{theorem}\label{thm:NPC}{\ } \begin{enumerate} \item For any fixed girth $g\geqslant 3$, either every graph in $P_g^{(1,0)}$ maps to $\overrightarrow{T_5}$ or it is NP-complete to decide whether a graph in $P_g^{(1,0)}$ maps to $\overrightarrow{T_5}$. Either every bipartite graph in $P_g^{(1,0)}$ maps to $\overrightarrow{T_5}$ or it is NP-complete to decide whether a bipartite graph in $P_g^{(1,0)}$ maps to $\overrightarrow{T_5}$. \item Either every graph in $P_g^{(0,2)}$ maps to $T_6$ or it is NP-complete to decide whether a graph in $P_g^{(0,2)}$ maps to $T_6$. Either every bipartite graph in $P_g^{(0,2)}$ maps to $T_6$ or it is NP-complete to decide whether a bipartite graph in $P_g^{(0,2)}$ maps to $T_6$. \end{enumerate} \end{theorem}
Finally, we can use Theorem~\ref{thm:NPC} with the non-colorable graphs in Theorem~\ref{thm:ce}.
\begin{corollary}\label{cor:cor}{\ } \begin{enumerate} \item Deciding whether a bipartite graph in $P_{14}^{(1,0)}$ maps to $\overrightarrow{T_5}$ is NP-complete. \item Deciding whether a graph in $P_{11}^{(0,2)}$ maps to $T_6$ is NP-complete. \item Deciding whether a bipartite graph in $P_{10}^{(0,2)}$ maps to $T_6$ is NP-complete. \end{enumerate} \end{corollary}
A 2-edge-colored path or cycle is said to be \emph{alternating} if any two adjacent edges have distinct colors.
\begin{proposition}[folklore]\label{prop:3n-6}{\ } \begin{itemize} \item Every planar simple graph on $n$ vertices has at most $3n-6$ edges. \item Every planar simple graph satisfies $(\mad(G)-2)\cdot(g(G)-2)<4$. \end{itemize} \end{proposition}
\section{Proof of Theorem~\ref{thm:positive}} We use the discharging method for both results in Theorem~\ref{thm:positive}. The following lemma will handle the discharging part. We call a vertex of degree $n$ an $n$-vertex and a vertex of degree at least $n$ an $n^+$-vertex. If there is a path made only of $2$-vertices linking two vertices $u$ and $v$, we say that $v$ is a weak-neighbor of $u$. If $v$ is a neighbor of $u$, we also say that $v$ is a weak-neighbor of $u$. We call a (weak-)neighbor of degree $n$ an $n$-(weak-)neighbor.
\begin{lemma}\label{lem:discharge} Let $k$ be a non-negative integer. Let $G$ be a graph with minimum degree 2 such that every 3-vertex has at most $k$ 2-weak-neighbors and every path contains at most $\tfrac{k+1}2$ consecutive 2-vertices. Then $\mad(G)\ge2+\tfrac2{k+2}$. In particular, $G$ cannot be a planar graph with girth at least $2k+6$. \end{lemma}
\begin{proof} Let $G$ be as stated. Every vertex has an initial charge equal to its degree. Every $3^+$-vertex gives $\tfrac1{k+2}$ to each of its 2-weak-neighbors. Let us check that the final charge $ch(v)$ of every vertex $v$ is at least $2+\tfrac2{k+2}$. \begin{itemize}
\item If $d(v)=2$, then $v$ receives $\tfrac1{k+2}$ from both of its 3-weak-neighbors. Thus $ch(v)=2+\tfrac2{k+2}$.
\item If $d(v)=3$, then $v$ gives $\tfrac1{k+2}$ to each of its 2-weak-neighbors. Thus $ch(v)\ge3-\tfrac{k}{k+2}=2+\tfrac2{k+2}$.
\item If $d(v)=d\ge4$, then $v$ has at most $\tfrac{k+1}2$ 2-weak-neighbors in each of the $d$ incident paths.
Thus $ch(v)\geqslant d-d\paren{\tfrac{k+1}2}\paren{\tfrac1{k+2}}=\tfrac d2\paren{1+\tfrac1{k+2}}\ge2+\tfrac2{k+2}$. \end{itemize} This implies that $\mad(G)\ge2+\frac2{k+2}$. Finally, if $G$ is planar, then the girth of $G$ cannot be at least $2k+6$, since otherwise $(\mad(G)-2)\cdot(g(G)-2)\geqslant\paren{2+\tfrac2{k+2}-2}\paren{2k+6-2}=\paren{\tfrac2{k+2}}\paren{2k+4}=4$, which contradicts Proposition~\ref{prop:3n-6}. \end{proof}
\subsection{Proof of Theorem~\ref{thm:positive}.1} We prove that the oriented planar graph $\overrightarrow{T_5}$ on 5 vertices from Figure~\ref{fig:t_oriented} is $P_{28}^{(1,0)}$-universal by contradiction. Assume that $G$ is an oriented planar graphs with girth at least $28$ that does not admit a homomorphism to $\overrightarrow{T_5}$ and is minimal with respect to the number of vertices. By minimality, $G$ cannot contain a vertex $v$ with degree at most one since a $\overrightarrow{T_5}$-coloring of $G-v$ can be extended to $G$. Similarly, $G$ does not contain the following configurations.
\begin{itemize} \item A path with 6 consecutive 2-vertices. \item A $3$-vertex with at least 12 2-weak-neighbors. \end{itemize}
Suppose that $G$ contains a path $u_0u_1u_2u_3u_4u_5u_6u_7$ such that the degree of $u_i$ is two for $1\leqslant i\le6$. By minimality of $G$, $G-\{u_1,u_2,u_3,u_4,u_5,u_6\}$ admits a $\overrightarrow{T_5}$-coloring $\varphi$. We checked on a computer that for any $\varphi(u_0)$ and $\varphi(u_7)$ in $V\paren{\overrightarrow{T_5}}$ and every possible orientation of the 7 arcs $u_iu_{i+1}$, we can always extend $\varphi$ into a $\overrightarrow{T_5}$-coloring of $G$, a contradiction.
Suppose that $G$ contains a 3-vertex $v$ with at least 12 2-weak-neighbors. Let $u_1$, $u_2$, $u_3$ be the $3^+$-weak-neighbors of $v$ and let $l_i$ be the number of common 2-weak-neighbors of $v$ and $u_i$, i.e., $2$-vertices on the path between $v$ and $u_i$. Without loss of generality and by the previous discussion, we have $5\geqslant l_1\geqslant l_2\geqslant l_3$ and $l_1+l_2+l_3\ge12$. So we have to consider the following cases: \begin{itemize} \item\textbf{Case 1:} $l_1=5$, $l_2=5$, $l_3=2$. \item\textbf{Case 2:} $l_1=5$, $l_2=4$, $l_3=3$. \item\textbf{Case 3:} $l_1=4$, $l_2=4$, $l_3=4$. \end{itemize}
By minimality, the graph $G'$ obtained from $G$ by removing $v$ and its 2-weak-neighbors admits a $\overrightarrow{T_5}$-coloring $\varphi$. Let us show that in all three cases, we can extend $\varphi$ into a $\overrightarrow{T_5}$-coloring of $G$ to get a contradiction.
With an extensive search on a computer we found that if a vertex $v$ is connected to a vertex $u$ colored in $\varphi(u)$ by a path made of $l$ 2-vertices ($0\leqslant l\le5$) then $v$ can be colored in:
\begin{itemize} \item at least 1 color if $l=0$, \item at least 2 colors if $l=1$, \item at least 2 colors if $l=2$ (the sets $\acc{c, d, e}$ and $\acc{b, c, d}$ are the only sets of size 3 that can be forbidden from $v$), \item at least 3 colors if $l=3$, \item at least 4 colors if $l=4$ and \item at least 4 colors if $l=5$ (only the sets $\acc{b}$, $\acc{c}$, and $\acc{e}$ can be forbidden from $v$). \end{itemize}
In Case 1, $u_3$ forbids at most 3 colors from $v$ since $l_3=2$. If it forbids less than $3$ colors, we will be able to find a color for $v$ since $u_1$ and $u_2$ forbid at most 1 color from $v$. The only sets of 3 colors that $u_3$ can forbid are $\acc{b,c,d}$ and $\acc{c, d, e}$. Since $u_1$ and $u_2$ can each only forbid $b$, $c$ or $e$, we can always find a color for $v$.
In Case 2, $u_1$ and $u_2$ each forbid at most one color and $u_3$ forbids at most $2$ colors so there remains at least one color for $v$.
In Case 3, $u_1$, $u_2$, and $u_3$ each forbid at most one color, so there remains at least two colors for $v$.
We can always extend $\varphi$ into a $\overrightarrow{T_5}$-coloring of $G$, a contradiction.
So $G$ contains at most 5 consecutive 2-vertices and every 3-vertex has at most 11 2-weak-neighbors. Using Lemma~\ref{lem:discharge} with $k=11$ contradicts the fact that the girth of $G$ is at least 28.
\subsection{Proof of Theorem~\ref{thm:positive}.2} We prove that the 2-edge-colored planar graph $T_6$ on 6 vertices from Figure~\ref{fig:t_2edge} is $P_{22}^{(0,2)}$-universal by contradiction. Assume that $G$ is a 2-edge-colored planar graphs with girth at least $22$ that does not admit a homomorphism to $T_6$ and is minimal with respect to the number of vertices. By minimality, $G$ cannot contain a vertex $v$ with degree at most one since a $T_6$-coloring of $G-v$ can be extended to $G$. Similarly, $G$ does not contain the following configurations.
\begin{itemize} \item A path with 5 consecutive 2-vertices. \item A $3$-vertex with at least 9 2-weak-neighbors. \end{itemize}
Suppose that $G$ contains a path $u_0u_1u_2u_3u_4u_5u_6$ such that the degree of $u_i$ is two for $1\leqslant i\le5$. By minimality of $G$, $G-\{u_1, u_2, u_3, u_4, u_5\}$ admits a $T_6$-coloring $\varphi$. We checked on a computer that for any $\varphi(u_0)$ and $\varphi(u_6)$ in $V(T_6)$ and all possible colors of the 6 edges $u_iu_{i+1}$, we can always extend $\varphi$ into a $T_6$-coloring of $G$, a contradiction.
Suppose that $G$ contains a 3-vertex $v$ with at least 9 2-weak-neighbors. Let $u_1$, $u_2$, $u_3$ be the $3^+$-weak-neighbors of $v$ and let $l_i$ be the number of common 2-weak-neighbors of $v$ and $u_i$, i.e., $2$-vertices on the path between $v$ and $u_i$. Without loss of generality and by the previous discussion, we have $4\geqslant l_1\geqslant l_2\geqslant l_3$ and $l_1+l_2+l_3\geqslant 9$. So we have to consider the following cases:
\begin{itemize} \item\textbf{Case 1:} $l_1=3$, $l_2=3$, $l_3=3$. \item\textbf{Case 2:} $l_1=4$, $l_2=3$, $l_3=2$. \item\textbf{Case 3:} $l_1=4$, $l_2=4$, $l_3=1$. \end{itemize}
By minimality of $G$, the graph $G'$ obtained from $G$ by removing $v$ and its 2-weak-neighbors admits a $T_6$-coloring $\varphi$. Let us show that in all three cases, we can extend $\varphi$ into a $T_6$-coloring of $G$ to get a contradiction.
With an extensive search on a computer we found that if a vertex $v$ is connected to a vertex $u$ colored in $\varphi(u)$ by a path $P$ made of $l$ 2-vertices ($0\leqslant l\leqslant 4$) then $v$ can be colored in:
\begin{itemize} \item at least 1 color if $l=0$ (the sets $\acc{a, c, d, e, f}$ and $\acc{b, c, d, e, f}$ of colors are the only sets of size 5 that can be forbidden from $v$ for some $\varphi(u)\in V(T_6)$ and edge-colors on $P$), \item at least 2 colors if $l=1$ (the sets $\acc{a, b, c, f}$ and $\acc{b, c, e, f}$ are the only sets of size 4 that can be forbidden from $v$), \item at least 3 colors if $l=2$ (the sets $\acc{b, c, f}$, $\acc{c, e, f}$ and $\acc{d, e, f}$ are the only sets of size 3 that can be forbidden from $v$), \item at least 4 colors if $l=3$ (the set $\acc{c, b}$ is the only set of size 2 that can be forbidden from $v$), and \item at least 5 colors if $l=4$ (the sets $\acc{c}$ and $\acc{f}$ are the only sets of size 1 that can be forbidden from $v$). \end{itemize}
Suppose that we are in Case 1. Vertices $u_1$, $u_2$, and $u_3$ each forbid at most 2 colors from $v$ since $l_1=l_2=l_3=3$. Suppose that $u_1$ forbids 2 colors. It has to forbid colors $c$ and $f$ (since it is the only pair of colors that can be forbidden by a path made of 3 2-vertices). If $u_2$ or $u_3$ also forbids 2 colors, they will forbid the exact same pair of colors. We can therefore assume that they each forbid 1 color from $v$. There are 6 available colors in $T_6$, so we can always find a color for $v$ and extend $\varphi$ to a $T_6$-coloring of $G$, a contradiction. We proceed similarly for the other two cases.
So $G$ contains at most 4 consecutive 2-vertices and every 3-vertex has at most 8 2-weak-neighbors. Then Lemma~\ref{lem:discharge} with $k=8$ contradicts the fact that the girth of $G$ is at least 22.
\section{Proof of Theorem~\ref{thm:negative}.1} We construct an oriented bipartite cactus graph with girth at least $g$ and oriented chromatic number at least 5. Let $g'$ be such that $g'\geqslant g$ and $g'\equiv4\pmod{6}$. Consider a circuit $v_1,\cdots,v_{g'}$. Clearly, the oriented chromatic number of this circuit is 4 and the only tournament on 4 vertices it can map to is the tournament $\overrightarrow{T_4}$ induced by the vertices $a$, $b$, $c$, and $d$ in $\overrightarrow{T_5}$. Now we consider the cycle $C=w_1,\cdots,w_{g'}$ containing the arcs $w_{2i-1}w_{2i}$ with $1\leqslant i\leqslant g'/2$, $w_{2i+1}w_{2i}$ with $1\leqslant i\leqslant g'/2-1$, and $w_{g'}w_1$.
Suppose for contradiction that $C$ admits a homomorphism $\varphi$ such that $\varphi(w_1)=d$. This implies that $\varphi(w_2)=a$, $\varphi(w_3)=d$, $\varphi(w_4)=a$, and so on until $\varphi(w_{g'})=a$. Since $\varphi(w_{g'})=a$ and $\varphi(w_1)=d$, $w_{g'}w_1$ should map to $ad$, which is not an arc of $\overrightarrow{T_4}$, a contradiction.
Our cactus graph is then obtained from the circuit $v_1,\cdots,v_{g'}$ and $g'$ copies of $C$ by identifying every vertex $v_i$ with the vertex $w_1$ of a copy of $C$. This cactus graph does not map to $\overrightarrow{T_4}$ since one of the $v_i$ would have to map to $d$ and then the copy of $C$ attached to $v_i$ would not be $\overrightarrow{T_4}$-colorable.
\section{Proof of Theorem~\ref{thm:negative}.2} We construct a 2-edge-colored bipartite outerplanar graph with girth at least $g$ that does not map to a 2-edge-colored planar graph with at most 5 vertices. Let $g'$ be such that $g'\geqslant g$ and $g'\equiv2\pmod{4}$. Consider an alternating cycle $C=v_0,\cdots,v_{g'-1}$. For every $0\leqslant i\leqslant g'-3$, we add $g'-2$ 2-vertices $w_{i,1},\cdots,w_{i,g'-2}$ that form the path $P_i=v_iw_{i,1}\cdots w_{i,g'-2}v_{i+1}$ such that the edges of $P_i$ get the color distinct from the color of the edge $v_iv_{i+1}$. Let $G$ be the obtained graph. The 2-edge-colored chromatic number of $C$ is 5. So without loss of generality, we assume for contradiction that $G$ admits a homomorphism $\varphi$ to a 2-edge-colored planar graph $H$ on 5 vertices. Let us define $\mathcal{E}=\bigcup_{i\texttt{ even}}\varphi(v_i)$ and $\mathcal{O}=\bigcup_{i\texttt{ odd}}\varphi(v_i)$. Since $C$ is alternating, $\varphi(v_i)\ne\varphi(v_{i+2})$ (indices are modulo $g'$). Since $g'\equiv2\pmod{4}$, there is an odd number of $v_i$ with an even (resp. odd) index. Thus, $\abs{\mathcal{E}}\ge3$ and $\abs{\mathcal{O}}\ge3$. Therefore we must have $\mathcal{E}\cap\mathcal{O}\ne\emptyset$.
Notice that every two vertices $v_i$ and $v_j$ in $G$ are joined by a blue path and a red path such that the lengths of these paths have the same parity as $i-j$. Thus, the blue (resp. red) edges of $H$ must induce a connected spanning subgraph of $H$. Since $|V(H)|=5$, $H$ contains at least 4 blue (resp. red) edges. Since red and blue edges play symmetric roles in $G$ and since $|E(H)|\le9$ by Proposition~\ref{prop:3n-6}, we assume without loss of generality that $H$ contains exactly 4 blue edges. Moreover, these 4 blue edges induce a tree. In particular, the blue edges induce a bipartite graph which partitions $V(H)$ into 2 parts. Thus, every $v_i$ with even index is mapped into one part of $V(H)$ and every $v_i$ with odd index is mapped into the other part of $V(H)$. So $\mathcal{E}\cap\mathcal{O}=\emptyset$, which is a contradiction.
\section{Proof of Theorem~\ref{thm:Pmn}} Let $T$ be a $P_g^{(m, n)}$-universal planar graph for some $g$ that is minimal with respect to the subgraph order.
By minimality of $T$, there exists a graph $G \in P_g^{(m, n)}$ such that every color in $T$ has to be used at least once to color $G$. Without loss of generality, $G$ is connected, since otherwise we can replace $G$ by the connected graph obtained from $G$ by choosing a vertex in each component of $G$ and identifying them. We create a graph $G'$ from $G$ as follows:
For each edge or arc $uv$ we create $4m+n$ paths starting at $u$ and ending at $v$ made of vertices of degree 2:
\begin{itemize} \item For each type of edge, we create a path made of $g-1$ edges of this type.
\item For each type of arc, we create two paths made of $g-1$ arcs of this type such that the paths alternate between forward and backward arcs. We make the paths such that $u$ is the tail of the first arc of one path and the head of the first arc of the other path.
\item Similarly, for each type of arc we create two paths made of $g$ arcs of this type such that the paths alternate between forward and backward arcs. We make the paths such that $u$ is the tail of the first arc of one path and the head of the first arc of the other path. \end{itemize}
Notice that $G'$ is in $P_g^{(m, n)}$ and thus admits a homomorphism $\varphi$ to $T$. Since $G$ is connected and every color in $T$ has to be used at least once to color $G$, we can find for each pair of vertices $(c_1, c_2)$ in $T$ and each type of edge a path $(v_1, v_2,\cdots, v_l)$ in $G'$ made only of edges of this type such that $\varphi(v_1)=c_1$ and $\varphi(v_l)=c_2$. \newline
This implies that for every pair of vertices $(c_1, c_2)$ in $T$ and each type of edge, there exists a walk from $c_1$ to $c_2$ made of edges of this type. Therefore, for $1\leqslant j\leqslant n$, the subgraph induced by $E_j(T)$ is connected and contains all the vertices of $T$.
So $E_j(T)$ contains a spanning tree of $T$. Thus $T$ contains at least $|V(T)|-1$ edges of each type.\newline
Similarly, we can find for each pair of vertices $(c_1, c_2)$ in $T$ and each type of arc a path of even length $(v_1, v_2,\cdots, v_{2l-1})$ in $G'$ made only of arcs of this type, starting with a forward arc and alternating between forward and backward arcs such that $\varphi(v_1)=c_1$ and $\varphi(v_{2l-1})=c_2$. We can also find a path of the same kind with odd length.\newline
This implies that for every pair of vertices $(c_1, c_2)$ in $T$ and each type of arc there exist a walk of odd length and a walk of even length from $c_1$ to $c_2$ made of arcs of this type, starting with a forward arc and alternating between forward and backward arcs. Let $p$ be the maximum of the length of all these paths. Given one of these walks of length $l$, we can also find a walk of length $l+2$ that satisfies the same constraints by going through the last arc of the walk twice more. Therefore, for every $l\geqslant p$, every pair of vertices $(c_1, c_2)$ in $T$, and every type of arc, it is possible to find a homomorphism from the path $P$ of length $l$ made of arcs of this type, starting with a forward arc and alternating between forward and backward arcs to $T$ such that the first vertex is colored in $c_1$ and the last vertex is colored in $c_2$.\newline
We now show that this implies that $|A_j(T)|\ge2|V(T)|-1$ for $1\leqslant j\leqslant m$. Let $P$ be a path $(v_1, v_2,\cdots, v_p, v_{p+1})$ of length $p$ starting with a forward arc and alternating between forward and backward arcs of the same type. We color $v_1$ in some vertex $c$ of $T$. Let $C_i$ be the set of colors in which vertex $v_i$ could be colored. We know that $C_1=\acc{c}$ and $C_2$ is the set of direct successors of $c$. Set $C_3$ is the set of direct predecessors of vertices in $C_2$, so $C_1\subseteq C_3$ and, more generally, $C_i \subseteq C_{i+2}$. Let $uv$ be an arc in $T$. If $u\in C_i$ with $i$ odd, then $v\in C_{i+1}$. If $v\in C_i$ with $i$ even, then $u\in C_{i+1}$. We can see that $uv$ is capable of adding at most one vertex to a $C_i$ (and every $C_j$ with $j\equiv i\pmod 2$ and $i\leqslant j$). We know that $C_{p+1}=V(T)$, hence $T$ contains at least $2|V(T)|-1$ arcs of each type.\newline
Therefore, the underlying graph of $T$ contains at least $m\paren{2|V(T)|-1}+n\paren{|V(T)|-1}=\paren{2m+n}|V(T)|-m-n$ edges, which contradicts Proposition~\ref{prop:3n-6} for $2m+n\ge3$.
\section{Proof of Theorem~\ref{thm:ce}.1} We construct an oriented bipartite 2-outerplanar graph with girth $14$ that does not map to $\overrightarrow{T_5}$.
The oriented graph $X$ is a cycle on 14 vertices $v_0,\cdots,v_{13}$ such that the tail of every arc is the vertex with even index, except for the arc $\overrightarrow{v_{13}v_0}$. Suppose for contradiction that $X$ has a $\overrightarrow{T_5}$-coloring $h$ such that no vertex with even index maps to $b$. The directed path $v_{12}v_{13}v_0$ implies that $h(v_{12})\ne h(v_0)$. If $h(v_0)=a$, then $h(v_1)\in\acc{b,c}$ and $h(v_2)=a$ since $h(v_2)\ne b$. By contagion, $h(v_0)=h(v_2)=\cdots=h(v_{12})=a$, which is a contradiction. Thus $h(v_0)\ne a$. If $h(v_0)=c$, then $h(v_1)=d$ and $h(v_2)=c$ since $h(v_2)\ne b$. By contagion, $h(v_0)=h(v_2)=\cdots=h(v_{12})=c$, which is a contradiction. Thus $h(v_0)\ne c$. So $h(v_0)\not\in\acc{a,b,c}$, that is, $h(v_0)\in\acc{d,e}$. Similarly, $h(v_{12})\in\acc{d,e}$. Notice that $\overrightarrow{T_5}$ does not contain a directed path $xyz$ such that $x$ and $z$ belong to $\acc{d,e}$. So the path $v_{12}v_{13}v_0$ cannot be mapped to $\overrightarrow{T_5}$. Thus $X$ does not have a $\overrightarrow{T_5}$-coloring $h$ such that no vertex with even index maps to $b$.
Consider now the path $P$ on 7 vertices $p_0,\cdots,p_6$ with the arcs $\overrightarrow{p_1p_0}$, $\overrightarrow{p_1p_2}$, $\overrightarrow{p_3p_2}$, $\overrightarrow{p_4p_3}$, $\overrightarrow{p_5p_4}$, $\overrightarrow{p_5p_6}$. It is easy to check that there exists no $\overrightarrow{T_5}$-coloring $h$ of $P$ such that $h(p_0)=h(p_6)=b$.
We construct the graph $Y$ as follows: we take 8 copies of $X$ called $X_{\texttt{main}}$, $X_0$, $X_2$, $X_4$, $\cdots$, $X_{12}$. For every pair $(i,j)\in\acc{0,2,4,6,8,10,12}^2$, we take a copy $P_{i,j}$ of $P$, we identify the vertex $p_0$ of $P_{i,j}$ with the vertex $v_i$ of $X_{\texttt{main}}$ and we identify the vertex $p_6$ of $P_{i,j}$ with the vertex $v_j$ of $X_i$.
So $Y$ is our oriented bipartite 2-outerplanar graph with girth $14$. Suppose for contradiction that $Y$ has a $\overrightarrow{T_5}$-coloring $h$. By previous discussion, there exists $i\in\acc{0,2,4,6,8,10,12}$ such that the vertex $v_i$ of $X_{\texttt{main}}$ maps to $b$. Also, there exists $j\in\acc{0,2,4,6,8,10,12}$ such that the vertex $v_j$ of $X_i$ maps to $b$. So the corresponding path $P_{i,j}$ is such that $h(p_0)=h(p_6)=b$, a contradiction. Thus $Y$ does not map to $\overrightarrow{T_5}$.
\section{Proof of Theorem~\ref{thm:ce}.2} We construct a 2-edge-colored 2-outerplanar graph with girth $11$ that does not map to $T_6$. We take 12 copies $X_0,\cdots,X_{11}$ of a cycle of length $11$ such that every edge is red. Let $v_{i,j}$ denote the $j^{\text{\tiny th}}$ vertex of $X_i$. For every $0\leqslant i\leqslant 10$ and $0\leqslant j\leqslant 10$, we add a path consisting of 5 blue edges between $v_{i,11}$ and $v_{j,i}$.
Notice that in any $T_6$-coloring of a red odd cycle, one vertex must map to $c$. So we suppose without loss of generality that $v_{0,11}$ maps to $c$. We also suppose without loss of generality that $v_{0,0}$ maps to $c$. The blue path between $v_{0,11}$ and $v_{0,0}$ should map to a blue walk of length 5 from $c$ to $c$ in $T_6$. Since $T_6$ contains no such walk, our graph does not map to $T_6$.
\section{Proof of Theorem~\ref{thm:ce}.3} We construct a 2-edge-colored bipartite 2-outerplanar graph with girth $10$ that does not map to $T_6$. By Theorem~\ref{thm:negative}.2, there exists a bipartite outerplanar graph $M$ with girth at least $10$ such that for every $T_6$-coloring $h$ of $M$, there exists a vertex $v$ in $M$ such that $h(v)=c$.
Let $X$ be the graph obtained as follows. Take a main copy $Y$ of $M$. For every vertex $v$ of $Y$, take a copy $Y_v$ of $M$. Since $Y_v$ is bipartite, let $A$ and $B$ the two independent sets of $Y_v$. For every vertex $w$ of $A$, we add a path consisting of 5 blue edges between $v$ and $w$. For every vertex $w$ of $B$, we add a path consisting of 4 edges colored (blue, blue, red, blue) between $v$ and $w$.
Notice that $X$ is indeed a bipartite 2-outerplanar graph with girth $10$. We have seen in the previous proof that $T_6$ contains no blue walk of length 5 from $c$ to $c$. We also check that $T_6$ contains no walk of length 4 colored (blue, blue, red, blue) from $c$ to $c$. By the property of $M$, for every $T_6$-coloring $h$ of $X$, there exist a vertex $v$ in $Y$ and a vertex $w$ in $Y_v$ such that $h(v)=h(w)=c$. Then $h$ cannot be extended to the path of length 4 or 5 between $v$ and $w$. So $X$ does not map to $T_6$.
\section{Proof of Theorem~\ref{thm:NPC}.1} Let $g$ be the largest integer such that there exists a graph in $P_g^{(1,0)}$ that does not map to $\overrightarrow{T_5}$. Let $G\in P_g^{(1,0)}$ be a graph that does not map to $\overrightarrow{T_5}$ and such that the underlying graph of $G$ is minimal with respect to the homomorphism order.
Let $G'$ be obtained from $G$ by removing an arbitrary arc $v_0v_3$ and adding two vertices $v_1$ and $v_2$ and the arcs $v_0v_1$, $v_2v_1$, $v_2v_3$. By minimality, $G'$ admits a homomorphism $\varphi$ to $\overrightarrow{T_5}$. Suppose for contradiction that $\varphi(v_2)=c$. This implies that $\varphi(v_1)=\varphi(v_3)=d$. Thus $\varphi$ provides a $\overrightarrow{T_5}$-coloring of $G$, a contradiction. So $\varphi(v_2)\ne c$ and, similarly, $\varphi(v_2)\ne e$.
Given a set $S$ of vertices of $\overrightarrow{T_5}$, we say that we force $S$ if we specify a graph $H$ and a vertex $v\in V(H)$ such that for every vertex $x\in V\paren{\overrightarrow{T_5}}$, we have $x\in S$ if and only if there exists a $\overrightarrow{T_5}$-coloring $\varphi$ of $H$ such that $\varphi(v)=x$. Thus, with the graph $G'$ and the vertex $v_2$, we force a non-empty set $\mathcal{S}\subset V\paren{\overrightarrow{T_5}}\setminus\acc{c,e}=\acc{a,b,d}$.
We use a series of constructions in order to eventually force the set $\acc{a,b,c,d}$ starting from $\mathcal{S}$. Recall that $\acc{a,b,c,d}$ induces the tournament $\overrightarrow{T_4}$. We thus reduce $\overrightarrow{T_5}$-coloring to $\overrightarrow{T_4}$-coloring, which is NP-complete for subcubic bipartite planar graphs with any given girth~\cite{GO15}.
These constructions are summarized in the tree depicted in Figure~\ref{fig:oriented}. The vertices of this tree are the non-empty subsets of $\acc{a,b,d}$ and a few other sets. In this tree, an arc from $S_1$ to $S_2$ means that if we can force $S_1$, then we can force $S_2$. Every arc has a label indicating the construction that is performed. In every case, we suppose that $S_1$ is forced on the vertex $v$ of a graph $H_1$ and we construct a graph $H_2$ that forces $S_2$ on the vertex $w$.
\begin{figure}
\caption{Forcing the set $\acc{a,b,c,d}$.}
\label{fig:oriented}
\end{figure}
\begin{itemize} \item Arcs labelled ``out'': The set $S_2$ is the out-neighborhood of $S_1$ in $\overrightarrow{T_5}$. We construct $H_2$ from $H_1$ by adding a vertex $w$ and the arc $vw$. Thus, $S_2$ is indeed forced on the vertex $w$ of $H_2$. \item Arcs labelled ``in'': The set $S_2$ is the in-neighborhood of $S_1$ in $\overrightarrow{T_5}$. We construct $H_2$ from $H_1$ by adding a vertex $w$ and the arc $wv$. Thus, $S_2$ is indeed forced on the vertex $w$ of $H_2$. \item Arc labelled ``Z'': Let $g'$ be the smallest integer such that $g'\geqslant g$ and $g'\equiv4\pmod{6}$. We consider a circuit $v_1,\cdots,v_{g'}$. For $2\leqslant i\leqslant g'$, we take a copy of $H_1$ and we identify its vertex $v$ with $v_i$. We thus obtain the graph $H_2$ and we set $w=v_2$. Let $\varphi$ be any $\overrightarrow{T_5}$-coloring of $H_2$. By construction, $\acc{\varphi(v_2),\cdots,\varphi(v_{g'})}\subset S_1=\acc{a,b,d}$. A circuit of length $\not\equiv0\pmod{3}$ cannot map to the 3-circuit induced by $\acc{a,b,d}$, so $\varphi(v_1)\in\acc{c,e}$. If $\varphi(v_1)=c$ then $\varphi(v_2)=d$ and if $\varphi(v_1)=e$ then $\varphi(v_2)=a$. Thus $S_2=\acc{a,d}$. \end{itemize}
\section{Proof of Theorem~\ref{thm:NPC}.2} Let $g$ be the largest integer such that there exists a graph in $P_g^{(0,2)}$ that does not map to $T_6$. Let $G\in P_g^{(0,2)}$ be a graph that does not map to $T_6$ and such that the underlying graph of $G$ is minimal with respect to the homomorphism order.
Let $G'$ be obtained from $G$ by subdividing an arbitrary edge $v_0v_3$ twice to create the path $v_0v_1v_2v_3$ such that the edges $v_0v_1$ and $v_1v_2$ are red and the edge $v_2v_3$ gets the color of the original edge $v_0v_3$. By minimality, $G'$ admits a homomorphism $\varphi$ to $T_6$. Suppose for contradiction that $\varphi(v_1)=f$. This implies that $\varphi(v_0)=\varphi(v_2)=b$. Thus $\varphi$ provides a $T_6$-coloring of $G$, a contradiction.
Given a set $S$ of vertices of $T_6$, we say that we force $S$ if we specify a graph $H$ and a vertex $v\in V(H)$ such that for every vertex $x\in V(T_6)$, we have $x\in S$ if and only if there exists a $T_6$-coloring $\varphi$ of $H$ such that $\varphi(v)=x$. Thus, with the graph $G'$ and the vertex $v_1$, we force a non-empty set $\mathcal{S}\subset V(T_6)\setminus\acc{f}=\acc{a,b,c,d,e}$.
Recall that the core of a graph is the smallest subgraph which is also a homomorphic image. We say that a subset $S$ of $V(T_6)$ is \emph{good} if the core of the subgraph induced by $S$ is isomorphic to the graph $T_4$, which is a clique on 4 vertices such that both the red and the blue edges induce a path of length $3$. We use a series of constructions in order to eventually force a good set starting from $\mathcal{S}$. We thus reduce $T_6$-coloring to $T_4$-coloring, which is NP-complete for subcubic bipartite planar graphs with any given girth~\cite{MO17}.
These constructions are summarized in the forest depicted in Figure~\ref{fig:2edge}. The vertices of this forest are the non-empty subsets of $\acc{a,b,c,d,e}$ together with a few auxiliary sets of vertices containing $f$. In this forest, an arc from $S_1$ to $S_2$ means that if we can force $S_1$, then we can force $S_2$. Every set with no outgoing arc is good. We detail below the construction that is performed for each arc. In every case, we suppose that $S_1$ is forced on the vertex $v$ of a graph $H_1$ and we construct a graph $H_2$ that forces $S_2$ on the vertex $w$.
\begin{figure}
\caption{Forcing a good set.}
\label{fig:2edge}
\end{figure}
\begin{itemize} \item Blue arcs: The set $S_2$ is the blue neighborhood of $S_1$ in $T_6$. We construct $H_2$ from $H_1$ by adding a vertex $w$ adjacent to $v$ such that $vw$ is blue. Thus, $S_2$ is indeed forced on the vertex $w$ of $H_2$. \item Red arcs: The set $S_2$ is the red neighborhood of $S_1$ in $T_6$. The construction is as above except that the edge $vw$ is red. \item Dashed blue arcs: The set $S_2$ is the set of vertices incident to a blue edge contained in the subgraph induced by $S_1$ in $T_6$. We construct $H_2$ from two copies of $H_1$ by adding a blue edge between the vertex $v$ of one copy and the vertex $v$ of the other copy. Then $w$ is one of the vertices $v$. \item Dashed red arcs: The set $S_2$ is the set of vertices incident to a red edge contained in the subgraph induced by $S_1$ in $T_6$. The construction is as above except that the added edge is red. \item Arc labelled "X": Let $g'=2\ceil{g/2}$. We consider an even cycle $v_1,\cdots,v_{g'}$ such that $v_1v_{g'}$ is red and the other edges are blue. For every vertex $v_i$, we take a copy of $H_1$ and we identify its vertex $v$ with $v_i$. We thus obtain the graph $H_2$ and we set $w=v_1$. Let $\varphi$ be any $T_6$-coloring of $H_2$. In any $T_6$-coloring of $H_2$, the cycle $v_1,\cdots,v_{g'}$ maps to a 4-cycle with exactly one red edge contained in the subgraph of $T_6$ induced by $S_1=\acc{a,b,c,d,e}$. These 4-cycles are $aedb$ with red edge $ae$ and $cdba$ with red edge $cd$. Since $w$ is incident to the red edge in the cycle $v_1,\cdots,v_{g'}$, $w$ can be mapped to $a$, $e$, $c$, or $d$ but not to $b$. Thus $S_2=\acc{a,c,d,e}$. \item Arc labelled "Y": We consider an alternating cycle $v_0,\cdots,v_{8g-1}$. For every vertex $v_i$, we take a copy of $H_1$ and we identify its vertex $v$ with $v_i$. We obtain the graph $H_2$ by adding the vertex $x$ adjacent to $v_0$ and $v_{4g+2}$ such that $xv_0$ and $xv_{4g+2}$ are blue. We set $w=v_0$. 
In any $T_6$-coloring $\varphi$ of $H_2$, the cycle $v_0,\cdots,v_{8g-1}$ maps to the alternating $4$-cycle $acde$ contained in $S_1=\acc{a,c,d,e}$ such that $\varphi(v_i)=\varphi(v_{(i+4) \bmod 8g})$. So, a priori, either $\acc{\varphi(v_0),\varphi(v_{4g+2})}=\acc{a,d}$ or $\acc{\varphi(v_0),\varphi(v_{4g+2})}=\acc{c,e}$. In the former case, we can extend $\varphi$ to $H_2$ by setting $\varphi(x)=b$. In the latter case, we cannot color $x$ since $c$ and $e$ have no common blue neighbor in $T_6$. Thus, $\acc{\varphi(v_0),\varphi(v_{4g+2})}=\acc{a,d}$ and $S_2=\acc{a,d}$. \end{itemize}
\end{document} |
\begin{document}
\title{Zero-sum $K_m$ over $\Z$ and the story of $K_4$}
\begin{center}
\begin{multicols}{2}
Yair Caro\\[1ex] {\small Dept. of Mathematics and Physics\\ University of Haifa-Oranim\\ Tivon 36006, Israel\\ [email protected]}
\columnbreak
Adriana Hansberg\\[1ex] {\small Instituto de Matem\'aticas\\ UNAM Juriquilla\\ Quer\'etaro, Mexico\\ [email protected]}\\[2ex]
\end{multicols}
Amanda Montejano\\[1ex] {\small UMDI, Facultad de Ciencias\\ UNAM Juriquilla\\ Quer\'etaro, Mexico\\ [email protected]}\\[4ex]
\end{center}
\begin{abstract}
We prove the following results solving a problem raised in Caro-Yuster \cite{CY3}. For a positive integer $m\geq 2$, $m\neq 4$, there are infinitely many values of $n$ such that the following holds: There is a weighting function $f:E(K_n)\to \{-1,1\}$ (and hence a weighting function $f: E(K_n)\to \{-1,0,1\}$), such that $\sum_{e\in E(K_n)}f(e)=0$ but, for every copy $H$ of $K_m$ in $K_n$, $\sum_{e\in E(H)}f(e)\neq 0$. On the other hand, for every integer $n\geq 5$ and every weighting function $f:E(K_n)\to \{-1,1\}$ such that $|\sum_{e\in E(K_n)}f(e)|\leq \binom{n}{2}-h(n)$, where $h(n)=2(n+1)$ if $n \equiv 0$ (mod $4$) and $h(n)=2n$ if $n \not\equiv 0$ (mod $4$), there is always a copy $H$ of $K_4$ in $K_n$ for which $\sum_{e\in E(H)}f(e)=0$, and the value of $h(n)$ is sharp.
\end{abstract}
\section{Introduction}
Our main source of motivation is a recent paper of Caro and Yuster \cite{CY3}, extending classical zero-sum Ramsey theory to weighting functions $f:E(K_n)\to \{-r,-r+1, \cdots ,0, \cdots , r-1,r\}$ seeking zero-sum copies of a given graph $H$ subject to the obviously necessary condition that $|\sum_{e\in E(K_n)}f(e)|$ is bounded away from $ r\binom{n}{2}$, or even in the extreme case where $|\sum_{e\in E(K_n)}f(e)|=0$.
In zero-sum Ramsey theory, one studies functions $f:E(K_n)\to X$, where $X$ is usually the cyclic group $\mbox{${\mathbb Z}$}_k$ or (less often) an arbitrary finite abelian group. The goal is to show that, under some necessary divisibility conditions imposed on the number of the edges $e(H)$ of a graph $H$ and for sufficiently large $n$, there is always a zero-sum copy of $H$, where by a zero-sum copy of $H$ we mean a copy of $H$ in $K_n$ for which $\sum_{e\in E(H)}f(e)=0$ (where $0$ is the neutral element of $X$). For several results concerning zero-sum Ramsey theory for graphs see \cite{AC,BD1,BD2,C1,C2,CY1,FK,SS}, for zero-sum Ramsey problems concerning matrices and linear algebraic techniques see \cite{BCRY,CY2,WW,W}.
The following notation was introduced in \cite{CY3} and the following zero-sum problems over $\mbox{${\mathbb Z}$}$
were considered. For positive integers $r$ and $q$, an \emph{$(r,q)$-weighting} of the edges of the complete graph $K_n$ is a function $f:E(K_n)\to \{-r, \cdots ,r\}$ such that $|\sum_{e\in E(K_n)}f(e)|<q$. The general problem considered in \cite{CY3} is to find nontrivial conditions on the $(r,q)$-weightings that guarantee the existence of certain bounded-weight subgraphs and even zero-weighted subgraphs (also called \emph{zero-sum} subgraphs). So, given a subgraph $H$ of $K_n$, and a weighting $f:E(K_n)\to \{-r,\cdots ,r\}$, the \emph{weight} of $H$ is defined as $w(H)=\sum_{e\in E(H)}f(e)$, and we say that $H$ is a \emph{zero-sum graph} if $w(H)=0$. Finally, we say that a weighting function $f:E(K_n) \to \{-1,1\}$ is \emph{zero-sum-$H$ free} if it contains no zero-sum copy of $H$.
Among the many results proved in \cite{CY3}, the following theorem and open problem are the main motivation of this paper.
\begin{theorem}[Caro and Yuster, \cite{CY3}]\label{thm:CY}
For a real $\epsilon >0$ the following holds. For $n$ sufficiently large, any weighting $f:E(K_n)\to \{-1,0,1\}$ with $|\sum_{e\in E(K_n)}f(e)|\leq (1-\epsilon)n^2/6$ contains a zero-sum copy of $K_4$. On the other hand, for any positive integer $m$ which is not of the form $m = 4d^2$, there are infinitely many integers $n$ for which there is a weighting $f:E(K_n)\to \{-1,0,1\}$ with $\sum_{e\in E(K_n)}f(e)=0$ without a zero-sum copy of $K_m$. \end{theorem}
The authors posed the following complementary problem:
\begin{problem}[Caro and Yuster, \cite{CY3}]\label{probl} For an integer $m = 4d^2$, is it true that, for $n$ sufficiently large, any weighting $f:E(K_n)\to \{-1,0,1\}$ with $\sum_{e\in E(K_n)}f(e)=0$ contains a zero-sum copy of $K_m$? \end{problem}
The main result in this paper is a negative answer to the above problem for any $m \geq 2$ except for $m=4$, already for weightings of the form $f:E(K_n)\to \{-1,1\}$ with $\sum_{e\in E(K_n)}f(e)=0$. On the other hand, concerning the study of the existence of zero-sum copies of $K_4$, we prove a result analogous to Theorem \ref{thm:CY} where the range $\{-1,1\}$ is considered instead of $\{-1,0,1\}$. Finally, we show that Theorem \ref{thm:CY} cannot be extended to wider ranges either. To be more precise, we gather our results in the following theorem.
\begin{theorem}\label{thm:main} \hspace{1cm} \begin{enumerate}
\item For any positive integer $m \geq2$, $m\neq 4$, there are infinitely many values of $n$ such that the following holds: There is a weighting function $f:E(K_n)\to \{-1,1\}$ with $\sum_{e\in E(K_n)}f(e)=0$ which is zero-sum-$K_m$ free.
\item Let $n$ be an integer such that $n\geq 5$. Define $g(n)=2(n+1)$ if $n \equiv 0$ (mod $4$) and $g(n)=2n$ if $n \not\equiv 0$ (mod $4$). Then, for any weighting $f:E(K_n)\to \{-1,1\}$ such that $|\sum_{e\in E(K_n)}f(e)|\leq \binom{n}{2}-g(n)$, there is a zero-sum copy of $K_4$.
\item There are infinitely many values of $n$ such that the following holds: There is a weighting function $f:E(K_n)\to \{-2,-1,0,1,2\}$ with $\sum_{e\in E(K_n)}f(e)=0$ which is zero-sum-$K_4$ free.
\end{enumerate} \end{theorem}
Theorem \ref{thm:main} together with the above Theorem \ref{thm:CY} do not only solve Problem \ref{probl}, they also supply a good understanding of the situation concerning $K_4$ as the value of $g(n)$ is sharp and the upper bound $(1-\epsilon)n^2/6$ in Theorem \ref{thm:CY} is nearly sharp, as already observed in \cite{CY3}.
We will use the following notation. Given a weighting $f:E(K_n)\to \{-r, \cdots, r\}$ and $i\in \ \{-r, \cdots, r\}$, denote by $E(i)$ the set of the $i$-weighted edges, that is, $E(i)=f^{-1}(i)$ and define $e(i)=|E(i)|$. Given a vertex $x\in V(K_n)$ we use $deg_{i}(x)$ to denote the number of $i$-weighted edges incident to $x$, that is, $deg_{i}(x)=|\{u:f(xu)=i\}|$.
In Section \ref{sec:K4} we will prove instances 2 and 3 of Theorem \ref{thm:main}, corresponding to the study of the existence of zero-sum copies of $K_4$. In order to prove instance 2, we will use an equivalent formulation consequence of the following remark.
\begin{remark}\label{rem:eq}
A weighting $f:E(K_n)\to \{-1,1\}$ satisfies $\left|\sum_{e\in E(K_n)}f(e)\right|\leq \binom{n}{2}-g(n)$ if and only if $\min\{e(-1),e(1)\}\geq \frac{1}{2}g(n)$. \end{remark}
The remark follows from the fact that $e(1)+e(-1)=\binom{n}{2}$, which implies $|\sum_{e\in E(K_n)}f(e)|=|e(1)-e(-1)|=\max\{e(-1),e(1)\}-\min\{e(-1),e(1)\}=\binom{n}{2}-2\min\{e(-1),e(1)\}$.
In Section \ref{sec:K4} we will also prove that instance 2 of Theorem \ref{thm:main} is best possible by exhibiting, for each $n\geq5$, a weighting function $f:E(K_n)\to \{-1,1\}$ with $\min\{e(-1),e(1)\}= \frac{1}{2}g(n)-1$ and no zero-sum copies of $K_4$. Moreover, we will characterize the extremal functions.
Finally, relying heavily on Pell equations and some classical biquadratic Diophantine equations,
in Section \ref{sec:Kk} we will prove instance 1 of Theorem \ref{thm:main}, corresponding to the study of the existence of zero-sum copies of $K_m$ in $0$-weighted weightings, where $m \neq 4$.
\section{The case of $K_4$}\label{sec:K4}
We will use standard graph theoretical notation to denote particular graphs. Having said this, $K_{1,3}$ will stand for the star with three leaves, $K_3 \cup K_1$ for the disjoint union of a triangle and a vertex, $P_k$ for a path with $k$ edges, and $C_k$ for a cycle with $k$ edges.
A weighting function $f:E(K_n) \to \{-1,1\}$ is \emph{zero-sum-$K_4$ free} if and only if the graph induced by $E(-1)$ (or equivalently $E(1)$) is $\{K_{1,3}, K_3 \cup K_1, P_3\}$-free (in the induced sense). The following lemma, which characterizes the $K_3$-free subclass of the family of $\{K_{1,3}, K_3 \cup K_1, P_3\}$-free graphs, will be useful in proving the forthcoming results. We define \[h(n)= \left\{ \begin{array}{rl}
n+1, & \mbox{ if } n\equiv 0 \mbox{ (mod $4$), and} \\
n, & \mbox{ otherwise. } \\ \end{array}\right.\]
\begin{lemma}\label{lem:triangle-free} Let $G$ be a $\{K_{1,3}, K_3, P_3\}$-free graph on $n$ vertices. Then each component of $G$ is isomorphic to one of $C_4$, $K_1$, $K_2$ or $P_2$. Moreover, $e(G) \le h(n)-1$, and equality holds if and only if $G \cong J \cup \bigcup_{i=1}^{q} C_4$, where $J \in \{\emptyset, K_1, K_2, P_2\}$ and $q = \lfloor \frac{n}{4} \rfloor$. \end{lemma}
\begin{proof}
Let $J$ be a connected component of $G$. If $J$ has at most $3$ vertices, then it is easy to see that $J \in \{K_1, K_2, P_2\}$. So assume that $J$ has at least $4$ vertices. Then, since $J$ is $\{K_3, P_3\}$-free, we infer that $J$ has no vertex of degree larger than $2$ and so we can deduce that $J \cong C_4$. Further, we note that $e(J) = |J|$ if $J \cong C_4$, and $e(J) = |J|-1$ otherwise. This implies that, among all $\{K_{1,3}, K_3, P_3\}$-free graphs on $n$ vertices, $G$ has maximum number of edges if and only if $G \cong J \cup \bigcup_{i=1}^{q} C_4$, where $J \in \{\emptyset, K_1, K_2, P_2\}$ and $q = \lfloor \frac{n}{4} \rfloor$. Since, clearly, $e(J \cup \bigcup_{i=1}^{q} C_4) = h(n) -1$, the proof is complete. \end{proof}
\begin{lemma}\label{lem:one_is_K3-free} Let $n\geq 5$ and $f:E(K_n)\to \{-1,1\}$ be a zero-sum-$K_4$ free coloring. Let $G_{-1}$ and $G_1$ be the graphs induced by $E(-1)$ and $E(1)$, respectively. Then at least one of $G_{-1}$ or $G_1$ is triangle-free. \end{lemma}
\begin{proof} Suppose for contradiction that both $G_{-1}$ and $G_1$ have a triangle. Let $abc$ be a triangle in $G_{-1}$ and $uvw$ a triangle in $G_1$. Suppose first that $abc$ and $uvw$ have a vertex in common, say $a=u$. Consider the graph $J$ induced by the $(-1)$-edges among vertices in $\{a,b,c,v,w\}$. If a vertex $x \in \{b,c\}$ is neighbor of both $v$ and $w$, then $\{x,v,w,a\}$ would induce a $K_{1,3}$ in $G_{-1}$, which is not possible. If no vertex $x \in \{b,c\}$ is adjacent to some $y \in \{v,w\}$, then $\{a,b,c,y\}$ would induce a $K_3\cup K_1$ in $G_{-1}$, which again is not possible. Hence, $\{b,c,v,w\}$ induces two independent edges. But then $\{b,c,v,w\}$ induces a $P_3$ in $G_{-1}$, a contradiction. Hence, we can assume that any pair of triangles such that one has only $(-1)$-edges and the other only $1$-edges are vertex disjoint. This implies that from any vertex in $\{u,v,w\}$ there is at most one $(-1)$-edge to vertices from $\{a,b,c\}$. Analogously, from any vertex in $\{a,b,c\}$ there is at most one $1$-edge to vertices from $\{u,v,w\}$. But this implies that there are at most $6$ edges between $\{a,b,c\}$ and $\{u,v,w\}$, which is false. Since in all cases we obtain a contradiction, we conclude that at least one of $G_{-1}$ or $G_1$ is triangle-free. \end{proof}
By Remark \ref{rem:eq}, the next result is equivalent to instance 2 of Theorem \ref{thm:main}.
\begin{theorem}\label{thm:k4} Let $n\geq 5$ and $f:E(K_n)\to \{-1,1\}$ such that $\min\{e(-1),e(1)\}\geq h(n)$. Then there is a zero-sum $K_4$. \end{theorem}
\begin{proof}
Let $f:E(K_n)\to \{-1,1\}$ be such that $\min\{e(-1),e(1)\}\geq h(n)$ and suppose for contradiction that it has no zero-sum $K_4$. Let $G_{-1}$ and $G_1$ be the graphs induced by $E(-1)$ and $E(1)$, respectively. Then both $G_{-1}$ and $G_1$ are $\{K_{1,3}, K_3 \cup K_1, P_3\}$-free graphs. By Lemma \ref{lem:one_is_K3-free}, $G_{-1}$ or $G_1$ is $K_3$-free. So we may assume, without loss of generality, that $G_{-1}$ is triangle-free. It follows by Lemma \ref{lem:triangle-free} that $e(-1) = |E(G_{-1})| \le h(n)-1$, which is a contradiction to the hypothesis. \end{proof}
The following theorem shows that Theorem \ref{thm:k4} is best possible and characterizes the extremal zero-sum-$K_4$ free weightings. We will use Mantel's Theorem, that any graph on $n$ vertices and at least $\frac{n^2}{4}+1$ edges contains a copy of $K_3$.
\begin{theorem}\label{thm:k4_sharp} Let $n\geq 5$ and $f:E(K_n)\to \{-1,1\}$ such that $e(1) = h(n)-1$. Then $f$ is zero-sum-$K_4$ free if and only if the graph induced by $E(1)$ is isomorphic to $J \cup \bigcup_{i=1}^{q} C_4$, where $J \in \{\emptyset, K_1, K_2, P_2\}$ and $q = \lfloor \frac{n}{4} \rfloor$. \end{theorem}
\begin{proof} If the graph induced by $E(1)$ is isomorphic to $J \cup \bigcup_{i=1}^{q} C_4$, where $J \in \{\emptyset, K_1, K_2, P_2\}$ and $q = \lfloor \frac{n}{4} \rfloor$, it is easy to check that $f$ is zero-sum-$K_4$ free. Conversely, let $f$ be zero-sum-$K_4$ free. Then the graphs $G_{-1}$ and $G_1$ induced by $E(-1)$ and $E(1)$, respectively, are both $\{K_{1,3}, K_3 \cup K_1, P_3\}$-free. If $n = 5$, it is easy to check that the only $\{K_{1,3}, K_3 \cup K_1, P_3\}$-free graph with $h(5) - 1 = 4$ edges is isomorphic to $C_4 \cup K_1$, and so we are done. Hence, we may assume that $n \ge 6$. Observe that \[e(-1) = \frac{n(n-1)}{2} - h(n)+1 \ge \frac{n(n-1)}{2} - n = \frac{n(n-3)}{2},\] whose right-hand side is at least $\frac{n^2}{4}$ for $n \ge 6$. Hence, by Mantel's Theorem, $G_{-1}$ has a triangle, and, by Lemma \ref{lem:one_is_K3-free}, this implies that $G_1$ is triangle-free. It follows that $G_1$ is a $\{K_{1,3}, K_3, P_3\}$-free graph on $n$ vertices and with $h(n) - 1$ edges. Thus, with Lemma \ref{lem:triangle-free}, we obtain that $G_1$ is isomorphic to $J \cup \bigcup_{i=1}^{q} C_4$, where $J \in \{\emptyset, K_1, K_2, P_2\}$ and $q = \lfloor \frac{n}{4} \rfloor$, and we are done. \end{proof}
The following theorem is instance 2 from Theorem \ref{thm:main}. It shows that, whenever we take a wider range for the weighting function $f$, we cannot hope for a result as in Theorem \ref{thm:k4} anymore.
\begin{theorem}\label{thm:larger_range} There are infinitely many values of $n$ such that the following holds: There is a weighting function $f:E(K_n)\to \{-2,-1,0,1,2\}$ with $\sum_{e\in E(K_n)}f(e)=0$ which is zero-sum-$K_4$ free. \end{theorem}
\begin{proof} Let $X \cup Y$ be a partition of the vertex set of $K_n$ and consider the weighting function $f:E(K_n)\to \{-2,-1,0,1,2\}$ such that \[ f(uv) = \left\{\begin{array}{ll} -2, & \mbox{if } u,v \in X\\ 1, & \mbox{if } u,v \in Y\\ 0, & \mbox{otherwise}. \end{array} \right. \] Clearly, $f$ is zero-sum-$K_4$ free. On the other hand, $\sum_{e\in E(K_n)}f(e)=0$ if and only if \[
-2\frac{|X|(|X|-1)}{2} + \frac{|Y|(|Y|-1)}{2} = 0, \]
which is equivalent to $(2|Y|-1)^2-2(2|X|-1)^2 = -1$. Hence, solving the latter equation is equivalent to solving the following Pell equation \begin{equation}\label{eq:pell-pythago} y^2-2x^2 = -1, \end{equation}
for (odd) integers $x = 2|X|-1$ and $y= 2|Y|-1$. It is well-known that the Diophantine equation $y^2-2x^2=\pm 1$ has infinitely many solutions given by \[x_{k}=\frac{a^k-b^k}{a-b}=\frac{a^k-b^k}{2\sqrt{2}}, \hspace{2ex} y_k=\frac{a^k+b^k}{2},\]
where $a=1+\sqrt{2}$, $b=1-\sqrt{2}$ and $k \in \mathbb{N}$. Moreover, since $y_k^2 - 2x_k^2 = (-1)^k$, the solutions for equation (\ref{eq:pell-pythago}) are the pairs $(x_k,y_k)$ where $k$ is odd. Observe also that, for odd $k$, $x_k$ and $y_k$ are odd, too. Hence, each odd $k$ gives us a solution $(\frac{x_k+1}{2}, \frac{y_k+1}{2})$ for $(|X|,|Y|)$ and thus for $n = \frac{x_k+y_k}{2}+1$ and we are done. \end{proof}
For the sake of comprehension, let us compute small values of $n=\frac{x_k+y_k}{2}+1$ and exhibit how the partition $(|X|,|Y|)=(\frac{x_k+1}{2}, \frac{y_k+1}{2})$ gives a zero-sum weighting function $f$ as described in the theorem. Recall that we only want to consider solutions for odd values of $k$.
So we have $(x_1,x_3,x_5,\dots)=(1,5,29,\dots)$ and $(y_1,y_3,y_5,\dots)=(1,7,41,\dots)$, and the corresponding sequence of $n$'s is $(2,7,36,\dots )$. The case of $n=2$ is not interesting, for trivial reasons. For $n=7$, the partition is $(|X|,|Y|)=(3,4)$, thus there will be $ \binom{3}{2}$ edges weighted with $-2$, $ \binom{4}{2}$ edges weighted with $1$ and the rest of the edges weighted with $0$, adding up to zero. For $n=36$, the partition is $(|X|,|Y|)=(15,21)$, and the sum of weighted edges is $-2\binom{15}{2}+1\binom{21}{2}=(-2) \cdot 105+1 \cdot 210=0$.
\section{The case of $K_m$, $m\neq 4$}\label{sec:Kk}
A \emph{balanced} $\{-1,1\}$-weighting function $f:E(K_n)\to \{-1,1\}$ is a function for which $e(-1)=e(1)$. In Section \ref{sec:K4}, we prove that, for $n\geq 5$, any function $f:E(K_n)\to \{-1,1\}$ with sufficiently many edges assigned to each type contains a zero-sum $K_4$. In this section, we prove that this is not true for $K_m$ with $m \in \mathbb{N} \setminus \{1, 4\}$. In other words, we exhibit, for infinitely many values of $n$, the existence of a balanced weighting function $f:E(K_n)\to \{-1,1\}$ without zero-sum copies of $K_m$, where $m \neq 1,4$. In order to define those functions, consider first the following Pell equation: \begin{equation}\label{eq:pell} 8x^2-8x+1=y^2. \end{equation} It is well known that such a Diophantine equation has infinitely many solutions given by the recursion \[(x_1,y_1)=(1,1),\] \[(x_2,y_2)=(3,7),\] \[y_k=6y_{k-1}-y_{k-2}, \hspace{.5cm} x_{k}=\frac{y_k+x_{k-1}+1}{3}.\]
\begin{lemma}\label{lem:bal1} Let $n$ be a positive integer and consider the complete graph $K_n$ and a partition $V(K_n) = A \cup B$ of its vertex set. Then the function $f:E(K_n)\to \{-1,1\}$ defined as \[f(e)= \left\{ \begin{array}{rl}
-1, & \mbox{ if } e\subset A \\
1, & \mbox{ otherwise, } \\ \end{array}\right.\]
is balanced if and only if $n = \frac{1+y_k}{2}$ and $|A| = x_k$ for some $k \in\mathbb{N}$. \end{lemma}
\begin{proof}
Suppose first that $f$ is balanced and let $|A| = x$. Then $$e(-1)=\frac{x(x-1)}{2} = \frac{1}{2} \binom{n}{2},$$ which yields \[ n^2-n-(2x^2-2x)=0, \]
and therefore $n = \frac{1+\sqrt{8x^2-8x+1}}{2}$. But this is only an integer if $8x^2-8x+1 = y^2$ for some integer $y$, and we obtain equation (\ref{eq:pell}). Hence, $|A| = x = x_k$ and $n = \frac{1+y_k}{2}$ for some $k \in \mathbb{N}$.\\
Conversely, suppose that $n = \frac{1+y_k}{2}$ and $|A| = x_k$ for some $k \in\mathbb{N}$. Then \[ n = \frac{1+y_k}{2} = \frac{1+\sqrt{y_k^2}}{2} = \frac{1+\sqrt{8x_k^2-8x_k+1}}{2}. \] Thus $n$ is the positive root of \begin{equation}\label{eq:n_k} n^2-n-(2x_k^2-2x_k)=0, \end{equation} which is equivalent to \[ \frac{x_k(x_k-1)}{2}=\frac{1}{2}\binom{n}{2}. \] Since the left hand side of this equation is precisely $e(-1)$ and the right hand side is half the number of the edges of $K_n$, it follows that $f$ is balanced. \end{proof}
\begin{lemma}\label{lem:bal2} Let $n$ be a positive integer and consider the complete graph $K_n$ and a partition $V(K_n) = A \cup B$ of its vertex set. Then the function \[f(e)= \left\{ \begin{array}{rl}
-1, & \mbox{ if } e\subset A \mbox{ or } e\subset B \\
1, & \mbox{ otherwise, } \\ \end{array}\right.\]
is balanced if and only if $n = k^2$ and $\{|A|, |B| \} = \{\frac{1}{2}k(k+1), \frac{1}{2}k(k-1)\}$ for some $k \in\mathbb{N}$. \end{lemma}
\begin{proof}
Suppose first that $f$ is balanced and let $|A| = w$. Then \[ e(1) = w(n-w)=\frac{1}{2} \binom{n}{2}, \] which is equivalent to \[ w^2 - nw + \frac{1}{4}n(n-1)=0. \] Hence, \begin{equation}\label{eq:n(w)} w = \frac{n \pm \sqrt{n}}{2}, \end{equation}
which is an integer if and only if $n = k^2$ for some $k \in \mathbb{N}$. So we obtain $n = k^2$ and $w \in \{\frac{1}{2}k(k+1), \frac{1}{2}k(k-1)\}$. Since $|B| = n - |A| = k^2 - w$, it follows easily that $\{|A|, |B| \} = \{\frac{1}{2}k(k+1), \frac{1}{2}k(k-1)\}$ and we are done.\\
Conversely, suppose that $n = k^2$ and $\{|A|, |B| \} = \{\frac{1}{2}k(k+1), \frac{1}{2}k(k-1)\}$ for some $k \in\mathbb{N}$. Without loss of generality, assume that $|A| = \frac{1}{2}k(k+1)$. Then \[
e(1) = |A| (n - |A|) = \frac{1}{2}k(k+1) \left( k^2 - \frac{1}{2}k(k+1)\right) = \frac{1}{4} k^2(k^2-1) = \frac{1}{2} \binom{n}{2},\] implying that $f$ is balanced. \end{proof}
We define the set $S_1$ as the set of all integers $n_k = \frac{1+y_k}{2}$, $k \in \mathbb{N}$, where $(x_k,y_k)$ is the $k$-th solution of (\ref{eq:pell}), that is,
$$S_1 = \left\{\frac{1+y_k}{2} \; | \; k \in \mathbb{N} \right\}.$$ Further, let $S_2$ be the set of all integer squares, that is,
$$S_2 = \left\{k^2 \; | \; k \in \mathbb{N} \right\}.$$
Lemmas \ref{lem:bal1} and \ref{lem:bal2} yield the following corollary.
\begin{corollary}\label{cor:Km_Si} For any integer $m \in \mathbb{N} \setminus (S_1 \cap S_2)$, there are infinitely many positive integers $n$ such that there exists a balanced function $f:E(K_n)\to \{-1,1\}$ without zero-sum $K_m$. \end{corollary}
\begin{proof} Let $m \in \mathbb{N} \setminus (S_1 \cap S_2)$. By Lemmas \ref{lem:bal1} and \ref{lem:bal2}, there is a balanced function $f:E(K_n)\to \{-1,1\}$ for each $n \in S_1 \cup S_2$. Suppose that there is a zero-sum $K_m$ in such a weighting $f$ for a given $n \in S_1 \cup S_2$. Then, the function $f$ restricted to the edges of $K_m$ is a balanced function on $E(K_m)$, which is not possible by Lemmas \ref{lem:bal1} and \ref{lem:bal2} since $m \in \mathbb{N} \setminus (S_1 \cap S_2)$. Since $S_1 \cup S_2$ has infinitely many elements, it follows that there are infinitely many positive integers $n$ such that there exists a balanced function $f:E(K_n)\to \{-1,1\}$ without zero-sum $K_m$. \end{proof}
Now we can state the main result of this section, which is equivalent to instance 3 of Theorem \ref{thm:main}.
\begin{theorem} For any integer $m \in \mathbb{N} \setminus \{1, 4\}$, there are infinitely many positive integers $n$ such that there exists a balanced weighting $f:E(K_n)\to \{-1,1\}$ which is zero-sum-$K_m$ free. \end{theorem}
\begin{proof} By Corollary \ref{cor:Km_Si}, for any $m \in \mathbb{N} \setminus (S_1\cap S_2)$, there are infinitely many positive integers $n$, such that there exists a balanced weighting function $f:E(K_n)\to \{-1,1\}$ without a zero-sum $K_m$. We will show that $S_1 \cap S_2=\{1,4\}$. Let $q$ be an integer such that $q^2\in S_1$ (and thus $q^2\in S_1\cap S_2$). Then $q^2$ must be the positive root of equation (\ref{eq:n_k}) for some $x_k$. Thus we need to know for which positive integers $q$ and $x$ the following is possible: \begin{equation}\label{eq:qx} q^4-q^2-(2x^2-2x)=0. \end{equation} Note that equation (\ref{eq:qx}) can be written as: \begin{equation}\label{eq:QX} Q^2-2X^2=-1. \end{equation} where $Q=2q^2-1$ and $X=2x-1$. Again (as in the proof of Theorem \ref{thm:larger_range}), we have to deal with the Diophantine equation $Q^2-2X^2=\pm 1$, which has infinitely many solutions given by \[Q_k=\frac{a^k+b^k}{2}, \hspace{.5cm} X_{k}=\frac{a^k-b^k}{a-b}=\frac{a^k-b^k}{2\sqrt{2}},\] where $a=1+\sqrt{2}$ and $b=1-\sqrt{2}$. Since we need to solve equation (\ref{eq:QX}) (that is, with $-1$ on the right side), we know that $k$ must be odd. Therefore, according to the definition of $Q$, we need to determine all odd $k$'s such that $$Q_k+1=2q^2,$$ or equivalently, $$2Q_k+2=4q^2,$$ and so, \begin{equation}\label{eq:ab} a^k+b^k+a+b=(2q)^2. \end{equation} We consider two cases:\\
\noindent \emph{Case 1.} If $k\equiv 1$ (mod $4$), we will prove that the left side of equation (\ref{eq:ab}) is $4Q_{\frac{k-1}{2}}Q_{\frac{k+1}{2}}$. Note that $ab=-1$ and, since in this case $\frac{k-1}{2}$ is even, we have $(ab)^{\frac{k-1}{2}}=(-1)^{\frac{k-1}{2}}=1$. Hence, \begin{align*} a^k+b^k+a+b&=a^k+b^k+(ab)^{\frac{k-1}{2}}(a+b)\\
&=a^{\frac{k-1}{2}}a^{\frac{k+1}{2}}+b^{\frac{k-1}{2}}b^{\frac{k+1}{2}}+a^{\frac{k-1}{2}}b^{\frac{k-1}{2}}(a+b)\\
&=a^{\frac{k-1}{2}}a^{\frac{k+1}{2}}+b^{\frac{k-1}{2}}b^{\frac{k+1}{2}}+a^{\frac{k+1}{2}}b^{\frac{k-1}{2}}+a^{\frac{k-1}{2}}b^{\frac{k+1}{2}}\\
&=(a^{\frac{k-1}{2}}+b^{\frac{k-1}{2}})(a^{\frac{k+1}{2}}+b^{\frac{k+1}{2}})\\
&=4Q_{\frac{k-1}{2}}Q_{\frac{k+1}{2}}. \end{align*} Thus, by (\ref{eq:ab}), we conclude that $Q_{\frac{k-1}{2}}Q_{\frac{k+1}{2}}$ is a perfect square. We know that, for all $i$, $Q_i$ and $Q_{i+1}$ are coprime. Thus, it follows that both $Q_{\frac{k-1}{2}}$ and $Q_{\frac{k+1}{2}}$ are perfect squares. Coming back to equation (\ref{eq:QX}), the following must be satisfied \begin{equation}\label{eq:YX} Y^4-2X^2=-1 \end{equation} where $Q_{\frac{k+1}{2}}=Y^2$. But the only possible solution for the Diophantine equation (\ref{eq:YX}) is $(Y,X)=(\pm 1,1)$. Hence, $Q_{\frac{k+1}{2}}= 1$, which means that $k=1$, and so $Q = Q_1 = 1$. Since $Q=2q^2-1$ and $q>0$, we conclude that $q=1$.\\
\noindent \emph{Case 2.} If $k\equiv 3$ (mod $4$), then we will prove that the left side of equation (\ref{eq:ab}) is $8X_{\frac{k-1}{2}}X_{\frac{k+1}{2}}$. Recall that $ab=-1$ and, since in this case $\frac{k+1}{2}$ is even, we have $(ab)^{\frac{k+1}{2}}=(-1)^{\frac{k+1}{2}}=1$. Hence, \begin{align*} a^k+b^k+a+b&=a^k+b^k+(ab)^{\frac{k+1}{2}}(a+b)\\ &=a^k+b^k-(ab)^{\frac{k-1}{2}}(a+b)\\
&=a^{\frac{k-1}{2}}a^{\frac{k+1}{2}}+b^{\frac{k-1}{2}}b^{\frac{k+1}{2}}-a^{\frac{k-1}{2}}b^{\frac{k-1}{2}}(a+b)\\
&=a^{\frac{k-1}{2}}a^{\frac{k+1}{2}}+b^{\frac{k-1}{2}}b^{\frac{k+1}{2}}-a^{\frac{k+1}{2}}b^{\frac{k-1}{2}}-a^{\frac{k-1}{2}}b^{\frac{k+1}{2}}\\
&=(a^{\frac{k-1}{2}}-b^{\frac{k-1}{2}})(a^{\frac{k+1}{2}}-b^{\frac{k+1}{2}})\\
&=8X_{\frac{k-1}{2}}X_{\frac{k+1}{2}}. \end{align*} Thus, by (\ref{eq:ab}), we conclude that $2X_{\frac{k-1}{2}}X_{\frac{k+1}{2}}$ is a perfect square. We know that $X_{\frac{k-1}{2}}$ and $X_{\frac{k+1}{2}}$ have different parity. Observe that, for $k\equiv 3$ (mod $4$), $X_{\frac{k-1}{2}}$ is odd and $X_{\frac{k+1}{2}}$ is even. Since for all $i$, $X_i$ and $X_{i+1}$ are coprime, also $X_{\frac{k-1}{2}}$ and $2X_{\frac{k+1}{2}}$ are coprime, from which it follows that both $X_{\frac{k-1}{2}}$ and $2X_{\frac{k+1}{2}}$ are perfect squares. In particular, coming back to equation (\ref{eq:QX}), we obtain \begin{equation}\label{eq:QW} Q^2-2W^4=-1 \end{equation} where $X_{\frac{k-1}{2}}=W^2$. Note that equation (\ref{eq:QW}) is the well known Ljunggren Equation $1+Q^2=2W^4$. Such a Diophantine equation has solutions only for $W=1$ and $W=13$, which correspond respectively to $X_1$ and $X_7$ (because $X_1=1=1^2$ and $X_7=169=13^2$). Therefore, we have two possibilities, either $k=3$ (that is $X_{\frac{3-1}{2}}=X_1$), or $k=15$ (that is $X_{\frac{15-1}{2}}=X_7$). The second case is discarded since $X_{\frac{15+1}{2}}=X_8=408=2 \cdot (204)$ and $204$ is not a perfect square. The first case, corresponding to $k=3$, leads to $X_{\frac{3-1}{2}}=X_1=1$ and $X_{\frac{3+1}{2}}=X_2=2$. The solution $(Q_1,X_1)=(1,1)$ gives $q=1$ as we saw in Case 1. The solution $(Q_2,X_2)=(3,2)$ gives $q=2$ (since $Q=2q^2-1$ and $q>0$).
From both cases we conclude that, if $q^2\in S_1\cap S_2$ then either $q=1$ or $q=2$. Hence, $S_1\cap S_2=\{1,4\}$ which concludes the proof. \end{proof}
\section{Conclusions}
While the situation about zero-sum copies of $K_m$ over $\mbox{${\mathbb Z}$}$-weightings is fairly clear now, a lot of interesting results can be proved when the graphs in question are not complete graphs. Several examples are given in \cite{CY3} (for example, certain complete bipartite graphs and many more), and in a forthcoming paper \cite{CHM} which is under preparation.
\end{document} |
\begin{document}
\title{Concurrence of Two Identical Atoms in a Rectangular Waveguide: Linear Approximation with Single Excitation}
\author{Lijuan \surname{Hu}} \affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of Ministry of Education, Department of Physics and Synergetic Innovation Center of Quantum Effects and Applications, Hunan Normal University, Changsha 410081, China} \author{Guiyuan \surname{Lu}} \affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of Ministry of Education, Department of Physics and Synergetic Innovation Center of Quantum Effects and Applications, Hunan Normal University, Changsha 410081, China} \author{Jing \surname{Lu}} \affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of Ministry of Education, Department of Physics and Synergetic Innovation Center of Quantum Effects and Applications, Hunan Normal University, Changsha 410081, China} \author{Lan \surname{Zhou}} \thanks{Corresponding author} \email{[email protected]} \affiliation{Key Laboratory of Low-Dimensional Quantum Structures and Quantum Control of Ministry of Education, Department of Physics and Synergetic Innovation Center of Quantum Effects and Applications, Hunan Normal University, Changsha 410081, China}
\begin{abstract} We study two two-level systems (TLSs) interacting with a reservoir of guided modes confined in a rectangular waveguide. For the energy separation of the identical TLSs far away from the cutoff frequencies of transverse modes, the delay-differential equations are obtained with single excitation initial in the TLSs. The effects of the inter-TLS distance on the time evolution of the concurrence of the TLSs are examined. \end{abstract}
\pacs{03.65.Yz, 03.65.-w}
\maketitle
\section{Introduction}
Quantum entanglement is a nonlocal correlation of multipartite quantum systems, which distinguishes the quantum world from the classical world. Due to its important role in quantum computation and communication, it is a physical resource which quantum technologies are based on. However, the inevitable interaction of quantum systems with their surrounding environments induces decoherence of quantum systems, which degrades the entanglement of quantum systems. Understanding the dynamics of the entanglement is desirable to be able to manipulate entanglement states in a practical way as well as the question of emergent classicality from quantum theory. Entanglement dynamics is studied under local decoherence (two particles in an entangled state are coupled to its own environment individually), a peculiar dynamical feature of entangled state is that complete disentanglement is achieved in finite time although complete decoherence takes an infinite time, which is termed ``entanglement sudden death''~\cite{ESD1,ESD2}. The assumption of local decoherence requires that two two-level systems (TLSs), e.g. atoms, are sufficiently separated. It is well known that the radiation field emitted by an atom may influence the dynamics of its closely spaced atoms~\cite {Dicke,Lehmberg70,Feng41,Ordon70,BermanPRA76}. The entanglement can be generated in a two-atom system after a finite time by their cooperative spontaneous emission, or the destroyed entanglement may reappear suddenly after its death, which is known as sudden birth of entanglement~\cite{ESB}.
In a quantum network, stationary qubits generate, store, and process quantum information at quantum nodes, and flying qubits transmit quantum information between the nodes through quantum channels. A distributed quantum network requires coherently transferring quantum information among stationary qubits, flying qubits, and between stationary qubits and flying qubits. With the development of techniques in quantum information, an alternative waveguide-based quantum electrodynamics (QED) system has emerged as a promising candidate for achieving a quantum network~\cite{Tien,HamRMP82,Caruso}. In this system, atoms are located at quantum nodes and photons propagating along the network are confined in a waveguide. Inside a one-dimensional (1D) waveguide, the electromagnetic field is confined spatially in two dimensions and propagates along the remaining one, which is called guided modes. The spectrum of the guided modes is continuous. The coupling of the electromagnetic field to a TLS can be increased by reducing the transverse size of the guided modes. Therefore the study of entanglement dynamics in systems embedded in waveguides is of importance. A waveguide with a cross section has many guided modes~\cite{TETMmode}, e.g. transverse-magnetic (TM) modes or transverse-electric (TE) ones. However, most works only consider one guided mode of the waveguide~\cite {Fans,ZLPRL08,ZLQrouter,Zheng,LawPRA78,TShiSun,PRA14Red,Ordonez}. In this paper, we consider the dynamic behavior of bipartite entanglement involving two identical TLSs which are implanted into the 1D rectangular hollow metallic waveguide. Since local addressing is difficult, we assume that there is no direct interaction between the TLSs, and that the TLSs and the field initially share a single excitation.
By considering that the energy separation of the TLSs is far away from the cutoff frequencies of the transverse modes, the delay differential equations are obtained for the two TLSs' amplitudes with the field initially in vacuum, where multiple guided modes are included. The spatial separation of the two TLSs introduces the position-dependent phase factor and the time delay (finite time required for light to travel from one TLS to the other) in each transverse mode. The phase factors and the time delays are different in different transverse modes. The effect of the phase factors and the time delays on the entanglement dynamics of the TLSs is studied in detail by considering the TLSs interacting with a single transverse mode and with double transverse modes.
This paper is organized as follows. In Sec.~\ref{Sec:2}, we introduce the model and establish the notation. In Sec.~\ref{Sec:3}, we derive the relevant equations describing the dynamics of the system for the TLSs being initially excited and the waveguide mode in the vacuum state, and investigate the effect of spatial separation on the dynamics of entanglement between two identical TLSs, which is characterized by concurrence. We make a conclusion in Sec.~\ref{Sec:4}.
\section{\label{Sec:2}Two TLSs in a rectangular waveguide}
We consider a rectangular hollow metallic waveguide with the area $A=ab$ ($ a=2b$) of its cross section, as shown in Fig.~\ref{Fig1.eps}. The axis of the waveguide is parallel to the $z$ axis, and the waveguide is infinite along the $z$ axis. Since the translation invariance is maintained along the $z$ axis, all the components of the electromagnetic field describing the guided mode depend on the coordinate $z$ as $e^{ikz}$. The guided mode can be characterized by three wave numbers $\{k_{x},k_{y},k_{z}\}$. The spatial confinement of the electromagnetic field along the $xy$ plane leads to the appearance of the two non-negative integers $m$ and $n$, which are related to the wave numbers along the $x$ and $y$ directions by $k_{x}=m\pi /a$ and $ k_{y}=n\pi /b$. In this waveguide, there are two types of guided modes~\cite{TETMmode,HuangPRA,Shahmoon,liqongPRA,zhouCTP69}: the transverse-magnetic modes $TM_{mn}$ ($H_{z}=0$) and the transverse-electric modes $TE_{mn}$ ($ E_{z}=0$). Two identical TLSs, named TLS $1$ and TLS $2$, are separately located inside the waveguide at positions $\vec{r}_{1}=(a/2,b/2,z_{1})$ and $ \vec{r}_{2}=(a/2,b/2,z_{2})$; the distance between the TLSs is denoted by $ d=z_{2}-z_{1}$. The free Hamiltonian of the TLSs reads \begin{equation} H_{a}=\sum\limits_{l=1}^{2}\hbar \omega _{A}\sigma _{l}^{+}\sigma _{l}^{-} \label{2-A1} \end{equation} where $\omega _{A}$ is the energy difference between the excited state $
|e\rangle $ and the ground state $|g\rangle $, and $\sigma _{l}^{+}\equiv \left\vert e_{l}\right\rangle \left\langle g_{l}\right\vert $ ($\sigma _{l}^{-}\equiv \left\vert g_{l}\right\rangle \left\langle e_{l}\right\vert $ ) is the raising (lowering) atomic operator of the $l$-th TLS. We assume the dipoles of the TLSs are along the $z$ axis. In this case, only the $TM_{mn}$ guided modes interact with the TLSs. The free Hamiltonian of the field reads \begin{equation} H_{f}=\sum_{j}\int dk\hbar \omega _{jk}\hat{a}_{jk}^{\dagger }\hat{a}_{jk} \label{2-A2} \end{equation} where $\hat{a}_{jk}^{\dagger }$ ($\hat{a}_{jk}$) is the creation (annihilation) operator of the $TM_{mn}$ modes. Here, we have replaced $(m,n)$ with the sequence number\ $j$, i.e., $j=1,2,3...$ denoting $ TM_{11},TM_{31},TM_{51}\cdots$, respectively. For each guided mode, the dispersion relation is given by $\omega _{jk}=\sqrt{\Omega _{j}^{2}+c^{2}k^{2}} $, where $\Omega _{mn}=c\sqrt{(m\pi /a)^{2}+(n\pi /b)^{2}}$ is the cutoff frequency. No electromagnetic field can be guided if its frequency is smaller than the cutoff frequency $\Omega _{1}$. The interaction between the TLSs and the electromagnetic field is written as \begin{equation} H_{int}=\sum_{l=1}^{2}\sum_{j}\int dk\hbar \frac{g_{jl}}{\sqrt{\omega _{jk}}} e^{ikz_{l}}S_{l}^{-}\hat{a}_{k}^{\dagger }+h.c. \label{2-A3} \end{equation} in the electric dipole and rotating wave approximations, where $ g_{jl}=\Omega _{j}\mu _{l}/\sqrt{A\pi \epsilon _{0}}$ and $\mu _{l}$ is the magnitude of the dipole of the $l$-th TLS. We assume that $\mu _{1}=\mu _{2}=\mu $ is real. Then the parameter $g_{jl}$ becomes \begin{equation} g_{j}=\frac{\Omega _{j}\mu \sin \left( \frac{m\pi }{2}\right) \sin \left( \frac{n\pi }{2}\right) }{\sqrt{\hbar A\pi \epsilon _{0}}}, \label{2-A4} \end{equation} where $\epsilon _{0}$ is the permittivity of free space. The TLS's position is encoded in the exponential function in Eq.(\ref{2-A3}). \begin{figure}
\caption{(color online) Schematic illustration for an infinite waveguide of rectangular cross section $A=ab$ (a) coupling to two TLSs (b) located at $ \vec{r}_{1}=(a/2,b/2,z)$ and $\vec{r}_{2}=(a/2,b/2,z_2)$.}
\label{Fig1.eps}
\end{figure} The total system, the two TLSs and the photons in quantum electromagnetic field, is described by the Hamiltonian \begin{equation} H=H_{f}+H_{a}+H_{int} \label{2-A5} \end{equation} The total system is a closed system. However, each subsystem is an open system. When we are only interested in the dynamics of TLSs, the quantum electromagnetic field can be regarded as an environment.
\section{\label{Sec:3} Entanglement Dynamics}
Any state of the two TLSs is a linear superposition of the basis of the separable product states $\left\vert 1\right\rangle =\left\vert g_{1}g_{2}\right\rangle $, $\left\vert 2\right\rangle =\left\vert e_{1}g_{2}\right\rangle $, $\left\vert 3\right\rangle =\left\vert g_{1}e_{2}\right\rangle $, and $\left\vert 4\right\rangle =\left\vert e_{1}e_{2}\right\rangle $. Since the number of quanta is conserved in this system, the wavefunction of the total system can be written as: \begin{equation} \left\vert \psi (t)\right\rangle =b_{1}\left\vert 20\right\rangle +b_{2}\left\vert 30\right\rangle +\sum_{j}\int dkb_{jk}a_{jk}^{\dagger }\left\vert 10\right\rangle \label{2-A6} \end{equation} in the single-excitation subspace, where $\left\vert 0\right\rangle $ is the vacuum state of the quantum field. The first term in Eq.(\ref{2-A6}) presents TLS $1$ in the excited state with no excitations in the field, $b_{1}\left( t\right) $ is the corresponding amplitude, the second term in Eq.(\ref{2-A6} ) presents TLS $2$ in the excited state with the field in the vacuum, whereas the third term in Eq.(\ref{2-A6}) describes both TLSs in the ground state with a photon emitted at a mode $k$ of the TM$_{j}$ guided mode, $ b_{jk}\left( t\right) $ is the corresponding amplitude. The initial state of the system is denoted by the amplitudes $b_{1}\left( 0\right) ,b_{2}\left( 0\right) $, $b_{jk}\left( 0\right) =0$. 
The Schr\"{o}dinger equation results in the following coupled equations of the amplitudes \begin{subequations} \label{2-A7} \begin{eqnarray} \dot{b}_{1} &=&-i\omega _{A}b_{1}-\sum_{j}\int dk\frac{b_{jk}g_{j}}{\sqrt{ \omega _{jk}}}e^{-ikz_{1}} \\ \dot{b}_{2} &=&-i\omega _{A}b_{2}-\sum_{j}\int dk\frac{b_{jk}g_{j}}{\sqrt{ \omega _{jk}}}e^{-ikz_{2}} \\ \dot{b}_{jk} &=&-i\omega _{jk}b_{jk}+\frac{g_{j}e^{ikz_{1}}}{\sqrt{\omega _{jk}}}\left( b_{1}+b_{2}e^{ikd}\right) \end{eqnarray} We introduce three new variables to remove the high-frequency effect \end{subequations} \begin{subequations} \label{2-A8} \begin{eqnarray} b_{1}(t) &=&B_{1}(t)e^{-i\omega _{A}t}, \\ b_{2}(t) &=&B_{2}(t)e^{-i\omega _{A}t}, \\ b_{jk}(t) &=&B_{jk}\left( t\right) e^{-i\omega _{jk}t}, \end{eqnarray} then we formally integrate the equation for $B_{jk}\left( t\right) $, which is later inserted into the equations for $B_{1}\left( t\right) $ and $ B_{2}\left( t\right) $. The probability amplitude for one TLS being excited is determined by two coupled integro-differential equations. Assuming that the frequency $\omega _{A}$ is far away from the cutoff frequencies $\Omega _{j}$, we can expand $\omega _{jk}$ around $\omega _{A}$ up to the linear term \end{subequations} \begin{equation} \omega _{jk}=\omega _{A}+v_{j}\left( k-k_{j0}\right) , \label{2-A10} \end{equation} where the wave number of the emitted radiation $k_{j0}=\sqrt{\omega _{A}^{2}-\Omega _{j}^{2}}/c$ is determined by $\omega _{jk_{j0}}=\omega _{A}$ , and the group velocity
v_{j}\equiv \frac{d\omega _{jk}}{dk}|_{k=k_{j0}}=\frac{c\sqrt{\omega _{A}^{2}-\Omega _{j}^{2}}}{\omega _{A}} \label{2-A11} \end{equation} is different for different TM$_{j}$ guided modes. Integrating over all wave vectors $k$ gives rise to a linear combination of $\delta \left( t-\tau -\tau _{j}\right) $ and $\delta \left( t-\tau_j \right) $, where $\tau _{j}=d/v_{j}$ is the time delay taken by a photon traveling from one TLS to the other TLS in the given transverse mode $j$. The differential equations governing the dynamics of two TLSs read \begin{subequations} \label{2-A12} \begin{eqnarray} \left( \partial _{t}+\gamma \right) B_{1}(t) &=&-\sum_{j}\gamma _{j}e^{i\varphi _{j}}B_{2}\left( t-\tau_{j}\right) \Theta \left( t-\tau_{j}\right) \\ \left( \partial _{t}+\gamma \right) B_{2}(t) &=&-\sum_{j}\gamma _{j}e^{i\varphi _{j}}B_{1}\left( t-\tau_{j}\right) \Theta \left( t-\tau_{j}\right) \end{eqnarray} where we have defined the phase $\varphi _{j}=k_{j0}d$ due to the distance between the TLSs, the decay rate $\gamma _{j}=\pi \left\vert g_{j}\right\vert ^{2}/(v_{j}\omega _{A})$ caused by the interaction between the TLSs and the vacuum field in a given transverse mode $j$, $\Theta(x)$ is the Heaviside unit step function, i.e., $\Theta(x)=1$ for $x>0$, and $ \Theta(x)=0$ for $x<0$. The decay to all $TM_{j}$ modes is denoted by $ \gamma =\sum_{j}\gamma _{j}$, the retardation effect~\cite {Milonni74,cookPRA53,DungPRA59,DornPRA66,RistPRA78,GulfPRA12,JingPLA377} has been implied by the symbol $\tau _{j}$. At times less than the minimum $\tau _{j}$, two TLSs decay as if they are isolated in the rectangular waveguide. After the time $\min\tau _{j}$ the TLS recognizes the other TLS due to its absorption of photons. As time goes on, reemissions and reabsorptions of photons by two TLSs might produce interference, which leads to the change of atomic upper state population. It is convenient to write Eq.~(\ref{2-A12})
in the Dicke symmetric state $|s\rangle =(|2\rangle +|3\rangle )/\sqrt{2}$
and antisymmetric state $|a\rangle =(|2\rangle -|3\rangle )/\sqrt{2}$ \end{subequations} \begin{subequations} \label{2-A13} \begin{eqnarray} \left( \partial _{t}+\gamma \right) C_{s}(t) &=&-\sum_{j}\gamma _{j}e^{i\varphi _{j}}C_{s}\left( t-\tau_{j}\right) \Theta \left( t-\tau_{j}\right) \\ \left( \partial _{t}+\gamma \right) C_{a}(t) &=&\sum_{j}\gamma _{j}e^{i\varphi _{j}}C_{a}\left( t-\tau_{j}\right) \Theta \left( t-\tau_{j}\right) \end{eqnarray} which allow either TLS 1 or TLS 2 to be excited with equal probability. They are degenerate eigenstates of Hamiltonian $H_a$. The equations for the amplitudes of the Dicke states are not coupled.
\begin{figure}
\caption{(Color online) The concurrence between the TLSs as functions of the dimensionless time $t/\protect\tau_{1}$ with initial condition $C_{s}(0)=1$ for different phase $\protect\varphi _{1}=2n\protect\pi$ (black solid curve), $\protect\varphi _{1}=2n\protect\pi+\protect\pi$ (blue dotted curve), $\protect\varphi _{1}=2n\protect\pi+\protect\pi/2$ (green dot-dashed curve), $\protect\varphi _{1}=2n\protect\pi+\protect\pi/4$ (red dashed curve) in (a)$n=2$, (b)$n=20$, (c)$n=150$. We have set the following parameters: $a=2b$, $\protect\omega_{A}=(\Omega_{11}+\Omega_{31})/2$, $ \protect\gamma _{1}\protect\lambda _{1}/v_{1}=0.05 $.}
\label{Fig2.eps}
\end{figure}
To measure the amount of the entanglement, we use concurrence as the quantifier~\cite{Wootters}. By taking a partial trace over the waveguide degrees of freedom, the initial density matrix of the two TLSs is of an X-form in the two-qubit standard basis $\{\left\vert 1\right\rangle ,\left\vert 2\right\rangle ,\left\vert 3\right\rangle ,\left\vert 4\right\rangle \}$. The concurrence for this type of state can be calculated easily as \end{subequations} \begin{equation} \label{2-A14} C(t)=\max (0,2\left\vert B_{1}(t)B_{2}^{\ast }(t)\right\vert ) \end{equation} which can also be expressed as a function of the amplitudes of the Dicke states by the relation \begin{subequations} \label{2-A15} \begin{eqnarray} C_{s}(t) &=&\frac{B_{1}\left( t\right) +B_{2}\left( t\right) }{\sqrt{2}}, \\ C_{a}(t) &=&\frac{B_{1}\left( t\right) -B_{2}\left( t\right) }{\sqrt{2}}. \end{eqnarray} We expect that the position-dependent phase factors $e^{i\varphi _{j}}$ and the delay times $\tau _{j}$ will lead to a modification of the entanglement among the TLSs.
\subsection{Single transverse mode}
In the frequency band between $\Omega _{11}$ and $\Omega _{31}$, the waveguide is said to be single-moded. The TLSs with the transition frequency $\omega _{A}\in \left( \Omega _{11},\Omega _{31}\right) $ only emit photons into the TM$_{11}$ ($j=1$) guided mode. In this case, the time behavior of the Dicke states reads \end{subequations} \begin{subequations} \label{2-B1} \begin{eqnarray} C_{s}(t) &=&C_{s0}\sum_{n=0}^{\infty }\frac{\left( -\gamma _{1}e^{i\varphi _{1}}\right) ^{n}}{n!}t_{n}^{n}e^{-\gamma _{1}t_{n}}, \\ C_{a}(t) &=&C_{a0}\sum_{n=0}^{\infty }\frac{\left( \gamma _{1}e^{i\varphi _{1}}\right) ^{n}}{n!}t_{n}^{n}e^{-\gamma _{1}t_{n}}. \end{eqnarray} where $t_{n}=t-n\tau _{1}$, and $C_{s0}$ and $C_{a0}$ are the initial amplitudes. The time axis is divided into intervals of length $\tau _{1}$. A step-like character is presented in Eqs.(\ref{2-B1}). For $t\in \left[ 0,\tau _{1} \right] $, both amplitudes, $C_{s}$ and $C_{a}$, decay exponentially with decay rate $\gamma _{1}$. The underlying physics is that one TLS requires at least the time $\tau _{1}$ to recognize the other TLS. For $t\in \left[ \tau _{1},2\tau _{1}\right] $, the absorption and reemission of light by each TLS produce the interference, which results in an energy change between two TLSs.
\begin{figure}
\caption{(Color online) The concurrence between the TLSs as a function of the dimensionless time $t/\protect\tau _{1}$ with initial condition $ C_{a}(0)=1$ for distance $d=0$ (black solid line), $d=10\protect\lambda _{1}$ (green dot-dashed line), $d=200\protect\lambda _{1}$ (red dashed line). Other parameters are the same as in Fig.~\ref{Fig2.eps}.}
\label{Fig3.eps}
\end{figure}
From Eq.(\ref{2-B1}), one can observe that there is a $\pi $ phase difference between the amplitudes $C_{s}(t)$ and $C_{a}(t)$. Hence, we assume that the two TLSs are initially prepared in the symmetric state $
C_{s0}=1$ to study the effect of the inter-TLS distance on the dynamics of entanglement between the TLSs. In this case, the concurrence takes the maximum value between $0$ and $|C_{s}(t)|^{2}$. In Fig.~\ref{Fig2.eps}, we have numerically plotted the concurrence as a function of time $t$ in units of $\tau _{1}$ with $\gamma _{1}\lambda _{1}/\tau _{1}=0.05$, where the wavelength $\lambda _{1}k_{10}=2\pi $. In the time interval $t\in \left[ 0,\tau _{1}\right] $, two TLSs radiate spontaneously, so the concurrence decays exponentially with time. As time goes on, the inter-TLS distance influences the entanglement dynamics via phase $\varphi _{1}$ and delay time $\tau _{1}$. When the two TLSs are close together, they act collectively, and the system dynamics is independent of the finite propagating time of the light, which has been shown in Fig.~\ref{Fig2.eps}(a) with $ \gamma _{1}\tau _{1}\ll 1$. There is stationary two-TLS entanglement when the inter-TLS distance equals an odd integer number of $\lambda _{1}/2$, (i.e., $\varphi _{1}=2n\pi +\pi $). A small deviation from this special position leads to the entanglement decaying asymptotically to zero. The entanglement is lost quickly when the inter-TLS distance equals an integer number of $\lambda _{1}$ corresponding to $\varphi _{1}=2n\pi $. For $\gamma _{1}\tau _{1}\ll 1$, the dependence of the entanglement on phase in Fig.~\ref {Fig2.eps}(a) can be understood by letting $\tau _{1}\rightarrow 0$. In this case, the amplitude of state $\left\vert s\right\rangle $ becomes \end{subequations} \begin{equation} C_{s}(t)=C_{s0}\exp \left[ -t\gamma _{1}(1+\cos \varphi _{1})-it\gamma _{1}\sin \varphi _{1}\right] \label{2-B2} \end{equation}
It can be observed from Eq.~(\ref{2-B2}) that $|C_{s}(t)|$ decays exponentially with time; it decays quickly when $\varphi _{1}=2n\pi $ and keeps its initial value when $\varphi _{1}=2n\pi +\pi $. Although one can explain the relation of entanglement with phase by Eq.~(\ref{2-B2}), the probability of finding the TLSs in the initial state is less than unity in Fig.~\ref {Fig2.eps}(a) when $\varphi _{1}=2n\pi +\pi $. Hence, the stationary two-TLS entanglement indicates a superposition of the symmetry state in the absence of photons and the ground TLS state in the presence of a photon. As the inter-TLS separation increases a little bit to meet $\gamma _{1}\tau _{1}\sim 1$ in Fig.~\ref{Fig2.eps}(b), both the decay time and the phase play important roles due to the interference. The interference produced by multiple reemissions and reabsorptions of photon results in an oscillatory entanglement. Panel (c) of Fig.~\ref{Fig2.eps} illustrates the dynamics of entanglement for a larger inter-TLS distance with $\gamma _{1}\tau _{1}\gg 1$ . It can be observed that the phase plays essentially no role. At early time, each initially excited TLS emits light to the waveguide, and the entanglement begins to decrease abruptly from one. Then the concurrence remains small and close to zero until the time $t=\tau _{1}$, at which the excitation gets absorbed by each TLS. As soon as the emitted photon returns to the TLSs, the entanglement is created. Then, the decrease of entanglement begins. The entanglement of the TLSs exhibits peaks due to the iteration of the process where a photon emitted by one of the atoms is reabsorbed by another atom, but its periodic maxima are reduced in magnitude as $t$ increases because the energy is carried away from TLSs by the forward-going waves emitted by TLS 1 and the backward-going waves emitted by TLS 2. 
In this case, the amplitudes are approximately described by \begin{equation} C_{s}(t)\propto \frac{\left( -\gamma _{1}e^{i\varphi _{1}}\right) ^{n}}{n!} t_{n}^{n}e^{-\gamma _{1}t_{n}} \label{2-BA} \end{equation} in each time interval $\left[ n\tau _{1},\left( n+1\right) \tau _{1}\right]$ . \begin{figure*}
\caption{(Color online) The time evolution of the concurrence between the TLSs as a function of the dimensionless $t/\protect\tau _{1}$ with TLSs initial in the antisymmetry state for phases $\protect\varphi _{1}=2n\protect \pi $ in (a) $n=4$, (b) $n=10$, (c) $n=30$, (d) $n=3000$. Here, the concurrence for $d=0$ is given in the black dot-dashed lines. The concurrence that only TM$_{11}$ mode is considered is presented by blue dashed lines. The concurrence that both TM$_{11}$ and TM$_{31}$ modes are considered is presented by the red solid lines. We have set the following parameters: $a=2b$, $\protect\omega _{A}=(\Omega _{31}+\Omega _{51})/2$, $ \protect\gamma _{1}\protect\lambda _{1}/v_{1}=0.0086$.}
\label{Fig4.eps}
\end{figure*}
For TLSs initially in the antisymmetry state, the concurrence exhibits behavior similar to that of the symmetry state with a $\pi$ phase difference. However, when the inter-TLS spacing $d=0$, the antisymmetry state is a dark state which does not interact with the electromagnetic field, which means that the probability of finding the TLSs in the initial state is unity at any time, so the concurrence is unchanged and retains its initial value of one (see the solid black line in Fig.~\ref{Fig3.eps}). We also plot the concurrence as a function of the dimensionless time $t/\tau_{1}$ for phase $\varphi _{1}=2n\pi $ with $n=10$ (the green dot-dashed line) and $n=200$ (red dashed line) in Fig.~\ref{Fig3.eps}, where all the position-dependent phase factors $e^{i\varphi_1}$ are equal. It can be seen that the cooperative effect becomes weaker and weaker as the inter-TLS distance increases, and so does the maximum of the concurrence.
\subsection{Two transverse modes}
A TLS in its upper state radiates waves into the continua resonant with itself. As the transition frequency of the TLSs increases, there are additional guided modes taking part in the interaction with the TLSs. Due to their different wave numbers, the inter-TLS distance introduces different flight times $\tau _{j}$ of light between the TLSs as well as different phases $\varphi _{j}$. From the definition of $\tau _{j}$ and $\varphi _{j}$ , we know that $\tau _{j}<\tau _{j+1}$ and $\varphi _{j}<\varphi _{j+1}$ for the given distance $d$. In this section, we assume that the transition frequency of the TLSs is smaller than the cutoff frequency $\Omega _{51}$ and larger than the cutoff frequency $\Omega _{31}$; this means that the emitted photons will propagate in the guided modes TM$_{11}$ and TM$_{31}$. The delay-differential equation Eq.~(\ref{2-A13}) reduces to \begin{subequations} \label{2-B3} \begin{eqnarray} \left( \partial _{t}+\gamma \right) C_{s}(t) &=&-\alpha _{1}C_{s}\left( t-\tau _{1}\right) \Theta \left( t-\tau _{1}\right) \notag \\ &&-\alpha _{2}C_{s}\left( t-\tau _{2}\right) \Theta \left( t-\tau _{2}\right) \\ \left( \partial _{t}+\gamma \right) C_{a}(t) &=&\alpha _{1}C_{a}\left( t-\tau _{1}\right) \Theta \left( t-\tau _{1}\right) \notag \\ &&+\alpha _{2}C_{a}\left( t-\tau _{2}\right) \Theta \left( t-\tau _{2}\right) \end{eqnarray}
where $\gamma =\gamma _{1}+\gamma _{2}$, and $\alpha _{j}=\gamma _{j}e^{i\varphi _{j}}$ ($j=1,2$). We first discuss the case with $d=0$. It is clear by inspection of Eq.~(\ref{2-B3}) that the initial entanglement determined by the state $|s\rangle $ decreases more quickly in time, on the contrary, the initial entanglement determined by the state $|a\rangle $ does not change in time as shown by the black lines in Fig.~\ref{Fig4.eps}.
To analyze the influence on the entanglement of the number of the transverse modes which interact with the TLSs, we fix the phase $\varphi _{1}=2n\pi $ with $n=4$ in Fig.~\ref{Fig4.eps}(a), $n=10$ in Fig.~\ref{Fig4.eps}(b), $ n=30 $ in Fig.~\ref{Fig4.eps}(c), $n=3000$ in Fig.~\ref{Fig4.eps}(d). We plotted the time behavior of the concurrence between the TLSs by considering only the TM$_{11}$ mode in Eq.~(\ref{2-B3}), which is shown in blue dashed lines in Fig.~\ref{Fig4.eps}. The red solid lines in Fig.~\ref{Fig4.eps} present the time behavior of the concurrence by considering both TM$_{11}$ and TM$ _{31}$ modes in Eq.~(\ref{2-B3}). It can be found that increasing the number of the transverse modes which interact with the TLSs leads to an exponential decay at a rate $\gamma _{1}+\gamma _{2}$ up to time $t=\tau _{1}$. After this, the phase $\varphi _{1}$ begins to have an effect until time $t=\tau _{2}$. After time $\tau _{2}$, the dynamics can be dramatically affected by the phases $\varphi _{j}$ and delay times $\tau _{j}$ induced by the inter-TLS distance. The blue lines of panels (a) and (b) in Fig.~\ref {Fig4.eps} show that the concurrence remains constant in time after the time $t=\tau _{1}$ in TM$_{11}$ mode, which indicates that the delay time $\tau _{1}$ is negligibly small. As the phase factor $e^{i\varphi _{1}}$ is fixed, the behavior of the red solid lines is completely determined by the phase $ \varphi _{2}$ and delay time $\tau _{2}$ in TM$_{31}$ mode. We see from panel (a) that the entanglement decays almost exponentially in time after delay time $\tau _{2}$, which means that a part of the emitted energy from one TLS is transferred directly to another TLS, so there is no delay in the absorption of the energy by another TLS in both TM$_{11}$ and TM$_{31}$ modes. 
In this case, we can solve Eq.~(\ref{2-B3}) by letting $\tau _{1},\tau _{2}\rightarrow 0$ \end{subequations} \begin{subequations} \label{2-B4} \begin{eqnarray} C_{s}(t) &=&C_{s0}\exp \left[ -(\gamma _{1}+\alpha _{1})t-(\gamma _{2}+\alpha _{2})t\right] , \\ C_{a}(t) &=&C_{a0}\exp \left[ -(\gamma _{1}-\alpha _{1})t-(\gamma _{2}-\alpha _{2})t\right] . \end{eqnarray} The norms of the amplitudes are completely determined by the phases, leading to exponential decay of the entanglement. The red solid line in panel (b) exhibits behavior different from that in panel (a), indicating that the phase $ \varphi _{2}$ and the delay time $\tau _{2}$ play equally important roles. The part of excitation emitted into TM$_{11}$ mode is immediately reabsorbed by the other one, but the part of excitation emitted into TM$_{31}$ mode undergoes a delay; however, wave interference is still produced at each exchange of the excitation between the TLSs in TM$_{31}$ mode. In this case, we can solve Eq.~(\ref{2-B3}) by letting $\tau _{1}\rightarrow 0$ \end{subequations} \begin{subequations} \label{2-B5} \begin{eqnarray} &&C_{s}=C_{s0}\sum_{n=0}^{\infty }\frac{\left( -\alpha _{2}\right) ^{n}}{n!} e^{-\left( \gamma +\alpha _{1}\right) \left( t-n\tau _{2}\right) }\left( t-n\tau _{2}\right) ^{n} \\ &&C_{a}=C_{a0}\sum_{n=0}^{\infty }\frac{\alpha _{2}^{n}}{n!}e^{-\left( \gamma -\alpha _{1}\right) \left( t-n\tau _{2}\right) }\left( t-n\tau _{2}\right) ^{n} \end{eqnarray} Panel (c) shows that as $d$ increases, the delay time should be taken into account in TM$_{11}$ mode besides the wave interference. 
In this case, we can use Laplace transformation and geometric series expansion to solve Eq.~(\ref{2-B3}); the solutions \end{subequations} \begin{subequations} \label{2-B6} \begin{eqnarray} C_{s} &=&C_{s0}\sum_{n=0}^{\infty }\sum_{k=0}^{n}C_n^k\alpha _{1}^{k}\alpha _{2}^{n-k}\frac{\left(\tau _{nk}-t\right) ^{n}}{n!}e^{-\gamma \left( t-\tau _{nk}\right) }, \\ C_{a} &=&C_{a0}\sum_{n=0}^{\infty }\sum_{k=0}^{n}C_n^k\alpha _{1}^{k}\alpha _{2}^{n-k}\frac{\left( t-\tau _{nk}\right) ^{n}}{n!}e^{-\gamma \left( t-\tau _{nk}\right) }, \end{eqnarray} are coherent sums over contributions starting at different instants of time $ \tau _{nk}=k\tau _{1}+\left( n-k\right) \tau _{2}$, where $C_n^k=\frac{n!}{
k!(n-k)!}$. Each term of the sum has a well-defined phase, and is damped by an exponential function at rate $\gamma$. Interference is possible if the amplitudes do not decay appreciably over the time $\tau_{nk}$. As $d$ is large enough so that $\min_{p,q}{\gamma|p\tau_2-q\tau_1|}\gg 1$ with non-negative integers $p$ and $q$ which are not zero at the same time, the phase factor plays no role as shown in panel (d). The wave packets of the emitted excitation are bouncing back and forth between two TLSs until their intensity is damped to zero. So there are collapses and revivals of the concurrence until the amplitude of the revivals is damped to zero.
\section{\label{Sec:4} conclusion}
We have studied the effects of the inter-TLS distance on the entanglement properties of two identical TLSs located inside a rectangular hollow metallic waveguide of transverse dimensions $a$ and $b$. When the energy separation of the TLS is far away from the cutoff frequencies of the transverse modes and there is a single excitation in the system, the Schr\"{o}dinger equation for the wave function with a single excitation initially in the TLSs is reduced to the delay differential equations for the amplitudes of two TLSs, where phase factors and delay times are induced by the finite distance between the TLSs. The delay differential equations are solved exactly for the TLSs interacting with either single transverse mode or double transverse modes of the waveguide, which directly reveals the retarded character of multiple reemissions and reabsorptions of photons between the TLSs. For the inter-TLS distance $d=0$, there exists an anti-symmetry state decoupled from the field modes, so the entanglement can be generated if the TLSs are initially in a separable state, and is later trapped in the anti-symmetry state. As the TLSs are close together such that the time delay $\max \{\tau _{j}\}$ is much smaller than the TLS decay time $\gamma
^{-1}$, the excitation emitted by one TLS into the field is absorbed immediately by the other. The dynamics of the entanglement is dramatically affected by phases, leading to an enhanced or inhibited exponential decay of the concurrence when only one transverse mode is considered, and an exponential decay when more transverse modes are involved. As the inter-TLS distance increases, both phases and delay times affect the concurrence. There is a proper delay of reabsorption after reemission of photons but interference is possible if the amplitudes of TLSs do not decay appreciably over time $\tau_{nk}$. As $d$ is large enough so that $\min_{p,q}{\gamma|p\tau_2-q\tau_1|}\gg 1$ with non-negative integers $p$ and $q$ which are not zero at the same time, the phase factor plays no role. There appear collapses and revivals of the entanglement of the TLSs. We note that our studies focus on the dependence of the concurrence on the inter-TLS distance but it is easy to study the dependence of the concurrence on the initial state of the system with the exact solution.
\begin{acknowledgments} This work was supported by NSFC Grants No. 11434011, No. 11575058. \end{acknowledgments}
\end{subequations}
\end{document} |
\begin{document}
\title{Greenberger-Horne-Zeilinger theorem for $N$ qudits}
\author{Junghee Ryu}
\affiliation{Institute of Theoretical Physics and Astrophysics, University of Gda\'{n}sk, 80-952 Gda\'{n}sk, Poland} \affiliation{Department of Physics, Hanyang University, Seoul 133-791, Korea}
\author{Changhyoup Lee}
\affiliation{Centre for Quantum Technologies, National University of Singapore, 3 Science Drive 2, Singapore 117543} \affiliation{Department of Physics, Hanyang University, Seoul 133-791, Korea}
\author{Marek \.{Z}ukowski}
\affiliation{Institute of Theoretical Physics and Astrophysics, University of Gda\'{n}sk, 80-952 Gda\'{n}sk, Poland}
\author{Jinhyoung Lee}
\affiliation{Department of Physics, Hanyang University, Seoul 133-791, Korea} \affiliation{Center for Macroscopic Quantum Control, Seoul National University, Seoul, 151-742, Korea}
\begin{abstract} We generalize the Greenberger-Horne-Zeilinger (GHZ) theorem to an arbitrary number of $D$-dimensional systems. Contrary to conventional approaches using compatible composite observables, we employ incompatible and concurrent observables, whose common eigenstate is still a generalized GHZ state. It is these concurrent observables which enable us to prove a genuinely $N$-partite and $D$-dimensional GHZ theorem. Our principal idea is illustrated for a four-partite system with $D$ which is an arbitrary multiple of $3$. By extending to $N$ qudits, we show that the GHZ theorem holds as long as $N$ is not divisible by all nonunit divisors of $D$, smaller than $N$. \end{abstract} \pacs{} \maketitle
\newcommand{\bra}[1]{\langle #1\vert} \newcommand{\ket}[1]{\vert #1\rangle} \newcommand{\abs}[1]{\vert#1\vert} \newcommand{\avg}[1]{\langle#1\rangle}
\newcommand{\braket}[2]{\langle{#1}|{#2}\rangle} \newcommand{\commute}[2]{\left[{#1},{#2}\right]}
\newtheorem{theorem}{Theorem}
\section{Introduction}
The inconsistency of (local) hidden variable theories with quantum mechanics fascinates many researchers. It has been discussed in many theoretical~\cite{Bell64,*Clauser69,*Specker60,*Kochen67,*Leggett03} and experimental works~\cite{Aspect82,*Lapkiewicz11,*Groeblacher07}. Bell's theorem, one of the most profound discoveries concerning the foundations of quantum mechanics, states that any local realistic theory is incompatible with quantitative predictions of quantum mechanics. Even though Bell's theorem was studied mostly in terms of statistical inequalities, a more striking conflict, without inequalities, was also shown for a multiqubit system by Greenberger, Horne, and Zeilinger (GHZ)~\cite{GHZ89,*Pan12}. They derived an {\it all-versus-nothing} contradiction based on perfect correlations for so-called GHZ states. This leads to a direct refutation of Einstein-Podolsky-Rosen (EPR) ideas on the relation between locality and elements of reality with quantum mechanics~\cite{EPR35}.
This is a striking blow right into the very basic ideas linked with local hidden variables. After all, EPR used the concept of (local) elements of reality to support their claim that quantum mechanics is incomplete. All this can be best explained using the three particle GHZ paradox. Take a state $\ket{GHZ}=\frac{1}{\sqrt{2}}(\ket{+++}-\ket{---})$, where $\ket{\pm}$ denotes states associated with the eigenvalues $\pm1$ of the local Pauli $\sigma_z$ operator. The
operators $\sigma_x\otimes\sigma_x\otimes\sigma_x$, $\sigma_x\otimes\sigma_y\otimes\sigma_y$, $\sigma_y\otimes\sigma_x\otimes\sigma_y$, and $\sigma_y\otimes\sigma_y\otimes\sigma_x$ all commute, and their eigenstate is $|GHZ\rangle$. The eigenvalues are $-1$, $1$, $1$, and $1$, respectively, which signify perfect (GHZ)-EPR correlations. This would please any local realist. Assume that the particles are far away from each other, and three distant independent observers can perform experiments on them, choosing at will the observables. For example, the first and the second one may choose $\sigma_x$ and their measurement results are $1$ and $-1$, respectively. In such a case, they can together predict with certainty what would have been the result of the third observer had he or she chosen to measure also $\sigma_x$. Simply the local results must multiply to the eigenvalue of the joint observable $\sigma_x\otimes\sigma_x\otimes\sigma_x$, and this is $-1$. Thus, the third observer, if the hypothetical case of him or her choosing to measure $\sigma_x$ really happens, must for sure get $1$. Thus, as EPR would say, this value is an {\em element of reality}, because in no way can the distant choices and obtained results influence anything that happens at the location of the third observer (especially if the measurement actions are spatially separated events in the relativistic sense of this term, and the measurement choices are made some time after the particles are emitted from a common, say central, source). For such counterfactual reasoning one can use any of the four perfect correlation cases for the joint measurements given above, and apply to each observer. Thus, it seems that one can ascribe elements of reality to all local situations, no matter whether the local observable is $\sigma_x$ or $\sigma_y$. Note that these are incommensurable. 
Let us denote such elements of reality, related with a single emission act of three particles, by $r_{w}^k$, where $w=x,y$ denotes the observable, and $k=1,2,3$ denotes the observer. Obviously $r_{w}^k=\pm 1. $ For the four cases of GHZ-EPR perfect correlations one therefore must have $r_{x}^1r_{x}^2r_{x}^3=-1$, and $r_{x}^1r_{y}^2r_{y}^3=1$, $r_{y}^1r_{x}^2r_{y}^3=1$, $r_{y}^1r_{y}^2r_{x}^3=1$. If one multiplies these four relations side by side, one gets $1=-1$. Thus an attempt of introducing EPR elements of reality leads to a nonsense. {\em Ergo}, elements of reality are a nonsense. No other argument against local realism could be more striking.
Extending Bell's theorem to more complex systems such as multipartite and/or high-dimensional systems ~\cite{Mermin90c,*Werner01,*Zukowski02,*Collins02,*Laskowski04,*Son06,*James10} is important not only for a deeper understanding of foundations of quantum mechanics. It is associated with developing new applications in quantum information processing, such as quantum cryptography, secret sharing, quantum teleportation, reduction of communication complexity, quantum key distribution, and random numbers generation~\cite{Ekert91,Horodecki96,Cleve97, *Brukner04,Zukowski98,*Hillery99,*Kempe99,*Scarani01,*Barrett05,*Acin07,Pironio10}. Similarly to Bell's theorem, also all-versus-nothing tests, which we call GHZ theorem, have been generalized to higher dimensional systems. For the sake of convenience, we shall use the tuple $(N, M, D)$ to denote $N$ parties, $M$ measurements for each party, and $D$ distinct outcomes for each measurement. In Ref.~\cite{Zukowski99}, the GHZ theorem was derived for a $(D+1, 2, D)$ problem. A probabilistic but conclusive GHZ-like test was shown for $(D,2,D)$ in Ref.~\cite{Kaszlikowski02}. The $(N,2,D)$ problem for odd $N>D$ and even $D$ was studied by Cerf {\it et al.}~\cite{Cerf02a}. Lee {\it et al.} showed the GHZ theorem for more general cases, $(\mbox{odd}~N, 2, \mbox{even}~D)$, by an unconventional approach using incompatible observables~\cite{Lee06}. Recently, Tang {\it et al.} generalized GHZ theorem to the $N (\geq4)$-partite case and even-$D$ dimensional systems with the help of GHZ graphs~\cite{Tang13}. Despite such an intensive progress in extending GHZ theorem, many cases of $N$-partite and $D$-dimensional systems remain still as open problems.
We generalize the GHZ theorem to three or higher $D$-dimensional systems. To this end, we employ concurrent composite observables which, in contrast with the standard approach, are mutually incompatible but still have a common eigenstate, here a generalized GHZ state. They can be realized by multiport beam splitters and phase shifters, as it is shown in Refs.~\cite{Zukowski99,Lee06}. We first illustrate our principal idea with four $3d$-dimensional systems and then provide a systematic method, so as to extend it to three or higher $D$-dimensional systems. Finally, we show a GHZ-type contradiction, as long as $N$ is not divided by all nonunit divisors of $D$, smaller than $N$. Our generalization is genuinely $N$-partite {\it and} $D$-dimensional and can reproduce the previous results~\cite{GHZ89,Cerf02a,Zukowski99,Lee06}. This approach can lead to a general GHZ theorem for $N$ qudits.
\section{Concurrent observables}{\label{sec_concurrent}}
Some sets of observables have a common eigenstate. If a system is prepared in the eigenstate, the measurement results for such observables are concurrently appearing with certainty. Such observables are called ``concurrent''~\cite{Lee06}. For a quantum system of dimension $D (>2)$, consider two Hermitian operators $\hat{A}$ and $\hat{B}$ such that $\hat{A}=a\ket{\psi}\bra{\psi}+\hat{A}_{\psi}^{\perp}$ and $\hat{B}=b\ket{\psi}\bra{\psi}+\hat{B}_{\psi}^{\perp}$ with $\hat{A}_{\psi}^{\perp}(\hat{B}_{\psi}^{\perp}) \ket{\psi}=0$. The state $\ket{\psi}$ is then a common eigenstate of both observables as $\hat{A}(\hat{B})\ket{\psi}=a(b)\ket{\psi}$, even if $[\hat{A},\hat{B}]=[\hat{A}_{\psi}^{\perp},\hat{B}_{\psi}^{\perp} ]\neq0$~\footnote{Note that compatible observables are clearly concurrent.}.
Such concurrent observables can be constructed by the method introduced in Ref.~\cite{Lee06}. Consider a unitary operator $\hat{U}$, which is of the form of $\hat{U}=e^{i \phi} \ket{\psi}\bra{\psi}+\hat{U}_{\psi}^{\perp}$ with $\hat{U}_{\psi}^{\perp} \ket{\psi}=0$. Here $\hat{U}_{\psi}^{\perp}$ is a unitary operator on a space $\mathcal{H}_{\psi}^{\perp}$ which is defined by the requirement $\mathcal{H} =\mathcal{H}_{\psi} \oplus \mathcal{H}_{\psi}^{\perp}$, where $\mathcal{H}_{\psi}$ is the one-dimensional space containing $\ket{\psi}$. Every such unitary operator leaves the state $\ket{\psi}$ unchanged, up to a global phase: If the state $\ket{\psi}$ satisfies $\hat{A}\ket{\psi}=\lambda \ket{\psi}$, then all transformed operators $\hat{B}_{U}=\hat{U} \hat{A} \hat{U}^{\dagger}$ are concurrent with $\hat{A}$.
Consider $N$ qudits prepared in a generalized GHZ state $\ket{\psi}=\frac{1}{\sqrt{D}}\sum_{n=0}^{D-1} \bigotimes_{k=1}^{N} \ket{n}_{k}$, where $\ket{n}$ denotes a basis state for a qudit. This GHZ state is a common eigenstate with the unity eigenvalue of any composite observable $\hat{X}^{\otimes N} \equiv \hat{X}\otimes \hat{X} \otimes \cdots \otimes \hat{X}$ as $\hat{X}^{\otimes N} \ket{\psi}=\ket{\psi}$, where the local observable $\hat{X}$ is defined by applying quantum Fourier transformation $\hat{F}$ on a reference unitary observable $\hat{Z}=\sum_{n=0}^{D-1} \omega^n \ket{n} \bra{n}$ with $\omega=\exp({2 \pi i/D})$, that is $\hat{X}=\hat{F}\hat{Z}\hat{F}^{\dagger}$~\footnote{Here, we shall use local unitary observables $\hat{V}=\sum_{n=0}^{D-1}\omega^n \left|n\right>_{\mathrm{v}}\left<n\right|$, where $\omega=\exp(2 \pi i/D)$, in $D$-dimensional Hilbert space. The observable $\hat{V}$ is unitary, however, one can uniquely relate it with a Hermitian observable $\hat{H}$ by requiring $\hat{V}=\exp(i \hat{H})$. Therefore the complex eigenvalues of $\hat{V}$ can be associated with the measurement results, denoted by real eigenvalues of $\hat{H}$. Such a unitary representation leads to a simplification of mathematics without changing any physical results~\cite{Cerf02a, Lee06}.}. An eigenvector of $\hat{X}$ associated with the eigenvalue $\omega^n$ is given by $ \ket{n}_\mathrm{x} =\hat{F}\ket{n} = \frac{1}{\sqrt{D}} \sum_{m=0}^{D-1} \omega^{nm} \ket{m}$. With the standard basis set $\{\ket{n} \}$, the observable $\hat{X}$ is written as $\hat{X} = \sum_{n=0}^{D-1} \ket{n}\bra{n+1}$, where $\ket{n} \equiv \ket{n \mod D}$.
To construct a set of concurrent composite observables, we employ a unitary operation in the form of $\hat{U}= \bigotimes_{k=1}^{N} \hat{P}_{k} (f_k)$ with a phase shifter $\hat{P}_k=\sum_{n=0}^{D-1} \omega^{f_k(n)} \ket{n}\bra{n}$. If ``phases" $f_{k}(n)$ satisfy a condition, \begin{equation} \sum_{k=1}^{N} f_{k} (n) \equiv 0 \mod D, \label{invariant_condition} \end{equation} for each $n$, then the unitary operator $\hat{U}$ leaves the GHZ state $\ket{\psi}$ invariant. This simple invariance condition enables one to construct a large number of concurrent observables which have a common eigenstate of the generalized GHZ state.
Let us apply the unitary operation $\hat{U}$ with the phases $f_{k}(n)=\alpha_{k} n$ with rational numbers $\alpha_{k}$ to $\hat{X}^{\otimes N}$. If the phases $f_{k} (n)$ satisfy the invariance condition~(\ref{invariant_condition}), the transformed observable $\hat{U} \hat{X}^{\otimes N} \hat{U}^{\dagger}=\hat{X}(\alpha_1)\otimes \hat{X}(\alpha_2)\otimes \cdots \otimes\hat{X}(\alpha_N)$ is concurrent with $\hat{X}^{\otimes N}$, i.e., $\hat{U} \hat{X}^{\otimes N} \hat{U}^{\dagger} \ket{\psi}=\ket{\psi}$. For each eigenvalue $\omega^{n}$, the eigenvector of the local observable $\hat{X}({\alpha})$ is given by applying the phase shifter $\hat{P}$ on $\ket{n}_{\mathrm{x}}$ as $\ket{n}_{\alpha} = \hat{P}\ket{n}_\mathrm{x}= \frac{1}{\sqrt{D}}\sum_{m=0}^{D-1} \omega^{(n+\alpha)m} \ket{m}$. The observable $\hat{X}({\alpha})$ can be written in the standard basis set $\{\ket{n} \}$ as \begin{equation} \hat{X}({\alpha})=\omega^{-\alpha} \left( \sum_{n=0}^{D-2} \ket{n}\bra{n+1} + \omega^{\alpha D} \ket{D-1}\bra{0} \right). \label{obs_y} \end{equation} Note that if $\alpha$ is an integer, the measurement basis set $\{ \ket{n}_{\alpha}\}$ of $\hat{X}(\alpha)$ will be the same as $\{\ket{n}_\mathrm{x} \}$ of $\hat{X}$ except the ordering, i.e., $\ket{n}_{\alpha} = \ket{n + \alpha}_\mathrm{x} $. Thus, $\hat{X}(\alpha) = \omega^{-\alpha} \hat{X}$. That is, the observable $\hat{X}(\alpha)$ is equivalent to $\hat{X}$, up to a phase factor $\omega^{-\alpha}$. Let $\ket{n}_{\alpha}$ be the eigenstate of $\hat{X}(\alpha)$ associated with eigenvalue $\omega^n$, and $\ket{m}_{\beta}$ the eigenstate of $\hat{X}(\beta)$ associated with $\omega^m$. If and only if $\alpha$ differs from $\beta$ by an integer, then two measurement bases satisfy $\abs{{}_{\alpha}\bra{n} m \rangle_{\beta}}^2 = \delta_{D} (\gamma)$, where $\gamma=m-n+\beta-\alpha$. Here $\delta_{D}(\gamma)=1$ if $\gamma$ is congruent to zero modulo $D$ and otherwise $\delta_{D}(\gamma)=0$. 
That is, if $\beta - \alpha$ is not an integer, two local observables $\hat{X}(\alpha)$ and $\hat{X}(\beta)$ are inequivalent.
\section{Generalized GHZ theorem}{\label{sec_4partite}}
\subsection{Four-qudit system} We first illustrate our idea by considering a four-qudit system. Already this case goes significantly beyond the previous studies~\cite{Zukowski99, Cerf02a, Lee06, Tang13}. Take a four-qudit GHZ state $\ket{\psi}=\frac{1}{\sqrt{D}}\sum_{n=0}^{D-1} \ket{n,n,n,n}$ (here $D$ is assumed to be an integral multiple of 3).
The qudits are distributed to four sufficiently separated parties. Each party performs one of two nondegenerate local measurements on his or her qudit, each of which produces distinguishable $D$ outcomes. We represent the measurement for the $k$-th party by $\hat{M}_k$, and the eigenvalues of the observables are of the form $\omega^{m_k}$, where $m_k$ is an integer. One can denote a joint probability that each party obtains the result $\omega^{m_k}$ by $\mathcal{P}(m_1,m_2,m_3,m_4)$, and define a correlation function $E_{\mathrm{QM}} (M_1,M_2,M_3,M_4)=\bra{\psi}\hat{M}_1\otimes\hat{M}_2\otimes\hat{M}_3\otimes\hat{M}_4\ket{\psi}$, equivalently the quantum average of products of the measurement results: $\sum_{m_{1}=0}^{D-1}\cdots\sum_{m_{4}=0}^{D-1} \omega^{\sum_{i=1}^{4} m_i} \mathcal{P} (m_1,m_2,m_3,m_4)$.
When all measurements are $\hat{X}$, that is, each $\hat{M}_i=\hat{X}$, since $\hat{X} \otimes \hat{X} \otimes \hat{X} \otimes \hat{X} \ket{\psi} = \ket{\psi}$, one has $ E_{\mathrm{QM}} (X,X,X,X)=1$. This implies that we have a perfect GHZ correlation. If arbitrary three parties know their own outcomes $\omega^{x_i}$ of measurements $\hat{X}$, then they can predict with certainty the remaining party's outcome. We will denote such a perfect correlation by \begin{equation} C_{\mathrm{QM}} (x_1 + x_2 + x_3 + x_4 \equiv 0 ). \label{eq:perfect_correlation} \end{equation} The sum is taken modulo $D$; such a convention is used below in all formulas.
Let us construct concurrent composite observables from the observable $\hat{v}_0 = \hat{X}^{\otimes 4}$, by applying a unitary operator $\hat{U}_{1}=\hat{P}_{1}\otimes\hat{P}_{2}^{\otimes3}$, with the phase shifters $\hat{P}_k$ of phases $f_{1}(n)=(D-1)n$ and $f_{2}(n)=n/3$. One of the new observables is $\hat{v}_{1} \equiv \hat{U}_{1} \hat{v}_{0} \hat{U}_{1}^{\dagger} = \hat{X}(D-1) \otimes \hat{X}(1/3)^{\otimes 3}$. The phases $f_k (n)$ satisfy the invariance condition~(\ref{invariant_condition}), $f_1 (n) + 3 f_2 (n) \equiv 0 \mod D$. Thus, the observable $\hat{v}_1$ has $|\psi\rangle $ as its eigenstate with eigenvalue $1$. By Eq.~(\ref{obs_y}) one has $\hat{X}(D-1) =\omega \hat{X}(0)$, and $\hat{Y} \equiv \hat{X}(1/3)$ is given by \begin{equation} \hat{Y}=\omega^{-1/3} \left( \sum_{n=0}^{D-2} \ket{n}\bra{n+1} + \omega^{D/3} \ket{D-1}\bra{0} \right). \label{4_obs_y} \end{equation} The observable $\hat{Y}=\hat{X}(1/3)$ is not equivalent to $\hat{X}=\hat{X}(0)$ [see the explanation below Eq.~(\ref{obs_y})]. The other three concurrent observables are obtained by $\hat{v}_l \equiv \hat{U}_{l} \hat{v}_0 \hat{U}_l^{\dagger},~l\in \{2,3,4\}$, where the unitary operators $\hat{U}_l$ are composed of the cyclic permutations of the local phase shifters: $\hat{U}_{2}=\hat{P}_{2}\otimes\hat{P}_{1}\otimes\hat{P}_{2}\otimes\hat{P}_{2}, \hat{U}_{3}=\hat{P}_{2}\otimes\hat{P}_{2}\otimes\hat{P}_{1}\otimes\hat{P}_{2}$, and $\hat{U}_{4}=\hat{P}_{2}\otimes\hat{P}_{2}\otimes\hat{P}_{2}\otimes\hat{P}_{1}$. The perfect correlations for $\hat{v}_{i}$ $(i=1,\dots,4)$ are, respectively, given by \begin{eqnarray} &C_{\mathrm{QM}} (x_1 + y_2 + y_3 + y_4 \equiv -1) &, \nonumber \\ &C_{\mathrm{QM}} (y_1 + x_2 + y_3 + y_4 \equiv -1) &, \nonumber \\ &C_{\mathrm{QM}} (y_1 + y_2 + x_3 + y_4 \equiv -1) &, \nonumber \\ &C_{\mathrm{QM}} (y_1 + y_2 + y_3 + x_4 \equiv -1) &, \label{prob_QM} \end{eqnarray} where $\omega^{y_i}$ is an outcome of measurement $\hat{Y}$ for the $i$-th party.
Local realistic theories assume that the outcomes of the measurements are predetermined, before the actual measurements. This implies that the values of local realistic predictions for the correlations (\ref{prob_QM}), for each experimental run, must satisfy: \begin{eqnarray} & \omega^{x_1}\omega^{y_2}\omega^{ y_3}\omega^{y_4} = \omega^{-1},& \nonumber \\ & \omega^{y_1}\omega^{x_2}\omega^{ y_3}\omega^{y_4} = \omega^{-1},& \nonumber \\ & \omega^{y_1}\omega^{y_2}\omega^{ x_3}\omega^{y_4} = \omega^{-1},& \nonumber \\ & \omega^{y_1}\omega^{y_2}\omega^{ y_3}\omega^{x_4} = \omega^{-1}.& \label{eq_LHV2} \end{eqnarray} If local realism is also to reproduce quantum perfect correlations for the case of $\hat{v}_0 = \hat{X} ^{\otimes4}$, one must have
for each run, \begin{eqnarray} & \omega^{x_1}\omega^{x_2}\omega^{ x_3}\omega^{x_4} =1.& \label{eq_LHV2B} \end{eqnarray} However, multiplying side-by-side all equations (\ref{eq_LHV2}), one sees that this is possible only provided $\omega^{ -3 \sum_{i=1}^{4} y_{i}-4} =1 $. As $\omega=\exp(2\pi i/D) $, if $D=3d$ where $d$ is an integer, elementary algebra shows that the equation $3{\bf y} + 4 \equiv 0 \mod D$ has no integer solution. Thus the local realistic correlation (\ref{eq_LHV2B}) is impossible.
It is worth noting that the approach with concurrent observables relaxes the restrictions of early studies requiring {\it compatible} observables. This enables one to generalize GHZ contradictions beyond the case $N>D$ studied in Refs.~\cite{Zukowski99,Cerf02a}.
Note, that to prove the four-partite GHZ contradiction, we chose the local dimension $D$ and the number of the observables $\hat{Y}$'s in the considered correlation functions, $N_2$, such that the greatest common divisor (gcd) of $D$ and $N_2$, here $\mathrm{gcd}(D,N_2)=3$, does not divide the number of parties $N=4$, or equivalently the number $N_1$ of the observables $\hat{X}$ (here equal to $1$). This mathematical property plays a central role in the generalization of the GHZ contradiction to an arbitrary number of parties.
\subsection{Extending to $N$ qudits system}{\label{sec_extend}}
We extend our approach into a general case of $N$ qudits, $N\geq3$, such that $N$ is nondivisible by any nonunit divisor of $D$, smaller than $N$. To this end, we use a set of $(N+1)$ concurrent observables given by $\hat{v}_{0}=\hat{X}^{\otimes N}$ and $N$ observables of the following forms:
$\hat{v}_{1}=\omega \hat{X}^{\otimes N_1} \otimes \hat{Y}^{\otimes N_2}$ and
$\hat{v}_{k}= \hat{Y}^{\otimes k-1}\otimes \omega \hat{X}^{\otimes N_1} \otimes \hat{Y}^{\otimes N_2-k+1}$ for $k=2, \dots, N_2 +1$ and finally
$\hat{v}_{k}= \hat{X}^{\otimes k-N_2 -1} \otimes \hat{Y}^{\otimes N_2}\otimes \omega \hat{X}^{\otimes N-k+1} $ for $k=N_2+2,\dots, N$.
The composite observable $\hat{v}_1$ is obtained by a unitary transformation, $\hat{U}_{1}=\hat{P}_{1}\otimes \openone^{\otimes N_1 -1} \otimes \hat{P}_{2}^{\otimes N_2}$, of the observable $\hat{v}_{0}$, i.e., $\hat{v}_{1}=\hat{U}_{1}\hat{v}_{0}\hat{U}_{1}^{\dagger}$, with the phase shifters $\hat{P}_1$ and $\hat{P}_2$ of phases $f_{1} (n)=(D-1)n$ and $f_{2}(n)=n/N_2$, respectively. The local observables $\hat{X}=\hat{X}(0)$ and $\hat{Y}=\hat{X}(1/N_{2})$ are given by Eq.~(\ref{obs_y}). Likewise, we obtain the other concurrent observables $\hat{v}_{l}~(2 \leq l \leq N)$ by cyclic permutations in the unitary operator $\hat{U}_{1}$, as it was done for the four-partite case. The phases satisfy the invariance condition~(\ref{invariant_condition}) as $f_1 (n) + N_2 f_2 (n) \equiv 0 \mod D$. Thus, the $N$-partite generalized GHZ state $\ket{\psi}=\frac{1}{\sqrt{D}}\sum_{n=0}^{D-1} \bigotimes_{k=1}^{N} \ket{n}_{k}$ is a common eigenstate of all the $(N+1)$ concurrent observables $\hat{v}_l$ $(l=0,\dots,N)$, with the same eigenvalue $1$. This leads to the following values of correlation functions (for later convenience we use party indices $i=1, \dots, N$, which will later on allow us to get a more concise notation in formulas). If all local observables are $\hat{X}$, that is, for global $\hat{v}_0$, one has $E_{\mathrm{QM}} (X,X,\dots,X) = 1$. Thus we have a perfect correlation which can be denoted, in the way introduced earlier, as $C_{\mathrm{QM}} (\sum_{i=1}^{N}x_i \equiv 0)$. For $\hat{v}_k$, where $k=1,2,\dots,N$, one has perfect correlations of the following forms: for $k=1$, $C_{\mathrm{QM}} ( \sum_{i=1}^{N_1}x_i + \sum_{i=N_1+1}^{N}y_{i} \equiv -1)$,
and for $k=2,\dots,N_2 +1$, $C_{\mathrm{QM}} ( \sum_{i=1}^{k-1}y_i + \sum_{i=k}^{N_1+k-1}x_{i} + \sum_{i=N_1+k}^{N}y_{i} \equiv -1)$,
and finally for $k=N_2+2,\dots, N$, $C_{\mathrm{QM}} ( \sum_{i=1}^{k-N_2 -1}x_i + \sum_{i=k-N_2}^{k-1}y_{i} + \sum_{i=k}^{N}x_{i} \equiv -1)$.
Following similar arguments as in the case of the four-partite GHZ contradiction, we obtain the following condition for the local realistic correlation function for the composite observable $\hat{v}_{0}=\hat{X}^{\otimes N}$, to have value equal to the quantum prediction, that is, 1. It reads (modulo $D$) \begin{equation} N_1 \sum_{i=1}^{N} x_{i} \equiv -N_2 \sum_{i=1}^{N} y_{i} - N\equiv 0 . \label{general_LR_condition} \end{equation} However, if $N_2$ is an integral multiple of $g$ but $N$ cannot be divided by $g$, then the equation $N_2 {\bf y} + N \equiv 0 \mod D$ has no integer solution ${\bf y}=\sum_{i} y_{i}$. Indeed, the greatest common divisor of $N_2$ and $D$ is an integral multiple of $g$, i.e., $\mathrm{gcd}(N_2, D)=kg$ for some positive integer $k$, but $kg$ cannot divide $N$ as $N$ is not an integer multiple of $g$. Thus we have a contradiction with the quantum prediction.
In order to show a GHZ contradiction for an $N$-partite and $D$-dimensional system, we choose that (a) $D=dg$, (b) $N_2 = \eta g$, where $d$ and $\eta$ are positive integers, and (c) $N$ cannot be divided by $g$. Choosing the integer $g$, a nonunit divisor of $D$, plays a crucial role. For example, consider {\em four} six-dimensional systems. The nonunit divisors $g$, smaller than $N=4$, are $2$ and $3$. If we choose $g=2$, then we are unable to see any four-partite GHZ contradiction as the greatest common divisor (gcd) of $N_2$ and $D$, $\mathrm{gcd}(N_2 = 2, D=6)=2$, divides $N=4$. On the other hand, if we choose $g=3$, the four-partite GHZ contradiction can be proved as $\mathrm{gcd}(N_2 =3 , D=6)=3$ and $g=3$ does not divide $N=4$. This is a specific example of a GHZ contradiction for a $(4,2,3d)$ problem. As a consequence, we conclude that one is always able to prove the GHZ contradiction for the $N(\geq 3)$-partite and $D(\geq 2)$-dimensional systems as long as there exists a nonunit divisor of $D$ that does not divide $N$.
For appropriate values of $N$ and $D$, our approach reproduces the previous works~\cite{Cerf02a,Zukowski99,Lee06}. A GHZ contradiction for $(D+1)$ qudits shown in Ref.~\cite{Zukowski99} is reproduced by choosing $N_1=1$ and $N_2=D$ in our method, and noticing the fact that $N_1=1$ is indivisible by $D=\mathrm{gcd} (N_2 =D, D)$. The case of $(\mathrm{odd}~N, 2, \mathrm{even}~D)$ studied in Refs.~\cite{Cerf02a,Lee06} can be also proved by choosing a nonunit divisor $g=2$ of $D$ and an arbitrary odd integer $N_1$. One can also easily check that if $N_2 = D=2$ and $N_1 =1$, then our contradiction is reduced to the original GHZ theorem~\cite{GHZ89}.
\subsection{Genuinely $N$-partite $D$-dimensional case}
The GHZ theorem for the two-dimensional systems seems to be fully understood. However, for more complex systems this is not so. Cerf {\it et al.} suggested a criterion for a genuinely $N$-partite ($D$-dimensional) GHZ contradiction~\cite{Cerf02a}. It arises only for the given full $N$-partite ($D$-dimensional) system, but not for any $n(<N)$-partite subset, or for an effectively lower dimensionality of the involved observables. For example, the three-qubit classic GHZ theorem can be put as the theorem for three qutrits, and specific entangled GHZ states involving only two-dimensional subspaces for each qutrit.
The GHZ contradiction we show here is a genuinely $N$-partite one, as it is constructed using a set of composite observables composed of cyclic permutations. Let us explain this with the four-partite GHZ contradiction, where we used the five concurrent composite observables: $\hat{X} \otimes \hat{X} \otimes \hat{X} \otimes \hat{X}$, $\hat{X} \otimes \hat{Y} \otimes \hat{Y} \otimes \hat{Y}$, $\hat{Y} \otimes \hat{X} \otimes \hat{Y} \otimes \hat{Y}$, $\hat{Y} \otimes \hat{Y} \otimes \hat{X} \otimes \hat{Y}$, and $\hat{Y} \otimes \hat{Y} \otimes \hat{Y} \otimes \hat{X}$. In such circumstances, if we eliminate one of the parties, we are unable to show a GHZ contradiction with the remaining observables. The four-partite GHZ state is no longer their common eigenstate. Similar argument can be put forward in the case of our $N$-partite theorem.
The genuine $D$ dimensionality is reflected by the fact that the operators are undecomposable to a direct sum of any subdimensional observables~\cite{Lee06}. In other words, if two local observables $\hat{X}$ and $\hat{Y}$ can be simultaneously block diagonalized by some similarity transformation $\hat{S}$ such that $\hat{S}\hat{X}\hat{S}^{\dagger}=\hat{X}_1 \oplus \cdots \oplus \hat{X}_K$ and $\hat{S}\hat{Y}\hat{S}^{\dagger}=\hat{Y}_1 \oplus \cdots \oplus \hat{Y}_K$, then there exist some eigenstates $\ket{n}_{\alpha}$ of $\hat{X}$ and $\ket{m}_{\beta}$ of $\hat{Y}$ such that ${}_{\alpha}\bra{n} \hat{S}^{\dagger} \hat{S} \ket{m}_{\beta}=0$ and one can find a sub-dimensional GHZ contradiction. However, there are no such eigenstates in our method because for every $n$ and $m$, $\abs{{}_{\alpha}\langle n \ket{m}_{\beta}}^2 = \frac{\sin^2 (\pi \xi)}{D^2 \sin^2 \left[(\pi/D) \xi \right]} > 0$,
where $\xi=m-n+\beta-\alpha$. As the local observables $\hat{X}=\hat{X}(\alpha)$ and $\hat{Y}=\hat{X}(\beta)$ are such that $\beta - \alpha$ is not an integer, $\xi$ is a nonintegral rational number. Thus, our GHZ contradiction is genuinely $D$ dimensional.
\section{Summary}
We construct a generalized GHZ contradiction for {\it multipartite} and {\it high}-dimensional systems. The GHZ theorem holds as long as $N$ is not divisible by all nonunit divisors of $D$, smaller than $N$. We also demonstrate that our formulation of a generalized GHZ contradiction is genuinely $N$ partite {\it and} $D$ dimensional. For this purpose, we employ concurrent composite observables, which have a generalized GHZ state as a common eigenstate (even though these observables are {\it incompatible}). Our approach, by using concurrent observables, enables us to find a broader class of GHZ contradictions. There remain still more possibilities for constructing concurrent observables, which may help in further extension of the GHZ theorem. We hope that our approach of concurrent observables would be useful in the search of other kinds of quantum correlations, which are impossible classically.
\acknowledgements We thank \v{C}. Brukner for discussions. The work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MEST) (Grants No. 2010-0015059 and No. 2010-0018295). J.R. and M.\.{Z}. are supported by the Foundation for Polish Science TEAM project cofinanced by the EU European Regional Development Fund and a NCBiR-CHIST-ERA Project QUASAR. C.L. is supported by the National Research Foundation and Ministry of Education, Singapore.
\end{document} |
\begin{document}
\title{Global Optimization with Parametric Function Approximation}
\begin{abstract} We consider the problem of global optimization with noisy zeroth order oracles — a well-motivated problem useful for various applications ranging from hyper-parameter tuning for deep learning to new material design. Existing work relies on Gaussian processes or other non-parametric family, which suffers from the curse of dimensionality. In this paper, we propose a new algorithm GO-UCB that leverages a parametric family of functions (e.g., neural networks) instead. Under a realizable assumption and a few other mild geometric conditions, we show that GO-UCB achieves a cumulative regret of $\tilde{O}(\sqrt{T})$ where $T$ is the time horizon. At the core of GO-UCB is a carefully designed uncertainty set over parameters based on gradients that allows optimistic exploration. Synthetic and real-world experiments illustrate GO-UCB works better than Bayesian optimization approaches in high dimensional cases, even if the model is misspecified. \end{abstract}
\section{Introduction}\label{sec:intro} We consider the problem of finding a global optimal solution to the following optimization problem \begin{align*} \max_{x \in \mathcal{X}} f(x), \end{align*} where $f: \mathcal{X} \rightarrow \mathbb{R}$ is an unknown non-convex function that is not necessarily differentiable in $x$.
This problem is well-motivated by many real-world applications. For example, the accuracy of a trained neural network on a validation set is complex non-convex function of a set of hyper-parameters (e.g., learning rate, momentum, weight decay, dropout, depth, width, choice of activation functions ...) that one needs to maximize \citep{kandasamy2020tuning}. Also in material design, researchers want to synthesize ceramic materials, e.g., titanium dioxide ($\mathrm{TiO}_2$) thin films, using microwave radiation \citep{nakamura2017design} where the film property is a non-convex function of parameters including temperature, solution concentration, pressure, and processing time. Efficiently solving such non-convex optimization problems could significantly reduce energy cost.
We assume having access to only noisy function evaluations, i.e., at round $t$, we select a point $x_t$ and receive a noisy function value $y_t$, \begin{align} y_t = f(x_t) + \eta_t,\label{eq:y} \end{align} where $\eta_t$ for $t=1,...,T$ are \emph{independent}, \emph{zero-mean}, $\sigma$-\emph{sub-Gaussian} noise. This is known as the \emph{noisy zeroth-order oracle} setting in optimization literature. Let $f^*$ be the optimal function value. Following the tradition of Bayesian optimization (see e.g., \citet{frazier2018tutorial} for a review), throughout this paper we use \emph{cumulative regret} as the evaluation criterion, defined as \begin{align*} R_T = \sum_{t=1}^T r_t = \sum_{t=1}^T f^* - f(x_t), \end{align*} where $r_t$ is called instantaneous regret at round $t$. An algorithm $\mathcal{A}$ is said to be a no-regret algorithm if $\lim_{T \rightarrow \infty} R_T(\mathcal{A})/T=0$.
Generally speaking, solving a global non-convex optimization is NP-hard \citep{jain2017non} and we need additional assumptions to efficiently proceed. Bayesian optimization assumes the objective function $f$ is drawn from a Gaussian process prior. \citet{srinivas2010gaussian} proposed the GP-UCB approach, which iteratively queries the argmax of an upper confidence bound of the current posterior belief, before updating the posterior belief using the new data point. However, Gaussian process relies on kernels, e.g., squared error kernel or Mat\'ern kernel, which suffer from the curse of dimensionality. A folklore rule-of-thumb is that GP-UCB becomes unwieldy when the dimension is larger than $10$.
A naive approach is to passively query $T$ data points uniformly at random, estimate $f$ by $\hat{f}$ using supervised learning, then return the maximizer of the plug-in estimator $\hat{x} =\mathop{\mathrm{argmax}}_{x \in \mathcal{X}}\hat{f}(x)$. This may side-step the curse-of-dimensionality depending on which supervised learning model we use. The drawback of this passive query model is that it does not consider the structure of the function nor does it quickly ``zoom-in'' to the region of the space that is nearly optimal. In contrast, an active query model allows the algorithm to iteratively interact with the function. At round $t$, the model collects information from all previous rounds $1,...,t-1$ and decides where to query next.
\textbf{GO-UCB Algorithm.} In this paper, we develop an algorithm that allows Bayesian optimization-style active queries to work for general supervised learning-based function approximation. We assume that the supervised learning model $f_w:\mathcal{X} \rightarrow \mathbb{R}$ is differentiable w.r.t. its $d_w$-dimensional parameter vector $w\in \mathcal{W}\subset \mathbb{R}^{d_w}$ and that the function class $\mathcal{F}=\{f_w | w\in\mathcal{W}\}$ is flexible enough such that the true objective function $f = f_{w^*}$ for some $w^*\in\mathcal{W}$, i.e., $\mathcal{F}$ is \emph{realizable}. Our algorithm --- \emph{Global Optimization via Upper Confidence Bound} (GO-UCB) --- has two phases:
\fbox{\parbox{0.9\columnwidth}{The \textit{GO-UCB} Framework: \small \noindent \begin{itemize}
\item Phase I: Uniformly explore $n$ data points.
\item Phase II: Optimistically explore $T$ data points. \end{itemize}}}
The goal of Phase I is to sufficiently explore the function and make sure the estimated parameter $\hat{w}_0$ is close enough to the true parameter $w^*$ such that exploration in Phase II is efficient. To solve the estimation problem, we rely on a regression oracle that is able to return an estimated $\hat{w}_0$ after $n$ observations. In detail, after Phase I we have a dataset $\{(x_j,y_j)\}_{j=1}^{n}$, then \begin{align} \hat{w}_0 \leftarrow \mathop{\mathrm{argmin}}_{w\in\mathcal{W}} \sum_{j=1}^n (f_w(x_j) - y_j)^2.\label{eq:regression_oracle} \end{align} This problem is known as a \emph{non-linear least square} problem. It is computationally hard in the worst-case, but many algorithms are known (e.g., SGD, Gauss-Newton, Levenberg-Marquardt) to effectively solve this problem in practice. Our theoretical analysis of $\hat{w}_0$ uses techniques from \citet{nowak2007complexity}. See Section \ref{sec:mle} for details.
In Phase II, exploration is conducted following the principle of ``Optimism in the Face of Uncertainty'', i.e., the parameter is optimized within an uncertainty region that always contains the true parameter $w^*$. Existing work in bandit algorithms provide techniques that work when $f_w$ is a linear function \citep{abbasi2011improved} or a generalized linear function \citep{li2017provably}, but no solution to general differentiable function is known. At the core of our GO-UCB is a carefully designed uncertainty ball $\mathrm{Ball}_t$ over parameters based on gradients, which allows techniques from the linear bandit \citep{abbasi2011improved} to be adapted for the non-linear case. In detail, the ball is defined to be centered at $\hat{w}_t$ --- the solution to a regularized online regression problem after $t-1$ rounds of observations. And the radius of the ball is measured by the covariance matrix of the gradient vectors of all previous rounds. We prove that $w^*$ is always trapped within the ball with high probability.
\textbf{Contributions.} In summary, our main contributions are: \begin{enumerate} \item We initiate the study of the global optimization problem with parametric function approximation and propose a new optimistic exploration algorithm --- GO-UCB. \item Under the realizable assumption and geometric conditions, we prove GO-UCB converges to the global optima with cumulative regret at the order of $\tilde{O}(\sqrt{T})$ where $T$ is the time horizon. \item GO-UCB does not suffer from the curse of dimensionality like Gaussian processes-based Bayesian optimization methods. The unknown objective function $f$ can be high-dimensional, non-convex, non-differentiable, and even discontinuous in its input domain. \item Synthetic test function and real-world hyperparameter tuning experiments show that GO-UCB works better than all compared Bayesian optimization methods in both realizable and misspecified settings. \end{enumerate}
\textbf{Technical novelties.} The design of GO-UCB algorithm builds upon the work of \citet{abbasi2011improved} and \citet{agarwal2021rl}, but requires substantial technical novelties as we handle a generic nonlinear parametric function approximation. Specifically: \begin{enumerate}
\item LinUCB analysis (e.g., self-normalized Martingale concentration, elliptical potential lemmas \citep{abbasi2011improved,agarwal2021rl}) is not applicable for nonlinear function approximation, but we showed that they can be adapted for this purpose if we can \emph{localize} the learner to a neighborhood of $w^*$.
\item We identify a new set of structural assumptions under which we can localize the learner sufficiently with only $O(\sqrt{T})$ rounds of pure exploration.
\item Showing that $w^*$ remains inside the parameter uncertainty ball $\mathrm{Ball}_t, \forall t \in [T]$ is challenging. We solve this problem by setting regularization centered at the initialization parameter $\hat{w}_0$ and presenting novel inductive proof of a lemma showing $\forall t\in [T], \hat{w}_t$ converges to $w^*$ in $\ell_2$-distance at the same rate. \end{enumerate} These new techniques could be of independent interest.
\section{Related Work}\label{sec:rw} Global non-convex optimization is an important problem that can be found in a lot of research communities and real-world applications, e.g., optimization \citep{rinnooy1987stochastic1,rinnooy1987stochastic2}, machine learning \citep{bubeck2011x,malherbe2017global}, hyperparameter tuning \citep{hazan2018hyperparameter}, neural architecture search \citep{kandasamy2018neural,wang2020learning}, and material discovery \citep{frazier2016bayesian}.
One of the most prominent approaches to this problem is Bayesian Optimization (BO) \citep{shahriari2015taking}, in which the objective function is modeled by a Gaussian Process (GP) \citep{williams2006gaussian}, so that the uncertainty can be updated under the Bayesian formalism. Among the many notable algorithms in GP-based BO \citep{srinivas2010gaussian,jones1998efficient,bull2011convergence,frazier2009knowledge,agrawal2013thompson,cai2021on}, GP-UCB \citep{srinivas2010gaussian} is the closest to our paper because our algorithm also selects data points in a UCB (upper confidence bound) style but the construction of the UCB in our paper is different since we are not working with GPs. \citet{scarlett2017lower} proves lower bounds on regret for noisy Gaussian process bandit optimization. GPs are highly flexible and can approximate any smooth functions, but such flexibility comes at a price to pay --- the curse of dimensionality. Most BO algorithms do not work well when $d>10$. Notable exceptions include the work of \citet{shekhar2018gaussian,calandriello2019gaussian,eriksson2019scalable,salgia2021domain,rando2022ada} who designed more specialized BO algorithms for high-dimensional tasks.
Besides GP, other nonparametric families were considered for global optimization tasks, but they, too, suffer from the curse of dimensionality. We refer readers to \citet{wang2018optimization} and the references therein.
Our problem is also connected to the bandits literature \citep{li2019nearly,foster2020beyond,russo2013eluder,filippi2010parametric}. The global optimization problem can be written as a nonlinear bandits problem in which the queried points are actions and the function evaluations are rewards. However, no bandits algorithms can simultaneously handle an infinite action space and a generic nonlinear reward function. Here ``generic'' means the reward function is much more general than a linear or generalized linear function \citep{filippi2010parametric}. To the best of our knowledge, we are the first to address the infinite-armed bandit problems with a general differentiable value function (albeit with some additional assumptions).
A recent line of work studied bandits and global optimization with neural function approximation \citep{zhou2020neural,zhang2020neural,dai2022sample}. The main difference from us is that these results still rely on Gaussian processes with a Neural Tangent Kernel in their analysis, thus intrinsically linear. Their regret bounds also require the width of the neural network to be much larger than the number of samples to be sublinear. In contrast, our results apply to general nonlinear function approximations and do not require overparameterization.
\section{Preliminaries}\label{sec:pre}
\subsection{Notations}
We use $[n]$ to denote the set $\{1,2,...,n\}$. The algorithm queries $n$ points in Phase I and $T$ points in Phase II. Let $\mathcal{X} \subset \mathbb{R}^{d_x}$ and $\mathcal{Y} \subset \mathbb{R}$ denote the domain and range of $f$, and $\mathcal{W} \subset [0, 1]^{d_w}$ denote the parameter space of a family of functions $\mathcal{F} := \{f_w: \mathcal{X}\rightarrow \mathcal{Y} | w\in \mathcal{W}\}$. For convenience, we denote the bivariate function $f_w(x)$ by $f_x(w)$ when $w$ is the variable of interest. $\nabla f_x(w)$ and $\nabla^2 f_x(w)$ denote the gradient and Hessian of function $f$ w.r.t. $w$. $L(w) := \mathbb{E}_{x \sim \mathcal{U}} (f_x(w)-f_x(w^*))^2$ denotes the (expected) risk function where $\mathcal{U}$ is uniform distribution. For a vector $x$, its $\ell_p$ norm is denoted by $\|x\|_p = (\sum_{i=1}^d |x_i|^p)^{1/p}$ for $1\leq p < \infty$.
For a matrix $A$, its operator norm is denoted by $\|A\|_\mathrm{op}$.
For a vector $x$ and a square matrix $A$, define $\|x\|^2_A = x^\top A x$. Throughout this paper, we use standard big $O$ notations; and to improve the readability, we use $\tilde{O}$ to hide poly-logarithmic factors. For reader's easy reference, we list all symbols and notations in Appendix \ref{sec:table}.
\subsection{Assumptions} Here we list main assumptions that we will work with throughout this paper. The first assumption says that we have access to a differentiable function family that contains the unknown objective function.
\begin{assumption}[Realizability]\label{ass:parameter_class} There exists $w^*\in \mathcal{W}$ such that the unknown objective function $f= f_{w^*}$. Also, assume $\mathcal{W}\subset [0,1]^{d_w}$. This is w.l.o.g. for any compact $\mathcal{W}$. \end{assumption} Realizable parameter class is a common assumption in literature \citep{chu2011contextual,foster2018practical,foster2020beyond}, usually the starting point of a line of research for a new problem because one doesn't need to worry about extra regret incurred by misspecified parameter. Although in this paper we only theoretically study the realizable parameter class, our GO-UCB algorithm empirically works well in misspecified tasks too. The second assumption is on properties of function approximation. \begin{assumption}[Bounded, differentiable and smooth function approximation]\label{ass:objective} There exist constants $F, C_g, C_h > 0$ such that
$\forall x \in \mathcal{X}, \forall w \in \mathcal{W}$, it holds that $|f_x(w)|\leq F,$ \begin{align*}
\|\nabla f_x(w) \|_2 \leq C_g, \quad\text{ and }\quad
\|\nabla^2 f_x(w)\|_\mathrm{op} \leq C_h. \end{align*} \end{assumption}
The third assumption is on the expected loss function over uniform distribution in Phase I of GO-UCB.
\begin{assumption}[Geometric conditions on the loss function]\label{ass:loss} $L(w)=\mathbb{E}_{x\sim \mathcal{U}}(f_x(w)-f_x(w^*))^2$ satisfies $(\tau, \gamma)$-growth condition or local $\mu$-strong convexity at $w^*$, i.e., $\forall w \in \mathcal{W}$, \begin{align*}
\min\left\{\frac{\mu}{2}\|w-w^*\|_2^2,\frac{\tau}{2}\|w-w^*\|_2^\gamma \right\} \leq L(w)-L(w^*). \end{align*} Also, $L(w)$ is a self-concordant function at $w^*$. \end{assumption}
\begin{figure}
\caption{Example of a highly non-convex $L(w)$ satisfying Assumption \ref{ass:loss}. Solid lines denote the actual lower bound by taking $\min$ over strong convexity and growth condition. $L(w)$ is strongly convex near $w^*$ but can be highly non-convex away from $w^*$. }
\label{fig:loss}
\end{figure}
\begin{remark}
This assumption is strictly weaker than global strongly convex because it does not require convexity except in a local neighborhood near the global optimal $w^*$, i.e., $\{w|\|w - w^*\|_2 \leq (\tau/\mu)^\frac{1}{2-\gamma}\}$ and it does not limit the number of spurious local minima, as the global $\gamma$ growth condition only gives a mild lower bound as $w$ moves away from $w^*$. See Figure \ref{fig:loss} for an example. Our results work even if $\gamma$ is a small constant $<1$. \end{remark}
Note the assumption is only made on the expected loss function w.r.t. uniform distribution $\mathcal{U}$ as a function of $w$, rather than objective function $f_{w^*}(x)$. The problem to be optimized can still be arbitrarily complex in terms of $\mathcal{X}$, e.g., high-dimensional and non-continuous functions. As an example, in Gaussian process based Bayesian optimization approaches, $f_{w^*}(x)$ belongs to a reproducing kernel Hilbert space, but its loss function is globally convex in its ``infinite dimensional'' parameter $w$. Also, we no longer need this assumption in Phase II.
Self-concordance is needed for technical reasons, but again it is only required near $w^*$. It means that for a given point $w$ such that $\|w - w^*\|_{\nabla^2 L(w^*)} \leq 1$, then \begin{align*}
(1-\|w - w^*\|_{\nabla^2 L(w^*)})^2 \nabla^2 L(w^*) \preceq \nabla^2 L(w) \preceq \frac{\nabla^2 L(w^*)}{(1-\|w-w^*\|_{\nabla^2 L(w^*)})^2}. \end{align*} See Example 4 of \citet{zhang2017improved} for some self-concordance function examples.
\textbf{Additional notations.} For convenience, we define $\zeta >0$ such that $\|\nabla^2 L(w^*)\|_\mathrm{op} \leq \zeta$. The existence of a finite $\zeta$ is implied by Assumption~\ref{ass:objective} and it suffices to take $\zeta = 2C_g^2$ because $\nabla^2 L(w^*) = \mathbb{E}_{x\sim \mathcal{U}}[ 2\nabla f_x(w^*) \nabla f_x(w^*)^\top]$.
\section{Main Results}\label{sec:main} In Section \ref{sec:alg}, we state our Global Optimization with Upper Confidence Bound (GO-UCB) algorithm and explain key design points of it. Then in Section \ref{sec:regret_bound}, we prove that its cumulative regret bound is at the rate of $\tilde{O}(\sqrt{T})$.
\subsection{Algorithm}\label{sec:alg}
Our GO-UCB algorithm, shown in Algorithm \ref{alg:go_ucb}, has two phases. Phase I does uniform exploration in $n$ rounds and Phase II does optimistic exploration in $T$ rounds. In Step 1 of Phase I, $n$ is chosen to be large enough such that the objective function can be sufficiently explored. Steps 2--3 perform uniform sampling. In Step 5, we call the regression oracle to estimate $\hat{w}_0$ given all observations in Phase I as in eq. \eqref{eq:regression_oracle}. Adapting techniques from \citet{nowak2007complexity}, we prove that $\|\hat{w}_0 - w^*\|_2$ converges at the rate of $\tilde{O}(1/\sqrt{n})$. See Theorem \ref{thm:mle_guarantee} for details.
\begin{algorithm}[!b] \caption{GO-UCB}
\label{alg:go_ucb}
{\bf Input:}
Time horizon $T$, uniform exploration phase length $n$, uniform distribution $\mathcal{U}$, regression oracle $\mathrm{Oracle}$, regularization weight $\lambda$,
confidence sequence $\beta_t$ for $t=1,2,...,T$.\\
{\bf Phase I} (Uniform exploration)
\begin{algorithmic}[1]
\FOR{$j = 1,...,n$}
\STATE Sample $x_j \sim \mathcal{U}(\mathcal{X})$.
\STATE Observe $y_j = f(x_j) + \eta_j$.
\ENDFOR
\STATE Estimate $\hat{w}_0 \leftarrow \mathrm{Oracle}(x_1, y_1, ..., x_n, y_n)$.
\end{algorithmic}
{\bf Phase II} (Optimistic exploration)
\begin{algorithmic}[1]
\FOR{$t = 1,...,T$}
\STATE Update $\Sigma_t$ by eq. \eqref{eq:sigma_t} with the input $\lambda$.
\STATE Update $\hat{w}_t$ by eq. \eqref{eq:inner} with the input $\lambda$.
\STATE Update $\mathrm{Ball}_t$ by eq. \eqref{eq:ball} with the input $\beta_t$.
\STATE Select $x_t=\mathop{\mathrm{argmax}}_{x \in \mathcal{X}} \max_{w \in \mathrm{Ball}_t} f_x(w)$.
\STATE Observe $y_t = f(x_t) + \eta_t$.
\ENDFOR
\end{algorithmic}
{\bf Output:}
$\hat{x} \sim \mathcal{U} (\{x_1, ..., x_T\})$. \end{algorithm}
The key challenge of Phase II of GO-UCB is to design an acquisition function to select $x_t, \forall t \in [T]$. Since we are using parametric function to approximate the objective function, we heavily rely on a feasible parameter uncertainty region $\mathrm{Ball}_t, \forall t \in [T]$, which should always contain the true parameter $w^*$ throughout the process. The shape of $\mathrm{Ball}_t$ is measured by the covariance matrix $\Sigma_t$, defined as \begin{align} \Sigma_t = \lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top.\label{eq:sigma_t} \end{align} Note $i$ is indexing over both $x$ and $w$, which means that as time $t$ goes from $0$ to $T$, the update to $\Sigma_t$ is always rank one. It allows us to bound the change of $\Sigma_t$ from $t=0$ to $T$.
$\mathrm{Ball}_t$ is centered at $\hat{w}_t$, the newly estimated parameter at round $t$. In Step 2, we update the estimated $\hat{w}_t$ by solving the following optimization problem: \begin{align}
\hat{w}_t &= \mathop{\mathrm{argmin}}_{w}\frac{\lambda}{2} \|w - \hat{w}_0\|_2^2 + \frac{1}{2} \sum_{i=0}^{t-1} ((w-\hat{w}_i)^\top \nabla f_{x_i}(\hat{w}_i) + f_{x_i}(\hat{w}_i) - y_i)^2.\label{eq:opt_inner} \end{align} The optimization problem is an online regularized least square problem involving gradients from all previous rounds, i.e., $\nabla f_{x_i}(\hat{w}_i), \forall i \in [T]$. The intuition behind it is that we use gradients to approximate the function since we are dealing with a generic objective function. We set the regularization w.r.t. $\hat{w}_0$ rather than $0$ because from the regression oracle we know how close $\hat{w}_0$ is to $w^*$. By setting the gradient of the objective function in eq. \eqref{eq:opt_inner} to be $0$, the closed form solution of $\hat{w}_t$ is \begin{align} \hat{w}_t &= \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) (\nabla f_{x_i}(\hat{w}_i)^\top \hat{w}_i +y_i - f_{x_i}(\hat{w}_i)) \right) + \lambda \Sigma^{-1}_t \hat{w}_0.\label{eq:inner} \end{align}
Now we move to our definition of $\mathrm{Ball}_t$, shown as \begin{align}
\mathrm{Ball}_t = \{w: \|w-\hat{w}_t\|^2_{\Sigma_t} \leq \beta_t\},\label{eq:ball} \end{align} where $\beta_t$ is a pre-defined monotonically increasing sequence that we will specify later. Following the ``optimism in the face of uncertainty'' idea, our ball is centered at $\hat{w}_t$ with $\beta_t$ being the radius and $\Sigma_t$ measuring the shape. $\beta_t$ ensures that the true parameter $w^*$ is always contained in $\mathrm{Ball}_t$ w.h.p. In Section \ref{sec:ball}, we will show that it suffices to choose \begin{align} \beta_t &= \tilde{\Theta}\bigg(d_w \sigma^2 + \frac{d_w F^2}{\mu} + \frac{d^3_w F^4t}{\mu^2 T}\bigg),\label{eq:beta_t} \end{align} where $\tilde{\Theta}$ hides logarithmic terms in $t, T$ and $1/\delta$ (w.p. $1-\delta$).
Then in Step 5 of Phase II, $x_t$ is selected by joint optimization over $x \in \mathcal{X}$ and $w \in \mathrm{Ball}_t$. Finally, we collect all observations in $T$ rounds and output $\hat{x}$ by uniformly sampling over $\{x_1,...,x_T\}$.
\subsection{Regret Upper Bound}\label{sec:regret_bound} Now we present the cumulative regret upper bound of GO-UCB algorithm.
\begin{theorem}[Cumulative regret of GO-UCB]\label{thm:cr} Suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold. Let $C$ denote a universal constant and $C_\lambda$ denote a constant that is independent of $T$. Assume \begin{equation}\label{eq:n_lower_main}
T > C \max \left\{ \frac{2^{\gamma-2} d_w F^2\zeta^\frac{\gamma}{2} \iota}{\tau}, \frac{d_w F^2 \zeta \iota}{\mu} \right\}^2, \end{equation} where $\iota$ is a logarithmic term depending on $n, C_h, 2/\delta$. Algorithm~\ref{alg:go_ucb} with parameters $n = \sqrt{T}$, $\lambda = C_\lambda \sqrt{T}$ and $\beta_{1:T}$ as in eq. \eqref{eq:beta_t} obeys that with probability $> 1-\delta$, \begin{align} R_{\sqrt{T}+T} &= \tilde{O}\left(\sqrt{T} F + \sqrt{T\beta_T d_w + T\beta^2_T}\right) = \tilde{O}\left(\frac{d^3_w F^4 \sqrt{T}}{\mu^2}\right),\label{eq:final_R_T} \end{align} where $d_w, F$ are constants. \end{theorem}
Let us highlight a few interesting aspects of the result. \begin{remark} Without Gaussian process assumption, we propose the first algorithm to solve global optimization problem with $\tilde{O}(\sqrt{T})$ cumulative regret, which is dimension free in terms of its input domain $\mathcal{X}$. GO-UCB is a no-regret algorithm since $\lim_{T \rightarrow \infty} R_T/T =0$, and the output $\hat{x}$ satisfies that $f^* - \mathbb{E}[ f(\hat{x})] \leq \tilde{O}(1/\sqrt{T})$. The dependence in $T$ is optimal up to logarithmic factors, as it matches the lower bound for linear bandits \citep[Theorem 3]{dani2008stochastic}. \end{remark} \begin{remark}[Choice of $\lambda$] One important deviation from the classical linear bandit analysis is that we require a regularization that centers around $\hat{w}_0$ and the regularization weight $\lambda$ to be $C_\lambda \sqrt{T}$, comparing to $\lambda = O(1)$ in the linear case. The choice is to ensure that $\hat{w}_t$ stays within the local neighborhood of $\hat{w}_0$, and to delicately balance different terms that appear in the regret analysis to ensure that the overall regret bound is $\tilde{O}(\sqrt{T})$. \end{remark} \begin{remark}[Choice of $n$] We choose $n =\sqrt{T}$, therefore, it puts sample complexity requirement on $T$ shown in eq. \eqref{eq:n_lower_main}. The choice of $n$ plays two roles here. First, it guarantees that the regression result $\hat{w}_0$ lies in the neighboring region of $w^*$ of the loss function $L(w)$ with high probability. The neighboring region of $w^*$ has nice properties, e.g., local strong convexity, which allow us to build the upper bound of $\ell_2$-distance between $\hat{w}_0$ and $w^*$. Second, in Phase I, we are doing uniform sampling over the function so the cumulative regret in Phase I is bounded by $2Fn=2F\sqrt{T}$ which is at the same $\tilde{O}(\sqrt{T})$ rate as that in Phase II. \end{remark}
\section{Proof Overview}\label{sec:proof} In this section, we give a proof sketch of all theoretical results. A key insight of our analysis is that the seminal techniques developed by \citet{abbasi2011improved} for analyzing linearly parameterized bandit problems offer more mileage than previously known and can be extended to analyze non-linear bandits, though we need to localize to a nearly optimal region and carefully handle the non-linear components via more aggressive regularization. Other assumptions that give rise to a similarly good initialization may work too and our new proof can be of independent interest in analyzing other extensions of LinUCB, e.g., to contextual bandits, reinforcement learning and other problems.
In detail, first we prove the estimation error bound of $\hat{w}_0$ for Phase I of GO-UCB algorithm, then prove the feasibility of $\mathrm{Ball}_t$. Finally by putting everything together we prove the cumulative regret bound of GO-UCB algorithm. We list all auxiliary lemmas in Appendix \ref{sec:auxiliary} and show complete proofs in Appendix \ref{sec:miss}.
\subsection{Regression Oracle Guarantee}\label{sec:mle}
The goal of Phase I of GO-UCB is to sufficiently explore the unknown objective function with $n$ uniform queries and obtain an estimated parameter $\hat{w}_0$. By assuming access to a regression oracle, we prove the convergence bound of $\hat{w}_0$ w.r.t. $w^*$, i.e., $\|\hat{w}_0-w^*\|^2_2$. To get started, we need the following regression oracle lemma.
\begin{lemma}[Adapted from \citet{nowak2007complexity}]\label{lem:mle_oracle} Suppose Assumption \ref{ass:parameter_class} \& \ref{ass:objective} hold. There is an absolute constant $C'$, such that after round $n$ in Phase I of Algorithm \ref{alg:go_ucb}, with probability $>1 - \delta/2$, regression oracle estimated $\hat{w}_0$ satisfies \begin{align*} \mathbb{E}_{x \sim \mathcal{U}} [(f_x(\hat{w}_0) - f_x(w^*))^2] \leq \frac{C' d_w F^2 \iota}{n}, \end{align*} where $\iota$ is the logarithmic term depending on $n, C_h, 2/\delta$. \end{lemma}
\citet{nowak2007complexity} proves that expected square error of Empirical Risk Minimization (ERM) estimator can be bounded at the rate of $\tilde{O}(1/n)$ with high probability, rather than $\tilde{O}(1/\sqrt{n})$ rate achieved by Chernoff/Hoeffding bounds. It works with realizable and misspecified settings. Proof of Lemma \ref{lem:mle_oracle} includes simplifying it with regression oracle, Assumption \ref{ass:parameter_class}, and $\varepsilon$-covering number argument over parameter class. Basically Lemma \ref{lem:mle_oracle} says that expected square error of $f_x(\hat{w}_0)$ converges to $f_x(w^*)$ at the rate of $\tilde{O}(1/n)$ with high probability. Based on it, we prove the following regression oracle guarantee.
\begin{theorem}[Regression oracle guarantee]\label{thm:mle_guarantee} Suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold. There is an absolute constant $C$ such that after round $n$ in Phase I of Algorithm \ref{alg:go_ucb} where $n$ satisfies \begin{align*} n \geq C \max \left\{ \frac{2^{\gamma-2}d_w F^2 \zeta^\frac{\gamma}{2} \iota}{\tau}, \frac{d_w F^2 \zeta \iota}{\mu} \right\}, \end{align*} with probability $> 1-\delta/2$, regression oracle estimated $\hat{w}_0$ satisfies \begin{align*}
\|\hat{w}_0 - w^*\|^2_2 \leq \frac{C d_w F^2 \iota}{\mu n}, \end{align*} where $\iota$ is the logarithmic term depending on $n, C_h, 2/\delta$. \end{theorem}
Compared with Lemma \ref{lem:mle_oracle}, there is an extra sample complexity requirement on $n$ because we need $n$ to be sufficiently large such that the function can be sufficiently explored and more importantly $\hat{w}_0$ falls into the neighboring region (strongly convex region) of $w^*$. See Figure \ref{fig:loss} for illustration. It is also the reason why strong convexity parameter $\mu$ appears in the denominator of the upper bound.
\subsection{Feasibility of $\mathrm{Ball}_t$}\label{sec:ball} The following lemma is the key part of algorithm design of GO-UCB. It says that our definition of $\mathrm{Ball}_t$ is appropriate, i.e., throughout all rounds in Phase II, $w^*$ is contained in $\mathrm{Ball}_t$ with high probability.
\begin{lemma}[Feasibility of $\mathrm{Ball}_t$]\label{lem:feasible_ball} Set $\Sigma_t, \hat{w}_t$ as in eq. \eqref{eq:sigma_t}, \eqref{eq:inner}. Set $\beta_t$ as \begin{align} \beta_t = \tilde{\Theta} \left(d_w \sigma^2 + \frac{d_w F^2}{\mu} + \frac{d^3_w F^4 t }{\mu^2 T}\right).\label{eq:beta_2} \end{align} Suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold and choose $n= \sqrt{T}, \lambda = C_\lambda \sqrt{T}$. Then $\forall t \in [T]$ in Phase II of Algorithm \ref{alg:go_ucb}, w.p. $>1-\delta$, \begin{align*}
\|\hat{w}_t - w^*\|^2_{\Sigma_t} &\leq \beta_t. \end{align*} \end{lemma}
For reader's easy reference, we write our choice of $\beta_t$ again in eq. \eqref{eq:beta_2}. Note this lemma requires careful choices of $\lambda$ and $n$ because $\beta_t$ appears later in the cumulative regret bound and $\beta_t$ is required to be at the rate of $\tilde{O}(1)$. The proof has three steps. First we obtain the closed form solution of $\hat{w}_t$ as in eq. \eqref{eq:inner}. Next we use induction to prove that $\forall t \in [T], \|\hat{w}_t - w^*\|_2 \leq \tilde{O}(\tilde{C}/\sqrt{n})$ for some universal constant $\tilde{C}$. Finally we prove $\|\hat{w}_t - w^*\|^2_{\Sigma_t} \leq \beta_t$.
\subsection{Regret Analysis}\label{sec:reg} To prove the cumulative regret bound of the GO-UCB algorithm, we need the following two lemmas on instantaneous regrets in Phase II of GO-UCB.
\begin{lemma}[Instantaneous regret bound]\label{lem:instant_regret} Set $\Sigma_t, \hat{w}_t, \beta_t$ as in eq. \eqref{eq:sigma_t}, \eqref{eq:inner}, \& \eqref{eq:beta_2} and suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold, then with probability $> 1- \delta$,
$w^*$ is contained in $\mathrm{Ball}_t$. Define $u_t = \|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma^{-1}_t}$, then $\forall t \in [T]$ in Phase II of Algorithm \ref{alg:go_ucb}, \begin{align*} r_t \leq 2\sqrt{\beta_t}u_t + \frac{2\beta_t C_h}{\lambda}. \end{align*} \end{lemma} The first term of the upper bound is pretty standard, seen also in LinUCB \citep{abbasi2011improved} and GP-UCB \citep{srinivas2010gaussian}. After we apply first order gradient approximation of the objective function, the second term is the upper bound of the high order residual term, which introduces extra challenge to derive the upper bound.
Technically, proof of Lemma \ref{lem:instant_regret} requires $w^*$ is contained in our parameter uncertainty ball $\mathrm{Ball}_t$ with high probability throughout Phase II of GO-UCB, which has been proven in Lemma \ref{lem:feasible_ball}. Later, the proof utilizes Taylor's theorem and uses the convexity of $\mathrm{Ball}_t$ twice. See Appendix \ref{sec:regret}. The next lemma is an extension of Lemma \ref{lem:instant_regret}, where the proof uses monotonically increasing property of $\beta_t$ in $t$.
\begin{lemma}[Sum of squared instantaneous regret bound]\label{lem:sos_instant_regret} Set $\Sigma_t, \hat{w}_t, \beta_t$ as in eq. \eqref{eq:sigma_t}, \eqref{eq:inner}, \& \eqref{eq:beta_2} and suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold, then with probability $> 1- \delta$, $w^*$ is contained in $\mathrm{Ball}_t$ and $\forall t \in [T]$ in Phase II of Algorithm \ref{alg:go_ucb}, \begin{align*} \sum_{t=1}^T r^2_t \leq 16\beta_T d_w \log \left(1 + \frac{TC_g^2}{d_w \lambda}\right) + \frac{8\beta^2_T C^2_h T}{\lambda^2}. \end{align*} \end{lemma}
Proof of Theorem \ref{thm:cr} is by putting everything together.
\section{Experiments}\label{sec:experiment} We compare our GO-UCB algorithm with four Bayesian Optimization (BO) algorithms: GP-EI \citep{jones1998efficient}, GP-PI \citep{kushner1964new}, GP-UCB \citep{srinivas2010gaussian}, and Trust Region BO (TuRBO) \citep{eriksson2019scalable}, where the first three are classical methods and TuRBO is a more advanced algorithm designed for high-dimensional cases.
To run GO-UCB, we choose our model $\hat{f}$ to be a two linear layer neural network with sigmoid function being the activation function: \begin{align*} \hat{f}(x) = \textrm{linear2}(\textrm{sigmoid}(\textrm{linear1}(x))), \end{align*} where $w_1, b_1$ denote the weight and bias of $\textrm{linear1}$ layer and $w_2, b_2$ denote those of $\textrm{linear2}$ layer. Specifically, we set $w_1 \in \mathbb{R}^{25\times d_x}, b_1 \in \mathbb{R}^{25}, w_2 \in \mathbb{R}^{25}, b_2 \in \mathbb{R}$, meaning the dimension of activation function is $25$. All implementations are based on BoTorch framework \citep{balandat2020botorch} and sklearn package \citep{head2021skopt} with default parameter settings. To help readers reproduce our results, implementation details are shown in Appendix \ref{sec:imp_goucb} and we will release code once our paper gets accepted.
\subsection{Synthetic Experiments}\label{sec:syn} First, we test all algorithms on three high dimensional synthetic functions defined on $[-5, 5]^{d_x}$ where $d_x = 20$, including both realizable and misspecified cases. The first test function $f_1$ is created by setting all elements in $w_1, b_1, w_2, b_2$ in $\hat{f}$ to be $1$, so $f_1$ is a realizable function given $\hat{f}$. The second and third test functions $f_2, f_3$ are Styblinski-Tang function and Rastrigin function, defined as follows: \begin{align*} f_2 &= -\frac{1}{2} \sum_{i=1}^{20} \left( x_i^4 - 16 x^2_i + 5 x_i \right),\\ f_3 &= -200 + \sum_{i=1}^{20} \left( 10 \cos(2 \pi x_i) - x^2_i \right), \end{align*} where $x_i$ denotes the $i$-th element in its $20$ dimensions, so $f_2, f_3$ are misspecified functions given $\hat{f}$.
We set $n=5, T=25$ for $f_1$ and $n=8,T=64$ for $f_2, f_3$. To reduce the effect of randomness in all algorithms, we repeat the whole optimization process for $5$ times for all algorithms and report mean and error bar of cumulative regrets. The error bar is measured by Wald's test with $95\%$ confidence, i.e., $1.96 \nu/\sqrt{5}$ where $\nu$ is standard deviation of cumulative regrets and $5$ is the number of repetitions.
From Figure \ref{fig:simulation}, we learn that in all tasks our GO-UCB algorithm performs better than all other four BO approaches. Among BO approaches, TuRBO performs the best since it is specifically designed for high-dimensional tasks. In Figure \ref{fig:simulation}(a), mean of cumulative regrets of GO-UCB and TuRBO stays the same when $t \geq 22$, which means that both of them have found the global optimum, but GO-UCB algorithm is able to find the optimal point shortly after Phase I and enjoys the smallest error bar. It is well expected since $f_1$ is a realizable function for $\hat{f}$. Unfortunately, GP-UCB, GP-EI, and GP-PI incur almost linear regrets, showing the bad performances of classical BO algorithms in high-dimensional cases.
In Figure \ref{fig:simulation}(b) and \ref{fig:simulation}(c), all methods are suffering from linear regret because $f_2, f_3$ are misspecified functions. The gap between GO-UCB and other methods is smaller in Figure \ref{fig:simulation}(c) than in \ref{fig:simulation}(b) because optimizing $f_3$ is more challenging than $f_2$ since $f_3$ has much more local optimal points.
\begin{figure*}
\caption{Cumulative regrets (the lower the better) of all algorithms on $20$-dimensional $f_1, f_2, f_3$ synthetic functions.
}
\label{fig:simulation}
\end{figure*}
\begin{figure*}
\caption{Cumulative regrets (the lower the better) of all algorithms in real-world hyperparameter tuning task on Breast-cancer dataset. }
\label{fig:real}
\end{figure*}
\subsection{Real-World Experiments}\label{sec:real} To illustrate that the GO-UCB algorithm works in real-world tasks, we do hyperparameter tuning experiments on three tasks using three classifiers. Three UCI datasets \citep{Dua:2019} are Breast-cancer, Australian, and Diabetes, and three classifiers are random forest, multi-layer perceptron, and gradient boosting where each of them has $7,8,11$ hyperparameters. For each classifier on each dataset, the function mapping from hyperparameters to classification accuracy is the black-box function that we are maximizing, so the input space dimension $d_x=7,8,11$ for each classifier. We use cumulative regret to evaluate hyperparameter tuning performances, however, best accuracy $f^*$ is unknown ahead of time so we set it to be the best empirical accuracy of each task. To reduce the effect of randomness, we divide each dataset into 5 folds and every time use 4 folds for training and the remaining 1 fold for testing. We report mean and error bar of cumulative regrets where the error bar is measured by Wald's test, the same as in synthetic experiments.
Figure \ref{fig:real} shows results on Breast-cancer dataset. In Figure \ref{fig:real}(b)(c) GO-UCB performs statistically much better than all other BO algorithms since there is almost no error bar gap between TuRBO and GO-UCB. It shows that GO-UCB can be deployed in real-world applications to replace BO methods. Also, in Figure \ref{fig:real}(b) Phase I of GO-UCB is not good but GO-UCB is able to perform better than others in Phase II, which shows the effectiveness of Phase II of GO-UCB. In Figure \ref{fig:real}(a) all algorithms have similar performances. In Figure \ref{fig:real}(b), TuRBO performs similarly as GP-UCB, GP-EI, and GP-PI when $t \leq 23$, but after $t=23$ it performs better and shows a curved regret line by finding optimal points. Results on Australian and Diabetes datasets are shown in Appendix \ref{sec:real_detail} where similar algorithm performances can be seen.
\subsection{Discussion on Practical Usage} Note in experiments, we choose parametric model $\hat{f}$ to be a two linear layer neural network. In more real-world experiments, one can choose the model $\hat{f}$ in GO-UCB to be simpler functions or much more complex functions, e.g., deep neural networks, depending on task requirements.
\section{Conclusion}\label{sec:conclusion} Global non-convex optimization is an important problem that widely exists in many real-world applications, e.g., deep learning hyper-parameter tuning and new material design. However, solving this optimization problem in general is NP-hard. Existing work relies on Gaussian process assumption, e.g., Bayesian optimization, or other non-parametric family which suffers from the curse of dimensionality.
We propose the first algorithm to solve such global optimization with parametric function approximation, which shows a new way of global optimization. GO-UCB first uniformly explores the function and collects a set of observation points and then uses the optimistic exploration to actively select points. At the core of GO-UCB is a carefully designed uncertainty set over parameters based on gradients that allow optimistic exploration. Under realizable parameter class assumption and a few mild geometric conditions, our theoretical analysis shows that cumulative regret of GO-UCB is at the rate of $\tilde{O}(\sqrt{T})$, which is dimension-free in terms of function domain $\mathcal{X}$.
Our high-dimensional synthetic test shows that GO-UCB works better than BO methods even in misspecified setting. Moreover, GO-UCB performs better than BO algorithms in real-world hyperparameter tuning tasks, which may be of independent interest beyond this paper.
There is $\mu$, the strong convexity parameter, in the denominator of the upper bound in Theorem \ref{thm:cr}. $\mu$ can be small in practice, thus the upper bound can be large. Developing a cumulative regret bound containing a term depending on $\mu$ but independent of $T$ remains a future problem.
\subsection*{Acknowledgments} The work is supported by NSF Award \#1934641 and \#2134214.
\onecolumn \appendix
\section{Notation Table}\label{sec:table} \begin{table}[!htbp] \centering \caption{Symbols and notations.}\label{tab:notations} \begin{tabular}{ccl} \noalign{
} \hline \textbf{Symbol} & \textbf{Definition} & \textbf{Description} \\ \hline
$\|A\|_\mathrm{op}$ & & operator norm\\ \hline $\mathrm{Ball}_t$ & eq. \eqref{eq:ball} & parameter uncertainty region at round $t$\\ \hline $\beta_t$ & eq. \eqref{eq:beta_t} & parameter uncertainty region radius at round $t$\\ \hline $C, \zeta$ & & constants\\ \hline $d_x$ & & domain dimension \\ \hline $d_w$ & & parameter dimension \\ \hline $\delta$ & & failure probability \\ \hline $\varepsilon$ & & covering number discretization distance \\ \hline $\eta$ & $\sigma$-sub-Gaussian & observation noise \\ \hline $f_w(x)$ & & objective function at $x$ parameterized by $w$ \\ \hline $f_x(w)$ & & objective function at $w$ parameterized by $x$ \\ \hline $\nabla f_x(w)$ & & 1st order derivative w.r.t. $w$ parameterized by $x$ \\ \hline $\nabla^2 f_x(w)$ & & 2nd order derivative w.r.t. $w$ parameterized by $x$ \\ \hline $F$ & & function range constant bound \\ \hline $\gamma, \tau$ & & growth condition parameters \\ \hline $\iota, \iota', \iota{''}$ & & logarithmic terms \\ \hline $L(w)$ & $\mathbb{E}[(f_x(w) - f_x(w^*))^2]$ & expected loss function \\ \hline $\lambda$ & & regularization parameter \\ \hline $\mu$ & & strong convexity parameter \\ \hline $n$ & & time horizon in Phase I \\ \hline $[n]$ & $\{1,2,...,n\}$ & integer set of size $n$\\ \hline $\mathrm{Oracle}$ & & regression oracle \\ \hline $r_t$ & $f_{w^*}(x^*) - f_{w^*}(x_t)$ & instantaneous regret at round $t$ \\ \hline $R_T$ & $\sum_{t=1}^T r_t$ & cumulative regret after round $T$\\ \hline $\Sigma_t$ & eq. \eqref{eq:sigma_t} & covariance matrix at round $t$ \\ \hline $T$ & & time horizon in Phase II \\ \hline $\mathcal{U}$ & & uniform distribution \\ \hline $w$ & $w \in \mathcal{W}$ & function parameter \\ \hline $w^*$ & $w^* \in \mathcal{W}$ & true parameter \\ \hline $\hat{w}_0$ & & oracle-estimated parameter after Phase I \\ \hline $\hat{w}_t$ & eq. 
\eqref{eq:inner} & updated parameter at round $t$\\ \hline $\mathcal{W}$ & $\mathcal{W} \subseteq [0,1]^{d_w}$ & parameter space \\ \hline $x$ & $x \in \mathcal{X}$ & data point \\\hline $x^*$ & & optimal data point \\\hline
$\|x\|_p$ &$(\sum_{i=1}^d |x_i|^p)^{1/p}$ & $\ell_p$ norm \\\hline
$\|x\|_A$ &$\sqrt{x^\top A x}$ & distance defined by square matrix $A$ \\\hline $\mathcal{X}$ & $\mathcal{X} \subseteq \mathbb{R}^{d_x}$ & function domain \\ \hline $\mathcal{Y}$ & $\mathcal{Y} = [-F, F]$ & function range \\ \hline \end{tabular} \end{table}
\section{Auxiliary Technical Lemmas}\label{sec:auxiliary} In this section, we list auxiliary lemmas that are used in proofs.
\begin{lemma}[Adapted from eq. (5) (6) of \citet{nowak2007complexity}]\label{lem:regression} Given a dataset $\{x_j,y_j\}_{j=1}^n$ where $y_j$ is generated from eq. \eqref{eq:y} and $f_0$ is the underlying true function. Let $\hat{f}$ be an ERM estimator taking values in $\mathcal{F}$ where $\mathcal{F}$ is a finite set and $\mathcal{F} \subset \{f: [0,1]^d \rightarrow [-F,F]\}$ for some $F \geq 1$. Then with probability $> 1- \delta$, $\hat{f}$ satisfies that \begin{align}
\mathbb{E}[(\hat{f} - f_0)^2] \leq \left(\frac{1+\alpha}{1-\alpha}\right) \left( \inf_{f\in \mathcal{F} } \mathbb{E}[(f - f_0)^2] + \frac{F^2 \log(|\mathcal{F}|) \log(2)}{n\alpha}\right) + \frac{2\log(2/\delta)}{n\alpha}, \end{align} for all $\alpha \in (0, 1]$. \end{lemma}
\begin{lemma}[Sherman-Morrison lemma \citep{sherman1950adjustment}]\label{lem:sherman} Let $A$ denote a matrix and $b,c$ denote two vectors. Then \begin{align} (A + bc^\top)^{-1} = A^{-1} - \frac{A^{-1}bc^\top A^{-1}}{1+ c^\top A^{-1} b}. \end{align} \end{lemma}
\begin{lemma}[Self-normalized bound for vector-valued martingales \citep{abbasi2011improved,agarwal2021rl}]\label{lem:self_norm}
Let $\{\eta_i\}_{i=1}^\infty$ be a real-valued stochastic process with corresponding filtration $\{\mathcal{F}_i\}_{i=1}^\infty$ such that $\eta_i$ is $\mathcal{F}_i$ measurable, $\mathbb{E}[\eta_i | \mathcal{F}_{i-1} ] = 0$, and $\eta_i$ is conditionally $\sigma$-sub-Gaussian with $\sigma \in \mathbb{R}^+$. Let $\{X_i\}_{i=1}^\infty$ be a stochastic process with $X_i \in \mathcal{H}$ (some Hilbert space) and $X_i$ being $\mathcal{F}_i$ measurable. Assume that a linear operator $\Sigma:\mathcal{H} \rightarrow \mathcal{H}$ is positive definite, i.e., $x^\top \Sigma x > 0$ for any $x \in \mathcal{H}$. For any $t$, define the linear operator $\Sigma_t = \Sigma_0 + \sum_{i=1}^t X_i X_i^\top$ (here $xx^\top$ denotes outer-product in $\mathcal{H}$). With probability at least $1-\delta$, we have for all $t\geq 1$: \begin{align}
\left\|\sum_{i=1}^t X_i \eta_i \right\|^2_{\Sigma_t^{-1}} \leq \sigma^2 \log \left(\frac{\det(\Sigma_t) \det(\Sigma_0)^{-1}}{\delta^2} \right). \end{align} \end{lemma}
\section{Missing Proofs}\label{sec:miss} In this section, we show complete proofs of all technical results in the main paper. For reader's easy reference, we define $\iota$ as a logarithmic term depending on $n, C_h, 2/\delta$ (w.p. $>1-\delta/2$), $\iota'$ as a logarithmic term depending on $t, d_w, C_g, 1/\lambda, 2/\delta$ (w.p. $>1-\delta/2$), and $\iota{''}$ as a logarithmic term depending on $t, d_w, C_g, 1/\lambda$.
\subsection{Regression Oracle Guarantee}
\begin{lemma}[Restatement of Lemma \ref{lem:mle_oracle}] Suppose Assumption \ref{ass:parameter_class} \& \ref{ass:objective} hold. There is an absolute constant $C'$, such that after round $n$ in Phase I of Algorithm \ref{alg:go_ucb}, with probability $>1 - \delta/2$, regression oracle estimated $\hat{w}_0$ satisfies \begin{align*} \mathbb{E}_{x \sim \mathcal{U}} [(f_x(\hat{w}_0) - f_x(w^*))^2] \leq \frac{C' d_w F^2 \iota}{n}, \end{align*} where $\iota$ is the logarithmic term depending on $n, C_h, 2/\delta$. \end{lemma} \begin{proof} The regression oracle lemma establishes on Lemma \ref{lem:regression} which works only for finite function class. In order to work with our continuous parameter class $\mathcal{W}$, we need $\varepsilon$-covering number argument.
First, let $\tilde{w}, \widetilde{\mathcal{W}}$ denote the ERM parameter and finite parameter class after applying covering number argument on $\mathcal{W}$. By Lemma \ref{lem:regression}, we find that with probability $>1-\delta/2$, \begin{align}
\mathbb{E}_{x \sim \mathcal{U}}[(f_x(\tilde{w}) - f_x(w^*))^2] &\leq \left(\frac{1+\alpha}{1-\alpha}\right) \left( \inf_{w \in \widetilde{\mathcal{W}} \cup \{w^*\}} \mathbb{E}_{x\sim \mathcal{U}}[(f_x(w) - f_x(w^*))^2] + \frac{F^2 \log(|\widetilde{\mathcal{W}}|) \log(2)}{n\alpha}\right) \nonumber \\ &\qquad + \frac{2\log(4/\delta)}{n\alpha}\\
&\leq \left(\frac{1+\alpha}{1-\alpha}\right) \left( \frac{F^2 \log(|\widetilde{\mathcal{W}}|) \log(2)}{n\alpha}\right) + \frac{2\log(4/\delta)}{n\alpha}, \end{align}
where the second inequality is by realizable assumption (Assumption \ref{ass:parameter_class}). Our parameter class $\mathcal{W} \subseteq [0, 1]^{d_w}$, so $\log(|\widetilde{\mathcal{W}}|) = \log(1/\varepsilon^{d_w})= d_w\log(1/\varepsilon)$ and the new upper bound is that with probability $> 1-\delta/2$, \begin{align} \mathbb{E}_{x\sim \mathcal{U}}[(f_x(\tilde{w}) - f_x(w^*))^2] \leq C^{''}\left(\frac{d_w F^2 \log(1/\varepsilon)}{n} + \frac{\log(2/\delta)}{n}\right), \end{align} where $C^{''}$ is a universal constant obtained by choosing $\alpha=1/2$. Note $\tilde{w}$ is the ERM parameter in $\widetilde{\mathcal{W}}$ after discretization, not our target parameter $\hat{w}_0 \in \mathcal{W}$. By $(a+b)^2\leq 2a^2 + 2b^2$, \begin{align} \mathbb{E}_{x\sim \mathcal{U}}[(f_x(\hat{w}_0) - f_x(w^*))^2] &\leq 2\mathbb{E}_{x\sim \mathcal{U}}[(f_x(\Hat{w}_0) - f_x(\tilde{w}))^2] + 2\mathbb{E}_{x\sim \mathcal{U}}[(f_x(\tilde{w}) - f_x(w^*))^2]\\ &\leq 2\varepsilon^2 C_h^2 + 2C^{''}\left(\frac{d_w F^2 \log(1/\varepsilon)}{n} + \frac{\log(2/\delta)}{n}\right)\label{eq:interm1} \end{align} where the second line applies Lemma~\ref{lem:regression}, discretization error $\varepsilon$, and Assumption \ref{ass:objective}. By choosing $\varepsilon = 1/\sqrt{n C_h^2}$, we get $$ \eqref{eq:interm1} =\frac{2}{n} + \frac{C^{''}d_w F^2\log(n C_h^2)}{n} + \frac{2C^{''}\log(2/\delta)}{n} \leq C' \frac{d_w F^2\log(n C_h^2) +\log(2/\delta)}{n}$$ where we can take $C' = 2C^{''}$ (assuming $2<C^{''}d_w F^2\log(n C_h^2)$). The proof completes by defining $\iota$ as the logarithmic term depending on $n, C_h, 2/\delta$. \end{proof}
\begin{theorem}[Restatement of Theorem \ref{thm:mle_guarantee}] Suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold. There is an absolute constant $C$ such that after round $n$ in Phase I of Algorithm \ref{alg:go_ucb} where $n$ satisfies \begin{align*} n \geq C \max \left\{ \frac{2^{\gamma-2}d_w F^2 \zeta^\frac{\gamma}{2} \iota}{\tau}, \frac{d_w F^2 \zeta \iota}{\mu} \right\}, \end{align*} with probability $> 1-\delta/2$, regression oracle estimated $\hat{w}_0$ satisfies \begin{align*}
\|\hat{w}_0 - w^*\|^2_2 \leq \frac{C d_w F^2 \iota}{\mu n}, \end{align*} where $\iota$ is the logarithmic term depending on $n, C_h, 2/\delta$. \end{theorem} \begin{proof} Recall the definition of expected loss function $L(w) = \mathbb{E}_{x \sim \mathcal{U}}(f_x(w)-f_x(w^*))^2$ and the second order Taylor's theorem, $L(\hat{w}_0)$ at $w^*$ can be written as \begin{align}
L(\hat{w}_0) &= L(w^*) + (\hat{w}_0 - w^*) \nabla L(w^*) + \frac{1}{2} \|\hat{w}_0 - w^*\|^2_{\nabla^2 L(\tilde{w})}, \end{align} where $\tilde{w}$ lies between $\hat{w}_0$ and $w^*$. Also, because $\nabla L(w^*) = \nabla E_{x\sim \mathcal{U}}(f_x(w^*) - f_x(w^*))^2=0$, then with probability $> 1-\delta/2$, \begin{align}
\frac{1}{2} \|\hat{w}_0 - w^*\|^2_{\nabla^2 L(\tilde{w})} = L(\hat{w}_0) - L(w^*) \leq \frac{C' d_w F^2 \iota}{n},\label{eq:ll} \end{align} where the inequality is due to Lemma \ref{lem:mle_oracle}.
Next, we prove the following lemma stating that after a certain number $n$ of samples, $\|\hat{w}_0 - w^*\|_{\nabla^2 L(w^*)}$ can be bounded by a constant, e.g., $\frac{1}{2}$. \begin{lemma}\label{lem:n_hessian} Suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold. There is an absolute constant $C'$ such that after round $n$ in Phase I of Algorithm \ref{alg:go_ucb} where $n$ satisfies \begin{align*} n \geq \max \left\{ \frac{2^{\gamma+1}C' d_w F^2\zeta^\frac{\gamma}{2} \iota}{\tau}, \frac{8 C'd_w F^2 \zeta \iota }{\mu} \right\}, \end{align*} then with probability $> 1-\delta/2$, \begin{align*}
\|\hat{w}_0 - w^*\|_{\nabla^2 L(w^*)} \leq \frac{1}{2}. \end{align*} \end{lemma} \begin{proof} Consider two cases of relationships between $\hat{w}_0$ and $w^*$.
\textbf{Case I.} If $\hat{w}_0$ is far away from $w^*$, i.e., $\|\hat{w}_0 - w^*\|_2 \geq (\tau/\mu)^{1/(2-\gamma)}$, by growth condition (Assumption \ref{ass:loss}), \begin{align}
\frac{\tau}{2}\|\hat{w}_0-w^*\|^\gamma_2 &\leq L(\hat{w}_0)-L(w^*) \leq \frac{C' d_w F^2 \iota}{n}, \end{align} where the second inequality is due to eq. \eqref{eq:ll}. Then, \begin{align}
\|\hat{w}_0-w^*\|_{\nabla^2 L(w^*)} &\leq \sqrt{\zeta} \|\hat{w}_0-w^*\|_2 \leq \sqrt{\zeta}\left(\frac{2C' d_w F^2 \iota}{\tau n}\right)^\frac{1}{\gamma}, \end{align} where the first inequality is due to smoothness of loss function in Assumption \ref{ass:loss}. Therefore, setting $\sqrt{\zeta}(\frac{2 C' d_w F^2 \iota}{\tau n})^\frac{1}{\gamma}\leq \frac{1}{2}$ will result in a sample complexity bound on $n$: \begin{align} n \geq \frac{2^{\gamma+1}C' d_w F^2\zeta^\frac{\gamma}{2} \iota}{\tau}.\label{eq:n_1} \end{align}
\textbf{Case II.} If $\hat{w}_0$ is close to $w^*$, i.e., $\|\hat{w}_0 - w^*\|_2 \leq (\tau/\mu)^{1/(2-\gamma)}$, by local strong convexity (Assumption \ref{ass:loss}), \begin{align}
\frac{\mu}{2} \|\hat{w}_0-w^*\|^2_2 &\leq L(\hat{w}_0)-L(w^*) \leq \frac{C' d_w F^2 \iota}{n }, \end{align} where the second inequality is due to eq. \eqref{eq:ll}. Then, \begin{align}
\|\hat{w}_0-w^*\|_{\nabla^2 L(w^*)}&\leq \sqrt{\zeta} \|\hat{w}_0-w^*\|_2 \leq \sqrt{\frac{2\zeta C' d_w F^2 \iota}{\mu n}}. \end{align} where the first inequality is again due to smoothness of loss function in Assumption \ref{ass:loss}. Therefore, setting $\sqrt{\frac{2\zeta C' d_w F^2 \iota}{\mu n}}\leq \frac{1}{2}$ will lead to another sample complexity bound on $n$: \begin{align} n\geq \frac{8 C' d_w F^2 \zeta \iota}{\mu}.\label{eq:n_2} \end{align}
The proof completes by combining \textbf{Case I} and \textbf{Case II} discussion. \end{proof}
Now we continue the proof of Theorem \ref{thm:mle_guarantee}. By self-concordance assumption (Assumption \ref{ass:loss}), \begin{align}
(1-\|\tilde{w}- w^*\|_{\nabla^2 L(w^*)})^2 \|\hat{w}_0- w^*\|^2_{\nabla^2 L(w^*)}\leq \|\hat{w}_0- w^*\|^2_{\nabla^2 L(\tilde{w})}.\label{eq:self} \end{align} The LHS can be further lower bounded by \begin{align}
\frac{1}{4}\|\hat{w}_0- w^*\|^2_{\nabla^2 L(w^*)} &\leq (1-\|\hat{w}_0- w^*\|_{\nabla^2 L(w^*)})^2 \|\hat{w}_0- w^*\|^2_{\nabla^2 L(w^*)}\\
&\leq (1-\|\tilde{w}- w^*\|_{\nabla^2 L(w^*)})^2 \|\hat{w}_0- w^*\|^2_{\nabla^2 L(w^*)},\label{eq:self_con} \end{align} where the first inequality is because of Lemma \ref{lem:n_hessian} and the second inequality is due to the fact that $\tilde{w}$ lies between $\hat{w}_0$ and $w^*$. Therefore, \begin{align}
\|\hat{w}_0 - w^*\|^2_{\nabla^2 L(w^*)} \leq \frac{8C' d_w F^2 \iota }{n}. \end{align}
The proof is completed by the inequality $\|\hat{w}_0 - w^*\|^2_2 \leq \|\hat{w}_0 - w^*\|^2_{\nabla^2 L(w^*)}/\mu$, which holds by the $\mu$-strong convexity of $L(w)$ at $w^*$ (Assumption \ref{ass:loss}), and by defining $C=8C'$. \end{proof}
\subsection{Properties of Covariance Matrix $\Sigma_t$}\label{sec:sigma} In eq. \eqref{eq:sigma_t}, $\Sigma_t$ is defined as $\lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top$. In this section, we prove three lemmas showing that the change of $\Sigma_t$ over rounds $t \in \{1,...,T\}$ is bounded in Phase II of GO-UCB. The key observation is that at each round $i$, the change made to $\Sigma_t$ is $\nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top$, which is only rank one.
\begin{lemma}[Adapted from \citet{agarwal2021rl}]\label{lem:det}
Set $\Sigma_t, \hat{w}_t$ as in eq. \eqref{eq:sigma_t} \& \eqref{eq:inner}, suppose Assumption \ref{ass:parameter_class} \& \ref{ass:loss} hold, and define $u_t = \|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma^{-1}_t}$. Then \begin{align*} \det \Sigma_t = \det \Sigma_0 \prod_{i=0}^{t-1} (1 + u^2_i). \end{align*} \end{lemma} \begin{proof} Recall the definition of $\Sigma_t = \lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top$ and we can show that \begin{align} \det \Sigma_{t+1} &= \det (\Sigma_t + \nabla f_{x_t}(\hat{w}_t) \nabla f_{x_t}(\hat{w}_t)^\top)\\ &= \det (\Sigma^\frac{1}{2}_t(I + \Sigma^{-\frac{1}{2}}_t \nabla f_{x_t}(\hat{w}_t) \nabla f_{x_t}(\hat{w}_t)^\top \Sigma^{-\frac{1}{2}}_t)\Sigma^\frac{1}{2}_t)\\ &= \det (\Sigma_t) \det(I + \Sigma^{-\frac{1}{2}}_t \nabla f_{x_t}(\hat{w}_t) (\Sigma^{-\frac{1}{2}}_t \nabla f_{x_t}(\hat{w}_t))^\top)\\ &= \det (\Sigma_t) \det(I + v_t v_t^\top), \end{align}
where $v_t = \Sigma^{-\frac{1}{2}}_t \nabla f_{x_t}(\hat{w}_t)$. Recall $u_t$ is defined as $\|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma_t^{-1}}$. Because $v_t v^\top_t$ is a rank one matrix, $\det(I + v_t v^\top_t) = 1 + u^2_t$. The proof is completed by induction. \end{proof}
\begin{lemma}[Adapted from \citet{agarwal2021rl}]\label{lem:log_det} Set $\Sigma_t$ as in eq. \eqref{eq:sigma_t} and suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold. Then \begin{align*} \log \left(\frac{\det \Sigma_{t-1}}{\det \Sigma_0}\right) \leq d_w \log \left(1 + \frac{t C_g^2}{d_w \lambda}\right). \end{align*} \end{lemma} Proof of Lemma \ref{lem:det} directly follows definition of $\Sigma_t$ and proof of Lemma \ref{lem:log_det} involves Lemma \ref{lem:det} and inequality of arithmetic and geometric means. Note $C_g$ is a constant coming from Assumption \ref{ass:objective}. We do not claim any novelty in proofs of these two lemmas which replace feature vector in linear bandit \citep{agarwal2021rl} with gradient vectors. \begin{proof} Let $\xi_1,...,\xi_{d_w}$ denote eigenvalues of $\sum_{i=0}^{t-1} \nabla f_{x_i}(w_i) \nabla f_{x_i}(w_i)^\top$, then \begin{align}
\sum_{k=1}^{d_w} \xi_k = \mathrm{tr} \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(w_i) \nabla f_{x_i}(w_i)^\top \right) = \sum_{i=0}^{t-1} \|\nabla f_{x_i}(w_i)\|^2_2 \leq t C_g^2,\label{eq:t_c_g} \end{align} where the inequality is by Assumption \ref{ass:objective}. By Lemma \ref{lem:det}, \begin{align} \log \left(\frac{\det \Sigma_{t-1}}{\det \Sigma_0}\right) &\leq \log \det \left(I + \frac{1}{\lambda} \sum_{i=0}^{t-1} \nabla f_{x_i}(w_i) \nabla f_{x_i}(w_i)^\top \right)\\ &= \log \left(\prod_{k=1}^{d_w} (1 + \xi_k/\lambda)\right)\\ &= d_w \log \left(\prod_{k=1}^{d_w} (1 + \xi_k/\lambda)\right)^{1/{d_w}}\\ &\leq d_w \log \left(\frac{1}{d_w} \sum_{k=1}^{d_w} (1 + \xi_k/\lambda)\right)\\ &\leq d_w \log \left(1+ \frac{t C_g^2}{d_w \lambda}\right), \end{align} where the second inequality is by inequality of arithmetic and geometric means and the last inequality is due to eq. \eqref{eq:t_c_g}. \end{proof}
\begin{lemma}\label{lem:sum_of_square} Set $\Sigma_t, \hat{w}_t$ as in eq. \eqref{eq:sigma_t} \& \eqref{eq:inner} and suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold. Then \begin{align*} \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i) \leq 2d_w \log\left(1 + \frac{tC^2_g}{d_w \lambda}\right). \end{align*} \end{lemma}
A trivial bound on the LHS in Lemma \ref{lem:sum_of_square} would simply be $O(t C^2_g/\lambda)$. Lemma \ref{lem:sum_of_square} is important because it tightens the upper bound to $O(\log(t C^2_g/\lambda))$, which allows us to build a feasible parameter uncertainty ball, as shown in the next section.
\begin{proof} First, we prove $\forall i \in \{0, 1,..., t-1\}, 0 < \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i) < 1$. Recall the definition of $\Sigma_t$, it's easy to see that $\Sigma_t$ is a positive definite matrix and thus $0 < \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i)$. To prove it's smaller than $1$, we need to decompose $\Sigma_t$ and write \begin{align} &\quad\ \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i)\\ &= \nabla f_{x_i}(\hat{w}_i)^\top \left(\lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top \right)^{-1} \nabla f_{x_i}(\hat{w}_i)\\ &= \nabla f_{x_i}(\hat{w}_i)^\top \left(\nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top - \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top + \lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top \right)^{-1} \nabla f_{x_i}(\hat{w}_i). \end{align} Let $A = - \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top + \lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top$, and it becomes \begin{align} \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i) = \nabla f_{x_i}(\hat{w}_i)^\top (\nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top + A)^{-1} \nabla f_{x_i}(\hat{w}_i). 
\end{align} By applying Sherman-Morrison lemma (Lemma \ref{lem:sherman}), we have \begin{align} &\quad \ \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i)\\ &= \nabla f_{x_i}(\hat{w}_i)^\top \left(A^{-1} - \frac{A^{-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top A^{-1}}{1+ \nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i)} \right)\nabla f_{x_i}(\hat{w}_i)\\ &= \nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i) - \frac{\nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i)}{1+ \nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i)}\\ &= \frac{\nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i)}{1+ \nabla f_{x_i}(\hat{w}_i)^\top A^{-1} \nabla f_{x_i}(\hat{w}_i)} < 1. \end{align} Next, we use the fact that $\forall x \in (0,1), x \leq 2\log(1+ x)$, and we have \begin{align} \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i) &\leq \sum_{i=0}^{t-1} 2\log\left( 1+ \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i)\right)\\ &\leq 2\log\left(\frac{\det \Sigma_{t-1}}{\det \Sigma_0} \right)\\ &\leq 2d_w \log\left( 1 + \frac{tC^2_g}{d_w\lambda}\right), \end{align} where the last two inequalities are due to Lemma \ref{lem:det} and \ref{lem:log_det}. \end{proof}
\subsection{Feasibility of $\mathrm{Ball}_t$}
\begin{lemma}[Restatement of Lemma \ref{lem:feasible_ball}] Set $\Sigma_t, \hat{w}_t$ as in eq. \eqref{eq:sigma_t}, \eqref{eq:inner}. Set $\beta_t$ as \begin{align*} \beta_t = \tilde{\Theta} \left(d_w \sigma^2 + \frac{d_w F^2}{\mu} + \frac{d^3_w F^4 t }{\mu^2 T}\right). \end{align*} Suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold and choose $n= \sqrt{T}, \lambda = C_\lambda \sqrt{T}$. Then $\forall t \in [T]$ in Phase II of Algorithm \ref{alg:go_ucb}, w.p. $>1-\delta$, \begin{align*}
\|\hat{w}_t - w^*\|^2_{\Sigma_t} &\leq \beta_t. \end{align*} \end{lemma} \begin{proof}
The proof has three steps. First we obtain the closed form solution of $\hat{w}_t$. Next we derive the upper bound of $\|\hat{w}_i - w^*\|^2_2$. Finally we use it to prove that the upper bound of $\|\hat{w}_t - w^*\|^2_{\Sigma_t}$ matches our choice of $\beta_t$.
\textbf{Step 1: Closed form solution of $\hat{w}_t$.} The first-order optimality condition for the objective function in eq. \eqref{eq:opt_inner} is \begin{align} 0= \lambda (\hat{w}_t - \hat{w}_0) + \sum_{i=0}^{t-1} ((\hat{w}_t - \hat{w}_i)^\top \nabla f_{x_i}(\hat{w}_i) + f_{x_i}(\hat{w}_i) - y_i) \nabla f_{x_i}(\hat{w}_i). \end{align} Rearranging the equation, we have \begin{align} \lambda (\hat{w}_t- \hat{w}_0) + \sum_{i=0}^{t-1} (\hat{w}_t - \hat{w}_i)^\top \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i) &= \sum_{i=0}^{t-1} (y_i - f_{x_i}(\hat{w}_i) ) \nabla f_{x_i}(\hat{w}_i),\\ \lambda (\hat{w}_t- \hat{w}_0) + \sum_{i=0}^{t-1} (\hat{w}_t - \hat{w}_i)^\top \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i) &= \sum_{i=0}^{t-1} (y_i - f_{x_i}(w^*) + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i) ) \nabla f_{x_i}(\hat{w}_i),\\ \lambda (\hat{w}_t - \hat{w}_0) + \sum_{i=0}^{t-1} \hat{w}^\top_t \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i) &= \sum_{i=0}^{t-1} (\hat{w}^\top_i \nabla f_{x_i}(\hat{w}_i) +\eta_i + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i) ) \nabla f_{x_i}(\hat{w}_i),\\ \left(\lambda I + \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top \right)\hat{w}_t - \lambda \hat{w}_0 &= \sum_{i=0}^{t-1} (\hat{w}^\top_i \nabla f_{x_i}(\hat{w}_i) + \eta_i + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i))\nabla f_{x_i}(\hat{w}_i),\\ \Sigma_t \hat{w}_t &= \lambda \hat{w}_0 + \sum_{i=0}^{t-1} (\hat{w}^\top_i \nabla f_{x_i}(\hat{w}_i) + \eta_i + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i))\nabla f_{x_i}(\hat{w}_i), \end{align} where the second line is by removing and adding back $f_{x_i}(w^*)$, the third line is due to the definition of observation noise $\eta$ and the last line is by our choice of $\Sigma_t$ (eq. \eqref{eq:sigma_t}). Now we have the closed form solution of $\hat{w}_t$. 
Further, $\hat{w}_t - w^*$ can be written as \begin{align} \hat{w}_t - w^* &= \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) (\nabla f_{x_i}(\hat{w}_i)^\top \hat{w}_i +\eta_i + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i)) \right) + \lambda \Sigma^{-1}_t \hat{w}_0 - \Sigma^{-1}_t \Sigma_t w^*\\ &= \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) (\nabla f_{x_i}(\hat{w}_i)^\top \hat{w}_i +\eta_i + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i)) \right) + \lambda \Sigma^{-1}_t (\hat{w}_0 - w^*)\nonumber\\ &\qquad - \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \nabla f_{x_i}(\hat{w}_i)^\top\right) w^* \\ &= \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) (\nabla f_{x_i}(\hat{w}_i)^\top (\hat{w}_i - w^*) +\eta_i + f_{x_i}(w^*) - f_{x_i}(\hat{w}_i)) \right) + \lambda \Sigma^{-1}_t (\hat{w}_0- w^*)\\
&= \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \frac{1}{2} \|w^* - \hat{w}_i\|^2_{\nabla^2 f_{x_i}(\tilde{w})}\right) + \Sigma^{-1}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \eta_i \right) +\lambda \Sigma^{-1}_t (\hat{w}_0 - w^*),\label{eq:w_t_w_star} \end{align} where the second line is again by our choice of $\Sigma_t$ and the last equation is by the second order Taylor's theorem of $f_{x_i}(w^*)$ at $\hat{w}_i$ where $\tilde{w}$ lies between $w^*$ and $\hat{w}_i$.
\textbf{Step 2: Upper bound of $\|\hat{w}_i - w^*\|^2_2$.} Note eq. \eqref{eq:w_t_w_star} holds $\forall i \in [T]$ because all $\hat{w}_i$ are obtained through the same optimization problem, which means \begin{align}
\hat{w}_i - w^* = \Sigma^{-1}_i \left(\sum_{\rho=0}^{i-1} \nabla f_{x_\rho}(\hat{w}_\rho) \frac{1}{2} \|w^* - \hat{w}_\rho\|^2_{\nabla^2 f_{x_\rho}(\tilde{w})}\right) + \Sigma^{-1}_i \left(\sum_{\rho=0}^{i-1} \nabla f_{x_\rho}(\hat{w}_\rho) \eta_\rho \right) +\lambda \Sigma^{-1}_i (\hat{w}_0 - w^*). \end{align} By inequality $(a+b+c)^2 \leq 4a^2 + 4b^2 + 4c^2$ and definition of $\Sigma_i$, we take the square of both sides and get \begin{align}
\|\hat{w}_i - w^*\|^2_2 &\leq \frac{4}{\lambda}\left\|\sum_{\rho=0}^{i-1} \nabla f_{x_\rho}(\hat{w}_\rho) \eta_\rho \right\|^2_{\Sigma^{-1}_i} + 4\|\hat{w}_0 - w^*\|^2_2 + \frac{1}{\lambda}\left\|\sum_{\rho=0}^{i-1} \nabla f_{x_\rho}(\hat{w}_\rho) \|w^* - \hat{w}_\rho\|^2_{\nabla^2 f_{x_\rho}(\tilde{w}_\rho)} \right\|^2_{\Sigma^{-1}_i}.\label{eq:w_i} \end{align}
Now we use induction to prove the convergence rate of $\|\hat{w}_i - w^*\|^2_2, \forall i \in [T]$. Recall at the very beginning of Phase II, by Theorem \ref{thm:mle_guarantee}, with probability $> 1- \delta/2$, \begin{align}
\|\hat{w}_0 - w^*\|^2_2 \leq \frac{C d_w F^2 \iota}{\mu n}. \end{align}
We now derive a claim based on induction. Formally, we suppose that at round $i$ there exists some universal constant $\tilde{C}$ such that with probability $> 1- \delta/2$, \begin{align}
\|\hat{w}_i - w^*\|^2_2 &\leq \frac{\tilde{C} d_w F^2 \iota}{\mu n}. \end{align} Our task is to prove that at round $i+1$ with probability $> 1- \delta/2$, \begin{align}
\|\hat{w}_{i+1} - w^*\|^2_2 &\leq \frac{\tilde{C} d_w F^2 \iota}{\mu n}. \end{align} Note $\tilde{C}$ is for induction purpose, which can be different from $C$.
From eq. \eqref{eq:w_i}, at round $i+1$ we can write \begin{align}
\|\hat{w}_{i+1} - w^*\|^2_2 &\leq \frac{4\sigma^2}{\lambda} \log\left(\frac{\det(\Sigma_i)\det(\Sigma_0)^{-1}}{\delta^2_i} \right) + \frac{4Cd_w F^2 \iota}{\mu n } + \frac{1}{\lambda}\left\|\sum_{\rho=0}^{i} \nabla f_{x_\rho}(\hat{w}_\rho) \|w^* - \hat{w}_\rho\|^2_{\nabla^2 f_{x_\rho}(\tilde{w}_\rho)} \right\|^2_{\Sigma^{-1}_{i+1}}\\ &\leq \frac{4\sigma^2}{\lambda} \left(d_w \log \left(1+\frac{i C_g^2}{d_w \lambda} \right) + \log\left(\frac{\pi^2 i^2}{3\delta}\right) \right) + \frac{4Cd_w F^2 \iota}{\mu n } \nonumber\\
&\qquad + \frac{1}{\lambda}\left\|\sum_{\rho=0}^{i} \nabla f_{x_\rho}(\hat{w}_\rho) \|w^* - \hat{w}_\rho\|^2_{\nabla^2 f_{x_\rho}(\tilde{w}_\rho)} \right\|^2_{\Sigma^{-1}_{i+1}}\\
&\leq \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4Cd_w F^2 \iota}{\mu n } + \frac{1}{\lambda}\left\|\sum_{\rho=0}^{i} \nabla f_{x_\rho}(\hat{w}_\rho) \|w^* - \hat{w}_\rho\|^2_{\nabla^2 f_{x_\rho}(\tilde{w}_\rho)} \right\|^2_{\Sigma^{-1}_{i+1}}, \end{align}
where the first inequality is due to self-normalized bound for vector-valued martingales (Lemma \ref{lem:self_norm} in Appendix \ref{sec:auxiliary}) and Theorem \ref{thm:mle_guarantee}, the second inequality is by Lemma \ref{lem:log_det} and our choice of $\delta_i= 3\delta/(\pi^2 i^2)$, and the last inequality is by defining $\iota'$ as the logarithmic term depending on $i, d_w, C_g, 1/\lambda, 2/\delta$ (with probability $> 1- \delta/2$). The choice of $\delta_i$ guarantees the total failure probability over $t$ rounds is no larger than $\delta/2$. Now we use our assumption $\|\hat{w}_i - w^*\|^2_2 \leq \frac{\tilde{C} d_w F^2 \iota}{\mu n}$ to bound the last term. \begin{align}
\|\hat{w}_{i+1} - w^*\|^2_2 &\leq \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4Cd_w F^2 \iota}{\mu n } + \frac{1}{\lambda}\left\|\frac{\tilde{C} C_h d_w F^2 \iota}{\mu n} \sum_{\rho=0}^i \nabla f_{x_\rho}(\hat{w}_\rho) \right\|^2_{\Sigma^{-1}_{i+1}}\\ &\leq \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4Cd_w F^2 \iota}{\mu n } + \frac{\tilde{C}^2 C^2_h d_w^2 F^4 \iota^2}{\mu^2 \lambda n^2}\left(\sum_{\rho=0}^i \sqrt{\nabla f_{x_\rho}(\hat{w}_\rho)^\top \Sigma^{-1}_{i+1} \nabla f_{x_\rho}(\hat{w}_\rho)} \right)^2\\ &\leq \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4Cd_w F^2 \iota}{\mu n } + \frac{\tilde{C}^2 C^2_h d_w^2 F^4 \iota^2}{\mu^2 \lambda n^2}\left(\sum_{\rho=0}^i 1\right)\left(\sum_{\rho=0}^i \nabla f_{x_\rho}(\hat{w}_\rho)^\top \Sigma^{-1}_{i+1} \nabla f_{x_\rho}(\hat{w}_\rho) \right)\\ &\leq \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4Cd_w F^2 \iota}{\mu n } + \frac{\tilde{C}^2 C^2_h d^3_w F^4 i \iota{''} \iota^2}{\mu^2 \lambda n^2}, \end{align} where the first inequality is due to smoothness of loss function in Assumption \ref{ass:loss}, the third inequality is by Cauchy-Schwarz inequality, and the last inequality is because of Lemma \ref{lem:sum_of_square} and defining $\iota{''}$ as logarithmic term depending on $i, d_w, C_g, 1/\lambda$.
What we need is that there exists some universal constant $\tilde{C}$ such that \begin{align} \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4C d_w F^2 \iota}{\mu n} + \frac{\tilde{C}^2 C^2_h d^3_w F^4 i \iota^2 \iota{''}}{\lambda \mu^2 n^2} \leq \frac{\tilde{C}d_w F^2 \iota}{\mu n}. \end{align} Note the LHS is monotonically increasing w.r.t $i$ so the inequality must hold when $i=T$, i.e., \begin{align} \frac{4d_w \sigma^2 \iota'}{\lambda} + \frac{4C d_w F^2 \iota}{\mu n} + \frac{\tilde{C}^2 C^2_h d^3_w F^4 T \iota^2 \iota{''}}{\lambda \mu^2 n^2} \leq \frac{\tilde{C}d_w F^2 \iota}{\mu n}. \end{align} Recall the range of our function is $[-F, F]$, given any distribution, the variance $\sigma^2$ can always be upper bounded by $ F^2/4$, so we just need to show that \begin{align} \frac{d_w F^2 \iota'}{\lambda} + \frac{4C d_w F^2 \iota}{\mu n} + \frac{\tilde{C}^2 C^2_h d^3_w F^4 T \iota^2 \iota{''}}{\lambda \mu^2 n^2} &\leq \frac{\tilde{C}d_w F^2 \iota}{\mu n},\\ \mu^2 n^2 \iota' + 4 \lambda \mu n C \iota + \tilde{C}^2 C^2_h d^2_w F^2 T \iota^2 \iota{''} &\leq \lambda \mu n \tilde{C} \iota,\\ \tilde{C}^2 C^2_h d^2_w F^2 T \iota^2 \iota{''} - \tilde{C}\lambda \mu n \iota + \mu^2 n^2 \iota' + 4\lambda \mu n C \iota &\leq 0, \end{align} where the second and third lines are by rearrangement. A feasible solution on $\tilde{C}$ requires \begin{align} \lambda^2 \mu^2 n^2 \iota^2 - 4C^2_h d^2_w F^2 T \iota^2 \iota{''} (\mu^2 n^2 \iota' + 4\lambda \mu n C \iota) &\geq 0,\\ \lambda^2 \mu^2 n - 4C^2_h d^2_w F^2 T\iota{''} (\mu^2 n \iota' + 4\lambda \mu C \iota) &\geq 0,\label{eq:b^2-4ac} \end{align} where the second line is by rearrangement. Our choices of $\lambda = C_\lambda \sqrt{T}, n=\sqrt{T}$ where $C_\lambda$ is a constant independent to $T$ ensure eq. \eqref{eq:b^2-4ac} holds, thus such a $\tilde{C}$ exists. Therefore, by induction, we prove that $\forall i \in [T]$ there exists a universal constant $\tilde{C}$ such that with probability $> 1- \delta/2$, \begin{align}
\|\hat{w}_i - w^*\|^2_2 \leq \frac{\tilde{C}d_w F^2\iota}{\mu n}. \end{align} With this result, we are now ready to move to \textbf{Step 3}.
\textbf{Step 3: Upper bound of $\|\hat{w}_t - w^*\|^2_{\Sigma_t}$.} Multiply both sides of eq. \eqref{eq:w_t_w_star} by $\Sigma^\frac{1}{2}_t$ and we have \begin{align}
\Sigma^{\frac{1}{2}}_t(\hat{w}_t - w^*) &\leq \frac{1}{2} \Sigma^{-\frac{1}{2}}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i)\|w^* - \hat{w}_i\|^2_{\nabla^2 f_{x_i}(\tilde{w})}\right) + \Sigma^{-\frac{1}{2}}_t \left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \eta_i \right) + \lambda \Sigma^{-\frac{1}{2}}_t (\hat{w}_0 - w^*). \end{align} Take square of both sides and by inequality $(a+b+c)^2\leq 4a^2 + 4b^2 + 4c^2$ we obtain \begin{align}
\|\hat{w}_t - w^*\|^2_{\Sigma_t} &\leq 4\left\| \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \eta_i\right\|^2_{\Sigma_t^{-1}} + 4\lambda^2 \|\hat{w}_0 - w^*\|^2_{\Sigma^{-1}_t} + \left\|\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i)\|w^* - \hat{w}_i\|^2_{\nabla^2 f_{x_i}(\tilde{w})}\right\|^2_{\Sigma^{-1}_t}. \end{align} The remaining proof closely follows \textbf{Step 2}, i.e., \begin{align}
\|\hat{w}_t - w^*\|^2_{\Sigma_t} &\leq 4d_w \sigma^2 \iota' + \frac{4\lambda C d_w F^2 \iota}{\mu n} + \left\|\frac{\tilde{C}C_h d_w F^2 \iota}{\mu n} \sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i) \right\|^2_{\Sigma^{-1}_{t}}\\ &\leq 4d_w \sigma^2 \iota' + \frac{4\lambda C d_w F^2 \iota}{\mu n} + \frac{\tilde{C}^2 C^2_h d_w^2 F^4 \iota^2}{\mu^2 n^2}\left(\sum_{i=0}^{t-1} \sqrt{\nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_{t} \nabla f_{x_i}(\hat{w}_i)} \right)^2\\ &\leq 4d_w \sigma^2 \iota' + \frac{4\lambda C d_w F^2 \iota}{\mu n } + \frac{\tilde{C}^2 C^2_h d_w^2 F^4 \iota^2}{\mu^2 n^2}\left(\sum_{i=0}^{t-1} 1\right)\left(\sum_{i=0}^{t-1} \nabla f_{x_i}(\hat{w}_i)^\top \Sigma^{-1}_t \nabla f_{x_i}(\hat{w}_i) \right)\\ &\leq 4d_w \sigma^2 \iota' + \frac{4\lambda Cd_w F^2 \iota}{\mu n } + \frac{\tilde{C}^2 C^2_h d_w^3 F^4 t \iota{''} \iota^2}{\mu^2 n^2}\\ &\leq O\left(d_w \sigma^2 \iota' + \frac{d_w F^2 \iota}{\mu} + \frac{d^3_w F^4 t \iota{''} \iota^2 }{\mu^2 T}\right), \end{align} where the last inequality is by our choices of $\lambda=C_\lambda \sqrt{T}, n = \sqrt{T}$. Therefore, our choice of \begin{align} \beta_t &= \tilde{\Theta}\left(d_w \sigma^2 + \frac{d_w F^2 }{\mu} + \frac{d^3_w F^4 t }{\mu^2 T}\right) \end{align} guarantees that $w^*$ is always contained in $\mathrm{Ball}_t$ with probability $1- \delta$. \end{proof}
\subsection{Regret Analysis}\label{sec:regret}
\begin{lemma}[Restatement of Lemma \ref{lem:instant_regret}] Set $\Sigma_t, \hat{w}_t, \beta_t$ as in eq. \eqref{eq:sigma_t}, \eqref{eq:inner}, \& \eqref{eq:beta_2} and suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold, then with probability $> 1- \delta$,
$w^*$ is contained in $\mathrm{Ball}_t$. Define $u_t = \|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma^{-1}_t}$, then $\forall t \in [T]$ in Phase II of Algorithm \ref{alg:go_ucb}, \begin{align*} r_t \leq 2\sqrt{\beta_t}u_t + \frac{2\beta_t C_h}{\lambda}. \end{align*} \end{lemma} \begin{proof} By definition of instantaneous regret $r_t$, \begin{align} r_t &= f_{x^*}(w^*) - f_{x_t}(w^*). \end{align} Recall the selection process of $x_t$ and define $\tilde{w} = \mathop{\mathrm{argmax}}_{w \in \mathrm{Ball}_t} f_{x_t}(w)$, \begin{align} r_t \leq f_{x_t}(\tilde{w}) - f_{x_t}(w^*)= (\tilde{w} - w^*)^\top \nabla f_{x_t}(\dot{w}), \end{align} where the equation is by first order Taylor's theorem and $\dot{w}$ lies between $\tilde{w}$ and $w^*$ which means $\dot{w}$ is guaranteed to be in $\mathrm{Ball}_t$ since $\mathrm{Ball}_t$ is convex. Then, by adding and removing terms, \begin{align} r_t &= (\tilde{w} - \hat{w}_t + \hat{w}_t - w^*)^\top (\nabla f_{x_t}(\hat{w}_t) - \nabla f_{x_t}(\hat{w}_t) + \nabla f_{x_t}(\dot{w}))\\
&\leq \|\tilde{w}-\hat{w}_t\|_{\Sigma_t} \|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma_t^{-1}} + \|\hat{w}_t - w^*\|_{\Sigma_t} \|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma_t^{-1}} + (\tilde{w} - \hat{w}_t)^\top (\nabla f_{x_t}(\dot{w}_t) - \nabla f_{x_t}(\hat{w}_t))\nonumber\\ &\qquad + (\hat{w}_t - w^*)^\top (\nabla f_{x_t}(\dot{w}) - \nabla f_{x_t}(\hat{w}_t)), \end{align}
where the last inequality is due to H\"older's inequality. By definitions of $\beta_t$ in $\mathrm{Ball}_t$ and $u_t = \|\nabla f_{x_t}(\hat{w}_t)\|_{\Sigma^{-1}_t}$, \begin{align} r_t &\leq 2\sqrt{\beta_t} u_t + (\tilde{w} - \hat{w}_t)^\top (\nabla f_{x_t}(\dot{w}) - \nabla f_{x_t}(\hat{w}_t)) + (\hat{w}_t - w^*)^\top (\nabla f_{x_t}(\dot{w}) - \nabla f_{x_t}(\hat{w}_t)). \end{align} Again by the first order Taylor's theorem, where $\ddot{w}$ lies between $\dot{w}$ and $\hat{w}_t$ and thus $\ddot{w}$ lies in $\mathrm{Ball}_t$, \begin{align} r_t &\leq 2\sqrt{\beta_t} u_t + (\tilde{w}-\hat{w}_t)^\top \Sigma^\frac{1}{2}_t \Sigma^{-\frac{1}{2}}_t \nabla^2 f_{x_t}(\ddot{w}) \Sigma^{-\frac{1}{2}}_t \Sigma^\frac{1}{2}_t (\dot{w}-\hat{w}_t)\nonumber\\ &\qquad + (\hat{w}_t- w^*)^\top \Sigma^\frac{1}{2}_t \Sigma^{-\frac{1}{2}}_t \nabla^2 f_{x_t}(\ddot{w}) \Sigma^{-\frac{1}{2}}_t \Sigma^\frac{1}{2}_t (\dot{w}-\hat{w}_t)\\
&\leq 2\sqrt{\beta_t} u_t + \|(\tilde{w}-\hat{w}_t)^\top \Sigma^\frac{1}{2}_t\|_2 \|\Sigma^{-\frac{1}{2}}_t \nabla^2 f_{x_t}(\ddot{w}) \Sigma^{-\frac{1}{2}}_t\|_\mathrm{op} \|\Sigma^\frac{1}{2}_t (\dot{w}-\hat{w}_t)\|_2 \nonumber\\
&\qquad + \|(\hat{w}_t - w^*)^\top \Sigma^\frac{1}{2}_t\|_2 \|\Sigma^{-\frac{1}{2}}_t \nabla^2 f_{x_t}(\ddot{w}) \Sigma^{-\frac{1}{2}}_t\|_\mathrm{op} \|\Sigma^\frac{1}{2}_t (\dot{w}-\hat{w}_t)\|_2\\ &\leq 2\sqrt{\beta_t}u_t + \frac{2\beta_t C_h}{\lambda}, \end{align} where the second inequality is by H\"older's inequality and the last inequality is due to the definition of $\beta_t$ in $\mathrm{Ball}_t$, Assumption \ref{ass:objective}, and our choice of $\Sigma_t$. \end{proof}
\begin{lemma}[Restatement of Lemma \ref{lem:sos_instant_regret}] Set $\Sigma_t, \hat{w}_t, \beta_t$ as in eq. \eqref{eq:sigma_t}, \eqref{eq:inner}, \& \eqref{eq:beta_2} and suppose Assumption \ref{ass:parameter_class}, \ref{ass:objective}, \& \ref{ass:loss} hold, then with probability $> 1- \delta$, $w^*$ is contained in $\mathrm{Ball}_t$ and $\forall t \in [T]$ in Phase II of Algorithm \ref{alg:go_ucb}, \begin{align*} \sum_{t=1}^T r^2_t \leq 16\beta_T d_w \log \left(1 + \frac{TC_g^2}{d_w \lambda}\right) + \frac{8\beta^2_T C^2_h T}{\lambda^2}. \end{align*} \end{lemma} \begin{proof} By Lemma \ref{lem:instant_regret} and inequality $(a+b)^2 \leq 2a^2 + 2b^2$, \begin{align} \sum_{t=1}^T r^2_t &\leq \sum_{t=1}^T 8\beta_t u^2_t + \frac{8 \beta^2_t C^2_h}{\lambda^2}\\ &\leq 8\beta_T \sum_{i=1}^T u^2_t + \frac{8\beta^2_T C^2_h T}{\lambda^2}\\ &\leq 16\beta_T d_w \log \left(1 + \frac{TC_g^2}{d_w \lambda}\right) + \frac{8\beta^2_T C^2_h T}{\lambda^2}, \end{align} where the second inequality is due to $\beta_t$ is increasing in $t$ and the last inequality is by Lemma \ref{lem:sum_of_square}. \end{proof}
By putting everything together, we are ready to prove the main cumulative regret theorem.
\begin{proof}[Proof of Theorem \ref{thm:cr}] By definition of cumulative regret including both Phase I and II, \begin{align*} R_{\sqrt{T}+T} &= \sum_{j=1}^{\sqrt{T}} r_j + \sum_{t=1}^T r_t\\ &\leq 2\sqrt{T}F + \sqrt{T\sum_{t=1}^T r^2_t}\\ &\leq 2\sqrt{T}F + \sqrt{16T\beta_T d_w \log \left(1 + \frac{T C_g^2}{d_w \lambda}\right) + \frac{8T^2\beta^2_T C^2_h}{\lambda^2}}\\ &\leq \tilde{O}\left(\sqrt{T} F + \sqrt{T\beta_T d_w + T \beta^2_T }\right), \end{align*} where the first inequality is due to function range and Cauchy-Schwarz inequality, the second inequality is by Lemma \ref{lem:sos_instant_regret} and the last inequality is obtained by setting $\lambda=C_\lambda \sqrt{T}, n=\sqrt{T}$ as required by Lemma \ref{lem:feasible_ball}.
Recall that $\beta_t$ is defined in eq. \eqref{eq:beta_2}, so \begin{align*} \beta_T &= \tilde{\Theta}\left(d_w \sigma^2 + \frac{d_w F^2}{\mu} + \frac{d^3_w F^4}{\mu^2} \right) \leq \tilde{O}\left(\frac{d^3_w F^4}{\mu^2} \right). \end{align*} The proof completes by plugging in upper bound of $\beta_T$. \end{proof}
\section{Additional Experimental Details} In addition to the Experiments section in the main paper, in this section we show details of the algorithm implementation and real-world experiments.
\subsection{Implementation of GO-UCB}\label{sec:imp_goucb} Noise parameter $\sigma=0.01$. Regression oracle in GO-UCB is approximated by stochastic gradient descent algorithm on our two linear layer neural network model with mean squared error loss, $2,000$ iterations and $10^{-11}$ learning rate. Exactly solving optimization problem in Step 5 of Phase II may not be computationally tractable, so we use iterative gradient ascent algorithm over $x$ and $w$ with $2,000$ iterations and $10^{-4}$ learning rate. $\beta_t$ is set as $d^3_w F^4 t/T$. $\lambda$ is set as $\sqrt{T}\log^2 T$.
\subsection{Real-world Experiments}\label{sec:real_detail}
Hyperparameters can be continuous or categorical, however, in order to fairly compare GO-UCB with Bayesian optimization methods, in all hyperparameter tuning tasks, we set function domain to be $[0, 10]^{d_x}$, a continuous domain. If a hyperparameter is categorical, we allocate equal length domain for each hyperparameter. For example, the seventh hyperparameter of random forest is a bool value, True or False and we define $[0, 5)$ as True and $[5,10]$ as False. If a hyperparameter is continuous, we set linear mapping from the hyperparameter domain to $[0,10]$. For example, the sixth hyperparameter of multi-layer perceptron is a float value in $(0,1)$ thus we multiply it by $10$ and map it to $(0, 10)$.
\textbf{Hyperparameters in hyperparameter tuning tasks.} We list hyperparameters in all three tasks as follows.
\begin{itemize} \item Classification with Random Forest. \begin{enumerate}
\item Number of trees in the forest, (integer, [20, 200]).
\item Criterion, (string, ``gini'', ``entropy'', or ``logloss'').
\item Maximum depth of the tree, (integer, [1, 10]).
\item Minimum number of samples required to split an internal node, (integer, [2, 10]).
\item Minimum number of samples required to be at a leaf node, (integer, [1, 10]).
\item Maximum number of features to consider when looking for the best split, (string, ``sqrt'' or ``log2''). \item Bootstrap, (bool, True or False). \end{enumerate} \item Classification with Multi-layer Perceptron. \begin{enumerate}
\item Activation function (string, ``identity'', ``logistic'', ``tanh'', or ``relu'').
\item Strength of the L2 regularization term, (float, [$10^{-6}, 10^{-2}$]).
\item Initial learning rate used, (float, [$10^{-6}, 10^{-2}$]).
\item Maximum number of iterations, (integer, [100, 300]).
\item Whether to shuffle samples in each iteration, (bool, True or False).
\item Exponential decay rate for estimates of first moment vector, (float, (0, 1)).
\item Exponential decay rate for estimates of second moment vector (float, (0, 1)).
\item Maximum number of epochs to not meet tolerance improvement, (integer, [1, 10]). \end{enumerate}
\item Classification with Gradient Boosting. \begin{enumerate}
\item Loss, (string, ``logloss'' or ``exponential''). \item Learning rate, (float, (0, 1)). \item Number of estimators, (integer, [20, 200]). \item Fraction of samples to be used for fitting the individual base learners, (float, (0, 1)). \item Function to measure the quality of a split, (string, ``friedman mse'' or ``squared error''). \item Minimum number of samples required to split an internal node, (integer, [2, 10]). \item Minimum number of samples required to be at a leaf node, (integer, [1, 10]). \item Minimum weighted fraction of the sum total of weights, (float, (0, 0.5)). \item Maximum depth of the individual regression estimators, (integer, [1, 10]). \item Number of features to consider when looking for the best split, (float, ``sqrt'' or ``log2''). \item Maximum number of leaf nodes in best-first fashion, (integer, [2, 10]). \end{enumerate} \end{itemize}
\textbf{Results on Australian and Diabetes datasets.} We show experimental results of hyperparameter tuning tasks on the Australian and Diabetes datasets in Figure \ref{fig:aus} and Figure \ref{fig:dia}. Our proposed GO-UCB algorithm performs consistently better than all other algorithms, which is consistent with the results on the Breast-cancer dataset reported in the main paper.
\begin{figure*}
\caption{Cumulative regrets (the lower the better) of all algorithms in real-world hyperparameter tuning task on Australian dataset.
}
\label{fig:aus}
\end{figure*}
\begin{figure*}
\caption{Cumulative regrets (the lower the better) of all algorithms in real-world hyperparameter tuning task on Diabetes dataset.
}
\label{fig:dia}
\end{figure*}
\end{document} |
\begin{document}
\title{Hermitian
structures on the derived category of coherent sheaves} \author{ \\ \\
Jos\'e I. Burgos Gil\footnote{Partially supported by grant
MTM2009-14163-C02-01 and CSIC research project 2009501001.}\\
\small{Instituto de Ciencias Matem\'aticas (ICMAT-CSIC-UAM-UCM-UC3)}\\
\small{Consejo Superior de Investigaciones Cient\'ificas (CSIC)}\\
\small{Spain}\\
\small{\texttt{[email protected]}}\\
\and
Gerard Freixas i Montplet\footnote{Partially supported by
grant MTM2009-14163-C02-01}\\
\small{Institut de Math\'ematiques de Jussieu (IMJ)}\\
\small{Centre National de la Recherche Scientifique (CNRS)}\\
\small{France}\\
\small{\texttt{[email protected]}}\\
\and
R\u azvan Li\c tcanu\footnote{Supported by CNCSIS--UEFISCSU,
project number PNII--IDEI 2228/2008.}\\
\small{Faculty of Mathematics}\\
\small{University Al. I. Cuza Iasi}\\
\small{Romania}\\
\small{\texttt{[email protected]}} }
\maketitle \begin{abstract}
The main objective of the present paper is to set up the theoretical
basis and the language needed to deal with the problem of direct
images of hermitian vector bundles for projective, not necessarily
smooth, morphisms. To this end, we first define hermitian structures on the
objects of the bounded derived category of coherent sheaves on a
smooth complex variety. Secondly we extend the theory of Bott-Chern
classes to these hermitian structures. Finally we introduce the
category $\oSm_{\ast/{\mathbb C}}$ whose morphisms are projective morphisms
with a hermitian structure on the relative tangent complex. \end{abstract}
\section{Introduction} \label{sec:introduction}
Derived categories were introduced in the 1960s by Grothendieck and Verdier in order to study and generalize duality phenomena in Algebraic Geometry (see \cite{Hartshorne:rd}, \cite{Verdier:MR1453167}). Since then, derived categories have become a standard tool in Algebra and Geometry and the right framework to define derived functors and to study homological properties. A paradigmatic example is the definition of direct image of sheaves. Given a map $\pi\colon X\to Y$ between varieties and a sheaf $\mathcal{F}$ on $X$, there is a notion of direct image $\pi _{\ast}\mathcal{F}$. We are not specifying what kind of variety or sheaf we are talking about because the same circle of ideas can be used in many different settings. This direct image is not exact in the sense that if $f\colon \mathcal{F}\to \mathcal{G}$ is a surjective map of sheaves, the induced morphism $\pi _{\ast}f\colon \pi _{\ast}\mathcal{F}\to \pi _{\ast}\mathcal{G}$ is not necessarily surjective. One can then define a \emph{derived} functor $R\pi _{\ast}$ that takes values in the derived category of sheaves on $Y$ and that is exact in an appropriate sense. This functor encodes a lot of information about the topology of the fibres of the map $\pi $.
The interest for the derived category of coherent sheaves on a variety exploded with the celebrated 1994 lecture by Kontsevich \cite{Kontsevich:MR1403918}, interpreting mirror symmetry as an equivalence between the derived category of the Fukaya category of certain symplectic manifold and the derived category of coherent sheaves of a dual complex manifold. In the last decades, many interesting results about the derived category of coherent sheaves have been obtained, like Bondal-Orlov Theorem \cite{BondalOrlov:MR1818984} that shows that a projective variety with ample canonical or anti-canonical bundle can be recovered from its derived category of coherent sheaves. Moreover, new tools for studying algebraic varieties have been developed in the context of derived categories like the Fourier-Mukai transform \cite{Mukai:FT}. The interested reader is referred to books like \cite{Huybrechts:FM} and \cite{Bartoccietal:MR2511017} for a thorough exposition of recent developments in this area.
Hermitian vector bundles are ubiquitous in Mathematics. An interesting problem is to define the direct image of hermitian vector bundles. More concretely, let $\pi \colon X\to Y$ be a proper holomorphic map of complex manifolds and let $\overline E=(E,h)$ be a hermitian holomorphic vector bundle on $X$. We would like to define the direct image $\pi_{\ast}\overline E$ as something as close as possible to a hermitian vector bundle on $Y$. The information that would be easier to extract from such a direct image is encoded in the determinant of the cohomology \cite{Deligne:dc}, that can be defined directly. Assume that $\pi $ is a submersion and that we have chosen a hermitian metric on the relative tangent bundle $T_{\pi }$ of $\pi $ satisfying certain technical conditions. Then the determinant line bundle $\lambda (E)=\det(R\pi _{\ast}E)$ can be equipped with the Quillen metric (\cite{Quillen:dCRo}, \cite{BismutFreed:EFI}, \cite{BismutFreed:EFII}), that depends on the metrics on $E$ and $T_{\pi }$ and is constructed using the analytic torsion \cite{RaySinger:ATCM}. The Quillen metric has applications in Arithmetic Geometry (\cite{Faltings:cas}, \cite{Deligne:dc}, \cite{GilletSoule:aRRt}) and also in String Theory (\cite{Yau:MR915812}, \cite{AlvarezGaumeetals:MR908551}). Assume furthermore that the higher direct image sheaves $R^{i}\pi _{\ast}E$ are locally free. In general it is not possible to define an analogue of the Quillen metric as a hermitian metric on each vector bundle $R^{i}\pi _{\ast}E$. But following Bismut and K\"ohler \cite{Bismut-Kohler}, one can do something almost as good. We can define the $L^{2}$-metric on $R^{i}\pi _{\ast}E$ and \emph{correct} it using the higher analytic torsion form. 
Although this \emph{corrected metric} is not properly a hermitian metric, it is enough for constructing characteristic forms and it appears in the Arithmetic Grothendieck-Riemann-Roch Theorem in higher degrees~\cite{GilletRoesslerSoule:_arith_rieman_roch_theor_in_higher_degrees_p}.
The main objective of the present paper is to set up the theoretical basis and the language needed to deal with the problem of direct images of hermitian vector bundles for projective, not necessarily smooth, morphisms. This program will be continued in the subsequent paper \cite{BurgosFreixasLitcanu:GenAnTor} where we give an axiomatic characterization of analytic torsion forms and we generalize them to projective morphisms. The ultimate goal of this program is to state and prove an Arithmetic Grothendieck-Riemann-Roch Theorem for general projective morphisms. This last result will be the topic of a forthcoming paper.
When dealing with direct images of hermitian vector bundles for non smooth morphisms, one is naturally led to consider hermitian structures on objects of the bounded derived category of coherent sheaves $\Db$. One reason for this is that, for a non-smooth projective morphism $\pi $, instead of the relative tangent bundle one should consider the relative tangent complex, that defines an object of $\Db(X)$. Another reason is that, in general, the higher direct images $R^{i}\pi _{\ast}E$ are coherent sheaves and the derived direct image $R\pi _{\ast}E$ is an object of $\Db(Y)$.
Thus the first goal of this paper is to define hermitian structures. A possible starting point is to define a hermitian metric on an object $\mathcal{F}$ of $\Db(X)$ as an isomorphism $E\dashrightarrow \mathcal{F}$ in $\Db(X)$, with $E$ a bounded complex of vector bundles, together with a choice of a hermitian metric on each constituent vector bundle of $E$. Here we find a problem, because, even when $X$ is smooth, not every object in the bounded derived category of coherent sheaves of $X$ can be represented by a bounded complex of locally free sheaves (see \cite{Voisin:counterHcK} and Remark \ref{rem:3}). Thus the previous idea does not work for general complex manifolds. To avoid this problem we will restrict ourselves to the algebraic category. Thus, from now on the letters $X,\ Y,\dots$ will denote smooth algebraic varieties over ${\mathbb C}$, and all sheaves will be algebraic.
With the previous definition of hermitian metric, for each object of $\Db(X)$ we obtain a class of metrics that is too wide. Different constructions that ought to produce the same metric produce in fact different metrics. This indicates that we may define a hermitian structure as an equivalence class of hermitian metrics.
Let us be more precise. Since $\Db(X)$ is a triangulated category, to every morphism $\mathcal{F}\overset{f}{\dashrightarrow}\mathcal{G}$ in $\Db(X)$ we can associate its cone, which is defined up to a (non-unique) isomorphism by the fact that \begin{displaymath}
\mathcal{F}\dashrightarrow \mathcal{G}\dashrightarrow \cone(f)\dashrightarrow \mathcal{F}[1] \end{displaymath} is a distinguished triangle. If now $\mathcal{F}$ and $\mathcal{G}$ are provided with hermitian metrics, we want $\cone(f)$ to have an induced hermitian structure that is well defined up to \emph{isometry}. By choosing a representative of the map $f$ by means of morphisms of complexes of vector bundles, we can induce a hermitian metric on $\cone(f)$, but this hermitian metric depends on the choices. The idea behind the definition of hermitian structures is to introduce the finest equivalence relation between metrics such that all possible induced hermitian metrics on $\cone(f)$ are equivalent.
Once we have defined hermitian structures a new invariant of $X$ can be naturally defined. Namely, the set of hermitian structures on a zero object of $\Db(X)$ is an abelian group that we denote $\KA(X)$ (Definition \ref{def:KA}). In the same way that $K_{0}(X)$ is the universal abelian group for additive characteristic classes of vector bundles, $\KA(X)$ is the universal abelian group for secondary characteristic classes of acyclic complexes of hermitian vector bundles (Theorem~\ref{thm:8}).
Secondary characteristic classes constitute other of the central topics of this paper. Recall that to each vector bundle we can associate its Chern character, that is an additive characteristic class. If the vector bundle is provided with a hermitian metric, we can use Chern-Weil theory to construct a concrete representative of the Chern character, that is a differential form. This characteristic form is additive only for orthogonally split short exact sequences and not for general short exact sequences. Bott-Chern classes were introduced in \cite{BottChern:hvb} and are secondary classes that measure the lack of additivity of the characteristic forms.
The Bott-Chern classes have been extensively used in Arakelov Geometry (\cite{GilletSoule:vbhm}, \cite{BismutGilletSoule:at}) and they can be used to construct characteristic classes in higher $K$-theory (\cite{Burgos-Wang}). The second goal of this paper is to extend the definition of additive Bott-Chern classes to the derived category. This is the most general definition of additive Bott-Chern classes and encompasses both the Bott-Chern classes defined in \cite{BismutGilletSoule:at} and the ones defined in \cite{Ma:MR1765553} (Example \ref{exm:1}).
Finally, recall that the hermitian structure on the direct image of a hermitian vector bundle should also depend on a hermitian structure on the relative tangent complex. Thus the last goal of this paper is to introduce the category $\overline{\Sm}_{\ast/{\mathbb C}}$ (Definition \ref{def:16}, Theorem \ref{thm:17}). The objects of this category are smooth algebraic varieties over ${\mathbb C}$ and the morphisms are pairs $\overline{f}=(f,\overline{T}_{f})$ formed by a projective morphism of smooth complex varieties $f$, together with a hermitian structure on the relative tangent complex $T_{f}$. The main difficulty here is to define the composition of two such morphisms.
The remarkable fact is that the hermitian cone construction enables us to define a composition rule for these morphisms.
We describe with more detail the contents of each section.
In Section \ref{sec:meager-complexes} we define and characterize the notion of \textit{meager complex} (Definition \ref{def:9} and Theorem \ref{thm:3}). Roughly speaking, meager complexes are bounded acyclic complexes of hermitian vector bundles whose Bott-Chern classes vanish for structural reasons. We then introduce the concept of tight morphism (Definition \ref{def:tight_morphism}) and tight equivalence relation (Definition \ref{def:17}) between bounded complexes of hermitian vector bundles. We explain a series of useful computational rules on the monoid of hermitian vector bundles modulo tight equivalence relation, that we call \textit{acyclic calculus} (Theorem \ref{thm:7}). We prove that the submonoid of acyclic complexes modulo meager complexes has the structure of an abelian group; this is the group $\KA(X)$ mentioned previously.
With these tools at hand, in Section \ref{sec:oDb} we define hermitian structures on objects of $\Db(X)$ and we introduce the category $\oDb(X)$. The objects of the category $\oDb(X)$ are objects of $\Db(X)$ together with a hermitian structure, and the morphisms are just morphisms in $\Db(X)$. Theorem \ref{thm:13} is devoted to describe the structure of the forgetful functor $\oDb(X)\to\Db(X)$. In particular, we show that the group $\KA(X)$ acts on the fibers of this functor, freely and transitively.
An important example of use of hermitian structures is the construction of the \textit{hermitian cone} of a morphism in $\oDb(X)$ (Definition \ref{def:her_cone}), which is well defined only up to tight isomorphism. We also study several elementary constructions in $\oDb(X)$. Here we mention the classes of isomorphisms and distinguished triangles in $\oDb(X)$. These classes lie in the group $\KA(X)$ and their properties are listed in Theorem \ref{thm:10}. As an application we show that $\KA(X)$ receives classes from $K_{1}(X)$ (Proposition \ref{prop:K1_to_KA}).
Section \ref{sec:bott-chern-classes} is devoted to the extension of Bott-Chern classes to the derived category. For every additive genus, we associate to each isomorphism or distinguished triangle in $\oDb(X)$ a Bott-Chern class satisfying properties analogous to the classical ones.
We conclude the paper with Section \ref{sec:multiplicative-genera}, where we extend the definition of Bott-Chern classes to multiplicative genera and in particular to the Todd genus. In this section we also define the category $\overline{\Sm}_{\ast/{\mathbb C}}$.
\textbf{Acknowledgements:} We would like to thank the following institutions where part of the research leading to this paper was done: the CRM in Bellaterra (Spain), the CIRM in Luminy (France), the Morningside Institute of Beijing (China), the University of Barcelona and the IMUB, the Alexandru Ioan Cuza University of Iasi, the Institut de Math\'ematiques de Jussieu and the ICMAT (Madrid).
We would also like to thank D. Eriksson and D. R\"ossler for several discussions on the subject of this paper.
\section{Meager complexes and acyclic calculus} \label{sec:meager-complexes}
The aim of this section is to construct a universal group for additive Bott-Chern classes of acyclic complexes of hermitian vector bundles. To this end we first introduce and study the class of meager complexes. Any Bott-Chern class that is additive for certain short exact sequences of acyclic complexes (see \ref{thm:8}) and that vanishes on orthogonally split complexes, necessarily vanishes on meager complexes. Then we develop an acyclic calculus that will ease the task to check if a particular complex is meager. Finally we introduce the group $\KA$, which is the universal group for additive Bott-Chern classes.
Let $X$ be a complex algebraic variety over ${\mathbb C}$, namely a reduced and separated scheme of finite type over ${\mathbb C}$. We denote by $\Vb(X)$ the exact category of bounded complexes of algebraic vector bundles on $X$. Assume in addition that $X$ is smooth over ${\mathbb C}$. Then $\oV(X)$ is defined as the category of pairs $\overline E=(E,h)$, where $E\in \Ob \Vb(X)$ and $h$ is a smooth hermitian metric on the complex of analytic vector bundles $E^{\text{{\rm an}}}$. From now on we shall make no distinction between $E$ and $E^{\text{{\rm an}}}$. The complex $E$ will be called \emph{the underlying complex of } $\overline E$. We will denote by the symbol $\sim$ the quasi-isomorphisms in any of the above categories.
A basic construction in $\Vb(X)$ is the cone of a morphism of complexes. Recall that, if $f\colon E\to F$ is such a morphism, then, as a graded vector bundle $\cone(f)=E[1]\oplus F$ and the differential is given by $\dd(x,y)=(-\dd x,f(x)+\dd y).$ We can extend the cone construction easily to $\oV(X)$ as follows.
\begin{definition} \label{def:14} If $f\colon \overline E\to \overline F$ is a morphism in $\oV(X)$, \emph{the hermitian cone} of $f$, denoted by $\ocone(f)$, is defined as the cone of $f$ provided with the orthogonal sum hermitian metric.
When the morphism is clear from the context we will sometimes denote $\ocone(f)$ by $\ocone(\overline E,\overline F)$. \end{definition}
\begin{remark} \label{rem:2} Let $f\colon \overline E\to \overline F$ be a
morphism in $\oV(X)$. Then
there is an exact sequence of complexes
\begin{displaymath}
0\longrightarrow \overline F\longrightarrow \ocone(f)
\longrightarrow \overline E[1]\longrightarrow 0,
\end{displaymath}
whose constituent short exact sequences are orthogonally
split. Conversely, if
\begin{displaymath}
0\longrightarrow \overline F\longrightarrow \overline G
\longrightarrow \overline E[1]\longrightarrow 0
\end{displaymath}
is a short exact sequence all whose constituent exact sequences are
orthogonally split, then there is a natural section $s\colon E[1]\to
G$. The image of $\dd s-s\dd$ belongs to
$F$ and, in fact, determines a morphism
of complexes
\begin{displaymath}
f_{s}:=\dd s-s\dd\colon \overline E
\longrightarrow \overline F.
\end{displaymath}
Moreover, there is a natural isometry $\overline G \cong \ocone
(f_{s})$. \end{remark}
The hermitian cone has the following useful property.
\begin{lemma}\label{lemm:13}
Consider a diagram in $\oV(X)$
\begin{displaymath}
\xymatrix{
\overline{E}'\ar[r]^{f'}\ar[d]_{g'} &\overline{F}'\ar[d]^{g}\\
\overline{E}\ar[r]^{f} &\overline{F}.
}
\end{displaymath}
Assume that the diagram is commutative up to homotopy and fix a
homotopy $h$. The homotopy $h$ induces
morphisms of complexes
\begin{align*}
&\psi \colon\ocone(f')\longrightarrow\ocone(f)\\
&\phi \colon\ocone(-g')\longrightarrow\ocone(g)
\end{align*}
and there is a natural isometry of complexes
\begin{displaymath}
\ocone(\phi )\overset{\sim}{\longrightarrow}\ocone(\psi ).
\end{displaymath}
Moreover, let $h'$ be a second homotopy between $g\circ f'$ and
$f\circ g'$ and let $\psi '$ be the induced morphism. If there
exists a higher homotopy between $h$ and $h'$, then $\psi
$ and $\psi '$ are homotopically equivalent.
\end{lemma}
\begin{proof}
Since $h\colon E'\to F[-1]$ is a homotopy between $gf'$ and $fg'$,
we have
\begin{equation}\label{eq:homotopy_square_1}
gf'-fg'=\dd h+h\dd.
\end{equation}
First of all, define the arrow $\psi \colon\ocone(f')\to\ocone(f)$ by
the following rule:
\begin{displaymath}
\psi (x',y')=(g'(x'),g(y')+h(x')).
\end{displaymath}
From the definition of the differential of a cone and the homotopy
relation (\ref{eq:homotopy_square_1}), one easily checks that $\psi $
is a morphism of complexes. Now apply the same construction to the
diagram
\begin{equation}\label{eq:homotopy_square_2}
\xymatrix{
\overline{E}'\ar[r]^{-g'}\ar[d]_{-f'} &\overline{E}\ar[d]^{f}\\
\overline{F'}\ar[r]^{g} &\overline{F}.
}
\end{equation}
The diagram (\ref{eq:homotopy_square_2}) is still commutative up
to homotopy and $h$ provides such a homotopy. We obtain a
morphism of complexes $\phi :\ocone(-g')\to\ocone(g)$, defined by the
rule
\begin{displaymath}
\phi (x',x)=(-f'(x'),f(x)+h(x')).
\end{displaymath}
One easily checks that a suitable reordering of factors sets an
isometry of complexes between $\ocone(\phi )$ and $\ocone(\psi )$.
Assume now that $h'$ is a second homotopy and that there is a
higher homotopy $s\colon \overline E' \to \overline F[-2]$ such that
\begin{displaymath}
h'-h=\dd s- s \dd.
\end{displaymath}
Let $H\colon \ocone(f')\to \ocone(f)[-1]$ be given by
$H(x',y')=(0,s(x'))$. Then
\begin{displaymath}
\psi '-\psi =\dd H + H \dd.
\end{displaymath}
Hence $\psi $ and $\psi' $ are homotopically equivalent.
\end{proof}
Recall that, given a morphism of complexes $f\colon \overline E \to \overline
F$, we use the abuse of notation $\ocone(f)=\ocone(\overline E, \overline
F)$. As seen in the previous lemma, sometimes it is natural to consider
$\ocone(-f)$. With the notation above it will be denoted also by
$\ocone(\overline E,\overline F)$. Note that this ambiguity is harmless because
there is a natural isometry between $\ocone(f)$ and $\ocone(-f)$. Of
course, when more than one morphism between $\overline E$ and $\overline F$ is
considered, the above notation should be avoided.
With this convention, Lemma \ref{lemm:13} can
be written as
\begin{equation}
\label{eq:53}
\ocone(\ocone(\overline E',\overline E),\ocone(\overline F',\overline F))\cong
\ocone(\ocone(\overline E',\overline F'),\ocone(\overline E,\overline F)).
\end{equation}
\begin{definition}\label{def:11}
We will denote by $\mathscr{M}_{0}=\mathscr{M}_{0}(X)$ the subclass of
$\oV(X)$ consisting of
\begin{enumerate}
\item the orthogonally split complexes;
\item all objects $\overline E$ such that there
is an acyclic complex $\overline F$ of $\oV(X)$, and an isometry $\overline E
\to \overline F\oplus \overline F[1]$.
\end{enumerate} \end{definition}
We want to stabilize $\mathscr{M}_{0}$ with respect to hermitian cones.
\begin{definition}\label{def:9}
We will denote by $\mathscr{M}=\mathscr{M}(X)$ the smallest subclass of
$\oV(X)$ that satisfies the following
properties:
\begin{enumerate}
\item \label{item:12} it contains $\mathscr{M}_{0}$;
\item \label{item:13} if $f\colon \overline
E\to \overline F$ is a morphism and two
of $\overline E$, $\overline F$ and $\ocone(f)$ belong
to $\mathscr{M}$, then so does the third.
\end{enumerate}
The elements of $\mathscr{M}(X)$ will be called \emph{meager
complexes}. \end{definition}
We next give a characterization of meager complexes. For this, we introduce two auxiliary classes. \begin{definition} \label{def:12}
\begin{enumerate}
\item \label{item:29} Let $\mathscr{M}_{F}$ be the subclass of
$\oV(X)$ that contains all complexes $\overline E$ that have a
finite filtration $\Fil$ such that
\begin{enumerate}
\item[({\bf A})] \label{item:14} for every $p,n\in {\mathbb Z}$, the exact
sequences
\begin{displaymath}
0\to \Fil^{p+1}\overline E^{n}\to \Fil^{p}\overline E^{n}\to
\Gr^{p}_{\Fil}\overline E^{n}\to 0,
\end{displaymath}
with the induced metrics, are orthogonally split short exact
sequences of vector bundles;
\item[({\bf B})] \label{item:15} the complexes
$\Gr^{\bullet}_{\Fil}\overline E$ belong to $\mathscr{M}_{0}$.
\end{enumerate}
\item \label{item:32} Let $\mathscr{M}_{S}$ be the subclass of
$\oV(X)$ that contains all complexes $\overline E$ such that there
is a morphism of complexes $f\colon \overline E\to \overline F$
and both $\overline F$ and $\ocone (f)$ belong to
$\mathscr{M}_{F}$.
\end{enumerate} \end{definition}
\begin{lemma} \label{lemm:10} Let $0\to \overline E\to \overline F\to \overline G\to
0$ be an exact sequence in $\oV(X)$ whose
constituent rows are orthogonally split. Assume $\overline E$ and $\overline
G$ are in $\mathscr{M}_{F}$. Then $\overline F\in\mathscr{M}_{F}$. In
particular, $\mathscr{M}_{F}$ is closed under cone formation.
\end{lemma}
\begin{proof}
For the first claim, notice that the filtrations of $\overline E$ and $\overline G$
induce a filtration on $\overline F$ satisfying conditions
\ref{def:12}~({\bf A}) and \ref{def:12}~({\bf B}). The second
claim then follows by Remark \ref{rem:2}.
\end{proof}
\begin{example} \label{exm:2}
Given any complex $\overline E\in \Ob \oV(X)$, the complex
$\ocone(\Id_{\overline E})$ belongs to
$\mathscr{M}_{F}$. This can be seen by induction on the length of
$\overline E$ using Lemma \ref{lemm:10} and the b\^ete filtration of $\overline
E$. For the starting point of the induction one takes into account
that, if $\overline E$ has only one non zero
degree, then $\ocone(\Id_{\overline E})$ is orthogonally split. In fact,
this argument shows something slightly stronger. Namely, the complex
$\ocone(\Id_{\overline E})$ admits a finite filtration $\Fil$ satisfying
\ref{def:12}~({\bf A}) and such that the complexes
$\Gr^{\bullet}_{\Fil} \ocone(\Id_{\overline E})$ are orthogonally
split. \end{example}
\begin{theorem}\label{thm:3} The equality
\begin{math}
\mathscr{M}=\mathscr{M}_{S}
\end{math}
holds. \end{theorem} \begin{proof}
We start by proving that $\mathscr{M}_{F}\subset \mathscr{M}$.
Let $\overline E\in \mathscr{M}_{F}$ and let $\Fil$ be any
filtration that satisfies conditions \ref{def:12}~({\bf A}) and
\ref{def:12}~({\bf B}). We show that $
\overline E\in \mathscr{M}$ by induction
on the length of $\Fil$. If
$\Fil $ has length one, then $\overline E$ belongs to
$\mathscr{M}_{0}\subset \mathscr{M}$. If the length of $\Fil$ is
$k>1$, let $p$ be
such that $\Fil^{p}\overline E=\overline E$ and
$\Fil^{p+1}\overline E\not = \overline E$. On the one hand, $\Gr
^{p}_{\Fil}\overline E [-1]\in
\mathscr{M}_{0}\subset \mathscr{M}$ and, on the other hand, the
filtration $\Fil$
induces a filtration on $\Fil^{p+1}\overline E$ fulfilling
conditions \ref{def:12}~({\bf A}) and \ref{def:12}~({\bf B}) and has
length $k-1$. Thus, by induction hypothesis, $\Fil^{p+1}\overline
E\in \mathscr{M}$. Then, by
Lemma \ref{lemm:10}, we deduce that $\overline E\in
\mathscr{M}$.
Clearly, the fact that $\mathscr{M}_{F}\subset \mathscr{M}$
implies that $\mathscr{M}_{S}\subset \mathscr{M}$. Thus, to
prove the theorem, it only remains to show that $\mathscr{M}_{S}$
satisfies the condition \ref{def:9}~\ref{item:13}.
The content of the next result is that
the apparent asymmetry in the definition of
$\mathscr{M}_{S}$ is not real.
\begin{lemma}\label{lemm:9}
Let $\overline E\in \Ob \oV(X)$. Then there is a morphism $f\colon
\overline
E\to \overline F$ with $\overline F$ and
$\ocone(f)$ in $\mathscr{M}_{F}$ if and only if there is a
morphism $g\colon \overline G\to \overline E$ with
$\overline G$ and
$\ocone(g)$ in $\mathscr{M}_{F}$.
\end{lemma}
\begin{proof}
Assume that there is a morphism $f\colon \overline
E\to \overline F$ with $\overline F$ and
$\ocone(f)$ in $\mathscr{M}_{F}$. Then, write $\overline
G= \ocone(f)[-1]$ and let $g\colon \overline G\to
\overline E$ be the natural map. By hypothesis, $\overline
G\in \mathscr{M}_{F}$. Moreover, since there is a natural isometry
\begin{displaymath}
\ocone(\ocone(\overline E,\overline
F)[-1],\overline E)\cong
\ocone(\ocone(\Id_{\overline E})[-1],\overline
F),
\end{displaymath}
by Example \ref{exm:2} and Lemma \ref{lemm:10} we obtain that
$\ocone(g)\in \mathscr{M}_{F}$. Thus we have proved one
implication. The proof of the other implication is analogous.
\end{proof}
Let now $f\colon \overline E\to \overline F$ be a morphism of
complexes with $\overline E, \overline F\in
\mathscr{M}_{S}$. We want to show that $\ocone(f)\in
\mathscr{M}_{S}$. By Lemma \ref{lemm:9}, there are morphisms of
complexes
$g\colon \overline G\to \overline E$ and
$h\colon \overline H\to \overline F$ with $\overline
G,\ \overline H,\ \ocone(g),\ \ocone(h)\in
\mathscr{M}_{F}$. We consider the map $\overline G\to
\ocone(h)$ induced by $f\circ g$. Then we write
\begin{displaymath}
\overline{G'}=\ocone(\overline G, \ocone(h) )[-1].
\end{displaymath}
By Lemma \ref{lemm:10}, we have that $\overline{G'}\in
\mathscr{M}_{F}$. We denote by $g'\colon G'\to E$ and $k\colon G'\to H$ the
maps $g'(a,b,c)=g(a)$ and $k(a,b,c)=-b$.
There is an exact sequence
\begin{displaymath}
0\to \ocone(h)\to \ocone(g')\to \ocone(g)\to 0
\end{displaymath}
whose constituent short exact sequences are orthogonally split.
Since $\ocone(h)$ and $\ocone(g)$ belong to $\mathscr{M}_{F}$,
Lemma \ref{lemm:10} ensures that $\ocone(g')$ belongs to
$\mathscr{M}_{F}$ as well.
There is a diagram
\begin{equation}\label{eq:51}
\xymatrix{
\overline {G'} \ar[d]_{g'} \ar[r]^{k} & \overline H\ar[d]^{h} \\
\overline E \ar[r]^{f} & \overline F
}
\end{equation}
that commutes up to homotopy. We fix the homotopy $s\colon \overline G'\to
F$ given by $s(a,b,c)=c$. By Lemma \ref{lemm:13} there is
a natural isometry
\begin{displaymath}
\ocone(\ocone(g'),\ocone(h))\cong \ocone(\ocone(-k),\ocone(f)).
\end{displaymath}
Applying Lemma \ref{lemm:10} again, we have that $\ocone(-k)$ and
$\ocone(\ocone(g'),\ocone(h))$ belong to
$\mathscr{M}_{F}$. Therefore $\ocone(f)$ belongs to
$\mathscr{M}_{S}$.
\begin{lemma} \label{lemm:11}
Let $f\colon \overline E\to \overline F$ be a morphism in $\oV(X)$.
\begin{enumerate}
\item \label{item:18} If $\overline E\in \mathscr{M}_{S}$ and $\ocone(f)\in
\mathscr{M}_{F}$ then $\overline F\in \mathscr{M}_{S}$.
\item \label{item:19} If $\overline F\in \mathscr{M}_{S}$ and $\ocone(f)\in
\mathscr{M}_{F}$ then $\overline E\in \mathscr{M}_{S}$.
\end{enumerate}
\end{lemma}
\begin{proof}
Assume that $\overline E\in \mathscr{M}_{S}$ and $\ocone(f)\in
\mathscr{M}_{F}$. Let $g\colon \overline G\to \overline E$ with $\overline G\in
\mathscr{M}_{F}$ and $\ocone(g)\in \mathscr{M}_{F}$. By Lemma
\ref{lemm:10} and Example \ref{exm:2}, $\ocone(\ocone(\Id_{\overline
G}),\ocone(f))\in \mathscr{M}_{F}$. But
there is a natural isometry of complexes
\begin{displaymath}
\ocone(\ocone(\Id_{\overline G}),\ocone(f))\cong
\ocone(\ocone(\ocone(g)[-1],\overline G),\overline F).
\end{displaymath}
Since, by Lemma \ref{lemm:10}, $\ocone(\ocone(g)[-1],\overline G)\in
\mathscr{M}_{F}$, then $\overline F\in \mathscr{M}_{S}$.
The second statement of the lemma is proved using the dual
argument.
\end{proof}
\begin{lemma} \label{lemm:12}
Let $f\colon \overline E\to \overline F$ be a morphism in $\oV(X)$.
\begin{enumerate}
\item \label{item:20} If $\overline E\in \mathscr{M}_{F}$ and $\ocone(f)\in
\mathscr{M}_{S}$ then $\overline F\in \mathscr{M}_{S}$.
\item \label{item:21} If $\overline F\in \mathscr{M}_{F}$ and $\ocone(f)\in
\mathscr{M}_{S}$ then $\overline E\in \mathscr{M}_{S}$.
\end{enumerate}
\end{lemma}
\begin{proof}
Assume that $\overline E\in \mathscr{M}_{F}$ and $\ocone(f)\in
\mathscr{M}_{S}$. Let $g\colon \overline G\to \ocone(f)$ with $\overline G$
and $\ocone(\overline G, \ocone(f))$ in $\mathscr{M}_{F}$. There is a
natural isometry of complexes
\begin{displaymath}
\ocone(\overline G,\ocone(f))\cong \ocone(\ocone(\overline G[-1],\overline
E),\overline F)
\end{displaymath}
that shows $\overline F\in \mathscr{M}_{S}$.
The second statement of the lemma is proved by a dual argument.
\end{proof}
Assume now that $f\colon \overline E\to \overline F$ is a morphism in $\oV(X)$
and $\overline E,\ \ocone(f)\in
\mathscr{M}_{S}$. Let $g\colon \overline G \to \overline E$ with $\overline G,\
\ocone(g)\in \mathscr{M}_{F}$. There is a natural isometry
\begin{displaymath}
\ocone(\ocone(\overline G,\overline E),\ocone(\Id_{\overline F}))
\cong
\ocone(\ocone(\overline G,\overline F),\ocone(\overline E, \overline F)),
\end{displaymath}
that implies $\ocone(\ocone(\overline G,\overline F),\ocone(\overline E, \overline
F))\in \mathscr{M}_{F}$. By Lemma \ref{lemm:11}, we deduce that
$\ocone(\overline G,\overline F)\in \mathscr{M}_{S}$. By Lemma \ref{lemm:12},
$\overline F\in \mathscr{M}_{S}$.
With $f$ as above, the fact that, if $\overline F$ and $\ocone(f)$ belong
to $\mathscr{M}_{S}$ so does $\overline E$, is proved by a similar
argument. In conclusion, $\mathscr{M}_{S}$ satisfies the condition
\ref{def:9}~\ref{item:13}, hence $\mathscr{M}\subset
\mathscr{M}_{S}$, which completes the proof of the
theorem. \end{proof}
The class of meager complexes satisfies the next list of properties, that follow almost directly from Theorem \ref{thm:3}. \begin{theorem} \label{thm:4}
\begin{enumerate}
\item \label{item:22} If $\overline E$ is a meager complex and $\overline
F$ is a hermitian vector bundle, then the complexes
$\overline F \otimes \overline E$, $\Hom(\overline F,\overline E)$ and $\Hom(\overline E,\overline
F)$, with the induced metrics, are meager.
\item \label{item:23} If $\overline E^{\ast,\ast}$ is a bounded
double complex of hermitian vector bundles and all rows (or
columns) are meager complexes,
then the complex $\Tot(\overline E^{\ast,\ast})$ is meager.
\item \label{item:24} If $\overline E$ is a meager complex and $\overline F$ is another
complex of hermitian vector bundles, then the complexes
\begin{align*}
\overline E\otimes \overline F&=\Tot((\overline F^{i}\otimes \overline E^{j})_{i,j}),\\
\uHom (\overline E,\overline F)&= \Tot(\Hom ((\overline E^{-i},\overline F^{j})_{i,j}))\text{
and }\\
\uHom (\overline F,\overline E)&= \Tot(\Hom ((\overline F^{-i},\overline E^{j})_{i,j})),
\end{align*}
are meager.
\item \label{item:25} If $f\colon X\to Y$ is a morphism of smooth complex
varieties and $\overline E$ is a meager complex on $Y$, then $f^{\ast}
\overline E$ is a meager complex on $X$.
\end{enumerate} \end{theorem}
We now introduce the notion of tight morphism.
\begin{definition}\label{def:tight_morphism} A morphism $f\colon\overline E\to \overline F$ in $\oV(X)$ is said to be \emph{tight} if $\ocone(f)$ is a meager complex. \end{definition}
\begin{proposition} \label{prop:8}
\begin{enumerate}
\item Every meager complex is acyclic.
\item Every tight morphism is a quasi-isomorphism.
\end{enumerate} \end{proposition} \begin{proof}
Let $\overline E\in \mathscr{M}_{F}(X)$. Let $\Fil$ be any
filtration that satisfies conditions
\ref{def:12}~({\bf A}) and \ref{def:12}~({\bf B}).
By definition, the
complexes $\Gr_{\Fil}^{p}\overline E$ belong to $\mathscr{M}_{0}$,
so they are acyclic. Hence
$\overline E$ is acyclic.
If $\overline E\in
\mathscr{M}_{S}(X)$, let $\overline F$ and $\ocone(f)$
be as in Definition \ref{def:12}~\ref{item:32}. Then, $\overline F$
and
$\ocone(f)$ are acyclic, hence $\overline E$ is also
acyclic. Thus we have proved the first statement.
The second statement is a direct consequence of the first one. \end{proof}
Many arguments used for proving that a certain complex is meager or a certain morphism is tight involve cumbersome diagrams. In order to ease these arguments we will develop a calculus of acyclic complexes.
Before starting we need some preliminary lemmas.
\begin{lemma} \label{lemm:16}
Let $\overline E$, $\overline F$ be objects of $\oV(X)$. Then the following
conditions are equivalent.
\begin{enumerate}
\item \label{item:37} There exists an object $\overline G$ and a diagram
\begin{displaymath}
\xymatrix{
& \overline G \ar[dl]^{\sim}_{f} \ar[dr]^{g}&\\
\overline E && \overline F,}
\end{displaymath}
such that $\ocone(g)\oplus \ocone(f)[1]$ is meager.
\item \label{item:38}
There exists an object $\overline G$ and a diagram
\begin{displaymath}
\xymatrix{
& \overline G \ar[dl]^{\sim}_{f} \ar[dr]^{g}&\\
\overline E && \overline F,}
\end{displaymath}
such that $f$ and $g$ are tight morphisms.
\end{enumerate} \end{lemma} \begin{proof}
Clearly, \ref{item:38} implies \ref{item:37}. To prove the converse
implication, if $\overline G$ satisfies the conditions of \ref{item:37},
we put $\overline G'=\overline G\oplus \ocone(f)$ and consider the morphisms $f'\colon \overline G'\to
\overline E$ and $g'\colon \overline G'\to \overline F$ induced by the first projection $\overline G'\to
\overline G$. Then
\begin{displaymath}
\ocone(f')=\ocone(f)\oplus \ocone(f)[1],
\end{displaymath}
that is meager because $\ocone(f)$ is acyclic, and
\begin{displaymath}
\ocone(g')=\ocone(g)\oplus \ocone(f)[1],
\end{displaymath}
that is meager by hypothesis. \end{proof}
\begin{lemma}\label{lemm:15} Any diagram of tight morphisms, of the following types:
\begin{equation}\label{diag:1}
\begin{array}{ccc}
\xymatrix{
\overline E\ar[rd]_{f} & &\overline G\ar[ld]^{g}\\
&\overline F
} &
\quad \quad &
\xymatrix{
&\overline H\ar[rd]^{g'}\ar[ld]_{f'} &\\
\overline E & &\overline G
}\\
(i) && (ii)
\end{array}
\end{equation}
can be completed into a diagram of tight morphisms
\begin{equation}\label{eq:meager_1}
\xymatrix{
&\overline H\ar[ld]_{f'}\ar[rd]^{g'} &\\
\overline E \ar[rd]_{f}& &\overline G\ar[ld]^{g}\\
&\overline F, &
}
\end{equation} which commutes up to homotopy. \end{lemma} \begin{proof}
We prove the statement only for the case (i), the other one being analogous.
Note that there is a natural arrow $\overline
G\to\ocone(f)$. Define
\begin{displaymath}
\overline H=\ocone(\overline G,\ocone(f))[-1].
\end{displaymath}
With this choice, diagram (\ref{eq:meager_1} i) becomes commutative up
to homotopy, taking the projection $H\to F[-1]$ as homotopy. We
first show that $\ocone(\overline H, \overline G)$ is meager. Indeed, there is a
natural isometry
\begin{displaymath}
\ocone(\overline H,\overline G)
\cong\ocone(\ocone(\Id_{\overline G}), \ocone(\overline E,\overline F)[-1])
\end{displaymath}
and the right hand side complex is meager. Now for $\ocone(\overline H,
\overline E)$. By Lemma \ref{lemm:13}, there is an isometry
\begin{equation}
\ocone(\ocone(\overline H, \overline E),\ocone(\overline G, \overline F))
\cong
\ocone(\ocone(\overline H, \overline G),\ocone(\overline E, \overline F)).
\end{equation}
The right hand side complex is meager, hence the left hand side is
meager as well. Since, by hypothesis, $\ocone(\overline G, \overline F)$ is
meager, the same is true for $\ocone(\overline H, \overline E)$. \end{proof}
\begin{definition} \label{def:17}
We will say that two complexes $\overline E$ and $\overline F$ are \emph{tightly
related} if any of the equivalent conditions of Lemma
\ref{lemm:16} holds. \end{definition}
It is easy to see, using Lemma \ref{lemm:15}, that to be tightly related is an equivalence relation.
\begin{definition} \label{def:10}
We denote by $\oV(X)/\mathscr{M}$ the set of classes of
tightly related complexes. The class of a complex $\overline E$ will be
denoted $[\overline E]$. \end{definition}
\begin{theorem}[Acyclic calculus] \label{thm:7}
\begin{enumerate}
\item \label{item:34} For a complex $\overline E\in \Ob\oV(X)$, the class
$[\overline E]=0$ if and only if $\overline E\in \mathscr{M}$.
\item \label{item:33} The operation $\oplus$ induces an operation,
that we denote $+$, in $\oV(X)/\mathscr{M}$. With this operation
$\oV(X)/\mathscr{M}$ is an associative abelian semigroup.
\item \label{item:39} For a complex $\overline E$, there exists a complex
$\overline F$ such that $[\overline F]+[\overline E]=0$, if and only if $\overline E$ is acyclic. In
this case $[\overline E[1]]=-[\overline E]$.
\item \label{item:35} For every morphism $f\colon \overline E \to \overline F$,
if $E$ is acyclic, then the equality
\begin{displaymath}
[\ocone(\overline E,\overline F)]=[\overline F]-[\overline E]
\end{displaymath}
holds.
\item \label{item:40} For every morphism $f\colon \overline E \to \overline F$,
if $F$ is acyclic, then the equality
\begin{displaymath}
[\ocone(\overline E,\overline F)]=[\overline F]+[\overline E[1]]
\end{displaymath}
holds.
\item \label{item:36} Given a diagram
\begin{displaymath}
\xymatrix{
\overline{E}'\ar[r]^{f'}\ar[d]_{g'} &\overline{F}'\ar[d]^{g}\\
\overline{E}\ar[r]^{f} &\overline{F}
}
\end{displaymath}
in $\oV(X)$, that commutes up to homotopy, then for every choice
of homotopy we have
\begin{displaymath}
[\ocone(\ocone(f'),\ocone(f))]=[\ocone(\ocone(-g'),\ocone(g))].
\end{displaymath}
\item \label{item:41} Let $f\colon \overline E\to \overline F$, $g\colon \overline
F\to \overline G$ be morphisms of complexes. Then
\begin{align*}
[\ocone(\ocone(g\circ f),\ocone(g))]&=[\ocone(f)[1]],\\
[\ocone(\ocone(f),\ocone(g\circ f))]&=[\ocone(g)].
\end{align*}
If one of $f$ or $g$ is a quasi-isomorphism, then
\begin{displaymath}
[\ocone(g\circ f)]=[\ocone(g)]+[\ocone(f)].
\end{displaymath}
If $g\circ f$ is a quasi-isomorphism, then
\begin{displaymath}
[\ocone(g)]=[\ocone(f)[1]]+[\ocone(g\circ f)].
\end{displaymath}
\end{enumerate} \end{theorem} \begin{proof}
The statements \ref{item:34} and \ref{item:33} are immediate.
For assertion \ref{item:39}, observe that, if $\overline E$ is
acyclic, then $\overline E\oplus \overline E[1]$ is meager. Thus
\begin{displaymath}
[\overline E]+[\overline E[1]]=[\overline E\oplus \overline E[1]]=0.
\end{displaymath}
Conversely, if $[\overline F]+[\overline E]=0$, then $\overline F\oplus \overline E$ is
meager, hence acyclic. Thus $\overline E$ is acyclic.
For property \ref{item:35} we consider the map $\overline F\oplus \overline E[1]\to
\ocone(f)$ defined by the map $\overline F\to \ocone(f)$. There is a
natural isometry
\begin{displaymath}
\ocone(\overline F\oplus \overline E[1],\ocone(f))\cong
\ocone(\overline E\oplus \overline E[1],\ocone(\Id_{F})).
\end{displaymath}
Since the right hand side complex is meager, so is the left hand side. In consequence
\begin{displaymath}
[\ocone(f)]=[\overline F\oplus \overline E[1]]=[\overline F]+[\overline E[1]]=[\overline F]-[\overline E].
\end{displaymath}
Statement \ref{item:40} is proved analogously.
Statement \ref{item:36} is a direct consequence of Lemma
\ref{lemm:13}.
Statement \ref{item:41} is an easy consequence of the previous
properties. \end{proof}
\begin{remark}\label{rem:9}
If $f\colon \overline E \to \overline F$ is a morphism and neither $\overline E$ nor
$\overline F$ are acyclic, then $[\ocone(f)]$ depends on the homotopy
class of $f$ and not only on $\overline E$ and $\overline F$. For instance, let
$\overline E$ be a non-acyclic complex of hermitian bundles. Consider the
zero map and the identity map $0,\Id\colon \overline E\to \overline E$. Since,
by Example \ref{exm:2}, we know that $\ocone (\Id)$ is
meager, then $[\ocone (\Id)]=0$. By contrast,
\begin{displaymath}
[\ocone(0)]=[\overline E]+[\overline E[1]]\not = 0
\end{displaymath}
because $\overline E$ is not acyclic. This implies that we cannot extend
Theorem \ref{thm:7}~\ref{item:35} or \ref{item:40} to the case
when none of the complexes are acyclic. \end{remark}
\begin{corollary}\label{cor:3}
\begin{enumerate}
\item \label{item:42}
Let
\begin{displaymath}
0\longrightarrow \overline E\longrightarrow \overline F\longrightarrow \overline G
\longrightarrow 0
\end{displaymath}
be a short exact sequence in $\oV(X)$ all whose constituent short
exact sequences are orthogonally split. If either $\overline E$ or
$\overline G$ is acyclic, then
\begin{displaymath}
[\overline F]=[\overline E]+[\overline G].
\end{displaymath}
\item \label{item:58} Let $\overline E^{\ast,\ast}$ be a bounded double complex of
hermitian vector bundles. If the columns of $\overline E^{\ast,\ast}$
are acyclic, then
\begin{displaymath}
[\Tot(\overline E^{\ast,\ast})]=\sum_{k}(-1)^{k} [\overline E^{k,\ast}].
\end{displaymath}
If the rows are acyclic, then
\begin{displaymath}
[\Tot(\overline E^{\ast,\ast})]=\sum_{k}(-1)^{k} [\overline E^{\ast,k}].
\end{displaymath}
In particular, if rows and columns are acyclic
\begin{displaymath}
\sum_{k}(-1)^{k} [\overline E^{k,\ast}]=\sum_{k}(-1)^{k} [\overline E^{\ast,k}].
\end{displaymath}
\end{enumerate} \end{corollary} \begin{proof}
The first item follows from Theorem \ref{thm:7}~\ref{item:35}
and \ref{item:40}, by using Remark \ref{rem:2}. The second assertion
follows from the first by induction on the size of the complex, by
using the usual filtration of $\Tot(E^{\ast,\ast})$. \end{proof}
As an example of the use of the acyclic calculus we prove
\begin{proposition}\label{prop:6}
Let $f\colon \overline E\to\overline F$ and $g\colon \overline F\to\overline G$ be morphisms
of complexes. If two of $f,\ g,\ g\circ f$ are tight, then so is
the third. \end{proposition}
\begin{proof}
Since tight morphisms are quasi-isomorphisms, by Theorem
\ref{thm:7}~\ref{item:41}
\begin{displaymath}
[\ocone(g\circ f)]=[\ocone(f)]+[\ocone(g)].
\end{displaymath}
Hence the result follows from \ref{thm:7}~\ref{item:34}. \end{proof}
\begin{definition}\label{def:KA}
We will denote by $\KA(X)$ the set of invertible elements of
$\oV(X)/\mathscr{M}$. This is an abelian group. By Theorem
\ref{thm:7}~\ref{item:39} the group $\KA(X)$
agrees with the image in $\oV(X)/\mathscr{M}$ of the class of
acyclic complexes. \end{definition}
The group $\KA(X)$ is a universal abelian group for additive Bott-Chern classes. More precisely, let us denote by $\oVo(X)$ the full subcategory of $\oV(X)$ of acyclic complexes.
\begin{theorem} \label{thm:8}
Let $\mathscr{G}$ be an abelian group and let $\varphi\colon \Ob \oVo(X) \to
\mathscr{G}$ be an assignment such that
\begin{enumerate}
\item (Normalization) Every complex of the form
\begin{displaymath}
\overline E\colon\quad 0\longrightarrow \overline A
\overset{\Id}{\longrightarrow}
\overline A \longrightarrow 0
\end{displaymath} satisfies $\varphi(\overline E)=0$.
\item (Additivity for exact sequences) For every short exact
sequence in $\oVo(X)$
\begin{displaymath}
0\longrightarrow \overline E\longrightarrow \overline F\longrightarrow \overline G
\longrightarrow 0,
\end{displaymath}
all whose constituent short
exact sequences are orthogonally split, we have
\begin{displaymath}
\varphi(\overline F)=\varphi(\overline E)+\varphi(\overline G).
\end{displaymath}
\end{enumerate} Then $\varphi$ factorizes through a group homomorphism $\widetilde \varphi\colon \KA(X)\to \mathscr{G}$. \end{theorem} \begin{proof}
The second condition tells us that $\varphi$ is a morphism of
semigroups. Thus we only need to show that it vanishes on meager
complexes.
Again by the second condition, it is enough to prove that $\varphi$
vanishes on the class $\mathscr{M}_{0}$. Both conditions together
imply that $\varphi$ vanishes on orthogonally split
complexes. Therefore, by Example
\ref{exm:2}, it vanishes on complexes of the form
$\ocone(\Id_{E})$. Once more by the second condition, if $E$ is acyclic,
\begin{displaymath}
\varphi(E)+\varphi(E[1])=\varphi(\ocone(\Id_{E}))=0.
\end{displaymath}
Thus $\varphi$ vanishes also on the complexes described in Definition \ref{def:11} (ii).
Hence $\varphi$ vanishes on the class $\mathscr{M}$. \end{proof}
\begin{remark}
The considerations of this section carry over to the category of
complex analytic varieties. If $M$ is a complex analytic variety, one thus obtains for
instance a group $\KA^{\text{{\rm an}}}(M)$. Observe that, by the GAGA principle, whenever $X$ is a
proper smooth algebraic variety over ${\mathbb C}$, the group $\KA^{\text{{\rm an}}}(X^{\text{{\rm an}}})$ is
canonically isomorphic to $\KA(X)$. \end{remark}
As an example, we consider the simplest case $\Spec {\mathbb C}$ and we compute the group $\KA(\Spec {\mathbb C})$. Given an acyclic complex $E$ of ${\mathbb C}$-vector spaces, there is a canonical isomorphism \begin{displaymath}
\alpha :\det E\longrightarrow {\mathbb C}. \end{displaymath}
If we have an acyclic complex of hermitian vector bundles $\overline E$, there is an induced metric on $\det E$. If we put on ${\mathbb C}$ the trivial hermitian metric, then there is a well defined positive real number $\|\alpha \|$, namely the norm of the isomorphism $\alpha$.
\begin{theorem}
The assignment $\overline E\mapsto \log\|\alpha \|$ induces an isomorphism
\begin{displaymath}
\widetilde{\tau }\colon \KA(\Spec
{\mathbb C})\overset{\simeq}{\longrightarrow}{\mathbb R}.
\end{displaymath} \end{theorem} \begin{proof}
First, we observe that the assignment in the theorem
satisfies the hypothesis of Theorem \ref{thm:8}. Thus,
$\widetilde{\tau }$ exists and is a group morphism.
Second, for every $a\in {\mathbb R}$ we consider the acyclic complex
\begin{displaymath}
e^{a}:= 0\longrightarrow
\overline{{\mathbb C}}\overset{e^{a}}{\longrightarrow}
\overline{{\mathbb C}}\longrightarrow 0,
\end{displaymath}
where $\overline {{\mathbb C}}$ has the standard metric and the left copy of
$\overline {{\mathbb C}}$ sits in degree $0$.
Since $\widetilde {\tau }([e^{a}])=a$ we deduce that $\widetilde{\tau }$ is
surjective.
Next we prove that the complexes of the form $[e^{a}]$ form a set of
generators of $\KA(\Spec {\mathbb C})$.
Let
$\overline E=(\overline E^{\ast},f^{\ast})$ be an acyclic complex. Let $r=\sum_{i}
\rk(E^{i})$. We will show by induction on $r$ that
$[\overline E]=\sum_{k}(-1)^{i_{k}}[e^{a_{k}}]$ for certain integers
$i_{k}$ and real numbers $a_{k}$. Let $n$ be the
smallest integer such that $f^{n}\colon E^{n}\to E^{n+1}$ is
non-zero. Let $v\in E^{n}\setminus \{0\}$. By acyclicity, $f^{n}$ is
injective, hence $\|f^{n}(v)\|\not=0.$ Set $i_{1}=n$ and
$a_{1}=\log(\|f^{n}(v)\|/\|v\|)$ and consider the diagram
\begin{displaymath}
\xymatrix{
& &0\ar[d] &0\ar[d] & &\\
&0\ar[r] &\overline{{\mathbb C}}\ar[r]^{e^{a_{1}}}\ar[d]^{\gamma ^{n}}
&\overline{{\mathbb C}}\ar[r]\ar[d]^{\gamma ^{n+1}}
&0 \ar[r]\ar[d]
& \dots
\\
&0\ar[r] &\overline E^{n}\ar[r]\ar[d] &\overline E^{n+1}\ar[r]\ar[d] &\overline E^{n+2}\ar[r]\ar[d] &\dots
\\
&0\ar[r] &\overline F^{n}\ar[r]\ar[d] &\overline F^{n+1}\ar[r]\ar[d] &\overline F^{n+2}\ar[r]\ar[d] &\dots
\\
& &0 &0 &0 & &
}
\end{displaymath}
where $\gamma ^{n}(1)=v$, $\gamma ^{n+1}(1)=f^{n}(v)$ and all the
columns are orthogonally split short exact sequences. By Corollary
\ref{cor:3}~\ref{item:42} and Theorem \ref{thm:7}~\ref{item:39}, we have
\begin{displaymath}
[\overline E]=(-1)^{i_{1}}[e^{a_{1}}]+[\overline F].
\end{displaymath}
Thus we deduce the claim.
Considering now the diagram
\begin{displaymath}
\xymatrix{ \overline {\mathbb C} \ar[r]^{e^{a}}\ar[d]_{\Id}& \overline {\mathbb C}\ar[d]^{e^{b}}\\
\overline {\mathbb C} \ar[r]_{e^{a+b}}& \overline {\mathbb C}
}
\end{displaymath}
and using Corollary \ref{cor:3}~\ref{item:58} we deduce that
$[e^{a}]+[e^{b}]=[e^{a+b}]$ and $[e^{-a}]=-[e^{a}]$. Therefore
every element of $\KA(\Spec {\mathbb C})$ is of the form $[e^{a}]$. Hence
$\widetilde{\tau }$ is also injective. \end{proof}
\section{Definition of $\oDb(X)$ and basic
constructions}\label{sec:oDb}
Let $X$ be a smooth algebraic variety over ${\mathbb C}$. We denote by $\Coh(X)$ the abelian category of coherent sheaves on $X$ and by $\Db(X)$ its bounded derived category. The objects of $\Db(X)$ are complexes of quasi-coherent sheaves with bounded coherent cohomology. The reader is referred to \cite{GelfandManin:MR1950475} for an introduction to derived categories. For notational convenience, we also introduce $\Cb(X)$, the abelian category of bounded cochain complexes of coherent sheaves on $X$. Arrows in $\Db(X)$ will be written as $\dashrightarrow$, while arrows in $\Cb(X)$ will be denoted by $\rightarrow$. The symbol $\sim$ will mean either quasi-isomorphism in $\Cb(X)$ or isomorphism in $\Db(X)$. Every functor from $\Db(X)$ to another category will tacitly be assumed to be the derived functor. Therefore we will denote just by $\Rd f_{\ast}$, $\Ld f^{\ast}$, $\otimes^{\Ld}$ and $\Rd\uHom$ the derived direct image, inverse image, tensor product and internal Hom. Finally, we will refer to (complexes of) locally free sheaves by normal upper case letters (such as $F$) whereas we reserve script upper case letters for (complexes of) quasi-coherent sheaves in general (for instance $\mathcal{F}$).
\begin{remark}\label{rem:3}
Because $X$ is in particular a smooth noetherian scheme over ${\mathbb C}$,
every object $\mathcal{F}$ of $\Cb(X)$ admits a quasi-isomorphism
$F\to \mathcal{F}$, with $F$ an object of $\Vb(X)$. Hence, if
$\mathcal{F}$ is an object in $\Db(X)$, then there is an
isomorphism $F\dashrightarrow\mathcal{F}$ in $\Db(X)$, for some
object $F\in\Vb(X)$. In general, the analogous
statement is no longer true if we work with complex manifolds, as
shown by the counterexample \cite[Appendix,
Cor. A.5]{Voisin:counterHcK}.
\end{remark}
For the sake of completeness, we recall how morphisms in $\Db(X)$ between bounded complexes of vector bundles can be represented.
\begin{lemma}\label{lemma:morphisms_Db}
\begin{enumerate}
\item \label{item:26} Let $F, G$ be bounded complexes of vector
bundles on $X$. Every morphism $F\dashrightarrow G$ in $\Db(X)$ may be
represented by a diagram in $\Cb(X)$
\begin{displaymath}
\xymatrix{
&E\ar[ld]_{f}\ar[rd]^{g} &\\
F & &G,
}
\end{displaymath}
where $E\in \Ob \Vb(X)$ and $f$ is a quasi-iso\-morphism.
\item \label{item:27} Let $E$, $E'$, $F$, $G$ be bounded
complexes of
vector bundles on $X$. Let $f$, $f'$, $g$, $g'$
be morphisms in $\Cb(X)$ as in the diagram below, with $f$, $f'$
quasi-isomorphisms. These data define the same morphism $F \dashrightarrow
G$ in $\Db(X)$ if, and only if, there exists a bounded complex of
vector bundles $E''$ and a diagram
\begin{displaymath}
\xymatrix{
& & E''\ar[ld]_{\alpha}\ar[rd]^{\beta} & &\\
&E\ar[ld]_{f}\ar[rrrd] &{}_g\hspace{2cm} {}_{f'}
&E'\ar[llld]\ar[rd]^{g'} &\\
F & & & &G,
}
\end{displaymath}
whose squares are commutative up to homotopy and where $\alpha$
and $\beta$ are quasi-isomorphisms.
\end{enumerate}
\end{lemma}
\begin{proof}
This follows from the equivalence of $\Db(X)$ with the localization
of the homotopy category of $\Cb(X)$ with respect to the class of
quasi-isomorphisms and Remark \ref{rem:3}.
\end{proof}
\begin{proposition} \label{prop:7} Let $f\colon \overline E\to \overline E$ be an
endomorphism in $\oV(X)$ that represents $\Id_{E}$ in $\Db(X)$. Then
$\ocone(f)$ is meager. \end{proposition} \begin{proof}
By Lemma \ref{lemma:morphisms_Db}~\ref{item:27}, the fact that $f$
represents the identity in $\Db(X)$ means that there are diagrams
\begin{displaymath}
\xymatrix{
\overline E' \ar[r]^{\alpha }_{\sim} \ar[d]_{\beta }^{\sim}& \overline E \ar[d]^{\Id_{E}} &&
\overline E' \ar[r]^{\alpha }_{\sim} \ar[d]_{\beta }^{\sim}& \overline E\ar[d]^{f}\\
\overline E \ar[r]_{\Id_{E}}& \overline E, && \overline E \ar[r]_{\Id_{E}}& \overline E,
}
\end{displaymath}
that commute up to homotopy. By Theorem \ref{thm:7}~\ref{item:35}
and \ref{item:36} the equalities
\begin{align*}
[\ocone(\alpha )]-[\ocone(\Id_{E})]&=
[\ocone(\beta )]-[\ocone(\Id_{E})]\\
[\ocone(\alpha )]-[\ocone(\Id_{E})]&= [\ocone(\beta )]-[\ocone(f)]
\end{align*}
hold in the group $\KA(X)$ (observe that these relations do not
depend on the choice of homotopies). Therefore
\begin{displaymath}
[\ocone(f)]=[\ocone(\Id_{E})]=0.
\end{displaymath}
Hence $\ocone(f)$ is meager. \end{proof}
\begin{definition}\label{definition:hermitian_structure}
Let $\mathcal{F}$ be an object of $\Db(X)$. A \textit{hermitian
metric on} $\mathcal{F}$ consists of the following data:
\begin{enumerate}
\item[--] an isomorphism
$E\overset{\sim}{\dashrightarrow}\mathcal{F}$ in $\Db(X)$, where
$E\in \Ob \Vb(X)$;
\item[--] an object $\overline E\in \Ob \oV(X)$, whose underlying complex
is $E$.
\end{enumerate}
We write $\overline{E}\dashrightarrow\mathcal{F}$ to refer to the
data above and we call it a \textit{metrized object of} $\Db(X)$.
\end{definition}
Our next task is to define the category $\oDb(X)$, whose objects are objects of $\Db(X)$ provided with equivalence classes of metrics. We will show that in this category there is a hermitian cone well defined up to isometries.
\begin{lemma}\label{lemm:14}
Let $\overline E, \overline E'\in \Ob(\oV(X))$ and consider an arrow $E\dashrightarrow E'$ in
$\Db(X)$.
Then the following statements are equivalent:
\begin{enumerate}
\item \label{item:30} for any diagram
\begin{equation}\label{diag:2}
\xymatrix{
& E'' \ar[dl]_{\sim} \ar[dr]&\\
E && E',}
\end{equation}
that represents $E \dashrightarrow E'$, and any choice of hermitian metric on
$E''$, the complex
\begin{equation}\label{eq:54}
\ocone(\overline E'',\overline E)[1]\oplus \ocone(\overline E'',\overline E')
\end{equation}
is meager;
\item \label{item:31} there is a diagram (\ref{diag:2})
that represents $E \dashrightarrow E'$, and a choice of hermitian metric on
$E''$, such that the complex (\ref{eq:54}) is meager;
\item \label{item:31bis}there is a diagram (\ref{diag:2})
that represents $E \dashrightarrow E'$, and a choice of hermitian metric on
$E''$, such that the arrows $\overline E''\to \overline E$ and $\overline E''\to \overline
E'$ are tight morphisms.
\end{enumerate} \end{lemma} \begin{proof}
Clearly \ref{item:30} implies \ref{item:31}. To prove the converse
we assume the existence of a $\overline E''$ such that the complex
\eqref{eq:54} is meager, and let $\overline E'''$ be any complex that satisfies the
hypothesis of \ref{item:30}. Then there is a diagram
\begin{displaymath}
\xymatrix{
& & E^{''''}\ar[ld]_{\alpha}\ar[rd]^{\beta} & &\\
&E''\ar[ld]_{f}\ar[rrrd] &{}_g\hspace{2cm} {}_{f'}
&E'''\ar[llld]\ar[rd]^{g'} &\\
E & & & &E'
}
\end{displaymath}
whose squares commute up to homotopy. Using acyclic calculus we have
\begin{multline*}
[\ocone(g')]-[\ocone(f')]=\\
[\ocone(\beta )]+[\ocone(g)]-[\ocone(\alpha )]
-[\ocone(\beta )]-[\ocone(f)]+[\ocone(\alpha )]=\\
[\ocone(g)]-[\ocone(f)]=0.
\end{multline*}
Now repeat the argument of Lemma \ref{lemm:16} to prove that
\ref{item:31} and \ref{item:31bis} are equivalent. The only point is
to observe that the diagram constructed in Lemma \ref{lemm:16}
represents the same morphism in the derived category as the
original diagram. \end{proof}
\begin{definition}\label{def:13}
Let $\mathcal{F}\in \Ob\Db(X)$ and let $\overline E\dashrightarrow \mathcal{F}$ and
$\overline E'\dashrightarrow \mathcal{F}$ be two hermitian metrics on
$\mathcal{F}$. We say that they \emph{fit tight} if the induced
arrow $\overline E\dashrightarrow\overline E'$ satisfies any of the equivalent conditions
of Lemma \ref{lemm:14} \end{definition}
\begin{theorem} \label{thm:5} The relation ``to fit tight'' is an
equivalence relation. \end{theorem} \begin{proof}
The reflexivity and the symmetry are obvious. To prove the
transitivity, consider a diagram
\begin{displaymath}
\xymatrix{
& \overline F \ar[dl]_{f} \ar[dr]^{g}& & \overline F' \ar[dl]_{f'} \ar[dr]^{g'} &\\
\overline E & & \overline E' & & \overline E'' ,
}
\end{displaymath}
where all the arrows are tight morphisms and $f$, $f'$ are quasi-isomorphisms. By Lemma \ref{lemm:15},
this diagram can be completed into a diagram
\begin{displaymath}
\xymatrix{
& & \overline F'' \ar[dl]_{\alpha } \ar[dr]^{\beta }& & \\
& \overline F \ar[dl]_{f} \ar[dr]^{g}& & \overline F' \ar[dl]_{f'} \ar[dr]^{g'} &\\
\overline E & & \overline E' & & \overline E'' ,
}
\end{displaymath}
where all the arrows are tight morphisms and the square commutes up
to homotopy. Now observe that $f\circ\alpha$ and $g'\circ\beta$
represent the morphism $E\dashrightarrow E''$ in $\Db(X)$ and are both tight
morphisms by Proposition \ref{prop:6}. This finishes the proof.
\end{proof}
\begin{definition}\label{def:category_oDb}
We denote by $\oDb(X)$ the category whose objects are pairs
$\ocF=(\mathcal{F},h)$ where $\mathcal{F}$ is an object of $\Db(X)$
and $h$ is an equivalence class of metrics that fit tight, and with
morphisms
\begin{displaymath}
\Hom_{\oDb(X)}(\overline{\mathcal{F}},\overline{\mathcal{G}})
=
\Hom_{\Db(X)}(\mathcal{F},\mathcal{G}).
\end{displaymath}
A class $h$ of metrics will be called \emph{a hermitian structure},
and may be referenced by any representative $\overline E\dashrightarrow \mathcal{F}$
or, if the arrow is clear, by the complex $\overline E$. We will denote by
$\overline 0\in \Ob \oDb(X)$ a zero object of $\Db(X)$ provided with a
trivial hermitian structure given by any meager complex.
If the underlying complex to an object $\overline{\mathcal{F}}$ is
acyclic, then its hermitian structure has a well defined class in
$\KA(X)$. We will use the notation $[\overline{\mathcal{F}}]$ for this
class. \end{definition}
\begin{definition}
A morphism in $\oDb(X)$, $f\colon (\overline E\dashrightarrow\mathcal{F})
\dashrightarrow(\overline F\dashrightarrow \mathcal{G})$, is called \emph{a tight
isomorphism} whenever the underlying morphism $f\colon
\mathcal{F}\dashrightarrow \mathcal{G}$ is an isomorphism and the metric on
$\mathcal{G}$ induced by $f$ and $\overline E$ fits tight with $\overline F$. An
object of $\oDb(X)$ will be called \emph{meager} if it is tightly
isomorphic to the zero object with the trivial metric. \end{definition}
\begin{remark} \label{rem:5} A word of warning should be said about
the use of acyclic calculus to show that a particular map is a tight
isomorphism. There is an assignment $\Ob\oDb(X)\to
\oV(X)/\mathscr{M}$ that sends $\overline E\dashrightarrow \mathcal{F}$ to $[\overline
E]$. This assignment is not injective. For instance, let $r>0$ be a
real number and consider the trivial bundle $\mathcal{O}_{X}$ with
the trivial metric $\|1\|=1$ and with the metric $\|1\|'=1/r$. Then
the product by $r$ induces an isometry between both bundles. Hence,
if $\overline E$ and $\overline E'$ are the complexes that have
$\mathcal{O}_{X}$ in degree $0$ with the above hermitian metrics,
then $[\overline E]=[\overline E']$, but they define different hermitian
structures on $\mathcal{O}_{X}$ because the product by $r$ does not
represent $\Id_{\mathcal{O}_{X}}$.
Thus the right procedure to show that a morphism $f\colon (\overline E\dashrightarrow
\mathcal{F})\dashrightarrow (\overline F\dashrightarrow \mathcal{G})$ is a tight isomorphism, is
to construct a diagram
\begin{displaymath}
\xymatrix{
& \overline G \ar[dl]^{\sim}_{\alpha } \ar[dr]^{\beta }&\\
\overline E && \overline F}
\end{displaymath}
that represents $f$ and use the acyclic calculus to show that
$[\ocone(\beta )]-[\ocone(\alpha )]=0$. \end{remark}
By definition, the forgetful functor $\mathfrak{F}\colon \oDb(X)\to \Db(X)$ is fully faithful. The structure of this functor will be given in the next result that we suggestively summarize by saying that $\oDb(X)$ is a principal fibered category over $\Db(X)$ with structural group $\KA(X)$ provided with a flat connection.
\begin{theorem}\label{thm:13}
The functor $\mathfrak{F}\colon \oDb(X)\to \Db(X)$ defines a
structure of a category fibered in groupoids. Moreover
\begin{enumerate}
\item \label{item:43} The fiber $\mathfrak{F}^{-1}(0)$ is the
groupoid associated to the abelian group
$\KA(X)$. The object $\overline 0$ is the neutral element of $\KA(X)$.
\item \label{item:44} For any object $\mathcal{F}$ of $\Db(X)$, the
fiber $\mathfrak{F}^{-1}(\mathcal{F})$ is the groupoid associated
to a torsor over $\KA(X)$. The action of $\KA(X)$ over
$\mathfrak{F}^{-1}(\mathcal{F})$ is given by orthogonal direct
sum. We will denote this action by $+$.
\item \label{item:45} Every isomorphism $f\colon \mathcal{F}\dashrightarrow
\mathcal{G}$ in $\Db(X)$ determines an isomorphism of
$\KA(X)$-torsors
\begin{displaymath}
\mathfrak{t}_{f}\colon \mathfrak{F}^{-1}(\mathcal{F})\longrightarrow
\mathfrak{F}^{-1}(\mathcal{G}),
\end{displaymath}
that sends the hermitian structure $\overline E\overset{\epsilon }{\dashrightarrow}
\mathcal{F}$ to the hermitian structure $\overline E\overset{f\circ
\epsilon }{\dashrightarrow} \mathcal{G}$. This isomorphism will be called
the parallel transport along $f$.
\item \label{item:46} Given two isomorphisms $f\colon
\mathcal{F}\dashrightarrow \mathcal{G}$ and $g\colon \mathcal{G}\dashrightarrow
\mathcal{H}$, the equality
$$\mathfrak{t}_{g\circ f}=\mathfrak{t}_{g}\circ \mathfrak{t}_{f}$$
holds. \end{enumerate}
\end{theorem} \begin{proof} Recall that $\mathfrak{F}^{-1}(\mathcal{F})$ is the
subcategory of $\oDb(X)$ whose objects satisfy
$\mathfrak{F}(A)=\mathcal{F}$ and whose morphisms satisfy
$\mathfrak{F}(f)=\Id_{\mathcal{F}}$. The first assertion is
trivial. To prove that $\mathfrak{F}^{-1}(\mathcal{F})$ is a torsor
under $\KA(X)$, we need to show that $\KA(X)$ acts freely and
transitively on this fiber. For the freeness, it is enough to
observe that if for $\overline E\in\oV(X)$ and $\overline M\in\oVo(X)$, the
complexes $\overline E$ and $\overline E\oplus \overline M$ represent the same
hermitian structure, then the inclusion $\overline E\hookrightarrow \overline
E\oplus\overline M$ is tight. Hence $\ocone(\overline E, \overline E\oplus \overline M)$ is
meager. Since
\begin{displaymath}
\ocone(\overline E, \overline E\oplus \overline M)=\ocone(\overline E, \overline E)\oplus \overline M
\end{displaymath}
and $\ocone(\overline E, \overline E)$ is meager, we deduce that $\overline M$ is
meager. For the transitivity, any two hermitian structures on
$\mathcal{F}$ are related by a diagram
\begin{displaymath}
\xymatrix{
&\overline E''\ar[ld]_{\sim}^{f}\ar[rd]^{\sim}_{g} &\\
\overline E & &\overline E'.
}
\end{displaymath}
After possibly replacing $\overline E''$ by $\overline E''\oplus\ocone(f)$, we
may assume that $f$ is tight. We consider the natural arrow $\overline
E''\to \overline E'\oplus\ocone(g)[1]$ induced by $g$. Observe that
$\ocone(g)[1]$ is acyclic. Finally, we find
\begin{displaymath}
\ocone(\overline E'', \overline E'\oplus\ocone(g)[1])=\ocone(g)\oplus\ocone(g)[1],
\end{displaymath}
that is meager. Thus the hermitian structure represented by $\overline
E''$ agrees with the hermitian structure represented by $\overline
E'\oplus\ocone(g)[1]$.
The remaining properties are straightforward. \end{proof}
Our next objective is to define the cone of a morphism in $\oDb(X)$. This will be an object of $\oDb(X)$ uniquely defined up to tight isomorphism. Let $f\colon (\overline{E}\dashrightarrow\mathcal{F})\dashrightarrow (\overline{E}'\dashrightarrow\mathcal{G})$ be a morphism in $\oDb(X)$, where $\overline E$ and $\overline E'$ are representatives of the hermitian structures.
\begin{definition}\label{def:her_cone}
A \textit{hermitian cone} of $f$, denoted $\ocone(f)$, is an
object $(\cone(f),h_{f})$ of $\oDb(X)$ where:
\begin{enumerate}
\item[--] $\cone(f)\in\Ob\Db(X)$ is a choice of cone of $f$. Namely
an object of $\Db(X)$ completing $f$ into a distinguished
triangle;
\item[--] $h_{f}$ is a hermitian structure on $\cone(f)$
constructed as follows. The morphism $f$ induces an arrow $E\dashrightarrow
E'$. Choose any bounded complex $E''$ of vector bundles with a
diagram
\begin{displaymath}
\xymatrix{
&E''\ar[ld]^{\sim}\ar[rd] &\\
E & &E'
}
\end{displaymath}
that represents $E\dashrightarrow E'$, and an arbitrary hermitian metric on
$E''$. Put
\begin{equation}\label{eq:her_cone}
\overline{C}(f)=\ocone(\overline{E}'',\overline{E})[1]\oplus\ocone(\overline{E}'',\overline{E}').
\end{equation}
There are morphisms defined as compositions
\begin{displaymath}
\overline{E}'\longrightarrow \ocone(\overline{E}'',\overline{E}')
\longrightarrow \overline{C}(f),
\end{displaymath}
where the second arrow is the natural inclusion, and
\begin{displaymath}
\overline{C}(f) \longrightarrow \ocone(\overline{E}'',\overline{E}')
\longrightarrow \overline{E}''[1] \longrightarrow
\overline{E}[1],
\end{displaymath}
where the first arrow is the natural projection.
These morphisms fit into a natural distinguished triangle completing
$\overline{E}\dashrightarrow\overline{E}'$. By the axioms of a triangulated category,
there is a quasi-isomorphism $\overline{C}(f)\dashrightarrow\cone(f)$ such that
the following diagram (where the rows are distinguished triangles)
\begin{displaymath}
\xymatrix{
\overline{E}\ar@{-->}[r]\ar@{-->}[d] &\overline{E}'\ar@{-->}[r]\ar@{-->}[d]
&\overline{C}(f)\ar@{-->}[d]\ar@{-->}[r] &\overline{E}[1]\ar@{-->}[d]\\
\mathcal{F}\ar@{-->}[r] &\mathcal{G}\ar@{-->}[r]
&\cone(f)\ar@{-->}[r] &\mathcal{F}[1]
}
\end{displaymath}
commutes. We take the hermitian structure that
$\overline{C}(f)\dashrightarrow\cone(f)$ defines on $\cone(f)$. By Theorem
\ref{thm:6bis} below, this hermitian structure does not depend on
the particular choice of arrow $\overline{C}(f)\dashrightarrow\cone(f)$. Moreover,
by Theorem \ref{thm:6}, the hermitian structure will not depend
on the choices of representatives of hermitian structures nor on
the choice of $\overline{E}''$.
\end{enumerate}
\end{definition}
\begin{remark}\label{rem:8}
The factor $\ocone(\overline{E}'',\overline{E})[1]$ has to be seen as a
correction term to take into account the difference of metrics between
$\overline E $ and $\overline E''$. We would have obtained an equivalent
definition using the factor $\ocone(\overline{E}'',\overline{E})[-1]$.
\end{remark}
\begin{theorem}\label{thm:6bis}
Let
\begin{displaymath}
\xymatrix{
\mathcal{F}\ar@{-->}[r]\ar[d]^{\Id}
&\mathcal{G}\ar@{-->}[r]\ar[d]^{\Id}
&\mathcal{H}\ar@{-->}[r]\ar@{-->}[d]^{\alpha }
& \mathcal{F}[1]\ar@{-->}[r] \ar[d]^{\Id}
&\dots\\
\mathcal{F}\ar@{-->}[r]
&\mathcal{G}\ar@{-->}[r]
&\mathcal{H}\ar@{-->}[r]
& \mathcal{F}[1]\ar@{-->}[r]
&\dots
}
\end{displaymath}
be a commutative diagram in $\Db(X)$, where the rows are the same
distinguished triangle. Let $\overline H\dashrightarrow \mathcal{H}$ be any
hermitian structure. Then $\alpha\colon (\overline H\dashrightarrow
\mathcal{H})\dashrightarrow (\overline H\dashrightarrow \mathcal{H}) $ is a tight
isomorphism.
\end{theorem}
\begin{proof}
First of all, we claim that if
$\gamma:\overline{\mathcal{B}}\dashrightarrow\overline{\mathcal{H}}$ is any isomorphism,
then $\gamma^{-1}\circ\alpha\circ\gamma$ is tight if, and only if,
$\alpha$ is tight. Indeed, denote by $\overline G\dashrightarrow\mathcal{B}$ a
representative of the hermitian structure on
$\overline{\mathcal{B}}$. Then there is a diagram
\begin{displaymath}
\xymatrix{
& & &\overline
R\ar[ld]^{t_{1}}_{\sim}\ar[rd]_{t_{2}}^{\sim} & &\\
& &\overline P\ar[ld]^{w_{1}}_{\sim}\ar[rd]_{w_{2}}^{\sim}
& &\overline Q\ar[ld]^{w_{3}}_{\sim}\ar[rd]_{w_{4}}
^{\sim}&\\
&\overline{G}'\ar[ld]_{\sim}^{u}\ar[rd]^{\sim}_{v} &
&\overline{H}'\ar[rd]^{\sim}_{f}\ar[ld]_{\sim}^{g} &
&\overline{G}'\ar[ld]_{\sim}^{v}\ar[rd]^{\sim}_{u}&\\
\overline G\ar@{-->}[rr] & &\overline H\ar@{-->}[rr] &
&\overline H\ar@{-->}[rr] & &\overline G
}
\end{displaymath}
for the liftings of $\gamma^{-1}$, $\alpha$, $\gamma$ to
representatives, as well as for their composites, all of whose squares
are commutative up to homotopy. By acyclic calculus, we have the
following chain of equalities
\begin{multline*}
[\ocone(u\circ w_{1}\circ t_{1})[1]]+[\ocone(u\circ w_{4}\circ
t_{2})]=\\
[\ocone(u)[1]]+[\ocone(v)]+[\ocone(g)[1]]+
[\ocone(f)]+[\ocone(v)[1]]+[\ocone(u)]=\\
[\ocone(g)[1]]+[\ocone(f)].
\end{multline*}
Thus, the right hand side vanishes if and only if the left hand
side vanishes, proving the claim. This observation allows us to reduce
the proof of the theorem to the following situation: consider a
diagram of complexes of hermitian vector bundles
\begin{displaymath}
\xymatrix{
\overline{E}\ar[d]^{\Id}\ar[r]^{f}
&\overline{F}\ar[d]^{\Id}\ar[r]^{\iota}
&\overline{\cone}(f)\ar[r]^{\pi}\ar@{-->}[d]^{\phi}_{\sim} &
\overline{E}[1]\ar[d]^{\Id}\ar[r] &\dots\\
\overline{E}\ar[r]^{f} &\overline{F}\ar[r]^{\iota}
&\overline{\cone}(f)\ar[r]^{\pi} &\overline{E}[1]\ar[r]
&\dots,
}
\end{displaymath}
which commutes in $\Db(X)$. We need to show that $\phi$ is a tight
isomorphism. The commutativity of the diagram translates into the
existence of bounded complexes of hermitian vector bundles
$\overline{P}$ and $\overline{Q}$ and a diagram
\begin{displaymath}
\xymatrix{
& &
&\overline{\cone}(f)\ar[rd]^{\pi}\ar@{-->}[dd]^{\phi}_{\sim}
&\\
\overline{F}\ar@/^/[rrru]^{\iota}\ar@/_/[rrrd]_{\iota}
&\overline{P}\ar[l]_{\hspace{0.2cm} j}^{\sim}\ar[r]^{g}
&\overline{Q}
\ar[ru]_{u}^{\sim}\ar[rd]^{v}_{\sim} & &\overline{E}[1]\\
& &
&\overline{\cone}(f)\ar[ru]_{\pi} &
}
\end{displaymath}
fulfilling the following properties: (a) $j$, $u$, $v$ are
quasi-isomorphisms; (b) the squares formed by $\iota, j, g, u$ and
$\iota, j, g, v$ are commutative up to homotopy; (c) the morphisms
$u$, $v$ induce $\phi$ in the derived category. We deduce a
commutative up to homotopy square
\begin{displaymath}
\xymatrix{
\ocone(g)\ar[d]_{\tilde{v}}^{\sim}\ar[r]^{\tilde{u}}_{\sim}
&\ocone(\iota)\ar[d]^{\tilde{\pi}}_{\sim}\\
\ocone(\iota)\ar[r]^{\tilde{\pi}}_{\sim} &\overline{E}[1].
}
\end{displaymath}
The arrows $\tilde{u}$, $\tilde{v}$ are induced by $j,u$ and $j, v$
respectively. Observe they are quasi-isomorphisms. Also the natural
projection $\tilde{\pi}$ is a quasi-isomorphism. By acyclic
calculus, we have
\begin{displaymath}
[\ocone(\tilde{\pi})]+[\ocone(\tilde{u})]=[\ocone(\tilde{\pi})]+[\ocone(\tilde{v})].
\end{displaymath}
Therefore we find
\begin{equation}\label{eq:59}
[\ocone(\tilde{u})]=[\ocone(\tilde{v})].
\end{equation}
Finally, notice there is an exact sequence
\begin{displaymath}
0\longrightarrow\ocone(u)\longrightarrow
\ocone(\tilde{u})\longrightarrow \ocone(j[1]) \longrightarrow 0,
\end{displaymath}
which is orthogonally split. Therefore,
\begin{equation}\label{eq:60}
[\ocone(\tilde{u})]=[\ocone(u)]+[\ocone(j[1])].
\end{equation}
Similarly we prove
\begin{equation}\label{eq:61}
[\ocone(\tilde{v})]=[\ocone(v)]+[\ocone(j[1])].
\end{equation}
From equations (\ref{eq:59})--(\ref{eq:61}) we infer
\begin{displaymath}
[\ocone(u)[1]]+[\ocone(v)]=0,
\end{displaymath}
as was to be shown.
\end{proof}
\begin{theorem} \label{thm:6} The object $\overline{C}(f)$ of equation
\eqref{eq:her_cone} is well defined up to tight isomorphism.
\end{theorem}
\begin{proof}
We first show the independence on the choice of $\overline E''$, up to
tight isomorphism. To this end, it is enough to assume that there
is a diagram
\begin{displaymath}
\xymatrix{
&& \overline E''' \ar[dl]_{\sim} \ar[rdd]&\\
& \overline E''\ar[dl]_{\sim}\ar[rrd]&&\\
\overline E &&&\overline E'
}
\end{displaymath}
such that the triangle commutes up to homotopy. Fix such a homotopy. Then
\begin{align*}
[\ocone(\ocone(\overline E''',\overline E'),\ocone(\overline E'',\overline E'))]&=
-[\ocone(E''', E'')],\\
[\ocone(\ocone(\overline E''',\overline E),\ocone(\overline E'',\overline E))]&=
-[\ocone(E''', E'')].
\end{align*}
By Lemma \ref{lemm:13}, the left hand sides of these relations
agree, and hence the hermitian structure does not
depend on the choice of $\overline E''$.
We now prove the independence on the choice of the representative
$\overline E$. Let $\overline F\to \overline E$ be a tight morphism. Then we can
construct a diagram
\begin{displaymath}
\xymatrix{
& \overline E''' \ar[ddl]_{\sim} \ar[rd]^{\sim}&&\\
&& \overline E''\ar[dl]_{\sim}\ar[rd]&\\
\overline F \ar[r]^{\sim} &\overline E &&\overline E',
}
\end{displaymath}
where the square commutes up to homotopy. Choose one
homotopy. Taking into account Lemma \ref{lemm:13}, we find
\begin{align*}
[\ocone(\ocone(\overline E''',\overline E'),\ocone(\overline E'',\overline E'))]&=
-[\ocone(E''', E'')],\\
[\ocone(\ocone(\overline E''',\overline F),\ocone(\overline E'',\overline E))]&=
-[\ocone(E''', E'')]+[\ocone(\overline F,\overline
E)]\\
&=-[\ocone(E''', E'')].
\end{align*}
Hence the definitions of $\overline{C}(f)$ using $\overline E$ or $\overline F$ agree
up to tight isomorphism. The remaining possible choices of
representatives are treated analogously.
\end{proof}
\begin{remark}
The construction of $\ocone(f)$ involves the choice of $\cone(f)$,
which is unique up to isomorphism. Since the construction of
$\overline{C}(f)$ \eqref{eq:her_cone} does not depend on the choice of
$\cone(f)$, by Theorem \ref{thm:6bis}, we see that different
choices of $\cone(f)$ give rise to tightly isomorphic hermitian
cones. Therefore $\ocone(f)$ is well defined up to tight isomorphism
and we will usually call it \emph{the}
hermitian cone of $f$. When the morphism is clear, we will also write
$\ocone(\overline{\mathcal{F}},\overline{\mathcal{G}})$ to refer to it. \end{remark}
The hermitian cone satisfies the same relations as the usual cone.
\begin{proposition} \label{prop:10} Let $f\colon \overline {\mathcal{F}}\dashrightarrow
\overline {\mathcal{G}}$ be a morphism in $\oDb(X)$. Then, the natural
morphisms
\begin{gather*}
\ocone(\overline{\mathcal{G}},\ocone(f))\dashrightarrow
\overline{\mathcal{F}}[1],\\
\overline{\mathcal{G}}\dashrightarrow \ocone(\ocone(f)[-1],\overline{\mathcal{F}})
\end{gather*}
are tight isomorphisms. \end{proposition} \begin{proof}
After choosing representatives, there are isometries
\begin{align*}
\ocone(\ocone(\overline{\mathcal{G}},\ocone(f)),
\overline{\mathcal{F}}[1])\cong &
\ocone(\ocone(\Id_{\mathcal{F}}),\ocone(\Id_{\mathcal{G}}))\cong\\
&\ocone(\overline{\mathcal{G}}, \ocone(\ocone(f)[-1],\overline{\mathcal{F}})).
\end{align*}
Since the middle term is meager, the same is true for the other two. \end{proof}
We next extend some basic constructions in $\Db(X)$ to $\oDb(X)$.
\noindent\textbf{Derived tensor product.} Let $\overline{\mathcal{F}}_{i}=( \overline{E}_{i}\dashrightarrow\mathcal{F}_{i})$, $i=1,2$, be objects of $\oDb(X)$. The derived tensor product $\mathcal{F}_{1}\otimes^{\Ld} \mathcal{F}_{2}$ is endowed with a natural hermitian structure \begin{equation}
\label{eq:29}
\overline{E}_{1}\otimes\overline{E}_{2}\dashrightarrow
\mathcal{F}_{1}\otimes^{\Ld} \mathcal{F}_{2}, \end{equation} that is well defined by Theorem \ref{thm:4}~\ref{item:24}. We write $\overline{\mathcal{F}}_{1}\otimes^{\Ld} \overline{\mathcal{F}}_{2}$ for the resulting object in $\oDb(X)$.
\noindent\textbf{Derived internal $\Hom$ and dual objects.} Let $\overline{\mathcal{F}}_{i}=( \overline{E}_{i}\dashrightarrow\mathcal{F}_{i})$, $i=1,2$, be objects of $\oDb(X)$. The derived internal $\Hom$, $ \Rd\uHom(\mathcal{F}_{1},\mathcal{F}_{2})$ is endowed with a natural hermitian structure \begin{equation}
\label{eq:22}
\uHom(\overline{E}_{1},\overline{E}_{2})\dashrightarrow
\Rd\uHom(\mathcal{F}_{1},\mathcal{F}_{2}), \end{equation} that is well defined by Theorem \ref{thm:4}~\ref{item:24}. We write $\Rd\uHom(\overline{\mathcal{F}}_{1},\overline{\mathcal{F}}_{2})$ for the resulting object in $\oDb(X)$.
In particular, denote by $\overline {\mathcal{O}}_{X}$ the structural sheaf with the metric $\|1\|=1$. Then, for every object $\overline{\mathcal{F}} \in \oDb(X)$, the \emph{dual object} is defined to be \begin{equation}
\label{eq:30}
\overline{\mathcal{F}}^{\vee}=\Rd\uHom(\overline
{\mathcal{F}},\overline{\mathcal{O}}_{X}). \end{equation}
\noindent\textbf{Left derived inverse image.} Let
$g\colon X^{\prime}\rightarrow X$ be a morphism of smooth algebraic
varieties over ${\mathbb C}$ and $\overline{\mathcal{F}}=(\overline E\dashrightarrow \mathcal{F})\in
\Ob \oDb(X)$. Then the left derived inverse image $\Ld
g^{\ast}(\mathcal{F})$ is equipped with the hermitian structure
$g^{\ast}(\overline{E})\dashrightarrow\Ld g^{\ast}(\mathcal{F})$,
that is well defined up to tight isomorphism by Theorem
\ref{thm:4}~\ref{item:25}. As it is customary, we will pretend that
$\Ld g^{\ast}$ is a functor. The notation for the corresponding
object in $\oDb(X^{\prime})$ is $\Ld
g^{\ast}(\overline{\mathcal{F}})$. If $f\colon
\overline{\mathcal{F}}_{1}\dashrightarrow \overline{\mathcal{F}}_{2}$
is a morphism in $\oDb(X)$, we denote by $\Ld g^{\ast}(f)\colon \Ld
g^{\ast}(\overline{\mathcal{F}}_{1})\dashrightarrow\Ld g^{\ast}
(\overline{\mathcal{F}}_{2})$ its left derived inverse image by $g$.
The functor $\Ld g^{\ast}$ preserves the structure of principal
fibered category with flat connection and the formation of hermitian
cones. Namely we have the following result that is easily proved.
\begin{theorem} \label{thm:9} Let $g\colon X^{\prime}\rightarrow X$
be a morphism of smooth algebraic varieties over ${\mathbb C}$ and let
$f\colon \overline {\mathcal{F}}_{1}\dashrightarrow \overline {\mathcal{F}}_{2}$ be a
morphism in $\oDb(X)$.
\begin{enumerate}
\item The functor $\Ld g^{\ast}$ preserves the forgetful functor:
\begin{displaymath}
\mathfrak{F}\circ \Ld g^{\ast}=\Ld g^{\ast}\circ \mathfrak{F}
\end{displaymath}
\item The restriction $\Ld g^{\ast}\colon\KA(X)\to \KA(X')$ is a
group homomorphism.
\item The functor $\Ld g^{\ast}$ is equivariant with respect to the
actions of $\KA(X)$ and $\KA(X')$.
\item The functor $\Ld g^{\ast}$ preserves parallel transport: if
$f$ is an isomorphism, then
\begin{displaymath}
\Ld g^{\ast}\circ \mathfrak{t}_{f}=\mathfrak{t}_{\Ld
g^{\ast}(f)}\circ \Ld g^{\ast}.
\end{displaymath}
\item The functor $\Ld g^{\ast}$ preserves hermitian cones:
\begin{displaymath}
\Ld g^{\ast}(\ocone(f))=\ocone(\Ld g^{\ast}(f)).
\end{displaymath}
\end{enumerate}
\end{theorem}
$\square$
\noindent\textbf{Classes of isomorphisms and distinguished
triangles.}
Let $f\colon \overline {\mathcal{F}}\overset{\sim}{\dashrightarrow}
\overline{\mathcal{G}}$ be an isomorphism in $\oDb(X)$. To it, we attach a
class $[f]\in \KA(X)$ that measures the failure of $f$ to be a tight
isomorphism. This class is defined using the hermitian cone.
\begin{equation}
\label{eq:57}
[f]=[\ocone(f)].
\end{equation}
Observe the abuse of notation: we wrote $[\ocone(f)]$ for the class
in $\KA(X)$ of the hermitian structure of a hermitian cone of
$f$. This is well defined, since the hermitian cone is unique up to
tight isomorphism. Alternatively, we can construct $[f]$ using
parallel transport as follows. There is a unique element $\overline
A\in\KA(X)$ such that
\begin{displaymath}
\overline {\mathcal{G}}=\mathfrak{t}_{f}\overline {\mathcal{F}}+\overline A.
\end{displaymath}
We denote this element by $\overline {\mathcal{G}}-\mathfrak{t}_{f}\overline
{\mathcal{F}}$. Then
\begin{displaymath}
[f]=\overline {\mathcal{G}}-\mathfrak{t}_{f}\overline
{\mathcal{F}}.
\end{displaymath}
By the very definition of parallel transport, both definitions
clearly agree.
\begin{definition}\label{def:6}
A \textit{distinguished triangle in} $\oDb(X)$ consists of a
diagram
\begin{equation}\label{eq:64}
\overline{\tau}=(u,v,w):
\overline{\mathcal{F}}\overset{u}{\dashrightarrow}
\overline{\mathcal{G}}
\overset{v}{\dashrightarrow}\overline{\mathcal{H}}
\overset{w}{\dashrightarrow}\overline{\mathcal{F}}[1]
\overset{u}{\dashrightarrow}\dots
\end{equation}
in $\oDb(X)$, whose underlying morphisms in $\Db(X)$ form a
distinguished triangle. We will say that it is \emph{tightly
distinguished} if there is a commutative diagram
\begin{equation}\label{eq:63}
\xymatrix{
\overline{\mathcal{F}}\ar@{-->}[r]\ar[d]^{\Id}
&\overline{\mathcal{G}}\ar@{-->}[r]\ar[d]^{\Id}
&\ocone(\overline {\mathcal{F}},\overline{\mathcal{G}})
\ar@{-->}[r]\ar@{-->}[d]^{\alpha }
& \overline{\mathcal{F}}[1]\ar@{-->}[r] \ar[d]^{\Id}
&\dots\\
\overline{\mathcal{F}}\ar@{-->}[r]
&\overline{\mathcal{G}}\ar@{-->}[r]
&\overline{\mathcal{H}}\ar@{-->}[r]
& \overline{\mathcal{F}}[1]\ar@{-->}[r]
&\dots,
}
\end{equation}
with $\alpha $ a tight isomorphism.
\end{definition}
To every distinguished triangle in $\oDb(X)$ we can associate a class
in $\KA(X)$ that measures the failure to be tightly distinguished.
Let $\overline {\tau }$ be a distinguished triangle as in
\eqref{eq:64}. Then there is a diagram as in \eqref{eq:63}, but with
$\alpha $ an isomorphism that is not necessarily tight. We then define
\begin{equation}
\label{eq:65}
[\overline{\tau }]=[\alpha ].
\end{equation}
By Theorem \ref{thm:6bis}, the class $[\alpha]$ does not depend on
the particular choice of morphism $\alpha$ in $\oDb(X)$ for which
\eqref{eq:63} commutes. Hence \eqref{eq:65} only depends on
$\overline{\tau}$.
\begin{theorem}\label{thm:10}\
\begin{enumerate}
\item \label{item:51} Let $f$ be an isomorphism in $\oDb(X)$
(respectively $\overline \tau $ a distinguished triangle). Then $[f]=0$
(respectively $[\overline \tau ]=0$) if and only if $f$ is a tight isomorphism
(respectively $\overline \tau $ is tightly distinguished).
\item \label{item:50} Let $g\colon X'\to X$ be a morphism of smooth
complex varieties, let $f$ be an isomorphism in $\oDb(X)$ and
$\overline \tau $ a distinguished triangle in $\oDb(X)$. Then
\begin{displaymath}
\Ld g^{\ast}[f]=[\Ld g^{\ast}f],\qquad \Ld g^{\ast}[\overline \tau
]=[\Ld g^{\ast}\overline \tau ].
\end{displaymath}
In particular, tight isomorphisms and tightly distinguished
triangles are preserved under left derived inverse images.
\item \label{item:16}
Let $f\colon
\overline{\mathcal{F}}\dashrightarrow\overline{\mathcal{G}}$ and
$h\colon \overline{\mathcal{G}}\dashrightarrow
\overline{\mathcal{H}}$ be two isomorphisms in $\oDb(X)$. Then:
\begin{displaymath}
[h\circ f]=[h]+[f].
\end{displaymath}
In particular, $[f^{-1}]=-[f]$.
\item \label{item:17} For any distinguished triangle $\overline \tau $ in
$\oDb(X)$ as in Definition \ref{def:6}, the rotated triangle
\begin{displaymath}
\overline{\tau}'\colon\
\overline{\mathcal{G}}\overset{v}{\dashrightarrow}\overline{\mathcal{H}}
\overset{w}{\dashrightarrow}\overline{\mathcal{F}}[1]
\overset{-u[1]}{\dashrightarrow}\overline{\mathcal{G}}[1]
\overset{v[1]}{\dashrightarrow}\dots
\end{displaymath}
satisfies
\begin{math}
[\overline \tau ']=-[\overline \tau ].
\end{math}
In particular, rotating preserves tightly distinguished
triangles.
\item \label{item:28} For any acyclic complex $\overline{\mathcal{F}}$,
we have
\begin{displaymath}
[\overline{\mathcal{F}}\to 0\to
0\to\dots]=
[\overline{\mathcal{F}}].
\end{displaymath}
\item \label{item:47} If
$f\colon\overline{\mathcal{F}}\dashrightarrow\overline{\mathcal{G}}$ is an isomorphism
in $\oDb(X)$, then
\begin{displaymath}
[0\to\overline{\mathcal{F}}\dashrightarrow\overline{\mathcal{G}}
\to\dots]=[f].
\end{displaymath}
\item \label{item:48} For a commutative diagram of distinguished
triangles
\begin{displaymath}
\xymatrix{
\overline \tau \ar@{-->}[d]
&\overline{\mathcal{F}}\ar@{-->}[r]\ar@{-->}[d]^{f}_{\sim}
&\overline{\mathcal{G}}\ar@{-->}[r]\ar@{-->}[d]^{g}_{\sim}
&\overline{\mathcal{H}}
\ar@{-->}[r]\ar@{-->}[d]^{h} _{\sim}
& \overline{\mathcal{F}}[1]\ar@{-->}[r] \ar@{-->}[d]^{f[1]}_{\sim}
&\dots\\
\overline \tau ' &\overline{\mathcal{F}}'\ar@{-->}[r]
&\overline{\mathcal{G}}'\ar@{-->}[r]
&\overline{\mathcal{H}}'\ar@{-->}[r]
& \overline{\mathcal{F}}'[1]\ar@{-->}[r]
&\dots,
}
\end{displaymath}
the following relation holds:
\begin{displaymath}
[\overline \tau ']
-[\overline \tau ]=
[f]-[g]+[h].
\end{displaymath}
\item \label{item:49} For a commutative diagram of distinguished
triangles
\begin{equation}\label{eq:66}
\xymatrix{
\overline \tau \ar@{-->}[d]
&\overline{\mathcal{F}}\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{G}}\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{H}}
\ar@{-->}[r]\ar@{-->}[d]
& \overline{\mathcal{F}}[1]\ar@{-->}[r] \ar@{-->}[d]
&\dots\\
\overline \tau' \ar@{-->}[d]
&\overline{\mathcal{F}}'\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{G}}'\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{H}}'
\ar@{-->}[r]\ar@{-->}[d]
& \overline{\mathcal{F}}'[1]\ar@{-->}[r] \ar@{-->}[d]
&\dots\\
\overline \tau '' &\overline{\mathcal{F}}''\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{G}}''\ar@{-->}[r] \ar@{-->}[d]
&\overline{\mathcal{H}}''\ar@{-->}[r] \ar@{-->}[d]
& \overline{\mathcal{F}}''[1]\ar@{-->}[r] \ar@{-->}[d]
&\dots\\
&\overline{\mathcal{F}}[1]\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{G}}[1]\ar@{-->}[r]\ar@{-->}[d]
&\overline{\mathcal{H}}[1]
\ar@{-->}[r]\ar@{-->}[d] &\overline{\mathcal{F}}[2]\ar@{-->}[r]
\ar@{-->}[d]
&\dots&\\
&\vdots &\vdots &\vdots & \vdots &&\\
& \overline \eta \ar@{-->}[r] & \overline \eta' \ar@{-->}[r] & \overline \eta''
&&&
}
\end{equation}
the following relation holds:
\begin{displaymath}
[\overline \tau ]-[\overline \tau' ]
+[\overline \tau'' ]=
[\overline \eta ]-[\overline \eta' ]
+[\overline \eta'' ].
\end{displaymath}
\end{enumerate}
\end{theorem}
\begin{proof}
The first two statements are clear. For the third, we may assume
that $f$ and $g$ are realized by quasi-isomorphisms
\begin{displaymath}
f\colon \overline F\longrightarrow \overline G,
\quad g\colon \overline G\longrightarrow \overline H.
\end{displaymath}
Then the result follows from Theorem \ref{thm:7}~\ref{item:41}.
The fourth assertion is a consequence of Proposition
\ref{prop:10}. Then \ref{item:28}, \ref{item:47} and \ref{item:48}
follow from equation \eqref{eq:65} and the fourth statement. The
last property is derived from \ref{item:48} by comparing the
diagram to a diagram of tightly distinguished triangles.
\end{proof}
As an application of the class in $\KA(X)$ attached to a
distinguished triangle, we exhibit a natural morphism
$K_{1}(X)\to\KA(X)$. This is included for the sake of completeness,
but won't be needed in the sequel.
\begin{proposition}\label{prop:K1_to_KA}
There is a natural morphism of groups $K_{1}(X)\to\KA(X)$. \end{proposition} \begin{proof}
We follow the definitions and notations of \cite{Burgos-Wang}. From
\emph{loc. cit.} we know it is enough to construct a morphism of
groups
\begin{equation}\label{eq:K1-KA}
H_{1}(\widetilde{\mathbb{Z}}C(X))\to\KA(X).
\end{equation}
By definition, the piece of degree $n$ of the homological complex
$\widetilde{\mathbb{Z}}C(X)$ is
\begin{displaymath}
\widetilde{\mathbb{Z}}C_{n}(X)=\mathbb{Z}C_{n}(X)/D_{n}.
\end{displaymath}
Here $\mathbb{Z}C_{n}(X)$ stands for the free abelian group on
metrized exact $n$-cubes and $D_{n}$ is the subgroup of degenerate
elements. A metrized exact $1$-cube is a short exact sequence of
hermitian vector bundles. Hence, for such a $1$-cube
$\overline{\varepsilon}$, there is a well defined class in
$\KA(X)$. Observe that this class coincides with the class of
$\overline{\varepsilon}$ thought as a distinguished triangle in
$\oDb(X)$. Because $\KA(X)$ is an abelian group, we deduce the
existence of a morphism of groups
\begin{displaymath}
\mathbb{Z}C_{1}(X)\longrightarrow\KA(X).
\end{displaymath}
From the definition of degenerate cube \cite[Def. 3.3]{Burgos-Wang}
and the construction of $\KA(X)$, this morphism clearly factors
through $\widetilde{\mathbb{Z}}C_{1}(X)$. The definition of the
differential $d$ of the complex $\widetilde{\mathbb{Z}}C(X)$
\cite[(3.2)]{Burgos-Wang} and Theorem \ref{thm:10} \ref{item:49}
ensure that $d\mathbb{Z}C_{2}(X)$ is in the kernel of the
morphism. Hence we derive the existence of a morphism
\eqref{eq:K1-KA}. \end{proof}
\noindent\textbf{Classes of complexes and of direct images of
complexes.} In \cite[Section 2]{BurgosLitcanu:SingularBC} the notion of homological exact sequences of metrized coherent sheaves is treated. In the present article, this situation will arise in later considerations. Therefore we provide the link between the point of view of \emph{loc. cit.} and the formalism adopted here. The reader will find no difficulty to translate it to cohomological complexes.
Consider a homological complex \begin{displaymath}
\overline{\varepsilon }:\quad
0\to \overline{\mathcal{F}}_{m}\to \dots \to
\overline{\mathcal{F}}_{l}\to 0 \end{displaymath} of metrized coherent sheaves, namely coherent sheaves provided with hermitian structures $\overline {\mathcal{F}}_{i}=(\mathcal{F}_{i},\overline F_{i}\dashrightarrow \mathcal{F}_{i})$. We may equivalently see $\overline{\varepsilon}$ as a cohomological complex, by the usual relabeling $\overline{\mathcal{F}}^{-i}=\overline{\mathcal{F}}_{i}$. This will be freely used in the sequel, especially in cone constructions. \begin{definition}\label{def:1}
The complex $\overline \varepsilon $ defines an object $[\overline \varepsilon
]\in \Ob \oDb(X)$ that is determined inductively by the condition
\begin{displaymath}
[\overline \varepsilon ]=\ocone(\overline{\mathcal{F }}_{m}[m],[\sigma _{<m}\overline
\varepsilon ]).
\end{displaymath}
Here $\sigma _{<m}$ is the homological b\^ete filtration and
$\overline{\mathcal{F
}}_{m}$ denotes a cohomological complex concentrated in degree
zero. Hence, $\overline{\mathcal{F }}_{m}[m]$ is a cohomological complex
concentrated in degree $-m$. \end{definition}
If $\overline{E}$ is a hermitian vector bundle on $X$, then \begin{math}
[\overline{\varepsilon}\otimes \overline{E}]=[\overline{\varepsilon}]\otimes \overline{E} \end{math}. According to Definition \ref{def:category_oDb}, if $\varepsilon $ is an acyclic complex, then we also have the corresponding class $[[\overline{\varepsilon}]]$ in $\KA(X)$. We will employ the lighter notation $[\overline{\varepsilon}]$ for this class.
Given a morphism $\varphi\colon \overline{\varepsilon}\to\overline{\mu}$ of bounded complexes of metrized coherent sheaves, the pieces of the complex $\cone(\varepsilon,\mu)$ are naturally endowed with hermitian metrics. We thus get a complex of metrized coherent sheaves $\overline{\cone(\varepsilon,\mu)}$. Hence Definition \ref{def:1} provides an object $[\overline{\cone(\varepsilon,\mu)}]$ in $\oDb(X)$. On the other hand, Definition \ref{def:her_cone} attaches to $\varphi$ the hermitian cone $\ocone([\overline{\varepsilon}],[\overline{\mu}])$, which is well defined up to tight isomorphism. Both constructions actually agree. \begin{lemma}\label{lemm:3}
Let $\overline{\varepsilon}\to\overline{\mu}$ be a morphism of bounded complexes
of metrized coherent sheaves on $X$. Then there is a tight
isomorphism
\begin{displaymath}
\ocone([\overline{\varepsilon}],[\overline{\mu}])\cong [\overline{\cone(\varepsilon,\mu)}].
\end{displaymath} \end{lemma} \begin{proof}
The case when $\varepsilon$ and $\mu$ are both concentrated in a
single degree $d$ is clear. The general case follows by induction
taking into account Definition \ref{def:1}. \end{proof}
Assume now that $f\colon X\to Y$ is a morphism of smooth complex varieties and, for each complex $\Rd f_{\ast}\mathcal{F}_{i}$, we have chosen a hermitian structure $\overline{\Rd f_{\ast} \mathcal{F}_{i}}=(\overline E_{i}\dashrightarrow \Rd f_{\ast} \mathcal{F}_{i})$. Denote by $\overline {\Rd
f_{\ast}\varepsilon }$ this choice of metrics. \begin{definition} \label{def:5} The family of hermitian structures
$\overline {\Rd f_{\ast}\varepsilon }$ defines an object $[\overline{\Rd
f_{\ast}\varepsilon }]\in \Ob \oDb(Y)$ that is determined
inductively by the condition
\begin{displaymath}
[\overline {\Rd f_{\ast}\varepsilon }]=\ocone (\overline {\Rd f_{\ast}
\mathcal{F}_{m}}[m],[\overline {\Rd f_{\ast}\sigma _{< m} \varepsilon} ]).
\end{displaymath} \end{definition}
We remark that the notation $\overline {\Rd
f_{\ast}\varepsilon }$ means that the hermitian structure is chosen after taking the direct image and it is not determined by the hermitian structure on $\overline \varepsilon $.
If $\overline{F}$ is a hermitian vector bundle on $Y$, then the object $[\overline{\Rd f_{\ast}(\varepsilon\otimes f^{\ast} F)}]$ (whose definition is obvious) satisfies \begin{displaymath}
[\overline{\Rd f_{\ast}(\varepsilon\otimes f^{\ast} F)}]
=[\overline{\Rd f_{\ast}\varepsilon}]\otimes\overline{F}. \end{displaymath} Notice also that if $\varepsilon$ is an acyclic complex on $X$, we have the class $[\overline {\Rd f_{\ast}\varepsilon}]\in \KA(Y)$.
Let $\varepsilon\to\mu$ be a morphism of bounded complexes of coherent sheaves on $X$ and $f\colon X\to Y$ a morphism of smooth complex varieties. Fix choices of metrics $\overline{\Rd f_{\ast}\varepsilon}$ and $\overline{\Rd f_{\ast}\mu}$. Then there is an obvious choice of metrics on $\Rd f_{\ast}\cone(\varepsilon,\mu)$, that we denote $\overline{\Rd
f_{\ast}\cone(\varepsilon,\mu)}$, and hence an object $[\overline{\Rd
f_{\ast}\cone(\varepsilon,\mu)}]$ in $\oDb(Y)$. On the other hand, we also have the hermitian cone $\ocone([\overline{\Rd
f_{\ast}\varepsilon}],[\overline{\Rd f_{\ast}\mu}])$. Again both definitions agree.
\begin{lemma}\label{lemm:3bis}
Let $\varepsilon\to\mu$ be a morphism of bounded complexes of
coherent sheaves on $X$ and $f\colon X\to Y$ a morphism of smooth
complex varieties. Assume that families of metrics $\overline{\Rd
f_{\ast}\varepsilon}$ and $\overline{\Rd f_{\ast}\mu}$ are chosen. Then there is a
tight isomorphism
\begin{displaymath}
\ocone([\overline{\Rd f_{\ast}\varepsilon}],[\overline{\Rd f_{\ast}\mu}])\cong [\overline{\Rd f_{\ast}\cone(\varepsilon,\mu)}].
\end{displaymath} \end{lemma} \begin{proof}
If $\varepsilon$ and $\mu$ are concentrated in a single degree $d$,
then the statement is obvious. The proof follows by induction and
Definition \ref{def:5}. \end{proof}
The objects we have defined are compatible with short exact sequences, in the sense of the following statement. \begin{proposition} \label{prop:4} Consider a commutative diagram of
exact sequences of coherent sheaves on $X$
\begin{displaymath}
\xymatrix{
& & &0\ar[d] & &0\ar[d] & \\
&\mu' &0\ar[r] &\mathcal{F}_{m}'\ar[r]\ar[d] &\dots\ar[r] &\mathcal{F}_{l}'\ar[r]\ar[d] &0
\\
&\mu &0\ar[r] &\mathcal{F}_{m}\ar[r]\ar[d] &\dots\ar[r] &\mathcal{F}_{l}\ar[r]\ar[d] &0
\\
&\mu'' &0\ar[r] &\mathcal{F}_{m}''\ar[r]\ar[d] &\dots\ar[r] &\mathcal{F}_{l}''\ar[r]\ar[d] &0
\\
& & &0 & &0 & \\
& & &\xi_{m} &\dots &\xi_{l}. &
}
\end{displaymath}
Let $f\colon X\to Y$ be a morphism of smooth complex varieties and
choose hermitian structures on the sheaves $\mathcal{F}_{j}'$,
$\mathcal{F}_{j}$, $\mathcal{F}_{j}''$ and on the objects $\Rd
f_{\ast}\mathcal{F}_{j}'$, $\Rd f_{\ast}\mathcal{F}_{j}$ and $\Rd
f_{\ast}\mathcal{F}_{j}''$, $j=l,\dots, m$. Then the following
equalities hold in $\KA(X)$ and $\KA(Y)$, respectively:
\begin{align*}
&\sum_{j}(-1)^{j}[\overline{\xi}_{j}]=[\overline{\mu}']-[\overline{\mu}]+[\overline{\mu}''],\\
&\sum_{j}(-1)^{j}[\overline{\Rd f_{\ast}\xi}_{j}]=[\overline{\Rd
f_{\ast}\mu}']-[\overline{\Rd f_{\ast} \mu}]+[\overline{\Rd f_{\ast}\mu}''].
\end{align*} \end{proposition} \begin{proof}
The proposition follows inductively, taking into account Definitions
\ref{def:1} and \ref{def:5} and Theorem \ref{thm:10}~\ref{item:49}. \end{proof} \begin{corollary}\label{cor:5}
Let $\overline{\varepsilon}\to\overline{\mu} $ be a morphism of exact sequences
of metrized coherent sheaves. Let $f\colon X\to Y$ be a morphism of
smooth complex varieties and fix families of metrics $\overline{\Rd
f_{\ast}\varepsilon}$ and $\overline{\Rd f_{\ast}\mu}$. Then the following
equalities in $\KA(X)$ and $\KA(Y)$, respectively, hold
\begin{align}
&[\overline{\cone(\varepsilon,\mu)}]=[\overline{\mu}]-[\overline{\varepsilon}],\\
&[\overline{\Rd f_{\ast}\cone(\varepsilon,\mu})]=[\overline{\Rd
f_{\ast}\mu}]-[\overline{\Rd f_{\ast}\varepsilon}].
\end{align} \end{corollary} \begin{proof}
The result readily follows from lemmas \ref{lemm:3}, \ref{lemm:3bis}
and Proposition \ref{prop:4}. \end{proof}
\noindent\textbf{Hermitian structures on cohomology.} Let $\mathcal{F}$ be an object of $\Db(X)$ and denote by $\mathcal{H}$ its cohomology complex. Observe that $\mathcal{H}$ is a bounded complex with 0 differentials. By the preceding discussion and because $\KA(X)$ acts transitively on hermitian structures, giving a hermitian structure on $\mathcal{H}$ amounts to giving hermitian structures on the individual pieces $\mathcal{H}^{i}$. We show that to these data there is attached a natural hermitian structure on the complex $\mathcal{F}$. This situation will arise when considering cohomology sheaves endowed with $L^2$ metric structures. The construction is recursive. If the cohomology complex is trivial, then we endow $\mathcal{F}$ with the trivial hermitian structure. Otherwise, let $\mathcal{H}^{m}$ be the highest non-zero cohomology sheaf. The canonical filtration $\tau ^{\leq m}$ is given by \begin{displaymath}
\tau^{\leq
m}\mathcal{F}\colon\quad\dots\to\mathcal{F}^{m-2}\to\mathcal{F}^{m-1}
\to\ker(\dd^{m})\to 0. \end{displaymath} By the condition on the highest non-vanishing cohomology sheaf, the natural inclusion is a quasi-isomorphism: \begin{equation}\label{eq:her_coh_1}
\tau^{\leq m}\mathcal{F}\overset{\sim}{\longrightarrow}\mathcal{F}. \end{equation} We also introduce the subcomplex \begin{displaymath}
\widetilde{\mathcal{F}}\colon\quad\dots\to\mathcal{F}^{m-2}\to
\mathcal{F}^{m-1}\to{\im}(\dd^{m-1})\to 0. \end{displaymath} Observe that the cohomology complex of $\widetilde{\mathcal{F}}$ is the b\^ete truncation $\mathcal{H}/\sigma^{\ge m}\mathcal{H}$. By induction, $\widetilde{\mathcal{F}}$ carries an induced hermitian structure. We also have an exact sequence \begin{equation}\label{eq:her_cor_2}
0\to\widetilde{\mathcal{F}}\to\tau^{\leq m}\mathcal{F}\to\mathcal{H}^{m}[-m]\to 0. \end{equation} Taking into account the quasi-isomorphism \eqref{eq:her_coh_1} and the exact sequence \eqref{eq:her_cor_2}, we construct a natural commutative diagram of distinguished triangles in $\Db(X)$ \begin{displaymath}
\xymatrix{
\mathcal{H}^{m}[-m-1]\ar@{-->}[r]^{\hspace{0.6cm} 0}\ar[d]^{\Id} &\widetilde{\mathcal{F}}\ar@{-->}[r]\ar[d]^{\Id}
&\mathcal{F}\ar@{-->}[r]\ar@{-->}[d]^{\sim} &\mathcal{H}^{m}[-m]\ar[d]^{\Id}\\
\mathcal{H}^{m}[-m-1]\ar@{-->}[r]^{\hspace{0.6cm} 0} &\widetilde{\mathcal{F}}\ar@{-->}[r]
&\cone(\mathcal{H}^{m}[-m-1],\widetilde{\mathcal{F}})\ar@{-->}[r] &\mathcal{H}^{m}[-m].
} \end{displaymath} By the hermitian cone construction and Theorem \ref{thm:6bis}, we see that hermitian structures on $\widetilde{\mathcal{F}}$ and $\mathcal{H}^{m}$ induce a well defined hermitian structure on $\mathcal{F}$.
\begin{definition}\label{def:her_coh}
Let $\mathcal{F}$ be an object of $\Db(X)$ with cohomology complex
$\mathcal{H}$. Assume the pieces $\mathcal{H}^{i}$ are endowed with
hermitian structures. The hermitian structure on $\mathcal{F}$
constructed above will be called the \emph{hermitian structure induced by
the hermitian structure on the cohomology complex} and will be denoted $(\mathcal{F},\overline{\mathcal{H}})$. \end{definition}
The following proposition is a direct consequence of the definitions.
\begin{proposition}\label{prop:her_coh}
Let $\varphi\colon\mathcal{F}_{1}\dashrightarrow\mathcal{F}_{2}$ be an
isomorphism in $\Db(X)$. Assume the pieces of the cohomology
complexes $\mathcal{H}_{1}$, $\mathcal{H}_{2}$ of $\mathcal{F}_{1}$,
$\mathcal{F}_{2}$ are endowed with hermitian structures. If the
induced isomorphism in cohomology
$\varphi_{\ast}\colon\mathcal{H}_{1}\to\mathcal{H}_{2}$ is tight,
then $\varphi$ is tight for the induced hermitian structures on
$\mathcal{F}_{1}$ and $\mathcal{F}_{2}$. \end{proposition}
$\square$
\section{Bott-Chern classes for isomorphisms and distinguished triangles in $\oDb(X)$} \label{sec:bott-chern-classes}
In this section we will define Bott-Chern classes for isomorphisms and distinguished triangles in $\oDb(X)$. The natural context where one can define the Bott-Chern classes is that of Deligne complexes. For details about Deligne complexes the reader is referred to \cite{Burgos:CDB} and \cite{BurgosKramerKuehn:cacg}. In this section we will use the same notations as in \cite{BurgosLitcanu:SingularBC} \S1. In particular, the \emph{Deligne algebra of differential forms} on $X$ is denoted by \begin{math}
\mathcal{D}^{\ast}(X,\ast) \end{math} and we use the notation \begin{displaymath}
\widetilde
{\mathcal{D}}^{n}(X,p)=\left. \mathcal{D}^{n}(X,p)\right/
\dd_{\mathcal{D}}\mathcal{D}^{n-1}(X,p). \end{displaymath}
When characterizing axiomatically Bott-Chern classes, the basic tool to exploit the functoriality axiom is to use a deformation parametrized by ${\mathbb P}^{1}$. This argument leads to the following lemma that will be used to prove the uniqueness of the Bott-Chern classes introduced in this section.
\begin{lemma} \label{lemm:1}
Let $X$ be a smooth complex variety. Let
$\widetilde \varphi$ be an assignment that, to each smooth morphism
of complex
varieties $g\colon X'\to X$ and each acyclic complex $\overline A$ of
hermitian vector bundles on $X'$
assigns a class
\begin{displaymath}
\widetilde \varphi(\overline A)\in \bigoplus
_{n,p}\widetilde{\mathcal{D}}^{n}(X',p)
\end{displaymath}
fulfilling the following properties:
\begin{enumerate}
\item (Differential equation) the equality
\begin{math}
\dd_{\mathcal{D}}\widetilde \varphi(\overline A)=0
\end{math}
holds;
\item (Functoriality) for each morphism of smooth complex varieties
$h\colon X''\to X'$ with $g\circ h $ smooth, we have
\begin{math}
h^{\ast} \widetilde \varphi(\overline A)=\widetilde \varphi(h^{\ast}\overline A)
\end{math};
\item (Normalization) if $\overline A$ is orthogonally split, then $\widetilde
\varphi(\overline A)=0$.
\end{enumerate}
Then $\widetilde \varphi=0$. \end{lemma} \begin{proof}
The argument of the proof of \cite[Thm.
2.3]{BurgosLitcanu:SingularBC} applies \emph{mutatis mutandis} to
the present situation. \end{proof}
\begin{definition} \label{def:24}
An \emph{additive genus in Deligne cohomology} is a characteristic class $\varphi$ for vector bundles of any rank in the sense of \cite[Def. 1.5]{BurgosLitcanu:SingularBC} that satisfies the equation \begin{equation}
\label{eq:75}
\varphi(E_{1}\oplus E_{2})=\varphi(E_{1})+\varphi(E_{2}). \end{equation}
\end{definition} Let ${\mathbb D}$ denote the base ring for Deligne cohomology (see \cite{BurgosLitcanu:SingularBC} before Definition 1.5). A consequence of \cite[Thm. 1.8]{BurgosLitcanu:SingularBC} is that there is a bijection between the set of additive genera in Deligne cohomology and the set of power series in one variable ${\mathbb D}[[x]]$. To each power series $\varphi \in {\mathbb D}[[x]]$ there corresponds a unique additive genus such that \begin{displaymath}
\varphi(L)=\varphi (c_{1}(L)) \end{displaymath} for every line bundle $L$.
\begin{definition} \label{def:25}
A \emph{real additive genus} is an additive genus such that the
corresponding power series belong to ${\mathbb R}[[x]]$. \end{definition}
\begin{remark}\label{rem:7}
It is clear that, if $\varphi$ is a real additive genus, then for each
vector bundle $E$ we have
\begin{displaymath}
\varphi(E)\in \bigoplus_{p}H_{\mathcal{D}}^{2p}(X,{\mathbb R}(p))
\end{displaymath}
\end{remark}
We now focus on additive genera; for instance, the Chern character is a real additive genus. Let $\varphi $ be such a genus. Using Chern-Weil theory, to each hermitian vector bundle $\overline E$ on $X$ we can attach a closed characteristic form \begin{displaymath}
\varphi (\overline E)\in \bigoplus_{n,p}\mathcal{D}^{n}(X,p). \end{displaymath} If $\overline E$ is an object of $\oV(X)$, then we define $$\varphi (\overline E)=\sum_{i} (-1)^{i} \varphi (\overline E^{i}).$$ If $\overline E$ is acyclic, following \cite[Sec. 2]{BurgosLitcanu:SingularBC}, we associate to it a Bott-Chern characteristic class \begin{displaymath}
\widetilde \varphi (\overline E)\in
\bigoplus_{n,p}\widetilde {\mathcal{D}}^{n-1}(X,p) \end{displaymath} that satisfies the differential equation \begin{displaymath}
\dd_{\mathcal{D}}\widetilde \varphi (\overline E)=\varphi
(\overline E). \end{displaymath}
In fact, \cite[Thm. 2.3]{BurgosLitcanu:SingularBC} for additive genera can be restated as follows.
\begin{proposition} \label{prop:9}
Let $\varphi $ be an additive genus. Then there
is a unique group homomorphism
\begin{displaymath}
\widetilde \varphi \colon \KA(X)\to \bigoplus_{n,p}\widetilde
{\mathcal{D}}^{n-1}(X,p)
\end{displaymath}
satisfying the properties:
\begin{enumerate}
\item (Differential equation) $\dd_{\mathcal{D}}
\widetilde\varphi(\overline E)= \varphi(\overline E).$
\item (Functoriality) If $f\colon X\to Y$ is a morphism of smooth
complex varieties, then
$
\widetilde{\varphi}(\Ld f^{\ast}(\overline{E}))=f^{\ast}(\widetilde{\varphi}(\overline{E})).
$
\end{enumerate} \end{proposition} \begin{proof}
For the uniqueness, we observe that, if $\widetilde \varphi$ is a group
homomorphism then $\widetilde\varphi (\overline 0)=0$. Hence, if $\overline E$
is an orthogonally split complex, then it is meager and therefore
$\widetilde\varphi (\overline E)=0$. Thus, the assignment that, to each bounded
acyclic complex $\overline E$, associates the class $\widetilde
\varphi([\overline E])$ satisfies the
conditions of \cite[Thm. 2.3]{BurgosLitcanu:SingularBC}, hence is
unique. For the existence, we note that
Bott-Chern classes for additive genera satisfy the
hypothesis of Theorem \ref{thm:8}. Hence the result follows. \end{proof}
\begin{remark} \label{rem:6} If \begin{displaymath}
\overline{\varepsilon }:\quad
0\to \overline{\mathcal{F}}_{m}\to \dots \to
\overline{\mathcal{F}}_{l}\to 0 \end{displaymath} is an acyclic complex of coherent sheaves on $X$ provided with hermitian structures $\overline {\mathcal{F}}_{i}=(\mathcal{F}_{i},\overline F_{i}\dashrightarrow \mathcal{F}_{i})$, by Definition \ref{def:1} we have an object $[\overline \varepsilon ]\in \KA(X)$, hence a class $\widetilde \varphi([\overline \varepsilon ])$. In the case of the Chern character, in \cite[Thm. 2.24]{BurgosLitcanu:SingularBC}, a class $\widetilde {\ch}(\overline \varepsilon )$ is defined. It follows from \cite[Thm. 2.24]{BurgosLitcanu:SingularBC} that both classes agree. That is, $\widetilde{\ch}([\overline \varepsilon ])=\widetilde{\ch}(\overline \varepsilon )$. For this reason we will denote $\widetilde \varphi([\overline \varepsilon ])$ by $\widetilde \varphi(\overline \varepsilon)$. \end{remark}
\begin{definition}\label{definition:forms_complexes}
Let
$\overline{\mathcal{F}}=(\overline E\overset{\sim}{\dashrightarrow}\mathcal{F})$
be an object of $\oDb(X)$. Let $\varphi$ denote
an additive genus. We denote the form
\begin{equation*}
\varphi(\overline{\mathcal{F}})=\varphi(\overline E)\in \bigoplus_{n,p}\mathcal{D}^{n}(X,p)
\end{equation*}
and the class
\begin{equation*}
\varphi(\mathcal{F})=[\varphi(\overline E)]\in
\bigoplus_{n,p}H_{\mathcal{D}}^{n}(X,{\mathbb R}(p)).
\end{equation*}
Note that the form $\varphi(\overline{\mathcal{F}})$ only depends on the hermitian structure
and not on a particular representative thanks to Proposition
\ref{prop:7} and Proposition \ref{prop:9}. The class
$\varphi(\mathcal{F})$ only depends on the object $\mathcal{F}$ and
not on the hermitian structure.
\end{definition} \begin{remark}
The reason to restrict to additive genera when working with the
derived category is now clear: there is no
canonical way to attach a rank to $\oplus_{i\even}\mathcal{F}^{i}$
(respectively $\oplus_{i\odd} \mathcal{F}^{i}$). The naive choice
$\rk(\oplus_{i\even} E^{i})$ (respectively $\rk(\oplus_{i\odd} E^{i})$)
does depend on $E\dashrightarrow\mathcal{F}$. Thus we cannot define
Bott-Chern classes by the general rule from
\cite{BurgosLitcanu:SingularBC}. The case of a multiplicative genus
such as the Todd genus will be considered later. \end{remark}
Next we will construct Bott-Chern classes for isomorphisms in $\oDb(X)$.
\begin{definition}
Let $f\colon \overline{\mathcal{F}}\dashrightarrow\overline{\mathcal{G}}$
be a morphism in $\oDb(X)$ and $\varphi$ an additive
genus. We define the differential form
\begin{equation*}
\varphi(f)=\varphi(\overline{\mathcal{G}})-
\varphi(\overline{\mathcal{F}}).
\end{equation*} \end{definition}
\begin{theorem}\label{theorem:ch_tilde_qiso}
Let $\varphi$ be an additive genus. There is a unique way to attach to
every isomorphism in $\oDb(X)$
\begin{math}
f\colon (\overline{F}\dashrightarrow\mathcal{F})
\overset{\sim}{\dashrightarrow}
(\overline{G}\dashrightarrow\mathcal{G})
\end{math}
a Bott-Chern class
\begin{displaymath}
\widetilde{\varphi}(f)\in\bigoplus_{n,p}\widetilde{\mathcal{D}}^{n-1}(X,p)
\end{displaymath}
such that the following axioms are satisfied:
\begin{enumerate}
\item (Differential equation)
\begin{math}
\dd_{\mathcal{D}}\widetilde{\varphi}(f)=\varphi(f).
\end{math}
\item (Functoriality) If $g\colon X'\rightarrow X$ is a morphism of smooth
noetherian schemes over ${\mathbb C}$, then
\begin{displaymath}
\widetilde{\varphi}(\Ld g^{\ast}(f))=g^{\ast}(\widetilde{\varphi}(f)).
\end{displaymath}
\item (Normalization) If $f$ is a tight isomorphism, then
\begin{math}
\widetilde{\varphi}(f)=0.
\end{math}
\end{enumerate} \end{theorem} \begin{proof} For the existence we define \begin{equation}
\label{eq:56}
\widetilde \varphi(f)=\widetilde \varphi([f]), \end{equation} where $[f]\in\KA(X)$ is the class of $f$ given by equation \eqref{eq:57}. That $\widetilde \varphi$ satisfies the axioms follows from Proposition \ref{prop:9} and Theorem \ref{thm:9}.
We now focus on the uniqueness. Assume such a theory $f\mapsto\widetilde{\varphi}_{0}(f)$ exists. Fix $f$ as in the statement. Since $\widetilde \varphi_{0}$ is well defined, by replacing $\overline F$ by one that is tightly related, we may assume that $f$ is realized by a morphism of complexes \begin{displaymath}
f\colon \overline F\longrightarrow \overline G. \end{displaymath} We factorize $f$ as \begin{displaymath}
\overline F\overset{\alpha }{\longrightarrow }
\overline G\oplus \ocone(\overline F,\overline G)[-1]
\overset{\beta }{\longrightarrow}
\overline G, \end{displaymath} where both arrows are zero on the second factor of the middle complex. Since $\alpha $ is a tight morphism and $\ocone(\overline F,\overline G)[-1]$ is acyclic, we are reduced to the case when $\overline F=\overline G\oplus \overline A$, with $\overline A$ an acyclic complex and $f$ is the projection onto the first factor.
For each smooth morphism $g\colon X'\to X$ and each acyclic complex of vector bundles $\overline E$ on $X'$, we denote \begin{displaymath}
\widetilde \varphi_{1}(\overline E)=\widetilde \varphi_{0} (g^{\ast}\overline G\oplus \overline E\to g^{\ast}\overline G)+\widetilde \varphi(\overline E), \end{displaymath} where $\widetilde \varphi$ is the usual Bott-Chern form for acyclic complexes of hermitian vector bundles associated to $\varphi$. Then $\widetilde \varphi_{1}$ satisfies the hypothesis of Lemma \ref{lemm:1}, so $\widetilde \varphi_{1}=0$. Therefore \begin{math}
\widetilde \varphi(f)=-\widetilde \varphi(\overline A). \end{math} \end{proof}
\begin{proposition}\label{proposition:ch_tilde_comp} Let $f\colon \overline{\mathcal{F}}\dashrightarrow\overline{\mathcal{G}}$ and $g\colon \overline{\mathcal{G}}\dashrightarrow \overline{\mathcal{H}}$ be two isomorphisms in $\oDb(X)$. Then: \begin{displaymath}
\widetilde{\varphi}(g\circ f)=
\widetilde{\varphi}(g)+\widetilde{\varphi}(f). \end{displaymath} In particular, $\widetilde{\varphi}(f^{-1})=-\widetilde{\varphi}(f)$. \end{proposition} \begin{proof}
The statement follows from Theorem \ref{thm:10}~\ref{item:16}. \end{proof}
The Bott-Chern classes behave well under shift.
\begin{proposition}
Let $f\colon \overline{\mathcal{F}}\dashrightarrow \overline{\mathcal{G}}$ be an
isomorphism in $\oDb(X)$. Let $f[i]\colon \overline{\mathcal{F}}[i]\dashrightarrow
\overline{\mathcal{G}}[i]$ be the shifted isomorphism. Then
\begin{displaymath}
(-1)^{i}\widetilde{\varphi}(f[i])=\widetilde{\varphi}(f).
\end{displaymath} \end{proposition} \begin{proof}
The assignment $f\mapsto (-1)^{i}\widetilde{\varphi}(f[i])$
satisfies the characterizing properties of Theorem
\ref{theorem:ch_tilde_qiso}. Hence it agrees with $\widetilde
\varphi$. \end{proof}
The following notation will be sometimes used. \begin{notation} Let $\mathcal{F}$ be an object of $\Db(X)$ and consider two choices of hermitian structures $\overline{\mathcal{F}}$ and $\overline{\mathcal{F}}'$. Then we write \begin{displaymath}
\widetilde{\varphi}(\overline{\mathcal{F}},\overline{\mathcal{F}}')=
\widetilde{\varphi}(\overline{\mathcal{F}}
\overset{\Id}{\dashrightarrow}\overline{\mathcal{F}}'). \end{displaymath} Thus $\dd_{\mathcal{D}}\widetilde{\varphi} (\overline{\mathcal{F}},\overline{\mathcal{F}}') = \varphi(\overline{\mathcal{F}}')-\varphi(\overline{\mathcal{F}}). $ \end{notation}
\begin{example}\label{exm:1}
Let $\overline
{\mathcal{F}}=(\mathcal{F},\mathcal{F}\dashrightarrow\overline E)$
be an object of $\oDb(X)$. Let $\mathcal{H}^{i}$ denote the
cohomology sheaves of $\mathcal{F}$ and assume that we have chosen
hermitian structures $\overline{\mathcal{H}}^{i} $ of each
$\mathcal{H}^{i}$. In the case when the sheaves $\mathcal{H}^{i}$
are vector bundles and the hermitian structures are hermitian
metrics, X. Ma, in the paper \cite{Ma:MR1765553}, has associated to
these data a Bott-Chern class, that we
denote $M(\overline {\mathcal{F}},\overline {\mathcal{H}})$. By
the characterization given by Ma of $M(\overline {\mathcal{F}},\overline
{\mathcal{H}})$, it is immediate that
\begin{displaymath}
M(\overline {\mathcal{F}},\overline {\mathcal{H}})=
\cht(\overline {\mathcal{F}},(\mathcal{F},\overline {\mathcal{H}})),
\end{displaymath}
where $(\mathcal{F},\overline {\mathcal{H}})$ is as in Definition
\ref{def:her_coh}. \end{example}
Our next aim is to construct Bott-Chern classes for distinguished triangles.
\begin{definition}
Let $\overline \tau $ be a distinguished triangle in $\oDb(X)$, \begin{displaymath}
\overline{\tau}\colon\
\overline{\mathcal{F}}\overset{u}{\dashrightarrow}\overline{\mathcal{G}}
\overset{v}{\dashrightarrow}\overline{\mathcal{H}}
\overset{w}{\dashrightarrow}\overline{\mathcal{F}}[1]
\overset{u[1]}{\dashrightarrow}\dots
\end{displaymath} For an additive genus $\varphi$, we attach the differential form
\begin{displaymath}
\varphi(\overline{\tau})=\varphi(\overline{\mathcal{F}})-
\varphi(\overline{\mathcal{G}})+\varphi(\overline{\mathcal{H}}).
\end{displaymath} \end{definition}
Notice that if $\overline \tau$ is tightly distinguished, then $\varphi(\overline \tau )=0$. Moreover, for any distinguished triangle $\overline \tau $ as above, the rotated triangle \begin{displaymath}
\overline{\tau}'\colon\
\overline{\mathcal{G}}\overset{v}{\dashrightarrow}\overline{\mathcal{H}}
\overset{w}{\dashrightarrow}\overline{\mathcal{F}}[1]
\overset{-u[1]}{\dashrightarrow}\overline{\mathcal{G}}[1]
\overset{v[1]}{\dashrightarrow}\dots \end{displaymath} satisfies \begin{math}
\varphi(\overline \tau ')=-\varphi(\overline \tau). \end{math}
\begin{theorem}\label{thm:16}
Let $\varphi$ be an additive genus. There is a unique way to attach to
every distinguished triangle in $\oDb(X)$
\begin{displaymath}
\overline{\tau}:\quad
\overline{\mathcal{F}}\overset{u}{\dashrightarrow}\overline{\mathcal{G}}
\overset{v}{\dashrightarrow}\overline{\mathcal{H}}
\overset{w}{\dashrightarrow}\overline{\mathcal{F}}[1]
\overset{u[1]}{\dashrightarrow}\dots
\end{displaymath}
a Bott-Chern class
\begin{displaymath}
\widetilde{\varphi}(\overline{\tau})
\in\bigoplus_{n,p}\widetilde{\mathcal{D}}^{n-1}(X,p)
\end{displaymath}
such that the following axioms are satisfied:
\begin{enumerate}
\item (Differential equation)
\begin{math}
\dd_{\mathcal{D}}\widetilde{\varphi}(\overline{\tau})=
\varphi(\overline{\tau}).
\end{math}
\item (Functoriality) If $g\colon X^{\prime}\rightarrow X$ is a morphism of smooth noetherian schemes over ${\mathbb C}$, then
\begin{displaymath}
\widetilde{\varphi}(\Ld g^{\ast}(\overline{\tau}))=g^{\ast}\widetilde{\varphi}(\overline{\tau}).
\end{displaymath}
\item (Normalization) If $\overline{\tau}$ is tightly distinguished, then
\begin{math}
\widetilde{\varphi}(\overline{\tau})=0.
\end{math}
\end{enumerate} \end{theorem}
\begin{proof} To show the existence we write \begin{equation}
\label{eq:58}
\widetilde \varphi(\overline \tau)=\widetilde \varphi([\overline \tau ]). \end{equation} Theorem \ref{thm:10} implies that it satisfies the axioms.
To prove the uniqueness, observe that, by replacing representatives of the hermitian structures by tightly related ones, we may assume that the distinguished triangle is represented by \begin{displaymath}
\overline F \longrightarrow \overline G \longrightarrow \ocone(\overline F,\overline
G)\oplus \overline K \longrightarrow \overline F[1], \end{displaymath} with $\overline K$ acyclic. Then Lemma \ref{lemm:1} shows that the axioms imply \begin{math}
\widetilde \varphi(\overline \tau )=\widetilde \varphi(\overline K). \end{math} \end{proof}
\begin{remark}\label{rem:1} The normalization axiom can be replaced by
the apparently weaker condition that $\widetilde \varphi(\overline \tau
)=0$ for all distinguished triangles of the form
\begin{displaymath}
\overline {\mathcal{F}}\dashrightarrow \overline {\mathcal{F}}\overset{\perp}{\oplus}
\overline {\mathcal{G}}\dashrightarrow \overline {\mathcal{G}} \dashrightarrow \overline {\mathcal{F}}[1]
\end{displaymath}
where the maps are the natural inclusion and projection. \end{remark}
Theorem \ref{thm:10}~\ref{item:17}-\ref{item:49} can be easily translated to Bott-Chern classes.
\section{Multiplicative genera, the Todd genus and the category
$\oSm_{\ast/{\mathbb C}}$} \label{sec:multiplicative-genera} Let $\psi $ be a multiplicative genus, such that the piece of degree zero is $\psi ^{0}=1$, and \begin{displaymath}
\varphi=\log(\psi ). \end{displaymath} It is a well defined additive genus, because, by the condition above, the power series $\log (\psi )$ contains only finitely many terms in each degree.
If $\overline \theta $ is either a hermitian vector bundle, a complex of hermitian vector bundles, a morphism in $\oDb(X)$ or a distinguished triangle in $\oDb(X)$ we can write \begin{displaymath}
\psi (\overline \theta )=\exp(\varphi(\overline \theta )). \end{displaymath}
All the results of the previous sections can be translated to the multiplicative genus $\psi $. In particular, if $\overline \theta $ is an acyclic complex of hermitian vector bundles, an isomorphism in $\oDb(X)$ or a distinguished triangle in $\oDb(X)$, we define a Bott-Chern class \begin{displaymath}
\widetilde \psi_{m} (\overline \theta)=
\frac{\exp(\varphi(\overline \theta))-1}{\varphi(\overline \theta)}
\widetilde\varphi(\overline \theta). \end{displaymath}
\begin{theorem}\label{thm:11} The characteristic class $\widetilde
\psi_{m} (\overline \theta)$ satisfies:
\begin{enumerate}
\item (Differential equation)
\begin{math}
\dd_{\mathcal{D}}\widetilde{\psi}_{m}(\overline{\theta
})=\psi(\overline{\theta })-1.
\end{math}
\item (Functoriality) If $g\colon X^{\prime}\rightarrow X$ is a morphism
of smooth noetherian schemes over ${\mathbb C}$, then
\begin{displaymath}
\widetilde{\psi}_{m}(\Ld g^{\ast}(\overline{\theta }))
=g^{\ast}\widetilde{\psi}_{m}(\overline{\theta }).
\end{displaymath}
\item (Normalization) If $\overline{\theta }$ is either a meager
complex, a tight isomorphism or a tightly distinguished triangle,
then
\begin{math}
\widetilde{\psi}_{m}(\overline{\theta })=0.
\end{math}
\end{enumerate}
Moreover $\widetilde \psi_{m} $ is uniquely characterized by these
properties. \end{theorem}
\begin{remark}
For an acyclic complex of vector bundles $\overline E$, using the general
procedure for arbitrary symmetric power series, we can associate a
Bott-Chern class $\widetilde \psi (\overline E)$ (see for instance
\cite[Thm. 2.3]{BurgosLitcanu:SingularBC}) that satisfies the
differential equation
\begin{displaymath}
\dd_{\mathcal{D}} \widetilde \psi (\overline E)= \prod _{k \text{ even }}\psi (\overline
E^{k})-\prod _{k \text{ odd }}\psi (\overline
E^{k}),
\end{displaymath}
whereas $\widetilde \psi _{m}$ satisfies the differential equation
\begin{equation} \label{eq:62}
\dd_{\mathcal{D}} \widetilde \psi _{m}(\overline E)= \prod _{k}\psi (\overline
E^{k})^{(-1)^{k}}-1.
\end{equation}
In fact both Bott-Chern classes are related by
\begin{equation}\label{eq:76}
\widetilde \psi _{m}(\overline E)=
\widetilde \psi (\overline E)\prod_{k \text{ odd}}\psi (\overline E^{k})^{-1}.
\end{equation} \end{remark}
The main example of a multiplicative genus with the above properties is the Todd genus $\Td$. From now on we will treat only this case. Following the above procedure, to the Todd genus we can associate two Bott-Chern classes for acyclic complexes of vector bundles: the one given by the general theory, denoted by $\widetilde {\Td}$, and the one given by the theory of multiplicative genera, denoted $\widetilde {\Td}_{m}$. Both are related by the equation \eqref{eq:76}. Note however that, for isomorphisms and distinguished triangles in $\oDb(X)$, we only have the multiplicative version.
We now consider morphisms between smooth complex varieties and relative hermitian structures.
\begin{definition} Let $f\colon X\rightarrow Y$ be a morphism of smooth complex varieties. The \textit{tangent complex} of $f$ is the complex
\begin{displaymath}
T_{f}\colon \quad 0\longrightarrow T_{X}
\overset{df}{\longrightarrow} f^{\ast}T_{Y}\longrightarrow 0
\end{displaymath}
where $T_{X}$ is placed in degree 0 and $f^{\ast}T_{Y}$ is placed in degree 1. It defines an object $T_{f}\in \Ob \Db(X)$. A \emph{relative hermitian structure on} $f$ is the choice of an object $\overline T_{f}\in \oDb(X)$ over $T_{f}$.
\end{definition}
The following particular situations are of special interest:
\begin{enumerate} \item[--] suppose $f\colon X\hookrightarrow Y$ is a closed immersion. Let
$N_{X/Y}[-1]$ be the normal bundle to $X$ in $Y$, considered as a
complex concentrated in degree 1. By definition, there is a natural
quasi-isomorphism $p\colon T_{f}\overset{\sim}{\rightarrow} N_{X/Y}[-1]$ in
$\Cb(X)$, and hence an isomorphism $p^{-1}\colon N_{X/Y}[-1]\overset{\sim}
{\dashrightarrow} T_{f}$ in $\Db(X)$. Therefore, a hermitian metric $h$
on the vector bundle $N_{X/Y}$ naturally induces a hermitian
structure $p^{-1}\colon (N_{X/Y}[-1],h) \dashrightarrow T_{f}$ on $T_{f}$. Let
$\overline{T}_{f}$ be the corresponding object in $\oDb(X)$. Then we have
\begin{displaymath}
\Td(\overline{T}_{f})=\Td(N_{X/Y}[-1],h)=\Td(N_{X/Y},h)^{-1};
\end{displaymath}
\item[--] suppose $f\colon X\rightarrow Y$ is a smooth morphism. Let
$T_{X/Y}$ be the relative tangent bundle on $X$, considered as a
complex concentrated in degree $0$. By definition, there is a
natural quasi-isomorphism $\iota\colon T_{X/Y}\overset{\sim}{\rightarrow}
T_{f}$ in $\Cb(X)$. Any choice of hermitian metric $h$ on $T_{X/Y}$
naturally induces a hermitian structure
$\iota\colon (T_{X/Y},h)\dashrightarrow T_{f}$. If $\overline{T}_{f}$ denotes
the corresponding object in $\oDb(X)$, then we find
\begin{displaymath}
\Td(\overline{T}_{f})=\Td(T_{X/Y},h).
\end{displaymath}
\end{enumerate}
Let now $g\colon Y\rightarrow Z$ be another morphism of smooth varieties over ${\mathbb C}$. The tangent complexes $T_{f}$, $T_{g}$ and $T_{g\circ f}$ fit into a distinguished triangle in $\Db(X)$
\begin{displaymath}
\mathcal{T}\colon T_f\dashrightarrow T_{g\circ f}\dashrightarrow
\Ld f^{\ast}T_g\dashrightarrow T_f[1].
\end{displaymath}
\begin{definition}\label{def:16} We denote $\oSm_{\ast/{\mathbb C}}$ the following data:
(i) The class $\mathrm{Ob}\,\oSm_{\ast/{\mathbb C}}$ of smooth complex varieties.
(ii) For each $X,Y\in \mathrm{Ob}\,\oSm_{\ast/{\mathbb C}}$, a set of morphisms $\oSm_{\ast/{\mathbb C}}(X,Y)$ whose elements are pairs $\overline f=(f,\overline T_{f})$, where $f\colon X\to Y$ is a projective morphism and $\overline T_{f}$ is a hermitian structure on $T_{f}$. When $\overline f$ is given we will denote the hermitian structure by $T_{\overline f}$. A hermitian structure on $T_{f}$ will also be called a hermitian structure on $f$.
(iii) For each pair of morphisms $\overline f\colon X\to Y$ and $\overline g\colon Y\to Z$, the composition defined as \begin{displaymath}
\overline g\circ \overline f=(g\circ f,\ocone(\Ld f^{\ast} T_{\overline g}[-1], T_{\overline f})). \end{displaymath} \end{definition}
We shall prove (Theorem \ref{thm:17}) that $\oSm_{\ast/{\mathbb C}}$ is a category. Before this, we proceed with some examples emphasizing some properties of the composition rule.
\begin{example}\label{exm:3}
Let $f\colon X\to Y$
and $g\colon Y\to Z$, be projective morphisms of smooth complex
varieties. Assume
that we have chosen hermitian metrics on the tangent vector bundles
$T_{X}$, $T_{Y}$ and $T_{Z}$. Denote by $\overline f$, $\overline g$ and
$\overline{g\circ f}$ the morphisms of $\oSm_{\ast/{\mathbb C}}$ determined by
these metrics. Then
\begin{displaymath}
\overline g\circ \overline f=\overline{g\circ f}.
\end{displaymath}
This is seen as follows. By the choice of metrics, there is a tight
isomorphism
\begin{displaymath}
\ocone(T_{\overline f},T_{\overline{g\circ f}})\to \Ld
f^{\ast}T_{\overline g}.
\end{displaymath}
Then the natural maps
\begin{displaymath}
T_{\overline g\circ \overline f}\to \ocone(\Ld
f^{\ast}T_{\overline g}[-1],T_{\overline f})\to
\ocone(\ocone(T_{\overline f},T_{\overline{g\circ f}})[-1],T_{\overline f})\to
T_{\overline{g\circ f}}
\end{displaymath}
are tight isomorphisms. \end{example}
\begin{example} \label{exm:4}
Let $f\colon X\to Y$
and $g\colon Y\to Z$, be smooth projective morphisms of smooth complex
varieties. Choose hermitian metrics on the relative tangent
vector bundles
$T_{f}$, $T_{g}$ and $T_{g\circ f}$. Denote by $\overline f$, $\overline g$ and
$\overline{g\circ f}$ the morphisms of $\oSm_{\ast/{\mathbb C}}$ determined by
these metrics. There is a short exact sequence of hermitian vector
bundles
\begin{displaymath}
\overline \varepsilon \colon
0\longrightarrow \overline T_{f}
\longrightarrow \overline T_{g\circ f}
\longrightarrow f^{\ast} \overline T_{g}
\longrightarrow 0,
\end{displaymath}
that we consider as an acyclic complex declaring $f^{\ast} \overline T_{g}$
of degree $0$. The morphism $f^{\ast}T_{\overline g}[-1]\dashrightarrow T_{\overline f}$ is
represented by the diagram
\begin{displaymath}
\xymatrix{
& \ocone(T_{\overline f},T_{\overline{g\circ f}})[-1]
\ar[dl]_{\sim} \ar[rd] &\\
f^{\ast}T_{\overline g}[-1] && T_{\overline f}.
}
\end{displaymath} Thus, by the definition of a composition we have \begin{displaymath}
T_{\overline g \circ \overline f}=
\ocone(\ocone(T_{\overline f},T_{\overline{g\circ f}})[-1],f^{\ast}T_{\overline
g}[-1])[1]\oplus
\ocone(\ocone(T_{\overline f},T_{\overline{g\circ f}})[-1],T_{\overline f}). \end{displaymath} In general this hermitian structure is different from $T_{\overline{g\circ
f}}$.
\emph{Claim.} The equality of hermitian structures \begin{equation}
\label{eq:84}
T_{\overline g \circ \overline f}=
T_{\overline{g\circ f}}+ [\overline \varepsilon ] \end{equation} holds. \begin{proof}[Proof of the claim]
We have a commutative diagram of distinguished triangles
\begin{displaymath}
\xymatrix{
\overline{\varepsilon} &T_{\overline{f}}\ar[r]\ar[d]_{\Id} &T_{\overline{g\circ f}}\ar[r]\ar[d]
&f^{\ast}T_{\overline{g}}\ar[d]_{\Id}\ar@{-->}[r] &T_{\overline{f}}[1]\ar[d]_{\Id}\\
\overline{\tau} &T_{\overline{f}}\ar[r] &T_{\overline{g}\circ \overline{f}}\ar[r]
&f^{\ast}T_{\overline{g}}\ar@{-->}[r] &T_{\overline{f}}[1].
}
\end{displaymath} By construction the triangle $\overline{\tau}$ is tightly distinguished, hence $[\overline{\tau}]=0$. Therefore, according to Theorem \ref{thm:10} \ref{item:48}, we have \begin{displaymath}
[T_{\overline{g\circ f}}\rightarrow T_{\overline{g}\circ\overline{f}}]=[\overline{\varepsilon}]. \end{displaymath} The claim follows. \end{proof} \end{example}
\begin{theorem}\label{thm:17} $\oSm_{\ast/{\mathbb C}}$ is a category. \end{theorem}
\noindent\emph{Proof.} The only non-trivial fact to prove is the associativity of the composition, given by the following lemma: \begin{lemma}\label{lem:Sm_cat}
Let $\overline{f}:X\to Y$, $\overline{g}:Y\to Z$ and $\overline{h}:Z\to W$ be
projective morphisms together with hermitian structures. Then
$\overline{h}\circ(\overline{g}\circ\overline{f})=(\overline{h}\circ\overline{g})\circ\overline{f}$. \end{lemma} \begin{proof}
First of all we observe that if the hermitian structures on
$\overline{f}$, $\overline{g}$ and $\overline{h}$ come from fixed hermitian metrics on
$T_{X}$, $T_{Y}$, $T_{Z}$ and $T_{W}$, Example \ref{exm:3} ensures
that the proposition holds. For the general case, it is enough to
see that if the proposition holds for a fixed choice of hermitian
structures $\overline{f}$, $\overline{g}$, $\overline{h}$, and we change the metric on
$f$, $g$ or $h$, then the proposition holds for the new choice of
metrics. We treat, for instance, the case when we change the
hermitian structure on $g$, the proof of the other cases being analogous.
Denote by $\overline{g}'$ the new hermitian structure on
$g$. Then there exists a unique class $\varepsilon\in\KA(Y)$ such
that $T_{\overline{g}'}=T_{\overline{g}}+\varepsilon$. According to the
definitions, we have
\begin{displaymath}
T_{\overline{h}\circ(\overline{g}'\circ\overline{f})}=
\ocone((g\circ
f)^{\ast}T_{\overline{h}}[-1],\ocone(f^{\ast}(T_{\overline{g}}+\varepsilon)[-1],T_{\overline{f}}))
=T_{\overline{h}\circ(\overline{g}\circ\overline{f})}+f^{\ast}\varepsilon.
\end{displaymath}
Similarly, we find
\begin{displaymath}
T_{(\overline{h}\circ\overline{g}')\circ\overline{f}}=
\ocone(f^{\ast}\ocone(g^{\ast}T_{\overline{h}}[-1],T_{\overline{g}})[-1]+
f^{\ast}(-\varepsilon),
T_{\overline{f}})
=T_{(\overline{h}\circ\overline{g})\circ\overline{f}}+f^{\ast}\varepsilon.
\end{displaymath}
By assumption,
$T_{\overline{h}\circ(\overline{g}\circ\overline{f})}=T_{(\overline{h}\circ\overline{g})\circ\overline{f}}$. Hence
the relations above show
\begin{displaymath}
T_{\overline{h}\circ(\overline{g}'\circ\overline{f})}=T_{(\overline{h}\circ\overline{g}')\circ\overline{f}}.
\end{displaymath}
This concludes the proofs of Lemma \ref{lem:Sm_cat} and of Theorem \ref{thm:17}. \end{proof}
Let $f\colon X\to Y$ and $g\colon Y\to Z$ be projective morphisms of smooth complex varieties. By the definition of composition, hermitian structures on $f$ and $g$ determine a hermitian structure on $g\circ f$. Conversely we have the following result.
\begin{lemma}\label{lemm:4}
Let $\overline {g}$ and $\overline {g\circ f}$ be hermitian structures on $g$
and $g\circ f$. Then there is a unique hermitian structure $\overline f$ on
$f$ such that
\begin{equation}
\label{eq:85}
\overline{g\circ f}=\overline g\circ \overline f.
\end{equation} \end{lemma} \begin{proof}
We have the distinguished triangle
\begin{displaymath}
T_{f}\dashrightarrow T_{g\circ f}\dashrightarrow f^{\ast}T_{g}\dashrightarrow T_{f}[1].
\end{displaymath}
The unique hermitian structure that satisfies equation
\eqref{eq:85} is $\ocone(T_{\overline{g\circ
f}},f^{\ast}T_{\overline{g}})[-1]$. \end{proof}
\begin{remark}
By contrast with the preceding result, it is not true in general that
hermitian structures $\overline f$ and $\overline{g\circ f}$ determine a
unique hermitian structure $\overline g$ that satisfies equation
\eqref{eq:85}. For instance, if $X=\emptyset$, then any hermitian
structure on $g$ will satisfy this equation. \end{remark}
If $\Sm_{\ast/{\mathbb C}}$ denotes the category of smooth complex varieties and projective morphisms and $\mathfrak{F}\colon \oSm_{\ast/{\mathbb C}}\to \Sm_{\ast/{\mathbb C}}$ is the forgetful functor, for any object $X$ we have that \begin{align*}
\Ob \mathfrak{F}^{-1}(X)&=\{X\},\\
\Hom_{\mathfrak{F}^{-1}(X)}(X,X)&=\KA(X). \end{align*}
To any arrow $\overline f\colon X\to Y$ in $\oSm_{\ast/{\mathbb C}}$ we associate a Todd form \begin{equation}\label{eq:1}
\Td(\overline f):=\Td(T_{\overline f})\in \bigoplus_{p}\mathcal{D}^{2p}(X,p). \end{equation}
The following simple properties of $\Td(\overline f)$ follow directly from the definitions.
\begin{proposition}
\begin{enumerate}
\item Let $\overline f\colon X\to Y$ and $\overline{g}\colon Y\to Z$ be
morphisms in $\oSm_{\ast/{\mathbb C}}$. Then
\begin{displaymath}
\Td(\overline g\circ\overline f)=f^{\ast}\Td(\overline{g})\bullet\Td(\overline{f}).
\end{displaymath}
\item Let $\overline f,\overline f'\colon X\to Y$ be two morphisms in $\oSm_{\ast/{\mathbb C}}$
with the same underlying algebraic morphism. There is an
isomorphism $\overline \theta \colon T_{\overline f}\to T_{\overline f'}$ whose
Bott-Chern class $\widetilde{\Td}_{m}(\overline\theta)$ satisfies
\begin{displaymath}
\dd_{\mathcal{D}}\widetilde {\Td}_{m}(\overline \theta )=
\Td(T_{\overline f'}) \Td(T_{\overline f})^{-1}-1.
\end{displaymath}
\end{enumerate} \end{proposition}
\newcommand{\etalchar}[1]{$^{#1}$} \newcommand{\noopsort}[1]{} \newcommand{\printfirst}[2]{#1}
\newcommand{\singleletter}[1]{#1} \newcommand{\switchargs}[2]{#2#1} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2}
\end{document} |
\begin{document}
\title[Higher order elliptic systems] {The conormal derivative problem for higher order elliptic systems with irregular coefficients}
\author[H. Dong]{Hongjie Dong} \address[H. Dong]{Division of Applied Mathematics, Brown University, 182 George Street, Providence, RI 02912, USA} \email{Hongjie\[email protected]} \thanks{H. Dong was partially supported by the NSF under agreement DMS-0800129 and DMS-1056737.}
\author[D. Kim]{Doyoon Kim} \address[D. Kim]{Department of Applied Mathematics, Kyung Hee University, 1732 Deogyeong-daero, Giheung-gu, Yongin-si, Gyeonggi-do 446-701, Republic of Korea} \email{[email protected]} \thanks{D. Kim was supported by Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Education, Science and Technology (2011-0013960).}
\subjclass[2000]{35K52, 35J58,35R05}
\date{}
\begin{abstract} We prove $L_p$ estimates of solutions to a conormal derivative problem for divergence form complex-valued higher-order elliptic systems on a half space and on a Reifenberg flat domain. The leading coefficients are assumed to be merely measurable in one direction and have small mean oscillations in the orthogonal directions on each small ball. Our results are new even in the second-order case. The corresponding results for the Dirichlet problem were obtained recently in \cite{DK10}. \end{abstract}
\maketitle
\section{Introduction}
This paper is concerned with $L_p$ theory for higher-order elliptic systems in divergence form with {\em conormal} derivative boundary conditions. Our focus is to seek minimal regularity assumptions on the leading coefficients of elliptic systems defined on regular and irregular domains. The paper is a continuation of \cite{DK09_01,DK10}, where the authors considered higher-order systems in the whole space and on domains with {\em Dirichlet} boundary conditions.
There is a vast literature on $L_p$ theory for second-order and higher-order elliptic and parabolic equations/systems with {\em constant} or {\em uniformly continuous} coefficients. We refer the reader to the classical work \cite{A65,ADN64,Solo,LSU,Fried}. Concerning possibly discontinuous coefficients, a notable class is the set of bounded functions with vanishing mean oscillations (VMO). This class of coefficients was firstly introduced in \cite{CFL1,CFL2} in the case of second-order non-divergence form elliptic equations, and further considered by a number of authors in various contexts, including higher-order equations and systems; see, for instance, \cite{CFF,HHH,Mi06,PS3}.
Recently, in \cite{DK09_01,DK10} the authors studied the {\em Dirichlet} problem for higher-order elliptic and parabolic systems with possibly measurable coefficients. In \cite{DK09_01}, we established the $L_p$-solvability of both divergence and non-divergence form systems with coefficients (called $\text{VMO}_x$ coefficients in \cite{Krylov_2005}) having locally small mean oscillations with respect to the spatial variables, and measurable in the time variable in the parabolic case. While in \cite{DK10}, divergence form elliptic and parabolic systems of arbitrary order are considered in the whole space, on a half space, and on Reifenberg flat domains, with {\em variably partially BMO coefficients}. This class of coefficients was introduced in \cite{Krylov08} in the context of second-order non-divergence form elliptic equations in the whole space, and naturally appears in the homogenization of layered materials; see, for instance, \cite{CKV}. It was later considered by the authors of the present article in \cite{DK09_02,DK10} and by Byun and Wang in \cite{BW10}. Loosely speaking, on each cylinder (or ball in the elliptic case), the coefficients are allowed to be merely measurable in one spatial direction called the {\em measurable direction}, which may vary for different cylinders. It is also assumed that the coefficients have small mean oscillations in the orthogonal directions, and near the boundary the measurable direction is sufficiently close to the ``normal'' direction of the boundary. Note that the boundary of a Reifenberg flat domain is locally trapped in thin discs, which allows the boundary to have a fractal structure; cf. \eqref{eq3.49}. Thus the normal direction of the boundary may not be well defined for Reifenberg flat domains, so instead we take the normal direction of the top surface of these thin discs.
The proofs in \cite{DK09_01,DK10} are in the spirit of \cite{Krylov_2005} by N. V. Krylov, in which the author gave a unified approach of $L_p$ estimates for both divergence and non-divergence second-order elliptic and parabolic equations in the whole space with $\text{VMO}_x$ coefficients. One of the crucial steps in \cite{Krylov_2005} is to establish certain interior {\em mean oscillation estimates}\footnote{Also see relevant early work \cite{Iw83,DM93}.} of solutions to equations with ``simple'' coefficients, which are measurable functions of the time variable only. Then the estimates for equations with $\text{VMO}_x$ coefficients follow from the mean oscillation estimates combined with a perturbation argument. In this connection, we point out that in \cite{DK09_01,DK10} a great deal of efforts were made to derive boundary and interior mean oscillation estimates for solutions to higher-order systems. For systems in Reifenberg flat domains, we also used an idea in \cite{CaPe98}.
In this paper, we study a {\em conormal} derivative problem for elliptic operators in divergence form of order $2m$: \begin{equation} \label{eq0617_02}
\mathcal{L} \textit{\textbf{u}} :=\sum_{|\alpha|\le m,|\beta|\le m}D^\alpha(a_{\alpha\beta}D^\beta\textit{\textbf{u}}), \end{equation} where $\alpha$ and $\beta$ are $d$-dimensional multi-indices, $a_{\alpha\beta}=[a_{\alpha\beta}^{ij}(x)]_{i,j=1}^n$ are $n\times n$ complex matrix-valued functions, and $\textit{\textbf{u}}$ is a complex vector-valued function. For $\alpha=(\alpha_1,\ldots,\alpha_d)$, we use the notation $D^\alpha \textit{\textbf{u}} =D_1^{\alpha_1}\ldots D_d^{\alpha_d} \textit{\textbf{u}}$. All the coefficients are assumed to be bounded and measurable, and $\mathcal{L}$ is uniformly elliptic; cf. \eqref{eq11.28}. Consider the following elliptic system \begin{equation}
\label{eq9.15}
(-1)^m\mathcal{L} \textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha\textit{\textbf{f}}_\alpha \end{equation} on a domain $\Omega$ in $\mathbb{R}^d$, where $\textit{\textbf{f}}_\alpha \in L_p(\Omega)$, $p\in (1,\infty)$, and $\lambda\ge 0$ is a constant. A function $\textit{\textbf{u}}\in W^m_p$ is said to be a weak solution to \eqref{eq9.15} on $\Omega$ with the conormal derivative boundary condition associated with $\textit{\textbf{f}}_\alpha$ (on $\partial \Omega$) if \begin{equation}
\label{eq3.02}
\int_{\Omega}\sum_{|\alpha|\le m,|\beta|\le m}(-1)^{m+|\alpha|} D^\alpha\phi \cdot a_{\alpha\beta}D^\beta\textit{\textbf{u}} +\lambda \phi\cdot \textit{\textbf{u}}\,dx
=\sum_{|\alpha|\le m}\int_{\Omega}(-1)^{|\alpha|}D^\alpha\phi\cdot \textit{\textbf{f}}_\alpha\,dx \end{equation} for any test function $\phi=(\phi^1,\phi^2,\ldots,\phi^n)\in W^m_q(\Omega)$, where $q=p/(p-1)$. We emphasize that the phrase ``associated with $\textit{\textbf{f}}_\alpha$'' is appended after ``the conormal derivative boundary condition'' because for different representations of the right-hand side of \eqref{eq9.15}, even if they are pointwise equal, the weak formulation \eqref{eq3.02} could still be different. In the sequel, we omit this phrase when there is no confusion. We note that the equation above can also be understood as $$
\int_{\Omega}\sum_{|\alpha|\le m,|\beta|\le m}(-1)^{m+|\alpha|} D^\alpha\phi \cdot a_{\alpha\beta}D^\beta\textit{\textbf{u}} +\lambda \phi\cdot \textit{\textbf{u}}\,dx=\textit{\textbf{F}}(\phi)\quad \forall \phi\in W^m_{q}(\Omega), $$ where $\textit{\textbf{F}}$ is a given vector-valued bounded linear functional on $W^m_{q}(\Omega)$. The main objective of the paper is to show the unique $W^m_p(\Omega)$-solvability of \eqref{eq9.15} on a half space or on a possibly unbounded Reifenberg domain with the same regularity conditions on the leading coefficients, that is, {\em variably partially BMO coefficients}, as those in \cite{DK10}. See Section \ref{sec082001} for the precise statements of the assumptions and main results.
Notably, our results are new even for second-order scalar equations. In the literature, an $L_p$ estimate for the conormal derivative problem can be found in \cite{BW05}, where the authors consider second-order divergence elliptic equations without lower-order terms and with coefficients small BMO with respect to all variables on bounded Reifenberg domains. The proof in \cite{BW05} contains a compactness argument, which does not apply to equations with coefficients measurable in some direction discussed in the current paper. For other results about the conormal derivative problem, we refer the reader to \cite{Lieb1} and \cite{Lieb2}.
We prove the main theorems by following the strategy in \cite{DK10}. First, for systems with homogeneous right-hand side and coefficients measurable in one direction, we estimate the H\"older norm of certain linear combinations of $D^m\textit{\textbf{u}}$ in the interior of the domain, as well as near the boundary if the boundary is flat and perpendicular to the measurable direction. Then by using the H\"{o}lder estimates, we proceed to establish mean oscillation estimates of solutions to elliptic systems. As is expected, the obstruction is in the boundary mean oscillation estimates, to which we give a more detailed account. Note that when obtaining mean oscillation estimates of solutions, even in the half space case we do not require the measurable direction to be exactly perpendicular to the boundary, but allow it to be sufficiently close to the normal direction. For the Dirichlet problem in \cite{DK10}, we used a delicate cut-off argument together with a generalized Hardy's inequality. However, this method no longer works for the conormal derivative problem as solutions do not vanish on the boundary. The key observation in this paper is Lemma \ref{lem4.2} which shows that if one modifies the right-hand side a little bit, then the function $\textit{\textbf{u}}$ itself still satisfies the system with the conormal derivative boundary condition on a subdomain with a flat boundary perpendicular to the measurable direction. This argument is also readily adapted to elliptic systems on Reifenberg flat domains with variably partially BMO coefficients.
The corresponding parabolic problem, however, seems to be still out of reach by the argument mentioned above. In fact, in the modified equation in Lemma \ref{lem4.2} there would be an extra term involving $\textit{\textbf{u}}_t$ on the right-hand side. At the time of this writing, it is not clear to us how to estimate this term.
The remaining part of the paper is organized as follows. We state the main theorems in the next section. Section \ref{sec_aux} contains some auxiliary results including $L_2$-estimates, interior and boundary H\"older estimates, and approximations of Reifenberg domains. In Section \ref{sec4} we establish the interior and boundary mean oscillation estimates and then prove the solvability of systems on a half space. Finally we deal with elliptic systems on a Reifenberg flat domain in Section \ref{Reifenberg}.
We finish the introduction by fixing some notation. By $\mathbb{R}^d$ we mean a $d$-dimensional Euclidean space, a point in $\mathbb{R}^d$ is denoted by $x=(x_1,\ldots,x_d)=(x_1,x')$, and $\{e_j\}_{j=1}^d$ is the standard basis of $\mathbb{R}^d$. Throughout the paper, $\Omega$ indicates an open set in $\mathbb{R}^d$. For vectors $\xi,\eta\in \mathbb{C}^n$, we denote $$ (\xi,\eta)=\sum_{i=1}^n \xi^i\overline{\eta^i}. $$ For a function $f$ defined on a subset $\mathcal{D}$ in $\mathbb{R}^{d}$, we set \begin{equation*} (f)_{\mathcal{D}} = \dashint_{\mathcal{D}}
f(x) \, dx= \frac{1}{|\mathcal{D}|} \int_{\mathcal{D}} f(x) \, dx, \end{equation*}
where $|\mathcal{D}|$ is the $d$-dimensional Lebesgue measure of $\mathcal{D}$. Denote \begin{align*} \mathbb{R}^d_+ &= \{(x_1,x') \in \mathbb{R}^d: x_1 > 0\},\\
B_r(x) &= \{ y \in \mathbb{R}^d: |x-y| < r\},\quad B'_r(x') = \{ y' \in
\mathbb{R}^{d-1}: |x'-y'| < r\},\\ B_r^+(x)&=B_r(x)\cap \mathbb{R}^d_+,\quad \Gamma_r(x)=B_r(x)\cap \partial\mathbb{R}^d_+,\quad \Omega_r(x)=B_r(x)\cap \Omega. \end{align*} For a domain $\Omega$ in $\mathbb{R}^d$, we define the solution spaces $W_p^m(\Omega)$ as follows: \begin{equation*} W_p^m(\Omega) =\{u\in L_p(\Omega): D^{\alpha}u \in L_p(\Omega), 1
\le |\alpha| \le m \}, \end{equation*} $$
\|u\|_{W_p^m(\Omega)} = \sum_{|\alpha|\le m}
\|D^{\alpha}u\|_{L_p(\Omega)}. $$ We denote $C_{\text{loc}}^{\infty}(\mathcal{D})$ to be the set of all infinitely differentiable functions on $\mathcal{D}$, and $C_0^{\infty}(\mathcal{D})$ the set of infinitely differentiable functions with compact support $\Subset \mathcal{D}$.
\section{Main results} \label{sec082001}
Throughout the paper, we assume that the $n \times n$ complex-valued coefficient matrices $a_{\alpha\beta}$ are measurable and bounded, and the leading coefficients
$a_{\alpha\beta}$, $|\alpha|=|\beta|=m$, satisfy an ellipticity condition. More precisely, we assume: \begin{enumerate}
\item There exists a constant $\delta \in (0,1)$ such that the leading coefficients $a_{\alpha\beta}$, $|\alpha|=|\beta|=m$, satisfy \begin{equation}
\label{eq11.28}
\delta |\xi|^2 \le
\sum_{|\alpha|=|\beta|=m}\Re(a_{\alpha\beta}(x) \xi_{\beta}, \xi_{\alpha}), \quad
|a_{\alpha\beta}| \le \delta^{-1} \end{equation} for any $ x \in \mathbb{R}^{d}$ and $\xi =
(\xi_{\alpha})_{|\alpha|=m}$, $\xi_{\alpha} \in \mathbb{C}^n$. Here we use $\Re(f)$ to denote the real part of $f$.
\item All the lower-order coefficients $a_{\alpha\beta}$,
$|\alpha| \ne m$ or $|\beta| \ne m$, are bounded by a constant $K\ge 1$. \end{enumerate}
We note that the ellipticity condition \eqref{eq11.28} can be relaxed. For instance, the operator $\mathcal{L}=D_1^4+D_2^4$ is allowed when $d=m=2$. See Remark 2.5 of \cite{DK10}.
Throughout the paper we write $\{\bar{a}_{\alpha\beta}\}_{|\alpha|=|\beta|=m} \in \mathbb{A}$ whenever the $n \times n$ complex-valued matrices $\bar{a}_{\alpha\beta}=\bar{a}_{\alpha\beta}(y_1)$ are measurable functions satisfying the condition \eqref{eq11.28}. For a linear map $\mathcal{T}$ from $\mathbb{R}^d$ to $\mathbb{R}^d$, we write $\mathcal{T} \in \mathbb{O}$ if $\mathcal{T}$ is of the form $$ \mathcal{T}(x) = \rho x + \xi, $$ where $\rho$ is a $d \times d$ orthogonal matrix and $\xi \in \mathbb{R}^d$.
Let $\mathcal{L}$ be the elliptic operator defined in \eqref{eq0617_02}. Our first result is about the conormal derivative problem on a half space. The following mild regularity assumption is imposed on the leading coefficients, with a parameter $\gamma \in (0,1/4)$ to be determined later. \begin{assumption}[$\gamma$] \label{assumption20100901} There is a constant $R_0\in (0,1]$ such that the following hold with $B:=B_r(x_0)$.
(i) For any $x_0\in \mathbb{R}^d_+$ and any $r\in \left(0,R_0\right]$ so that
$B\subset \mathbb{R}^{d}_+$, one can find $\mathcal{T}_B \in \mathbb{O}$ and coefficient matrices $\{\bar a_{\alpha\beta}\}_{|\alpha|=|\beta|=m} \in \mathbb{A}$ satisfying \begin{equation}
\label{eq10_23}
\sup_{|\alpha|=|\beta|=m}\int_B |a_{\alpha\beta}(x) -
\bar{a}_{\alpha\beta}(y_1)| \, dx \le \gamma |B|, \end{equation} where $y = \mathcal{T}_B (x)$.
(ii) For any $x_0\in \partial \mathbb{R}^d_+$ and any $r\in (0,R_0]$, one can find $\mathcal{T}_B \in \mathbb{O}$ satisfying $\rho_{11}\ge \cos (\gamma/2)$
and coefficient matrices $\{\bar a_{\alpha\beta}\}_{|\alpha|=|\beta|=m} \in \mathbb{A}$ satisfying \eqref{eq10_23}. \end{assumption}
The condition $\rho_{11}\ge \cos (\gamma/2)$ with a sufficiently small $\gamma$ means that at any boundary point the $y_1$-direction is sufficiently close to the $x_1$-direction, i.e., the normal direction of the boundary.
\begin{theorem}[Systems on a half space]
\label{thm3} Let $\Omega=\mathbb{R}^d_+$, $p \in (1,\infty)$, and $$
\textit{\textbf{f}}_\alpha= (f_\alpha^1, \ldots, f_\alpha^n)^{\text{tr}} \in L_p(\Omega), \quad |\alpha|\le m. $$ Then there exists a constant $\gamma=\gamma(d,n,m,p,\delta)$ such that, under Assumption \ref{assumption20100901} ($\gamma$), the following hold true.
\noindent (i) For any $\textit{\textbf{u}} \in W^m_p(\Omega)$ satisfying \begin{equation}
\label{eq1.55}
(-1)^m\mathcal{L} \textit{\textbf{u}} +\lambda \textit{\textbf{u}} = \sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha \end{equation} in $\Omega$ and the conormal derivative condition on $\partial\Omega$, we have \begin{equation*}
\sum_{|\alpha|\le m}\lambda^{1-\frac {|\alpha|} {2m}} \|D^\alpha
\textit{\textbf{u}} \|_{L_p(\Omega)} \le N \sum_{|\alpha|\le m}\lambda^{\frac
{|\alpha|} {2m}} \| \textit{\textbf{f}}_\alpha \|_{L_p(\Omega)}, \end{equation*} provided that $\lambda \ge \lambda_0$, where $N$ and $\lambda_0 \ge 0$ depend only on $d$, $n$, $m$, $p$, $\delta$, $K$ and $R_0$.
\noindent (ii) For any $\lambda > \lambda_0$, there exists a unique solution $\textit{\textbf{u}} \in W_p^m(\Omega)$ to \eqref{eq1.55} with the conormal derivative boundary condition.
\noindent (iii) If all the lower-order coefficients of $\mathcal{L}$ are zero and the leading coefficients are measurable functions of $x_1\in \mathbb{R}$ only, then one can take $\lambda_0=0$. \end{theorem}
For elliptic systems on a Reifenberg flat domain which is possibly unbounded, we impose a similar regularity assumption on $a_{\alpha\beta}$ as in Assumption \ref{assumption20100901}. Near the boundary, we require that in each small scale the direction in which the coefficients are only measurable coincides with the ``normal'' direction of a certain thin disc, which contains a portion of $\partial\Omega$. More precisely, we assume the following, where the parameter $\gamma\in (0,1/50)$ will be determined later. \begin{assumption}[$\gamma$]
\label{assump1} There is a constant $R_0\in (0,1]$ such that the following hold.
(i) For any $x\in \Omega$ and any $r\in (0,R_0]$ such that $B_r(x)\subset \Omega$, there is an orthogonal coordinate system depending on $x$ and $r$ such that in this new coordinate system, we have \begin{equation}
\label{eq13.07}
\dashint_{B_r(x)}\Big| a_{\alpha\beta}(y_1, y') -
\dashint_{B'_r(x')} a_{\alpha\beta}(y_1,z') \, dz' \Big| \, dy\le \gamma. \end{equation}
(ii) The domain $\Omega$ is Reifenberg flat: for any $x\in \partial\Omega$ and $r\in (0,R_0]$, there is an orthogonal coordinate system depending on $x$ and $r$ such that in this new coordinate system, we have \eqref{eq13.07} and \begin{equation}
\label{eq3.49}
\{(y_1,y'):x_1+\gamma r<y_1\}\cap B_r(x)
\subset\Omega_r(x)
\subset \{(y_1,y'):x_1-\gamma r<y_1\}\cap B_r(x). \end{equation} \end{assumption}
In particular, if the boundary $\partial \Omega$ is locally the graph of a Lipschitz continuous function with a small Lipschitz constant, then $\Omega$ is Reifenberg flat. Thus all $C^1$ domains are Reifenberg flat for any $\gamma>0$.
The next theorem is about the conormal derivative problem on a Reifenberg flat domain.
\begin{theorem}[Systems on a Reifenberg flat domain]
\label{thm5} Let $\Omega$ be a domain in $\mathbb{R}^d$ and $p \in (1,\infty)$. Then there exists a constant $\gamma=\gamma(d,n,m,p,\delta)$ such that, under Assumption \ref{assump1} ($\gamma$), the following hold true.
\noindent (i) Let $\textit{\textbf{f}}_\alpha= (f_\alpha^1, \ldots, f_\alpha^n)^{\text{tr}} \in L_p(\Omega)$, $|\alpha|\le m$. For any $\textit{\textbf{u}} \in W^m_p(\Omega)$ satisfying \begin{equation}
\label{eq11_01}
(-1)^m \mathcal{L} \textit{\textbf{u}} +\lambda \textit{\textbf{u}} = \sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha \quad \text{in}\quad \Omega \end{equation} with the conormal derivative condition on $\partial\Omega$, we have \begin{equation*}
\sum_{|\alpha|\le m}\lambda^{1-\frac {|\alpha|} {2m}} \|D^\alpha
\textit{\textbf{u}} \|_{L_p(\Omega)} \le N \sum_{|\alpha|\le m}\lambda^{\frac
{|\alpha|} {2m}} \| \textit{\textbf{f}}_\alpha \|_{L_p(\Omega)}, \end{equation*} provided that $\lambda \ge \lambda_0$, where $N$ and $\lambda_0 \ge 0$ depend only on $d$, $n$, $m$, $p$, $\delta$, $K$, and $R_0$.
\noindent (ii) For any $\lambda > \lambda_0$ and $\textit{\textbf{f}}_\alpha \in L_p(\Omega)$, $|\alpha|\le m$, there exists a unique solution $\textit{\textbf{u}} \in W_p^m(\Omega)$ to \eqref{eq11_01} with the conormal derivative boundary condition. \end{theorem}
For $\lambda=0$, we have the following solvability result for systems without lower-order terms on bounded domains.
\begin{corollary}
\label{cor7}
Let $\Omega$ be a bounded domain in $\mathbb{R}^d$, and $p \in (1,\infty)$. Assume that $a_{\alpha\beta}\equiv 0$ for any $\alpha,\beta$ satisfying $|\alpha|+|\beta|<2m$. Then there exists a constant $\gamma=\gamma(d,n,m,p,\delta)$ such that, under Assumption
\ref{assump1} ($\gamma$), for any $\textit{\textbf{f}}_\alpha \in L_p(\Omega)$, $|\alpha|= m$, there exists a solution $\textit{\textbf{u}} \in W_p^m(\Omega)$ to \begin{equation}
\label{eq22.34}
(-1)^m \mathcal{L} \textit{\textbf{u}} = \sum_{|\alpha|= m}D^\alpha \textit{\textbf{f}}_\alpha \quad \text{in}\quad \Omega \end{equation} with the conormal derivative boundary condition, and $\textit{\textbf{u}}$ satisfies \begin{equation}
\label{eq23.08}
\|D^m \textit{\textbf{u}} \|_{L_p(\Omega)} \le N \sum_{|\alpha|= m}\|\textit{\textbf{f}}_\alpha
\|_{L_p(\Omega)}, \end{equation} where $N$ depends only on $d$, $n$, $m$, $p$, $\delta$, $K$,
$R_0$, and $|\Omega|$. Such a solution is unique up to a polynomial of order at most $m-1$. \end{corollary}
Finally, we present a result for second-order scalar elliptic equations in the form \begin{equation}
\label{eq1005_2} D_i(a_{ij}D_j u)+D_i(a_i u)+b_iD_i u+cu=\operatorname{div} g+f\quad \text{in}\,\,\Omega \end{equation} with the conormal derivative boundary condition. The result generalizes Theorem 5 of \cite{DongKim08a}, in which bounded Lipschitz domains with small Lipschitz constants are considered. It also extends the main result of \cite{BW05} to equations with lower-order terms and with leading coefficients in a more general class.
In the theorem below we assume that all the coefficients are bounded and measurable, and $a_{ij}$ satisfies \eqref{eq11.28} with $m=1$. As usual, we say that $D_i a_i+c\le 0$ in $\Omega$ holds in the weak sense if $$ \int_\Omega(-a_i D_i\phi+c\phi)\,dx\le 0 $$ for any nonnegative $\phi\in C_0^\infty(\Omega)$. By Assumption ($\text{H}$) we mean that $$ \int_\Omega(-a_i D_i\phi+c\phi)\,dx=0\quad \forall \phi\in C^\infty(\overline{\Omega}). $$ Similarly, Assumption ($\text{H}^*$) is satisfied if $$ \int_\Omega(b_i D_i\phi+c\phi)\,dx=0\quad \forall \phi\in C^\infty(\overline{\Omega}). $$
\begin{theorem}[Scalar equations on a bounded domain]
\label{thmB} Let $p\in (1,\infty)$ and $\Omega$ be a bounded domain. Assume $D_ia_i+c\le 0$ in $\Omega$ in the weak sense. Then there exists a constant $\gamma=\gamma(d,p,\delta)$ such that, under Assumption \ref{assump1} ($\gamma$), the following hold true.
\noindent (i) If Assumption ($\text{H}$) is satisfied, then for any $f$, $g = (g_1, \ldots, g_d) \in L_{p}(\Omega)$, the equation \eqref{eq1005_2} has a solution $u\in W^1_p(\Omega)$, unique up to a constant, provided that Assumption ($\text{H}^*$) is also satisfied. Moreover, we have \begin{equation*}
\|Du\|_{L_p(\Omega)}\le N\|f\|_{L_p(\Omega)}+N\|g\|_{L_p(\Omega)}. \end{equation*}
\noindent (ii) If Assumption ($\text{H}$) is not satisfied, the solution is unique and we have \begin{equation*}
\|u\|_{W^1_p(\Omega)}\le N\|f\|_{L_p(\Omega)}+N\|g\|_{L_p(\Omega)}. \end{equation*} The constants $N$ are independent of $f$, $g$, and $u$. \end{theorem}
\section{Some auxiliary estimates} \label{sec_aux}
In this section we consider operators without lower-order terms. Denote $$
\mathcal{L}_0 \textit{\textbf{u}} = \sum_{|\alpha|=|\beta|=m}D^\alpha( a_{\alpha\beta} D^\beta \textit{\textbf{u}}). $$
\subsection{$L_2$-estimates} \label{sec3.1} The following $L_2$-estimate for elliptic operators in divergence form with measurable coefficients is classical. We give a sketched proof for the sake of completeness.
\begin{theorem} \label{theorem08061901} Let $\Omega = \mathbb{R}^d$ or $\mathbb{R}^d_+$. There exists $N = N(d,m,n, \delta)$ such that, for any $\lambda \ge 0$, \begin{equation}
\label{eq2010_01}
\sum_{|\alpha|\le m}\lambda^{1-\frac {|\alpha|} {2m}} \|D^\alpha \textit{\textbf{u}} \|_{L_2(\Omega)}
\le N \sum_{|\alpha|\le m}\lambda^{\frac {|\alpha|} {2m}} \| \textit{\textbf{f}}_\alpha \|_{L_2(\Omega)}, \end{equation}
provided that $\textit{\textbf{u}} \in W_2^m(\Omega)$ and $\textit{\textbf{f}}_\alpha \in L_2(\Omega)$, $|\alpha|\le m$, satisfy \begin{equation} \label{eq080501}
(-1)^m\mathcal{L}_0 \textit{\textbf{u}} + \lambda \textit{\textbf{u}} = \sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha \end{equation}
in $\Omega$ with the conormal derivative condition on $\partial\Omega$. Furthermore, for any $\lambda > 0$ and $\textit{\textbf{f}}_\alpha \in L_2(\Omega),|\alpha|\le m$, there exists a unique solution $\textit{\textbf{u}}\in W_2^m(\Omega)$ to the equation \eqref{eq080501} in $\Omega$ with the conormal derivative boundary condition. \end{theorem} \begin{proof} By the method of continuity and a standard density argument, it suffices to prove the estimate \eqref{eq2010_01} for $\textit{\textbf{u}} \in C^{\infty}(\overline{\Omega})\cap W_2^m(\Omega)$. From the equation, it follows that \begin{equation*}
\int_{\Omega} \left[ (D^{\alpha}\textit{\textbf{u}}, a_{\alpha\beta}D^{\beta}\textit{\textbf{u}})
+ \lambda |\textit{\textbf{u}}|^2 \right]\,dx
= \sum_{|\alpha|\le m} (-1)^{|\alpha|} \int_{\Omega} (D^{\alpha}\textit{\textbf{u}}, \textit{\textbf{f}}_{\alpha}) \, dx. \end{equation*} By the uniform ellipticity \eqref{eq11.28}, we get $$
\delta \int_{\Omega} |D^m\textit{\textbf{u}}|^2 \, dx \le \int_{\Omega} \Re(a_{\alpha\beta} D^\beta \textit{\textbf{u}}, D^\alpha \textit{\textbf{u}}) \, dx. $$ Hence, for any $\varepsilon>0$, \begin{align*}
&\delta \int_{\Omega} |D^m\textit{\textbf{u}}|^2 \, dx+ \lambda \int_{\Omega} |\textit{\textbf{u}}|^2 \, dx
\le \sum_{|\alpha|\le m}(-1)^{|\alpha|} \int_{\Omega}\Re(D^\alpha \textit{\textbf{u}}, \textit{\textbf{f}}_\alpha) \, dx\\
&\le \varepsilon \sum_{|\alpha|\le m}\lambda^{\frac {m-|\alpha|} m} \int_{\Omega} |D^\alpha \textit{\textbf{u}}|^2 \, dx + N\varepsilon^{-1} \sum_{|\alpha|\le m}\lambda^{-\frac {m-|\alpha|} m}\int_{\Omega} |\textit{\textbf{f}}_\alpha|^2 \, dx. \end{align*} To finish the proof, it suffices to use interpolation inequalities
and choose $\varepsilon$ sufficiently small depending on $\delta$, $d$, $m$, and $n$. \end{proof}
We say that a function $\textit{\textbf{u}}\in W^m_p(\Omega)$ satisfies \eqref{eq9.15} with the conormal derivative condition on $\Gamma\subset \partial\Omega$ if $\textit{\textbf{u}}$ satisfies \eqref{eq3.02} for any $\phi\in W^m_q(\Omega)$ which is supported on $\Omega\cup \Gamma$.
By Theorem \ref{theorem08061901} and adapting the proofs of Lemmas 3.2 and 7.2 in \cite{DK10} to the conormal case, we have the following local $L_2$-estimate.
\begin{lemma}
\label{lem6.2} Let $0<r<R<\infty$. Assume $\textit{\textbf{u}}\in C_{\text{loc}}^\infty(\overline{\mathbb{R}^{d}_+})$ satisfies \begin{equation}
\label{eq2.54} \mathcal{L}_0 \textit{\textbf{u}}=0 \end{equation} in $B_{R}^+$ with the conormal derivative boundary condition on $\Gamma_R$. Then there exists a constant $N=N(d,m,n,\delta)$ such that for $j=1,\ldots,m$, $$
\|D^j\textit{\textbf{u}}\|_{L_2(B_r^+)}\leq N(R-r)^{-j}\|\textit{\textbf{u}}\|_{L_2(B_R^+)}. $$ \end{lemma}
\begin{corollary}
\label{cor6.3}
Let $0<r<R<\infty$ and $a_{\alpha\beta}=a_{\alpha\beta}(x_1)$, $|\alpha|=|\beta|=m$. Assume that $\textit{\textbf{u}}\in C_{\text{loc}}^\infty(\overline{\mathbb{R}^{d}_+})$ satisfies \eqref{eq2.54}
in $B_R^+$ with the conormal derivative boundary condition on $\Gamma_R$. Then for any multi-index $\theta$ satisfying $\theta_1\le m$ and $|\theta|\ge m$, we have \begin{equation*}
\|D^\theta\textit{\textbf{u}}\|_{L_2(B_r^+)}\le N\|D^m\textit{\textbf{u}}\|_{L_2(B_R^+)}, \end{equation*} where $N=N(d,m,n,\delta, R, r, \theta)$. \end{corollary} \begin{proof} It is easily seen that $D_{x'}^{mk} \textit{\textbf{u}},k=1,2,\ldots,$ also satisfies \eqref{eq2.54} with the conormal derivative boundary condition on $\Gamma_R$. Then by applying Lemma \ref{lem6.2} repeatedly, we obtain \begin{equation*}
\|D^mD^{mk}_{x'}\textit{\textbf{u}}\|_{L_2(B_{R'}^+)}\le N\|D^m \textit{\textbf{u}}\|_{L_2(B_{R}^+)}, \end{equation*} where $R'=(r+R)/2$. From this inequality and the interpolation inequality, we get the desired estimate. \end{proof}
By using a Sobolev-type inequality, we shall obtain from Corollary \ref{cor6.3} a H\"older estimate of all the $m$-th derivatives of $\textit{\textbf{u}}$ except $D^{\bar\alpha} \textit{\textbf{u}}$, where $\bar\alpha=me_1=(m,0,\ldots,0)$. To compensate for this lack of regularity of $D^{\bar\alpha} \textit{\textbf{u}}$, we consider the quantity $$
\Theta:=\sum_{|\beta|=m}a_{\bar\alpha\beta}D^\beta \textit{\textbf{u}}. $$
We recall the following useful estimate proved in \cite[Corollary 4.4]{DK10}.
\begin{lemma}
\label{corA.2} Let $k\ge 1$ be an integer, $r\in (0,\infty)$, $p\in [1,\infty]$, $\mathcal{D}=[0,r]^{d}$, and $u(x)\in L_p(\mathcal{D})$. Assume that $D_1^k u=f_0+D_1f_1+\ldots+D_1^{k-1}f_{k-1}$ in $\mathcal{D}$, where $f_j\in L_p(\mathcal{D}),j=0,\ldots,k-1$. Then $D_1 u \in L_p(\mathcal{D})$ and \begin{equation*}
\|D_1 u\|_{L_p(\mathcal{D})}\le N\|u\|_{L_p(\mathcal{D})}+N\sum_{j=0}^{k-1}\|f_j\|_{L_p(\mathcal{D})}, \end{equation*} where $N=N(d,k, r)>0$. \end{lemma}
\begin{corollary}
\label{lem6.6} Let $0<r<R<\infty$ and $a_{\alpha\beta}=a_{\alpha\beta}(x_1)$. Assume $\textit{\textbf{u}}\in C_{\text{loc}}^\infty(\overline{\mathbb{R}^{d}_+})$ satisfies \eqref{eq2.54} in $B_R^+$ with the conormal derivative boundary condition on $\Gamma_R$. Then, for any nonnegative integer $j$, \begin{equation*}
\|D^j_{x'} \Theta\|_{L_2(B_r^+)}
+ \|D^j_{x'} D_1 \Theta \|_{L_2(B_r^+)}
\le N \|D^m\textit{\textbf{u}}\|_{L_2(B_R^+)}, \end{equation*} where $N=N(d, m,n,r,R,\delta, j)>0$. \end{corollary}
\begin{proof} Due to Corollary \ref{cor6.3} and the fact that $D_{x'}^j\textit{\textbf{u}}$ satisfies \eqref{eq2.54} with the conormal derivative boundary condition, it suffices to prove the desired inequality when $j = 0$ and $R$ is replaced by another $R'$ such that $r < R' < R$. Obviously, we have \begin{equation*}
\|\Theta \|_{L_2(B_r^+)}
\le N \|D^m\textit{\textbf{u}}\|_{L_2(B_r^+)}. \end{equation*} Thus we prove that, for $R'=(r+R)/2$, \begin{equation}
\label{eq0924}
\|D_1 \Theta \|_{L_2(B_r^+)}
\le N \|D^m\textit{\textbf{u}}\|_{L_2(B_{R'}^+)}. \end{equation} From \eqref{eq2.54}, in $B_R^+$ we have $$
D_1^m\Theta=-\sum_{\substack{|\alpha|=|\beta|=m\\\alpha_1<m}} D^\alpha(a_{\alpha\beta} D^\beta \textit{\textbf{u}})
=-\sum_{\substack{|\alpha|=|\beta|=m\\\alpha_1<m}}D_1^{\alpha_1} (a_{\alpha\beta}D_{x'}^{\alpha'}D^\beta \textit{\textbf{u}}). $$ Then the estimate \eqref{eq0924} follows from Lemma \ref{corA.2} with a covering argument and Corollary \ref{cor6.3}. The corollary is proved. \end{proof}
\subsection{H\"older estimates}
By using the $L_2$ estimates obtained in Section \ref{sec3.1}, in this section we shall derive several H\"older estimates of derivatives of $\textit{\textbf{u}}$. As usual, for $\mu\in (0,1)$ and a function $u$ defined on $\mathcal{D} \subset \mathbb{R}^{d} $, we denote $$ [u]_{C^{\mu}(\mathcal{D})}
= \sup_{\substack{x,y\in\mathcal{D}\\x\ne y}}\frac{|u(x)-u(y)|}
{|x-y|^{\mu}}, $$ $$
\|u\|_{C^{\mu}(\mathcal{D})}=[u]_{C^{\mu}(\mathcal{D})}+\|u\|_{L_{\infty}(\mathcal{D})}. $$
\begin{lemma}
\label{lem6.4} Let $a_{\alpha\beta}=a_{\alpha\beta}(x_1)$. Assume that $\textit{\textbf{u}}\in C_{\text{loc}}^\infty(\overline{\mathbb{R}^{d}_+})$ satisfies
\eqref{eq2.54} in $B_2^+$ with the conormal derivative boundary condition on $\Gamma_2$. Then for any $\alpha$ satisfying $|\alpha|=m$ and $\alpha_1<m$, we have \begin{equation*}
\|\Theta\|_{C^{1/2}(B_1^+)}+\|D^\alpha\textit{\textbf{u}}\|_{C^{1/2}(B_1^+)}
\le N \|D^m\textit{\textbf{u}}\|_{L_2(B_2^+)}, \end{equation*} where $N=N(d,m,n,\delta)>0$. \end{lemma} \begin{proof} The lemma follows from the proof of Lemma 4.1 in \cite{DK10} by using Corollaries \ref{cor6.3} and \ref{lem6.6}. \end{proof}
For $\lambda\ge0$, let \begin{equation*}
U=\sum_{|\alpha| \le m}\lambda^{\frac 1 2-\frac {|\alpha|}{2m}} |D^\alpha \textit{\textbf{u}}|,
\quad U'=\sum_{|\alpha|\le m,\alpha_1<m} \lambda^{\frac 1 2-\frac {|\alpha|} {2m}} |D^\alpha \textit{\textbf{u}}|. \end{equation*} Notably, since the matrix $[a_{\bar\alpha\bar\alpha}^{ij}]_{i,j=1}^n$ is positive definite, we have \begin{equation}
\label{eq12.01}
N^{-1}U\le U'+|\Theta|\le NU, \end{equation} where $N=N(d,m,n,\delta)$.
\begin{lemma}
\label{lem6.7} Let $a_{\alpha\beta}=a_{\alpha\beta}(x_1)$ and $\lambda\ge 0$. Assume that $\textit{\textbf{u}}\in C_{\text{loc}}^\infty(\overline{\mathbb{R}^{d}_+})$ satisfies \begin{equation*}
(-1)^m\mathcal{L}_0 \textit{\textbf{u}}+\lambda \textit{\textbf{u}}=0 \end{equation*} in $B_2^+$ with the conormal derivative condition on $\Gamma_2$. Then we have \begin{align}
\label{eq3.11}
\|\Theta\|_{C^{1/2}(B_1^+)}+\|U'\|_{C^{1/2}(B_1^+)}
\le N \|U\|_{L_2(B_2^+)},\\
\label{eq14.51}
\|U\|_{L_\infty(B_1^+)}\le N\|U\|_{L_2(B_2^+)}, \end{align} where $N=N(d,m,n,\delta)>0$. \end{lemma} \begin{proof} First we prove \eqref{eq3.11}. The case when $\lambda=0$ follows from Lemma \ref{lem6.4}. To deal with the case $\lambda>0$, we follow an idea by S. Agmon, which was originally used in a quite different situation. Let $ \eta(y)=\cos(\lambda^{1/(2m)}y)+\sin(\lambda^{1/(2m)}y) $ so that $\eta$ satisfies $$ D^{2m}\eta=(-1)^m\lambda \eta,\quad
\eta(0)=1,\quad |D^j\eta(0)|=\lambda^{j/(2m)},\,\,\,j=1,2,\ldots. $$ Let $z = (x,y)$ be a point in $\mathbb{R}^{d+1}$, where $x \in \mathbb{R}^{d}$, $y \in \mathbb{R}$, and $\hat{\textit{\textbf{u}}}(z)$ and $\hat{B}_r^+$ be given by $$ \hat{\textit{\textbf{u}}}(z) = \hat{\textit{\textbf{u}}}(x,y) = \textit{\textbf{u}}(x)\eta(y), \quad
\hat{B}_r^+ = \{ |z| < r: z \in \mathbb{R}^{d+1},x_1>0 \}. $$ Also define $$
\hat\Theta=\sum_{|\beta|=m}a_{\bar\alpha\beta}D^{(\beta,0)} \hat{\textit{\textbf{u}}}. $$ It is easily seen that $\hat{\textit{\textbf{u}}}$ satisfies $$ (-1)^m\mathcal{L}_0\hat{\textit{\textbf{u}}}+(-1)^mD^{2m}_y \hat{\textit{\textbf{u}}} = 0 $$ in $\hat{B}_2^+$ with the conormal derivative condition on $\hat{B}_2\cap \partial \mathbb{R}^{d+1}_+$. By Lemma \ref{lem6.4} applied to $\hat{\textit{\textbf{u}}}$ we have \begin{equation} \label{eq0804}
\|\hat\Theta\|_{C^{1/2}(\hat{B}_1^+)}+\big\| D_z^\beta\hat{\textit{\textbf{u}}}\big\|_{C^{1/2}(\hat{B}_1^+)}
\le N(d,m,n,\delta) \|D^m_z\hat{\textit{\textbf{u}}}\|_{L_2(\hat{B}_2^+)} \end{equation}
for any $\beta=(\beta_1,\ldots,\beta_{d+1})$ satisfying $|\beta|=m$ and $\beta_1<m$. Notice that for any $\alpha=(\alpha_1,\ldots,\alpha_d)$ satisfying $|\alpha|\le m$ and $\alpha_1<m$, $$
\lambda^{\frac 1 2-\frac {|\alpha|} {2m}} \big\|D^\alpha\textit{\textbf{u}}\big\|_{C^{1/2}(B_1^+)}
\le N\big\| D^\beta_z\hat{\textit{\textbf{u}}} \big\|_{C^{1/2}(\hat{B}_1^+)},\quad \beta=(\alpha_1,\ldots,\alpha_d,m-|\alpha|), $$ $$
\|\Theta\|_{C^{1/2}(B_1^+)}\le \|\hat\Theta\|_{C^{1/2}(\hat{B}_1^+)}, $$ and $D_z^m \hat{\textit{\textbf{u}}}$ is a linear combination of $$ \lambda^{\frac 1 2-\frac k {2m}}\cos( \lambda^{\frac 1 {2m}} y) D^k_x\textit{\textbf{u}}, \quad \lambda^{\frac 1 2-\frac k {2m}}\sin( \lambda^{\frac 1 {2m}} y) D^k_x\textit{\textbf{u}},\quad k=0,1,\ldots,m. $$ Thus the right-hand side of \eqref{eq0804} is less than the right-hand side of \eqref{eq3.11}. This completes the proof of \eqref{eq3.11}. Finally, we get \eqref{eq14.51} from \eqref{eq3.11} and \eqref{eq12.01}. \end{proof}
Similarly, we have the following interior estimate.
\begin{lemma}
\label{cor3.5} Let $a_{\alpha\beta}=a_{\alpha\beta}(x_1)$ and $\lambda\ge0$. Assume that $\textit{\textbf{u}}\in C_{\text{loc}}^\infty(\mathbb{R}^{d})$ satisfies \begin{equation*}
(-1)^m\mathcal{L}_0 \textit{\textbf{u}}+\lambda \textit{\textbf{u}}=0 \end{equation*} in $B_2$. Then we have \begin{align*}
\|\Theta\|_{C^{1/2}(B_1)}+\left\|U'\right\|_{C^{1/2}(B_1)}
\le N \|U\|_{L_2(B_2)},\\
\|U\|_{L_\infty(B_1)}\le N\|U\|_{L_2(B_2)}, \end{align*} where $N=N(d,m,n,\delta)>0$. \end{lemma}
\subsection{The maximal function theorem and a generalized Fefferman-Stein theorem} We recall the maximal function theorem and a generalized Fefferman-Stein theorem. Let $$ \mathcal{Q}=\{B_r(x): x \in \mathbb{R}^{d}, r \in (0,\infty)\}. $$ For a function $g$ defined in $\mathbb{R}^{d}$, the maximal function of $g$ is given by $$
\mathcal{M} g (x) = \sup_{B \in \mathcal{Q}, x \in B} \dashint_{B} |g(y)| \, dy. $$ By the Hardy--Littlewood maximal function theorem, $$
\| \mathcal{M} g \|_{L_p(\mathbb{R}^{d})} \le N \| g\|_{L_p(\mathbb{R}^{d})}, $$ if $g \in L_p(\mathbb{R}^{d})$, where $1 < p < \infty$ and $N = N(d,p)$.
Theorem \ref{th081201} below is from \cite{Krylov08} and can be considered as a generalized version of the Fefferman-Stein Theorem. To state the theorem, let $$ \mathbb{C}_l = \{ C_l(i_1, \ldots, i_d), i_1, \ldots, i_d \in \mathbb{Z}, i_1\ge 0 \}, \quad l \in \mathbb{Z} $$ be the collection of partitions given by dyadic cubes in $\mathbb{R}^{d}_+$ \begin{equation*}
[ i_1 2^{-l}, (i_1+1)2^{-l} ) \times \ldots \times [ i_d 2^{-l}, (i_d+1)2^{-l} ). \end{equation*}
\begin{theorem} \label{th081201}
Let $p \in (1, \infty)$, and $U,V,F\in L_{1,\text{loc}}(\mathbb{R}^{d}_+)$. Assume that we have $|U| \le V$ and, for each $l \in \mathbb{Z}$ and $C \in \mathbb{C}_l$, there exists a measurable function $U^C$ on $C$
such that $|U| \le U^C \le V$ on $C$ and \begin{equation*}
\int_C |U^C - \left(U^C\right)_C| \,dx \le \int_C F(x) \,dx. \end{equation*} Then $$
\| U \|_{L_p(\mathbb{R}^{d}_+)}^p
\le N(d,p) \|F\|_{L_p(\mathbb{R}^{d}_+)}\| V \|_{L_p(\mathbb{R}^{d}_+)}^{p-1}. $$ \end{theorem}
\subsection{Approximations of Reifenberg domains}
Let $\Omega$ be a domain in $\mathbb{R}^d$. Throughout this subsection, we assume that, for any $x\in \partial\Omega$ and $r\in (0,1]$, $\Omega$ satisfies \eqref{eq3.49} in an appropriate coordinate system. That is, $\Omega$ satisfies the following assumption with $\gamma<1/50$.
\begin{assumption}[$\gamma$]
\label{assump11} There is a constant $R_0\in (0,1]$ such that the following holds. For any $x\in \partial\Omega$ and $r\in (0,R_0]$, there is a coordinate system depending on $x$ and $r$ such that in this new coordinate system, we have \begin{equation}
\label{eq1005_1}
\{(y_1,y'):x_1+\gamma r<y_1\}\cap B_r(x)
\subset\Omega_r(x)
\subset \{(y_1,y'):x_1-\gamma r<y_1\}\cap B_r(x). \end{equation} \end{assumption}
For any $\varepsilon\in (0,1)$, we define \begin{equation}
\label{eq2.22}
\Omega^\varepsilon=\{x\in \Omega\,|\,\text{dist}(x,\partial\Omega)>\varepsilon\}. \end{equation}
We say that a domain is a Lipschitz domain if locally the boundary is the graph of a Lipschitz function in some coordinate system. More precisely,
\begin{assumption}[$\theta$]
\label{assump3} There is a constant $R_1\in (0,1]$ such that, for any $x\in \partial\Omega$ and $r\in(0,R_1]$, there exists a Lipschitz function $\phi$: $\mathbb{R}^{d-1}\to \mathbb{R}$ such that $$ \Omega\cap B_r(x_0) = \{x \in B_r(x_0)\, :\, x_1 >\phi(x')\} $$ and $$
\sup_{x',y'\in B_r'(x_0'),x' \neq y'}\frac {|\phi(y')-\phi(x')|}{|y'-x'|}\le \theta $$ in some coordinate system. \end{assumption}
We note that if $\Omega$ satisfies Assumption \ref{assump3} ($\theta$) with a constant $R_1$, then $\Omega$ satisfies Assumption \ref{assump11} with $R_1$ and $\theta$ in place of $R_0$ and $\gamma$, respectively.
Next we show that $\Omega^\varepsilon$ is a Lipschitz domain and Reifenberg flat with uniform parameters if $\Omega$ is Reifenberg flat. A related result was proved in \cite{BW07} which, in our opinion, contains a flaw.
\begin{lemma}
\label{lem3.11} Let $\Omega$ satisfy Assumption \ref{assump11} ($\gamma$). Then for any $\varepsilon\in (0,R_0/4)$, $\Omega^\varepsilon$ satisfies Assumption \ref{assump11} ($N_0\gamma^{1/2}$) with $R_0/2$ in place of $R_0$, and satisfies Assumption \ref{assump3} ($N_0\gamma^{1/2}$) with $R_1=\varepsilon$. Here $N_0$ is a universal constant. \end{lemma}
\begin{proof} We first prove that $\Omega^\varepsilon$ satisfies Assumption \ref{assump3} ($N_0\gamma^{1/2}$) with $R_1 = \varepsilon >0$. In particular, we show that, for each $x_0 \in \partial\Omega^{\varepsilon}$, there exists a function $\phi:\mathbb{R}^{d-1} \to \mathbb{R}$ such that \begin{equation}
\label{eq0930} \Omega^{\varepsilon} \cap B_{\varepsilon}(x_0) = \{ x \in B_{\varepsilon}(x_0) : x_1 > \phi(x') \}, \quad
\frac{|\phi(y') - \phi(x')|}{|x'-y'|} \le N_0 \gamma^{1/2} \end{equation} for all $x',y' \in B'_{\varepsilon}(x_0')$, $x' \ne y'$. Indeed, this implies Assumption \ref{assump3} ($N_0\gamma^{1/2}$) since for a fixed $x_0 \in \partial \Omega^\varepsilon$ we can use the same $\phi$ for all $r \in (0,\varepsilon)$.
Let $0$ be a point on $\partial \Omega$ such that $|x_0 - 0| = \varepsilon$. That is, we have a coordinate system and $r_0 := 4 \varepsilon < R_0$ such that $\partial \Omega \cap B_{r_0}(0)$ is trapped between $\{x_1 = \gamma r_0\}$ and $\{x_1 = - \gamma r_0\}$. See Figure \ref{fg3}. Note that $B_{\varepsilon}(x_0) \subset B_{r_0}(0)$ since, for $x \in B_{\varepsilon}(x_0)$, $$
|x| \le |x-x_0| + |x_0| < 2\varepsilon < r_0 = 4\varepsilon. $$ We show that for any $y, z \in \partial \Omega^\varepsilon \cap B_{\varepsilon}(x_0)$ \begin{equation}
\label{eq1003_1}
|y_1 - z_1| \le N_0 \gamma^{1/2} |y'-z'|, \end{equation} which implies \eqref{eq0930}. For $y, z \in \partial \Omega^\varepsilon \cap B_{\varepsilon}(x_0)$, we see that \begin{equation}
\label{eq1003_2} \varepsilon - \gamma r_0 < y_1 < \varepsilon + \gamma r_0, \quad \varepsilon - \gamma r_0 < z_1 < \varepsilon + \gamma r_0. \end{equation} Without loss of generality we assume that $y_1 \ge z_1$. \begin{figure}\label{fg3}
\end{figure}
To prove \eqref{eq1003_1}, let us consider two cases. First, let $\varepsilon \gamma^{1/2} \le |y'-z'|$. In this case, due to the inequalities \eqref{eq1003_2}, we have $$
\frac{|y_1-z_1|}{|y'-z'|} \le \frac{2\gamma r_0}{\varepsilon \gamma^{1/2}} = 8 \gamma^{1/2}, $$ which proves \eqref{eq1003_1}.
Now let $|y'-z'| \le \varepsilon \gamma^{1/2}$. In this case, find $w \in \partial \Omega$ such that $|y-w| = \varepsilon$. Note that $B_\varepsilon(w) \subset B_{r_0}(0)$ since $$
|w| \le |w-y| + |y-x_0| + |x_0| < 3 \varepsilon < r_0 = 4\varepsilon. $$
We estimate $|w'-z'|$ as follows. Using the fact that $-\gamma r_0 < w_1 < \gamma r_0$ and the first inequality in \eqref{eq1003_2}, we have $$
|y_1-w_1| \ge \varepsilon - 2 \gamma r_0 >0. $$ Thus using the equality $$
|w'-y'|^2 + |w_1-y_1|^2 = \varepsilon^2, $$ we see that $$
|w'-y'|^2 \le \varepsilon^2 - (\varepsilon - 2\gamma r_0)^2 \le 4 \varepsilon \gamma r_0 = 4^2 \varepsilon^2 \gamma. $$ Hence $$
|w'-z'| \le |w'-y'| + |y'-z'| < 5\varepsilon \gamma^{1/2}. $$
Since $y_1 \ge z_1$, $|w'-z'| \le 5\varepsilon \gamma^{1/2}$, and $z$ is above the ball $B_\varepsilon(w)$ (recall that $\gamma<1/50$), it follows that $$
\frac{y_1 - z_1}{|y'-z'|}
\le \frac{d}{dx} \left(-\sqrt{\varepsilon^2 - x^2}\right) \bigg|_{x=5 \varepsilon \gamma^{1/2}} \le N_0 \gamma^{1/2}. $$ Thus \eqref{eq1003_1} is proved. Therefore, we have proved that $\Omega^\varepsilon$ satisfies Assumption \ref{assump3} ($N_0\gamma^{1/2}$) with $R_1 = \varepsilon$. As pointed out earlier, this shows that $\Omega^\varepsilon$ satisfies \eqref{eq1005_1} for all $0 < r < \varepsilon$. Thus in order to completely prove that $\Omega^\varepsilon$ satisfies Assumption \ref{assump11} ($N_0\gamma^{1/2}$) with $R_0/2$, we need to prove that $\Omega^\varepsilon$ satisfies \eqref{eq1005_1} for $\varepsilon \le r < R_0/2$.
Let $\varepsilon \le r < R_0/2$ and $x_0 \in \partial\Omega^{\varepsilon}$. Find $0 \in \partial\Omega$
such that $|x_0 - 0| = \varepsilon$. Then $$ B_r(x_0) \subset B_R(0), $$ where $R = \varepsilon+r < R_0$. Then the first coordinate $x_1$ of the point $x \in \partial \Omega^{\varepsilon} \cap B_r(x_0)$ is trapped by $$ \varepsilon - \gamma R < x_1 < \varepsilon + \gamma R, $$ which is the same as $$ \varepsilon - \gamma ( \varepsilon+r) < x_1 < \varepsilon + \gamma ( \varepsilon+r). $$ Note that $$ \gamma (\varepsilon + r) \le 2 \gamma r \le 2 \gamma^{1/2}r. $$ Thus each $x_1$ of $x \in \partial\Omega^\varepsilon \cap B_r(x_0)$ satisfies \eqref{eq1005_1} with $2\gamma^{1/2}$ in place of $\gamma$. The lemma is proved. \end{proof}
The next approximation result is well known. See, for instance, \cite{Lie85}.
\begin{lemma}
\label{lem3.19} Let $\Omega$ be a domain in $\mathbb{R}^d$ and satisfy Assumption \ref{assump3} ($\theta$) with some $\theta>0$ and $R_1\in (0,1]$. Then there exists a sequence of expanding smooth subdomains $\Omega^k,k=1,2,\ldots$, such that $\Omega^k\to \Omega$ as $k\to \infty$ and each $\Omega^k$ satisfies Assumption \ref{assump3} ($N_0\theta$) with $R_1/2$ in place of $R_1$. Here $N_0$ is a universal constant. \end{lemma}
\section{Systems on a half space}
\label{sec4}
\subsection{Estimates of mean oscillations} Now we prove the following estimate of mean oscillations. As in Section \ref{sec_aux}, we assume that all the lower-order coefficients of $\mathcal{L}$ are zero. For $\textit{\textbf{f}}_\alpha= (f_\alpha^1, \ldots, f_\alpha^n)^{\text{tr}}$, we denote $$
F=\sum_{|\alpha|\le m}\lambda^{\frac {|\alpha|} {2m}-\frac 1 2}|\textit{\textbf{f}}_\alpha|. $$
\begin{proposition}
\label{prop7.9} Let $x_0\in \overline{\mathbb{R}^d_+}$, $\gamma \in (0,1/4)$, $r\in (0,\infty)$, $\kappa\in [64,\infty)$, $\lambda\ge 0$, $\nu \in (2,\infty)$, $\nu'=2\nu/(\nu-2)$, and $\textit{\textbf{f}}_\alpha= (f_\alpha^1, \ldots, f_\alpha^n)^{\text{tr}} \in L_{2,\text{loc}}(\overline{\mathbb{R}^{d}_+})$. Assume that $\kappa r\le R_0$ and $\textit{\textbf{u}}\in W_{\nu,\text{loc}}^m(\overline{\mathbb{R}^d_+})$ satisfies \begin{equation}
\label{eq11.13}
(-1)^m\mathcal{L} \textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha \end{equation} in $B^+_{\kappa r}(x_0)$ with the conormal derivative condition on $\Gamma_{\kappa r}(x_0)$. Then under Assumption \ref{assumption20100901} ($\gamma$), there exists a function $U^B$ depending on $B^+:=B^+_{\kappa r}(x_0)$ such that $N^{-1}U\le U^B\le NU$ and $$
\big(|U^B-(U^B)_{B_r^+(x_0)}|\big)_{B_r^+(x_0)} \le N(\kappa^{-1/2}+(\kappa \gamma)^{1/2} \kappa^{d/2}) \big(U^2\big)_{B^+_{\kappa r}(x_0)}^{1/2} $$ \begin{equation}
\label{eq5.42} +N\kappa^{d/2}\left[(F^2)_{B^+_{\kappa r}(x_0)}^{1/2}+\gamma^{1/\nu'} (U^\nu)_{B^+_{\kappa r}(x_0)}^{1/\nu}\right], \end{equation} where $N=N(d,m,n,\delta,\nu)>0$. \end{proposition}
The proof of the proposition is split into two cases.
{\em Case 1: the first coordinate of $x_0$ is at least $\kappa r/16$.} In this case, we have $$ B_r^+(x_0)=B_r(x_0)\subset B_{\kappa r/16}(x_0)\subset \mathbb{R}^{d}_+. $$ With $B_{\kappa r/16}$ in place of $B^+_{\kappa r}$ in the right-hand side of \eqref{eq5.42}, the problem is reduced to an interior mean oscillation estimate. Thus the proof can be done in the same way as in Proposition 7.10 in \cite{DK10} using Theorem \ref{theorem08061901} and Lemma \ref{cor3.5}.
{\em Case 2: the first coordinate of $x_0$ is nonnegative and less than $\kappa r/16$.} Notice that in this case, \begin{equation}
\label{eq22.15} B_r^+(x_0)\subset B^+_{\kappa r/8}(\hat x_0) \subset B^+_{\kappa r/4}(\hat x_0)\subset B^+_{\kappa r/2}(\hat x_0) \subset B^+_{\kappa r}(x_0), \end{equation} where $\hat x_0:=(0,x_0')$. Denote $R=\kappa r/2(< R_0)$. Because of Assumption \ref{assumption20100901}, after a linear transformation, which is an orthogonal transformation determined by $B=B_R(\hat x_0)$ followed by a translation downward, we may assume \begin{equation}
\label{eq17.29} B_R^+(\hat{y}_0)
\subset\Omega_{R}(\hat{y}_0)
\subset \{(y_1,y'):-2\gamma R< y_1\}\cap B_{R}(\hat{y}_0) \end{equation} and \begin{equation}
\label{eq17_50}
\sup_{|\alpha|=|\beta|=m}\int_{B_R(\hat{y}_0)} |a_{\alpha\beta}(x) - \bar{a}_{\alpha\beta}(y_1)| \, dy \le \gamma |B_R|. \end{equation} \begin{figure}
\caption{$\hat y_0 $ (or $y_0$) is the new coordinates of $\hat x_0$ (or $x_0$).}
\label{fg1}
\end{figure} Here $\Omega$ is the image of $\mathbb{R}^d_+$ under the linear transformation and $\hat y_0 $ (or $y_0$) is the new coordinates of $\hat x_0$ (or $x_0$). See Figure \ref{fg1}. Then \eqref{eq22.15} becomes \begin{equation}
\label{eq22.16} \Omega_r(y_0)\subset \Omega_{R/4}(\hat y_0) \subset \Omega_{R/2}(\hat y_0) \subset \Omega_{R}(\hat y_0) \subset \Omega_{\kappa r}(y_0). \end{equation}
For convenience of notation, in the new coordinate system we still denote the corresponding unknown function, the coefficients, and the data by $\textit{\textbf{u}}$, $a_{\alpha\beta}$, $\bar{a}_{\alpha\beta}$, and $\textit{\textbf{f}}_\alpha$, respectively. Note that, without loss of generality, we may assume that the coefficients $\bar{a}_{\alpha\beta}(y_1)$ in \eqref{eq17_50} are infinitely differentiable.
Below we present a few lemmas, which should be read as parts of the proof of the second case.
Let us introduce the following well-known extension operator. Let $\{c_1, \cdots, c_{m}\}$ be the solution to the system: \begin{equation} \label{eq0514-02} \sum_{k=1}^{m} \left(-\frac{1}{k}\right)^j c_k = 1, \quad j=0,\cdots,m-1. \end{equation} For a function $w$ defined on $\mathbb{R}^d_+$, set \begin{equation*}
\mathcal{E}_{m} w = \left\{ \begin{aligned} &w(y_1,y') \quad \text{if} \quad y_1 > 0\\ &\sum_{k=1}^{m} c_k w(-\frac{1}{k}y_1,y') \quad \text{otherwise} \end{aligned} \right.. \end{equation*} Note that $\mathcal{E}_{m} w \in W^{m}_{2,\text{loc}}(\mathbb{R}^d)$ if $w \in W^{m}_{2,\text{loc}}(\overline{\mathbb{R}^d_+})$. Indeed, by \eqref{eq0514-02} $$
D_1^j \left(\sum_{k=1}^{m} c_k w(-\frac{1}{k}y_1,y')\right)\bigg|_{y_1=0} = \sum_{k=1}^{m} \left(-\frac1k\right)^j c_k D_1^jw(0,y') = D_1^jw(0,y') $$ for $j = 0, \cdots, m-1$.
Denote $\Omega^*=\mathbb{R}^d_-\cap\Omega\cap B_R(\hat y_0)$. Recall that in the new coordinate system we still denote the corresponding unknown function, the coefficients, and the data by $\textit{\textbf{u}}$, $a_{\alpha\beta}$, $\bar{a}_{\alpha\beta}$, and $\textit{\textbf{f}}_\alpha$, respectively. Throughout the rest of this subsection, the derivatives are taken with respect to the $y$-coordinates. The following lemma contains the key observation in the proof of Proposition \ref{prop7.9}.
\begin{lemma}
\label{lem4.2} The function $\textit{\textbf{u}}$ satisfies \begin{align}
\label{eq17.23b}
(-1)^m \mathcal{L}_0 \textit{\textbf{u}}+\lambda \textit{\textbf{u}}&=(-1)^m \sum_{|\alpha|=|\beta|=m}D^\alpha\left( (\bar a_{\alpha\beta}- a_{\alpha\beta})D^\beta \textit{\textbf{u}}\right)\\
&\quad + \sum_{|\alpha|\le m} D^\alpha \tilde{\textit{\textbf{f}}}_\alpha+\sum_{|\alpha|=m}D^\alpha \textit{\textbf{g}}_\alpha-\lambda \textit{\textbf{h}}\nonumber \end{align} in $B_R^+(\hat y_0)$ with the conormal derivative boundary condition on $\Gamma_R(\hat y_0)$. In the above, $\mathcal{L}_0$ is the differential operator with the coefficients $\bar a_{\alpha\beta}$ from \eqref{eq17_50}, and \begin{align*} \tilde{\textit{\textbf{f}}}_\alpha&=\textit{\textbf{f}}_\alpha+\sum_{k=1}^m c_{\alpha,k} \textit{\textbf{f}}_\alpha(-ky_1,y')\,1_{(-ky_1,y')\in \Omega^*},\\
\textit{\textbf{g}}_\alpha&=(-1)^{m+1}\sum_{|\beta|=m}\sum_{k=1}^m c_{\alpha,k}\, a_{\alpha\beta}(-ky_1,y') (D^{\beta}\textit{\textbf{u}})(-ky_1,y')\,1_{(-ky_1,y')\in \Omega^*},\\ \textit{\textbf{h}}&=\sum_{k=1}^m kc_k\textit{\textbf{u}}(-ky_1,y')\, 1_{(-ky_1,y')\in \Omega^*}, \end{align*} where $c_{\alpha,k}=(-1)^{\alpha_1}c_kk^{-\alpha_1+1}$ are constants. \end{lemma} \begin{proof} Take a test function $\phi=(\phi^1,\phi^2,\ldots,\phi^n)\in W^m_2(B_R^+(\hat y_0))$ which vanishes near $\mathbb{R}^d_+\cap \partial B_R(\hat y_0)$. Due to \eqref{eq17.29}, it is easily seen that $\mathcal{E}_m \phi\in W^m_2(\Omega_R(\hat y_0))$ and vanishes near $\Omega\cap \partial B_R(\hat y_0)$. Since $\textit{\textbf{u}}$ satisfies \eqref{eq11.13} with the conormal derivative condition on $\partial\Omega\cap B_R(\hat y_0)$, we have $$
\int_{\Omega_R(\hat y_0)}\sum_{|\alpha|=|\beta|=m}D^\alpha\mathcal{E}_m\phi \cdot a_{\alpha\beta}D^\beta\textit{\textbf{u}} +\lambda \mathcal{E}_m\phi\cdot \textit{\textbf{u}}\,dy $$ $$
=\sum_{|\alpha|\le m}\int_{\Omega_R(\hat y_0)}(-1)^{|\alpha|}D^\alpha\mathcal{E}_m\phi\cdot \textit{\textbf{f}}_\alpha\, dy. $$ From this identity and the definition of the extension operator $\mathcal{E}_m$, a straightforward calculation gives $$
\int_{B_R^+(\hat y_0)}\sum_{|\alpha|=|\beta|=m}D^\alpha\phi \cdot \bar a_{\alpha\beta}D^\beta\textit{\textbf{u}} +\lambda \phi\cdot \textit{\textbf{u}}\,dy $$ $$
=\sum_{|\alpha|=|\beta|=m} \int_{B_R^+(\hat y_0)}D^\alpha\phi \cdot (\bar a_{\alpha\beta}-a_{\alpha\beta})D^\beta\textit{\textbf{u}} \, dy
+\sum_{|\alpha|\le m}\int_{B_R^+(\hat y_0)}(-1)^{|\alpha|}D^\alpha\phi\cdot \tilde{\textit{\textbf{f}}}_\alpha\,dy $$ $$
+\sum_{|\alpha|=m} \int_{B_R^+(\hat y_0)}D^\alpha\phi \cdot
(-1)^{|\alpha|}\textit{\textbf{g}}_\alpha \,dy -\lambda \int_{B_R^+(\hat y_0)}\phi\cdot\textit{\textbf{h}}\,dy. $$ The lemma is proved. \end{proof}
Set $$
G_\alpha = (-1)^m \sum_{|\beta|=m} \left(\bar a_{\alpha\beta} - a_{\alpha\beta}\right)D^\beta \textit{\textbf{u}} + \tilde{\textit{\textbf{f}}}_\alpha + \textit{\textbf{g}}_\alpha \quad \text{for} \quad
|\alpha|=m, $$ $$ G_\alpha = \tilde{\textit{\textbf{f}}}_\alpha \quad \text{for}
\quad 0\le |\alpha| < m. $$ We see that $G_\alpha \in L_2(B_R^+(\hat{y}_0))$, and by \eqref{eq17.23b} $$
(-1)^m \mathcal{L}_0 \textit{\textbf{u}} + \lambda \textit{\textbf{u}} = \sum_{|\alpha| \le m} D^{\alpha} G_\alpha - \lambda \textit{\textbf{h}}. $$ Take $\varphi$ to be an infinitely differentiable function such that $$ 0 \le \varphi \le 1, \quad \varphi = 1 \,\, \text{on} \,\, B_{R/2}(\hat y_0), \quad \varphi = 0 \,\, \text{outside} \,\, B_R(\hat y_0). $$ Then we find a unique solution $\textit{\textbf{w}} \in W_2^m(\mathbb{R}^d_+)$ satisfying \begin{equation}
\label{eq08_01}
(-1)^m \mathcal{L}_0 \textit{\textbf{w}} + \lambda \textit{\textbf{w}} = \sum_{|\alpha| \le m} D^{\alpha} (\varphi G_\alpha) - \lambda \varphi\textit{\textbf{h}} \end{equation} with the conormal derivative condition on $\partial \mathbb{R}^d_+$. By Theorem \ref{theorem08061901} we have \begin{equation}
\label{eq08_02}
\sum_{|\alpha|\le m}\lambda^{\frac12-\frac {|\alpha|} {2m}} \|D^\alpha \textit{\textbf{w}} \|_{L_2(\mathbb{R}^d_+)}
\le N \sum_{|\alpha|\le m}\lambda^{\frac {|\alpha|} {2m}-\frac12} \|\varphi G_\alpha\|_{L_2(\mathbb{R}^d_+)}
+ N \lambda \|\varphi \textit{\textbf{h}}\|_{L_2(\mathbb{R}^d_+)}. \end{equation}
Now we set $\textit{\textbf{v}} := \textit{\textbf{u}}-\textit{\textbf{w}}$ in $B_R(\hat y_0) \cap \overline{\mathbb{R}^d_+}$. Then $\textit{\textbf{v}}$ satisfies \begin{equation}
\label{eq08_03} (-1)^m \mathcal{L}_0 \textit{\textbf{v}} + \lambda \textit{\textbf{v}} = 0 \end{equation} in $B_{R/2}^+(\hat y_0)$ with the conormal derivative condition on $\Gamma_{R/2}(\hat y_0)$. Since the coefficients of $\mathcal{L}_0$ are infinitely differentiable, by the classical theory $\textit{\textbf{v}}$ is infinitely differentiable in $B_{R/2}(\hat{y}_0) \cap \overline{\mathbb{R}^d_+}$.
Recall that $$
U=\sum_{|\alpha| \le m}\lambda^{\frac 1 2-\frac {|\alpha|}{2m}} |D^\alpha \textit{\textbf{u}}|,
\quad F=\sum_{|\alpha|\le m}\lambda^{\frac {|\alpha|} {2m}-\frac 1 2}|\textit{\textbf{f}}_\alpha|. $$
\begin{lemma} We have \begin{equation}
\label{eq21.52h}
\sum_{k=0}^m\lambda^{\frac 1 2-\frac k {2m}}(|D^k \textit{\textbf{w}}|^2)_{B_R^+(\hat y_0)}^{1/2} \le N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_R(\hat y_0)}^{1/ \nu}+ N(F^2)_{\Omega_R(\hat y_0)}^{1/2}, \end{equation} where $\nu$ and $\nu'$ are from Proposition \ref{prop7.9}. \end{lemma}
\begin{proof} By \eqref{eq08_02} and the definition of $G_\alpha$, we have \begin{multline*}
\sum_{|\alpha|\le m}\lambda^{\frac 1 2-\frac {|\alpha|} {2m}}\|D^{\alpha} \textit{\textbf{w}}\|_{L_2(\mathbb{R}^d_+)}
\le N \sum_{|\alpha|=|\beta|=m}\|\varphi(\bar a_{\alpha\beta}- a_{\alpha\beta})D^\beta \textit{\textbf{u}}\|_{L_2(\mathbb{R}^d_+)} \\
+N\sum_{|\alpha|\le m}\lambda^{\frac{|\alpha|}{2m}-\frac 1 2} \|\varphi\tilde{\textit{\textbf{f}}}_\alpha\|_{L_2(\mathbb{R}^d_+)}
+N \sum_{|\alpha|=m}\|\varphi \textit{\textbf{g}}_\alpha\|_{L_2(\mathbb{R}^d_+)}
+N \lambda \|\varphi \textit{\textbf{h}}\|_{L_2(\mathbb{R}^d_+)}. \end{multline*}
Note that $\Omega^*$ lies in the strip $B_R(\hat y_0)\cap \{y: -2\gamma R<y_1<0\}$. Thus, by the definitions of $\tilde{\textit{\textbf{f}}}_\alpha$, $\textit{\textbf{g}}_\alpha$, and $\textit{\textbf{h}}$, it follows that the left-hand side of \eqref{eq21.52h} is less than a constant times \begin{align*}
\sum_{|\alpha|=|\beta|=m}&\left(|(\bar a_{\alpha\beta}- a_{\alpha\beta})D^\beta \textit{\textbf{u}}|^2\right)^{1/2}_{B_R^+(\hat y_0)}
+ \sum_{|\alpha|\le m}\lambda^{\frac{|\alpha|}{2m}-\frac 1 2} \left(|\textit{\textbf{f}}_\alpha|^2\right)^{1/2}_{\Omega_R(\hat y_0)}\\
&+ \left(I_{\{-2\gamma R<y_1<0\}}|D^{m}\textit{\textbf{u}}|^2\right)^{1/2}_{\Omega_R(\hat y_0)}+\lambda
\left(I_{\{-2\gamma R<y_1<0\}}|\textit{\textbf{u}}|^2\right)^{1/2}_{\Omega_R(\hat y_0)}\\ &:=I_1+I_2+I_3+I_4. \end{align*} By using \eqref{eq17_50} and H\"older's inequality, we see that $$ I_1\le N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_R(\hat y_0)}^{1/ \nu}. $$ It is clear that $I_2$ is bounded by $N(F^2)_{\Omega_R(\hat y_0)}^{1/2}$. Observe that by H\"older's inequality we have \begin{align*} \left(I_{\{-2\gamma R < y_1 < 0\}}
|D^{m}\textit{\textbf{u}}|^2\right)^{1/2}_{\Omega_R(\hat y_0)} &\le \left(I_{\{-2\gamma R < y_1 < 0\}}\right)_{\Omega_R(\hat y_0)}^{1/\nu'}
\left(|D^m\textit{\textbf{u}}|^\nu\right)^{1/\nu}_{\Omega_R(\hat y_0)} \\
&\le N\gamma^{1/\nu'}(|D^m\textit{\textbf{u}}|^{\nu})_{\Omega_R(\hat y_0)}^{1/\nu}. \end{align*} Thus $I_3$ is also bounded by $N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_R(\hat y_0)}^{1/ \nu}$. In a similar way, $I_4$ is bounded by $N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_R(\hat y_0)}^{1/ \nu}$. Therefore, we conclude \eqref{eq21.52h}. \end{proof}
Now we are ready to complete the proof of Proposition \ref{prop7.9}. We shall show that $U^B:=U'+|\Theta|$ satisfies the inequalities in the proposition. First, we consider the case when $\kappa \gamma \le 1/10$. By \eqref{eq21.52h}, it follows that \begin{equation}
\label{eq18.34h} (W^2)_{B_{R}^+(\hat y_0)}^{1/2} \le N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_{R}(\hat y_0)}^{1/ \nu}+ N(F^2)_{\Omega_{R}(\hat y_0)}^{1/2}. \end{equation}
Noting that $B_r^+(y_0) \subset B_R^+(\hat y_0)$ and, since $\kappa \gamma\le 1/10$, $|B_R^+(\hat y_0)|/|B_r^+(y_0)| \le N(d) \kappa^{d}$, we obtain from \eqref{eq18.34h} \begin{equation}
\label{eq28_01} (W^2)_{B_r^+(y_0)}^{1/2} \le N\kappa^{\frac d 2} \left(\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_{R}(\hat y_0)}^{1/ \nu}+ (F^2)_{\Omega_{R}(\hat y_0)}^{1/2}\right). \end{equation} Next we denote $$ \mathcal{D}_1=\Omega_{r}(y_0)\cap \{y_1<0\}=\Omega^*, \quad \mathcal{D}_2=B_{r}^+(y_0), \quad \mathcal{D}_3=B_{R/4}^+(\hat y_0). $$ Since $\textit{\textbf{v}}$ in \eqref{eq08_03} is infinitely differentiable, by applying Lemma \ref{lem6.7} to the system \eqref{eq08_03} with a scaling argument, we compute \begin{multline}
\label{eq23.10}
\big(|V'-(V')_{\mathcal{D}_2}|\big)_{\mathcal{D}_2}
+\big(|\hat \Theta-(\hat \Theta)_{\mathcal{D}_2}|\big)_{\mathcal{D}_2} \le N r^{1/2}\big([V']_{C^{1/2}(\mathcal{D}_2)} +[\hat \Theta]_{C^{1/2}( \mathcal{D}_2)}\big)\\ \le N r^{1/2}\big([V']_{C^{1/2}(\mathcal{D}_3)} +[\hat \Theta]_{C^{1/2}( \mathcal{D}_3)}\big) \le N\kappa^{-1/2}(V^2)_{B^+_{R/2}(\hat y_0)}^{1/2}. \end{multline} Thanks to the fact that $\kappa\gamma\le 1/10$, we have \begin{equation}
\label{eq21.31}
|\mathcal{D}_1|\le N\kappa\gamma |\mathcal{D}_2|,\quad
|\Omega_R(\hat y_0)|\le N\kappa^d |\mathcal{D}_2|. \end{equation} By combining \eqref{eq18.34h}, \eqref{eq28_01}, and \eqref{eq23.10}, we get \begin{align*}
\big(|&U^{B}-(U^B)_{\mathcal{D}_2}|\big)_{\mathcal{D}_2}\nonumber\\
&\le N\big(|V'-(V')_{\mathcal{D}_2}|\big)_{\mathcal{D}_2}
+ N \big(|\hat \Theta-(\hat \Theta)_{\mathcal{D}_2}|\big)_{{\mathcal{D}_2}} + N \big(W\big)_{\mathcal{D}_2}\nonumber\\ &\le N\kappa^{-1/2}(U^2)_{B^+_{R/2}(\hat y_0)}^{1/2} +N\kappa^{\frac d 2} \left(\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_{R}(\hat y_0)}^{1/ \nu}+ (F^2)_{\Omega_{R}(\hat y_0)}^{1/2}\right). \end{align*} By using the triangle inequality and the assumption $\kappa \gamma\le 1/10$, $$
\big(|U^B-(U^B)_{\Omega_r(y_0)}|\big)_{\Omega_r(y_0)}
\le N\big(|U^B-(U^B)_{\mathcal{D}_2}|\big)_{\mathcal{D}_2} $$ $$ +N\kappa\gamma (U^B)_{\mathcal{D}_2} +N(1_{\mathcal{D}_1} U^B )_{\Omega_r(y_0)}. $$ We use \eqref{eq12.01}, \eqref{eq21.31}, and H\"older's inequality to bound the last two terms on the right-hand side above as follows: $$
\kappa \gamma (U^B)_{\mathcal{D}_2}\le N\kappa \gamma (|U|^2)^{1/2}_{\mathcal{D}_2}
\le N\kappa \gamma \kappa^{d/2}(|U|^2)^{1/2}_{\Omega_R(\hat y_0)}, $$ \begin{equation*}
(1_{\mathcal{D}_1} U^B )_{\Omega_r(y_0)}\le (1_{\mathcal{D}_1})^{1/2}_{\Omega_r(y_0)}(|U|^2)^{1/2}_{\Omega_r(y_0)}
\le N(\kappa \gamma)^{1/2} \kappa^{d/2}(|U|^2)_{\Omega_R(\hat y_0)}^{1/2}. \end{equation*} Therefore, \begin{multline}
\label{eq21.12}
\big(|U^B-(U^B)_{\Omega_r(y_0)}|\big)_{\Omega_r(y_0)}
\le N\big(\kappa^{-1/2}+(\kappa \gamma)^{1/2} \kappa^{d/2}\big) (|U|^2)_{\Omega_R(\hat y_0)}^{1/2}\\ +N\kappa^{\frac d 2} \left(\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_{R}(\hat y_0)}^{1/ \nu}+(F^2)_{\Omega_{R}(\hat y_0)}^{1/2}\right). \end{multline}
In the remaining case when $\kappa\gamma>1/10$, by \eqref{eq12.01} and \eqref{eq22.16}, \begin{align*}
\big(|U^B-(U^B)_{\Omega_r(y_0)}|\big)_{\Omega_r(y_0)}&\le N(U^B)_{\Omega_r(y_0)}\le N(U)_{\Omega_r(y_0)}\\
&\le N(|U|^2)_{\Omega_r(y_0)}^{1/2}
\le N\kappa^{d/2}(|U|^2)_{\Omega_R(\hat y_0)}^{1/2}, \end{align*}
where in the last inequality, we used the obvious inequality $|\Omega_R(\hat y_0)|\le N\kappa^d|\Omega_r(y_0)|$. Therefore, in this case, \eqref{eq21.12} still holds. Finally, we transform the obtained inequality back to the original coordinates to get the inequality \eqref{eq5.42}. This completes the proof of Proposition \ref{prop7.9}.
\subsection{Proof of Theorem \ref{thm3}} We finish the proof of Theorem \ref{thm3} in this subsection. First we observe that by taking a sufficiently large $\lambda_0$ and using interpolation inequalities, we can move all the lower-order terms of $\mathcal{L} \textit{\textbf{u}}$ to the right-hand side. Thus in the sequel we assume that all the lower-order coefficients of $\mathcal{L}$ are zero.
Recall the definition of $\mathbb{C}_l,l\in \mathbb{Z}$ above Theorem \ref{th081201}. Notice that if $x\in C\in \mathbb{C}_l$, then for the smallest $r>0$ such that $C\subset B_r(x)$ we have $$
\dashint_{C} \dashint_{C}|g(y)-g(z)| \,dy\,dz\leq N(d)
\dashint_{B^+_r(x)} \dashint_{B^+_r(x)}|g(y)-g(z)| \,dy\,dz. $$ We use this inequality in the proof of the following corollary.
\begin{corollary} \label{cor001b} Let $\gamma \in (0,1/4)$, $\lambda > 0$, $\nu\in (2,\infty)$, $\nu'=2\nu/(\nu-2)$, and $z_0 \in \overline{\mathbb{R}^d_+}$. Assume that $\textit{\textbf{u}}\in W_{\nu,\text{loc}}^m(\overline{\mathbb{R}^d_+})$ vanishes outside $B_{\gamma R_0}(z_0)$ and satisfies $$
(-1)^m\mathcal{L} \textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha $$ locally in $\mathbb{R}^d_+$ with the conormal derivative condition on $\partial \mathbb{R}^d_+$, where $\textit{\textbf{f}}_\alpha\in L_{2,\text{loc}}(\overline{\mathbb{R}^d_+})$. Then under Assumption \ref{assumption20100901} ($\gamma$), for each $l \in \mathbb{Z}$, $C \in \mathbb{C}_l$, and $\kappa \ge 64$, there exists a function $U^C$ depending on $C$ such that $N^{-1}U\le U^C\le NU$ and \begin{equation}
\label{eq08_04}
\left(|U^C-(U^C)_{C}|\right)_{C} \le N \left(F_{\kappa}\right)_C, \end{equation} where $N=N(d,\delta,m,n,\tau)$ and \begin{align*} F_{\kappa}&= \big(\kappa^{-1/2}+(\kappa \gamma)^{1/2}\kappa^{d/2}\big)\big(\mathcal{M} (1_{\mathbb{R}^d_+}U^2)\big)^{1/2}\\ &\quad +\kappa^{d/2}\big[\big(\mathcal{M}(1_{\mathbb{R}^d_+}F^2)\big)^{1/2}+\gamma^{1/\nu'} (\mathcal{M}(1_{\mathbb{R}^d_+}U^{\nu}))^{1/\nu}\big]. \end{align*} \end{corollary}
\begin{proof} For each $\kappa \ge 64$ and $C \in \mathbb{C}_l$, let $B_r(x_0)$ be the smallest ball containing $C$. Clearly, $x_0 \in \overline{\mathbb{R}^d_+}$.
If $\kappa r > R_0$, then we take $U^C = U$. Note that the volumes of $C$, $B_r(x_0)$, and $B^+_r(x_0)$ are comparable, and $C\subset B_r^+(x_0)$. Then by the triangle inequality and H\"{o}lder's inequality, the left-hand side of \eqref{eq08_04} is less than \begin{multline*} N \left(U^2\right)^{1/2}_{B_r^+(x_0)} \le N \kappa^{d/2} \big( 1_{B^+_{\gamma R_0}(z_0)}U^2 \big)^{1/2}_{B_{\kappa r}(x_0)}\\ \le N \kappa^{d/2} \big( 1_{B_{\gamma R_0}(z_0)} \big)_{B_{\kappa r}(x_0)}^{1/\nu'} \big( 1_{\mathbb{R}^d_+ }U^\nu \big)^{1/\nu}_{B_{\kappa r}(x_0)} \le N \kappa^{d/2} \gamma^{1/\nu'} \big( 1_{\mathbb{R}^d_+ }U^\nu \big)^{1/\nu}_{B_{\kappa r}(x_0)}. \end{multline*}
Here the first inequality is because $|B_{\kappa r}(x_0)|\le 2\kappa^d |B_r^+(x_0)|$ and $U$ vanishes outside $B_{\gamma R_0}(z_0)$. The second inequality follows from H\"older's inequality. In the last inequality we used $\kappa r>R_0$ and $\gamma^d \le \gamma$. Note that \begin{equation}
\label{eq08_05} \big( 1_{\mathbb{R}^d_+ }U^\nu \big)^{1/\nu}_{B_{\kappa r}(x_0)} \le \mathcal{M}^{1/\nu} \big(1_{\mathbb{R}^d_+ }U^\nu \big)(x) \end{equation} for all $x \in C$. Hence the inequality \eqref{eq08_04} follows.
If $\kappa r \le R_0$, from Proposition \ref{prop7.9}, we find $U^B$ with $B^+ = B^+_{\kappa r}(x_0)$. Take $U^C = U^B$. Then by Proposition \ref{prop7.9} we have \begin{equation}
\label{eq08_06}
\left( |U^C - \left(U^C\right)_C| \right)_C \le N(d) I, \end{equation} where $I$ is the right-hand side of the inequality \eqref{eq5.42}. Note that, for example, $$ \left( U^2 \right)_{B_{\kappa r}^+(x_0)} \le N(d) \big( 1_{\mathbb{R}^d_+}U^2 \big)_{B_{\kappa r}(x_0)}. $$ Using this and inequalities like \eqref{eq08_05}, we see that \eqref{eq08_06} implies the desired inequality \eqref{eq08_04}. \end{proof}
\begin{theorem} \label{theorem001b} Let $p \in (2,\infty)$, $\lambda > 0$, $z_0\in \overline{\mathbb{R}^d_+}$, and $\textit{\textbf{f}}_{\alpha} \in L_p(\mathbb{R}^d_+)$. There exist positive constants $\gamma\in (0,1/4)$ and $N$, depending only on $d$, $\delta$, $m$, $n$, $p$, such that under Assumption \ref{assumption20100901} ($\gamma$), for $\textit{\textbf{u}} \in W_p^m(\mathbb{R}^d_+)$ vanishing outside $B_{\gamma R_0}(z_0)$ and satisfying $$
(-1)^m\mathcal{L} \textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha $$ in $\mathbb{R}^d_+$ with the conormal derivative condition on $\partial \mathbb{R}^d_+$, we have $$
\|U\|_{L_p(\mathbb{R}^d_+)}
\le N \| F\|_{L_p(\mathbb{R}^d_+)}, $$ where $N = N(d,\delta,m,n,p)$. \end{theorem}
\begin{proof} Let $\gamma > 0$ and $\kappa \ge 64$ be constants to be specified below. Take a constant $\nu$ such that $p > \nu > 2$. Then we see that $\textit{\textbf{u}} \in W_{\nu,\text{loc}}^m(\overline{\mathbb{R}^d_+})$ and all the conditions in Corollary \ref{cor001b} are satisfied.
For each $l \in \mathbb{Z}$ and $C \in \mathbb{C}_l$, let $U^C$ be the function from Corollary \ref{cor001b}. Then by Corollary \ref{cor001b} and Theorem \ref{th081201} we have $$
\| U \|_{L_p(\mathbb{R}^d_+)}^p \le N \|F_\kappa\|_{L_p(\mathbb{R}^d_+)} \|U\|_{L_p(\mathbb{R}^d_+)}^{p-1}. $$ This implies that $$
\|U \|_{L_p(\mathbb{R}^d_+)} \le N \|F_\kappa\|_{L_p(\mathbb{R}^d_+)}. $$ Now we observe that by the Hardy--Littlewood maximal function theorem \begin{multline*}
\|F_\kappa\|_{L_p(\mathbb{R}^d_+)}
\le \|F_\kappa\|_{L_p(\mathbb{R}^d)}
\le N \big(\kappa^{-1/2} + (\kappa \gamma)^{1/2} \kappa^{d/2}\big)\|1_{\mathbb{R}^d_+} U\|_{L_p(\mathbb{R}^d)}\\
+ N \kappa^{d/2} \|1_{\mathbb{R}^d_+} F\|_{L_p(\mathbb{R}^d)} + N \kappa^{d/2}\gamma^{1/\nu'} \|1_{\mathbb{R}^d_+} U\|_{L_p(\mathbb{R}^d)}. \end{multline*} To complete the proof, it remains to choose a sufficiently large $\kappa$, and then a sufficiently small $\gamma$ so that $$ N \big(\kappa^{-1/2} + (\kappa \gamma)^{1/2} \kappa^{d/2}\big) + N \kappa^{d/2}\gamma^{1/\nu'} < 1/2. $$ \end{proof}
\begin{proof}[Proof of Theorem \ref{thm3}] We treat the following three cases separately.
{\em Case 1: $p=2$.} In this case, the theorem follows from Theorem \ref{theorem08061901}.
{\em Case 2: $p\in (2,\infty)$.} Assertion (i) follows from Theorem \ref{theorem001b} and the standard partition of unity argument. Then Assertion (ii) is derived from Assertion (i) by using the method of continuity. Finally, Assertion (iii) is due to a standard scaling argument.
{\em Case 3: $p\in (1,2)$.} In this case, Assertion (i) is a consequence of the duality argument and the $W^m_q$-solvability obtained above for $q=p/(p-1)\in (2,\infty)$. With the a priori estimate, the remaining part of the theorem is proved in the same way as in Case 2. The theorem is proved. \end{proof}
\section{Systems on a Reifenberg flat domain}
\label{Reifenberg}
In this section, we consider elliptic systems on a Reifenberg flat domain. The crucial ingredients of the proofs below are the interior and the boundary estimates established in Section \ref{sec_aux}, a result in \cite{Sa80,KS80} on the ``crawling of ink drops'', and an idea in \cite{CaPe98}.
By a scaling, in the sequel we may assume $R_0=1$ in Assumption \ref{assump1}. Recall the definitions of $U$ and $F$ in Sections \ref{sec_aux} and \ref{sec4}.
\begin{lemma}
\label{lem7.3}
Let $\gamma \in (0,1/50)$, $R\in (0,1]$, $\lambda\in (0,\infty)$, $\nu\in (2,\infty)$, $\nu'=2\nu/(\nu-2)$, $\textit{\textbf{f}}_\alpha= (f_\alpha^1, \ldots, f_\alpha^n)^{\text{tr}} \in L_{2,\text{loc}}(\overline{\Omega})$, $|\alpha|\le m$. Assume that $a_{\alpha\beta}\equiv 0$ for any $\alpha,\beta$ satisfying $|\alpha|+|\beta|<2m$ and that $\textit{\textbf{u}}\in W_{\nu, \text{loc}}^m(\overline{\Omega})$ satisfies \eqref{eq11_01} locally in $\Omega$ with the conormal derivative condition on $\partial \Omega$. Then the following hold true.
\noindent(i) Suppose $0\in \Omega$, $\text{dist}(0,\partial\Omega)\ge R$, and Assumption \ref{assump1} ($\gamma$) (i) holds at the origin. Then there exist nonnegative functions $V$ and $W$ in $B_R$ such that $U \le V+W$ in $B_R$, and $V$ and $W$ satisfy \begin{equation*}
(W^2)_{B_R}^{1/2} \le N\gamma^{1/\nu'} (U^\nu)_{B_R}^{1/\nu}+ N(F^2)_{B_R}^{1/2} \end{equation*} and \begin{equation*}
\|V\|_{L_\infty(B_{R/4})} \le N\gamma^{1/\nu'} (U^\nu)_{B_{R}}^{1/ \nu}+ N(F^2)_{B_{R}}^{1/2}+ N(U^2)_{B_{R}}^{1/2}, \end{equation*} where $N=N(d,n,m,\delta,\nu)>0$ is a constant.
\noindent(ii) Suppose $0\in \partial\Omega$ and Assumption \ref{assump1} ($\gamma$) (ii) holds at the origin. Then there exist nonnegative functions $V$ and $W$ in $\Omega_R$ such that $U \le V+W$ in $\Omega_R$, and $W$ and $V$ satisfy \begin{equation}
\label{eq17.10} (W^2)_{\Omega_R}^{1/2} \le N\gamma^{1 /\nu'} (U^\nu)_{\Omega_R}^{1/\nu}+ N(F^2)_{\Omega_R}^{1/2} \end{equation} and \begin{equation} \label{eq17.11}
\|V\|_{L_\infty(\Omega_{R/4})} \le N\gamma^{1/\nu'} (U^\nu)_{\Omega_R}^{1/\nu} +N(F^2)_{\Omega_R}^{1/2}+ N(U^2)_{\Omega_R}^{1/2}, \end{equation} where $N=N(d,n,m,\delta,\nu)>0$ is a constant. \end{lemma}
\begin{proof} The proof is similar to that of Proposition \ref{prop7.9} with some modifications. We assume that Assumption \ref{assump1} holds in the original coordinates. Without loss of generality, we may further assume that the coefficients $\bar a_{\alpha\beta}$ are infinitely differentiable.
Assertion (i) is basically an interior estimate which does not involve boundary conditions, so the proof is exactly the same as that of Assertion (i) in \cite[Lemma 8.3]{DK10}.
Next, we prove Assertion (ii). Due to Assumption \ref{assump1}, by shifting the origin upward, we can assume that $$ B_R^+(x_0) \subset \Omega_R(x_0) \subset \{(x_1,x') : -2\gamma R < x_1 \} \cap B_R(x_0) $$ where $x_0 \in \partial \Omega$ (see Figure \ref{fg2}). Define $\bar a_{\alpha\beta}$ as in Section \ref{sec4}. \begin{figure}\label{fg2}
\end{figure} Then $\textit{\textbf{u}}$ satisfies \eqref{eq17.23b} in $B_R^+(x_0)$ with the conormal derivative condition on $\Gamma_R(x_0)$. By following the argument in the proof of Proposition \ref{prop7.9}, we can find $\textit{\textbf{w}} \in W_2^m(\mathbb{R}^d_+)$ and $\textit{\textbf{v}} \in W_{2}^m(B_R^+(x_0))$ such that $\textit{\textbf{u}} = \textit{\textbf{w}} + \textit{\textbf{v}}$, the function $\textit{\textbf{w}}$ satisfies \begin{equation}
\label{eq08_07}
\sum_{k=0}^m\lambda^{\frac 1 2-\frac k {2m}}(|D^k \textit{\textbf{w}}|^2)_{B_R^+(x_0)}^{1/2} \le N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_R(x_0)}^{1/ \nu}+ N(F^2)_{\Omega_R(x_0)}^{1/2}, \end{equation} and the function $\textit{\textbf{v}}$ satisfies $$ (-1)^m \mathcal{L} \textit{\textbf{v}} + \lambda \textit{\textbf{v}} = 0 $$ in $B_{R/2}^+(x_0)$ with the conormal derivative condition on $\Gamma_{R/2}(x_0)$. We define $V$ and $W$ in $B_R^+(x_0)$ as in Section \ref{sec4}. As noted in the proof of Proposition \ref{prop7.9}, we can assume that $\textit{\textbf{v}}$ is infinitely differentiable. Applying Lemma \ref{lem6.7}, we get \begin{equation}
\label{eq08_08}
\|V\|_{L_\infty(B^+_{R/4}(x_0))} \le N (V^2)_{B^+_{R/2}(x_0)}^{1/2}. \end{equation} Now we extend $W$ and $V$ on $\Omega^* = \mathbb{R}^d_- \cap \Omega_R(x_0)$ by setting $W = U$ and $V = 0$, respectively. Then we see that by H\"{o}lder's inequality and \eqref{eq08_07} \begin{align*} \left(W^2\right)^{1/2}_{\Omega_R(x_0)}
&= \left[ \frac{1}{|\Omega_R(x_0)|}\int_{B_R^+(x_0)} W^2 \, dx
+ \frac{1}{|\Omega_R(x_0)|} \int_{\Omega^*} U^2 \, dx \right]^{1/2}\\ &\le N \left( W^2 \right)_{B_R^+(x_0)}^{1/2} + N \left( 1_{\Omega^*} \right)_{\Omega_R(x_0)}^{1/\nu'} \left( U^\nu \right)^{1/\nu}_{\Omega_R(x_0)}\\ &\le N\gamma^{1/ {\nu'}} (U^\nu)_{\Omega_R(x_0)}^{1/ \nu}+ N(F^2)_{\Omega_R(x_0)}^{1/2}. \end{align*} Upon recalling that the origin was shifted from $x_0$, we arrive at \eqref{eq17.10}. To prove \eqref{eq17.11} we observe that by \eqref{eq08_08} and the fact that $V\le U+W$ $$
\|V\|_{L_\infty(\Omega_{R/4}(x_0))}
= \|V\|_{L_\infty(B_{R/4}^+(x_0))} \le N \left(V^2\right)^{1/2}_{B_{R/2}^+(x_0)} $$ $$ \le N \left(V^2\right)^{1/2}_{\Omega_R(x_0)} \le N \left(W^2\right)^{1/2}_{\Omega_R(x_0)} + N \left(U^2\right)^{1/2}_{\Omega_R(x_0)}. $$ This together with \eqref{eq17.10} gives \eqref{eq17.11}. This completes the proof of the lemma. \end{proof}
For a function $f$ on a set $\mathcal{D}\subset\mathbb{R}^{d}$, we define its maximal function $\mathcal{M} f$ by $\mathcal{M} f=\mathcal{M} (I_{\mathcal{D}}f)$. For any $s>0$, we introduce two level sets $$ \mathcal{A}(s)=\{x\in \Omega:U> s\}, $$ $$ \mathcal{B}(s)=\Big\{x\in \Omega:\gamma^{-1/\nu'}\big(\mathcal{M}(F^2)\big)^{1/2}+ \big(\mathcal{M}(U^\nu)\big)^{1/\nu}>s\Big\}. $$ With Lemma \ref{lem7.3} in hand, we get the following corollary.
\begin{corollary}
\label{cor7.5} Under the assumptions of Lemma \ref{lem7.3}, suppose $0\in \overline{\Omega}$ and Assumption \ref{assump1} ($\gamma$) holds. Let $s\in (0,\infty)$ be a constant. Then there exists a constant $\kappa\in (1,\infty)$, depending only on $d$, $n$, $m$, $\delta$, and $\nu$, such that the following holds. If \begin{equation}
\label{eq15.59}
\big|\Omega_{R/32}\cap \mathcal{A}(\kappa s)\big|> \gamma^{2/\nu'} |\Omega_{R/32}|, \end{equation} then we have $\Omega_{R/32}\subset \mathcal{B}(s)$. \end{corollary} \begin{proof} By dividing $\textit{\textbf{u}}$ and $\textit{\textbf{f}}$ by $s$, we may assume $s=1$. We prove by contradiction. Suppose at a point $x\in \Omega_{R/32}$, we have \begin{equation}
\label{eq15.38} \gamma^{-1/\nu'}\big(\mathcal{M}(F^2)(x)\big)^{1/2}+ \big(\mathcal{M}(U^\nu)(x)\big)^{1/\nu}\le 1. \end{equation} Let us consider two cases.
First we consider the case when $\text{dist}(0,\partial \Omega)\ge R/8$. Notice that $$ x\in\Omega_{R/32}=B_{R/32}\subset B_{R/8}\subset \Omega. $$ Due to Lemma \ref{lem7.3} (i), we have $U \le V + W$ and, by \eqref{eq15.38}, \begin{equation}
\label{eq15.47}
\|V\|_{L_\infty(B_{R/32})} \le N_1, \quad (W^2)_{B_{R/8}}^{1/2} \le N_1\gamma^{1/\nu'}, \end{equation} where $N_1$ and constants $N_i$ below depend only on $d$, $n$, $m$, $\delta$, and $\nu$. By \eqref{eq15.47}, the triangle inequality and Chebyshev's inequality, we get \begin{multline}
\label{eq5.20}
\big|\Omega_{R/32}\cap \mathcal{A}(\kappa)\big|=
\big|\{x\in \Omega_{R/32}: U>\kappa\}\big|\\
\le \big|\{x\in \Omega_{R/32}: W>\kappa-N_1\}\big|
\le (\kappa-N_1)^{-2} N_1^2 \gamma^{2/\nu'}|B_{R/8}|, \end{multline} which contradicts \eqref{eq15.59} if we choose $\kappa$ sufficiently large.
Next we consider the case when $\text{dist}(0,\partial \Omega)< R/8$. We take $y\in \partial\Omega$ such that $|y|=\text{dist}(0,\partial \Omega)$. Notice that in this case we have $$ x\in\Omega_{R/32}\subset \Omega_{R/4}(y)\subset \Omega_{R}(y). $$ Due to Lemma \ref{lem7.3} (ii), we have $U\le V+ W$ in $\Omega_R(y)$ and, by \eqref{eq15.38}, \begin{equation}
\label{eq17.23c}
\|V\|_{L_\infty(\Omega_{R/32})}\le \|V\|_{L_\infty(\Omega_{R/4}(y))} \le N_2, \quad (W^2)_{\Omega_{R}(y)}^{1/2} \le N_2\gamma^{1/\nu'}. \end{equation}
By \eqref{eq17.23c}, the triangle inequality and Chebyshev's inequality, we still get \eqref{eq5.20} with $N_2$ in place of $N_1$, which contradicts \eqref{eq15.59} if we choose $\kappa$ sufficiently large. \end{proof}
\begin{theorem} \label{theorem101} Let $p \in (2,\infty)$, $\lambda > 0$, $x_0\in \mathbb{R}^{d}$
and $\textit{\textbf{f}}_{\alpha} \in L_p(\Omega)$. Suppose that $a_{\alpha\beta}\equiv 0$ for any $\alpha,\beta$ satisfying $|\alpha|+|\beta|<2m$, and $\textit{\textbf{u}} \in W_p^m(\Omega)$ is supported on $B_{\gamma}(x_0)\cap \overline{\Omega}$ and satisfies \eqref{eq11_01} in $\Omega$ with the conormal derivative boundary condition. There exist positive constants $\gamma \in (0,1/50)$ and $N$, depending only on $d,\delta,m,n, p$, such that, under Assumption \ref{assump1} ($\gamma$) we have \begin{equation}
\label{eq16.00}
\|U \|_{L_p(\Omega)}
\le N \|F\|_{L_p(\Omega)}, \end{equation} where $N = N(d,\delta,m,n,p)$. \end{theorem}
\begin{proof} We fix $\nu=p/2+1$ and let $\nu'=2\nu/(\nu-2)$. Then we see that $\textit{\textbf{u}} \in W_{\nu,\text{loc}}^m(\overline{\Omega})$. Let $\kappa$ be the constant in Corollary \ref{cor7.5}. Recall the elementary identity: $$
\|f\|_{L_p(\mathcal{D})}^p=p\int_0^\infty \big|\{x\in\mathcal{D}:|f(x)|> s\}\big|s^{p-1}\,ds, $$ which implies that \begin{equation}
\label{eq5.07}
\|U\|_{L_p(\Omega)}^p=p\kappa^p \int_0^\infty|\mathcal{A}(\kappa s)|s^{p-1}\,ds. \end{equation} Thus, to obtain \eqref{eq16.00} we need to estimate $\mathcal{A}(\kappa s)$. First, we note that by Chebyshev's inequality \begin{equation}
\label{eq17.57}
|\mathcal{A}(\kappa s)|\le (\kappa s)^{-2}\|U\|_{L_2(\Omega)}^2. \end{equation}
When $\kappa s \ge \gamma^{-1/\nu'} \|U\|_{L_2(\Omega)}$, this indicates that $$
|\mathcal{A}(\kappa s)|\le \gamma^{2/\nu'}. $$ With the above inequality and Corollary \ref{cor7.5} in hand, we see that all the conditions of the ``crawling of ink drops'' lemma are satisfied; see \cite{Sa80}, \cite[Sect. 2]{KS80}, or \cite[Lemma 3]{BW05} for the lemma. Hence we have \begin{equation}
\label{eq18.34}
|\mathcal{A}(\kappa s)|\le N_4\gamma^{2/\nu'}|\mathcal{B}(s)|. \end{equation}
Now we estimate $\|U\|_{L_p(\Omega)}^p$ in \eqref{eq5.07} by splitting the integral into two depending on the range of $s$. If $\kappa s \ge \gamma^{-1/\nu'}\|U\|_{L_2(\Omega)}$, we use \eqref{eq18.34}. Otherwise, we use \eqref{eq17.57}. Then it follows that \begin{align*}
\|U\|_{L_p(\Omega)}^p &\le N_5\gamma^{(2-p)/\nu'}\big(\|U\|_{L_2(\Omega)}^p+
\big\|\big(\mathcal{M}(F^2)\big)^{1/2}\big\|_{L_p(\Omega)}^p\big)\\
&\quad +N_5\gamma^{2/\nu'}\big\|\big(\mathcal{M}(U^\nu)\big)^{1/\nu}
\big\|_{L_p(\Omega)}^p\\
&\le N_5\gamma^{(2-p)/\nu'}\|U\|_{L_2(\Omega)}^p+N_6\gamma^{(2-p)/\nu'}
\|F\|_{L_p(\Omega)}^p+N_6\gamma^{2/\nu'}\|U\|_{L_p(\Omega)}^p, \end{align*} where we used the Hardy--Littlewood maximal function theorem in the last inequality. By H\"older's inequality, \begin{equation}
\label{eq20.31bb}
\|U\|_{L_2(\Omega)}=\|U\|_{L_2(B_\gamma(x_0)\cap\Omega)}
\le N\|U\|_{L_p(\Omega)}\gamma^{d(1/2-1/p)}. \end{equation} By the choice of $\nu$, $d(p/2-1)+(2-p)/\nu'>2/\nu'$. Thus, we get $$
\|U\|_{L_p(\Omega)}^p \le N_6\gamma^{(2-p)/\nu'}
\|F\|_{L_p(\Omega)}^p
+N_6\gamma^{2/\nu'}\|U\|_{L_p(\Omega)}^p. $$ To get the estimate \eqref{eq16.00}, it suffices to take $\gamma=\gamma(d,n,m,\delta,p)\in (0,1/50]$ sufficiently small such that $N_6\gamma^{2/\nu'}\le 1/2$. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm5}] We again consider the following three cases separately.
{\em Case 1: $p=2$.} In this case, the theorem follows directly from the well-known Lax--Milgram lemma.
{\em Case 2: $p\in (2,\infty)$.} Assertion (i) follows from Theorem \ref{theorem101} and the standard partition of unity argument. By the method of continuity, for Assertion (ii) it suffices to prove the solvability for the operator $\mathcal{L}_1:=\delta_{ij}\Delta^m$, which is not immediate because the domain $\Omega$ is irregular. We approximate $\Omega$ by regular domains. Recall the definition of $\Omega^\varepsilon$ in \eqref{eq2.22}. By Lemma \ref{lem3.11}, for any $\varepsilon\in (0,1/4)$, $\Omega^\varepsilon$ satisfies Assumption \ref{assump3} ($N_0 \gamma^{1/2}$) with a constant $R_1(\varepsilon)>0$. Thanks to Lemma \ref{lem3.19}, there is a sequence of expanding smooth domains $\Omega^{\varepsilon,k}$ which converges to $\Omega^\varepsilon$ as $k\to \infty$. Moreover, $\Omega^{\varepsilon,k}$ satisfies Assumption \ref{assump3} ($N_0 \gamma^{1/2}$) with the constant $R_1(\varepsilon)/2$ which is independent of $k$. In particular, $\Omega^{\varepsilon,k}$ satisfies Assumption \ref{assump1} ($N_0\gamma^{1/2}$) with the constant $R_1(\varepsilon)/2$. By the classical result, there is a constant $\lambda_\varepsilon=\lambda_\varepsilon(d,n,m,p,\delta)\ge \lambda_0$ such that, for any $\lambda>\lambda_\varepsilon$, the equation $$
(-1)^m\mathcal{L}_1\textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha\quad \text{in}\,\,\Omega^{\varepsilon,k} $$ with the conormal derivative boundary condition has a unique solution $\textit{\textbf{u}}^{\varepsilon,k}\in W^{m}_p(\Omega^{\varepsilon,k})$. The a priori estimate above gives \begin{equation}
\label{eq3.07}
\|\textit{\textbf{u}}^{\varepsilon,k}\|_{W^m_p(\Omega^{\varepsilon,k})}\le N_\varepsilon, \end{equation}
where $N_\varepsilon>0$ is a constant independent of $k$. By the weak compactness, there is a subsequence, which is still denoted by $\textit{\textbf{u}}^{\varepsilon,k}$, and functions $\textit{\textbf{v}}^\varepsilon,\textit{\textbf{v}}^\varepsilon_\alpha\in L_p(\Omega^{\varepsilon}),1\le |\alpha|\le m$, such that weakly in $L_p(\Omega^{\varepsilon})$, $$
\textit{\textbf{u}}^{\varepsilon,k}I_{\Omega^{\varepsilon,k}}\rightharpoonup \textit{\textbf{v}}^\varepsilon,\quad D^\alpha \textit{\textbf{u}}^{\varepsilon,k}I_{\Omega^{\varepsilon,k}}\rightharpoonup \textit{\textbf{v}}^\varepsilon_\alpha\quad \forall\, \alpha,\,1\le |\alpha|\le m. $$ It is easily seen that $\textit{\textbf{v}}^\varepsilon_\alpha=D^\alpha \textit{\textbf{v}}^\varepsilon$ in the sense of distributions. Thus, by \eqref{eq3.07} and the weak convergence,
$\textit{\textbf{v}}^{\varepsilon}\in W^m_p(\Omega^\varepsilon)$ is a solution to \begin{equation}
\label{eq4.58}
(-1)^m\mathcal{L}_1\textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha\quad \text{in}\,\,\Omega^{\varepsilon} \end{equation} with the conormal derivative boundary condition. We have proved the solvability for any $\lambda>\lambda_\varepsilon$. Recall that, by Lemma \ref{lem3.11}, $\Omega^\varepsilon$ satisfies Assumption \ref{assump1} ($N_0\gamma^{1/2}$) with $R_0=1/2$. By the a priori estimate in Assertion (i) and the method of continuity, for any $\lambda>\lambda_0$ there is a unique solution $\textit{\textbf{u}}^\varepsilon\in W^m_p(\Omega^\varepsilon)$ to \eqref{eq4.58} with the conormal derivative boundary condition. Moreover, we have \begin{equation}
\label{eq3.07b}
\|\textit{\textbf{u}}^{\varepsilon}\|_{W^m_p(\Omega^{\varepsilon})}\le N, \end{equation}
where $N$ is a constant independent of $\varepsilon$. Again by the weak compactness, there is a subsequence $\textit{\textbf{u}}^{\varepsilon_j}$, and functions $\textit{\textbf{u}},\textit{\textbf{u}}_\alpha\in L_p(\Omega),1\le |\alpha|\le m$, such that weakly in $L_p(\Omega)$, $$
\textit{\textbf{u}}^{\varepsilon_j}I_{\Omega^{\varepsilon_j}}\rightharpoonup \textit{\textbf{u}},\quad D^\alpha \textit{\textbf{u}}^{\varepsilon_j}I_{\Omega^{\varepsilon_j}}\rightharpoonup \textit{\textbf{u}}_\alpha\quad \forall\, \alpha,\,1\le |\alpha|\le m. $$ It is easily seen that $\textit{\textbf{u}}_\alpha=D^\alpha \textit{\textbf{u}}$ in the sense of distributions. Thus, by \eqref{eq3.07b} and the weak convergence, $\textit{\textbf{u}}\in W^m_p(\Omega)$ is a solution to \begin{equation*}
(-1)^m\mathcal{L}_1\textit{\textbf{u}}+\lambda \textit{\textbf{u}}=\sum_{|\alpha|\le m}D^\alpha \textit{\textbf{f}}_\alpha\quad \text{in}\,\,\Omega \end{equation*} with the conormal derivative boundary condition. The uniqueness then follows from the a priori estimate. This completes the proof of Assertion (ii).
{\em Case 3: $p\in (1,2)$.} The a priori estimate in Assertion (i) is a direct consequence of the solvability when $p\in (2,\infty)$ and the duality argument. Then the solvability in Assertion (ii) follows from the a priori estimate by using the same argument as in Case 2.
The theorem is proved. \end{proof}
We now give the proofs of Corollary \ref{cor7} and Theorem \ref{thmB}.
\begin{proof}[Proof of Corollary \ref{cor7}] {\em Case 1: $p=2$.} We define a Hilbert space $$
H:=\{\textit{\textbf{u}}\in W^m_2(\Omega)\,|\,(\textit{\textbf{u}})_\Omega=(D\textit{\textbf{u}})_\Omega=\ldots=(D^{m-1}\textit{\textbf{u}})_\Omega=0\}. $$ By the Lax--Milgram lemma, there is a unique $\textit{\textbf{u}}\in H$ such that for any $\textit{\textbf{v}}\in H$, \begin{equation}
\label{eq22.44} \int_\Omega a_{\alpha\beta}D^\beta \textit{\textbf{u}} D^\alpha \textit{\textbf{v}}\,dx=
\sum_{|\alpha|=m}\int_\Omega (-1)^{|\alpha|}\textit{\textbf{f}}_\alpha D^\alpha \textit{\textbf{v}} \end{equation} and $$
\|D^m \textit{\textbf{u}}\|_{L_2(\Omega)}\le N\sum_{|\alpha|=m}\|\textit{\textbf{f}}_\alpha\|_{L_2(\Omega)}. $$ Note that any function $\textit{\textbf{v}}\in W^m_2(\Omega)$ can be decomposed as a sum of a function in $H$ and a polynomial of degree at most $m-1$. Therefore, \eqref{eq22.44} also holds for any $\textit{\textbf{v}}\in W^m_2(\Omega)$. This implies that $\textit{\textbf{u}}\in W_2^m(\Omega)$ is a solution to the original equation. On the other hand, by the uniqueness of the solution in $H$, any solution $\textit{\textbf{w}}\in W_2^m(\Omega)$ can only differ from $\textit{\textbf{u}}$ by a polynomial of order at most $m-1$.
{\em Case 2: $p\in (2,\infty)$.} First we suppose that $p$ satisfies $1/p>1/2-1/d$. Since $\Omega$ is bounded, $\textit{\textbf{f}}\in L_2(\Omega)$. Let $\textit{\textbf{u}}$ be the unique $H$-solution to the equation. By Theorem \ref{thm5}, there is a unique solution $\textit{\textbf{v}}\in W^m_p(\Omega)$ to the equation \begin{equation}
\label{eq6.01}
(-1)^m\mathcal{L} \textit{\textbf{v}}+(\lambda_0+1)\textit{\textbf{v}}=\sum_{|\alpha|= m}D^\alpha \textit{\textbf{f}}_\alpha+(\lambda_0+1)\textit{\textbf{u}}\quad \text{in}\,\,\Omega \end{equation} with the conormal derivative boundary condition. Moreover, we have \begin{equation}
\label{eq23.17}
\|\textit{\textbf{v}} \|_{W^{m}_p(\Omega)}
\le N \sum_{|\alpha|= m}\|\textit{\textbf{f}}_\alpha \|_{L_p(\Omega)}+N\|\textit{\textbf{u}} \|_{L_p(\Omega)}. \end{equation} By the Sobolev imbedding theorem and the $W^m_2$ estimate, we have $$
\|\textit{\textbf{u}}\|_{L_p(\Omega)}\le N\|\textit{\textbf{u}}\|_{W^1_2(\Omega)}\le N\sum_{|\alpha|= m}\|\textit{\textbf{f}}_\alpha\|_{L_2(\Omega)}\le N\sum_{|\alpha|= m}\|\textit{\textbf{f}}_\alpha\|_{L_{p}(\Omega)}, $$ which together with \eqref{eq23.17} gives $$
\|\textit{\textbf{v}} \|_{W^{m}_p(\Omega)}\le N \sum_{|\alpha|= m}\|\textit{\textbf{f}}_\alpha \|_{L_p(\Omega)}. $$ Since both $\textit{\textbf{v}}$ and $\textit{\textbf{u}}$ are $W^m_2(\Omega)$-solutions to \eqref{eq6.01}, by Theorem \ref{thm5} we have $\textit{\textbf{u}}=\textit{\textbf{v}}$. Therefore, the solvability is proved under the assumption $1/p>1/2-1/d$. The general case follows by using a bootstrap argument. The uniqueness is due to the uniqueness of $W^m_2$-solutions.
{\em Case 3: $p\in (1,2)$.} By the duality argument and Case 2, we have the a priori estimate \eqref{eq23.08} for any $\textit{\textbf{u}}\in W^m_p(\Omega)$ satisfying \eqref{eq22.34} with the conormal derivative boundary condition. For the solvability, we take a sequence $$ \textit{\textbf{f}}^{\,k}_\alpha=\min\{\max\{\textit{\textbf{f}}_\alpha,-k\},k\}\in L_2(\Omega) $$ which converges to $\textit{\textbf{f}}_\alpha$ in $L_p(\Omega)$. Let $\textit{\textbf{u}}^k$ be the $H$-solution to the equation with the right-hand side $\textit{\textbf{f}}^{\,k}_\alpha$. Since $\Omega$ is bounded, we have $\textit{\textbf{u}}^k\in W^m_p(\Omega)$. By the a priori estimate, $\textit{\textbf{u}}^k$ is a Cauchy sequence in $W^m_p(\Omega)$. Then it is easily seen that the limit $\textit{\textbf{u}}$ is the $W^m_p(\Omega)$-solution to the original equation. Next we show the uniqueness. Let $\textit{\textbf{u}}_1$ be another $W^m_p(\Omega)$-solution to the equation. Then $\textit{\textbf{v}}:=\textit{\textbf{u}}-\textit{\textbf{u}}_1\in W^m_p(\Omega)$ satisfies the equation with the zero right-hand side. Following the bootstrap argument in Case 2, we infer that $\textit{\textbf{v}}\in W^m_2(\Omega)$. Therefore, by Case 1, $\textit{\textbf{v}}$ must be a polynomial of degree at most $m-1$.
The corollary is proved. \end{proof}
\begin{proof}[Proof of Theorem \ref{thmB}] The theorem is proved in the same way as Corollary \ref{cor7} in Cases 2 and 3 by using the classical $W^1_2$-estimate of the conormal derivative problem on a domain with a finite measure; see Theorem 13 of \cite{DongKim08a}. We remark that although in Theorem 13 (i) of \cite{DongKim08a} it is assumed that $b_i=c=0$, the same proof works under the relaxed condition $-D_ib_i+c=0$ in $\Omega$ and $b_in_i=0$ on $\partial\Omega$ in the weak sense, i.e., Assumption ($\text{H}^*$). \end{proof}
\def\cprime{$'$}
\end{document} |
\begin{document}
\begin{abstract}
This article deals with the uniqueness and stability issues in the inverse problem of determining the unbounded potential of the Schr\"odinger operator in a bounded domain of $\mathbb{R}^n$, $n \geq 3$, endowed with Robin boundary condition, from knowledge of its boundary spectral data. These data are defined by the pairs formed by the eigenvalues and either partial or full Dirichlet measurement of the eigenfunctions on the boundary of the domain. \end{abstract}
\maketitle
\section{Introduction}
In the present article $\Omega$ is a $C^{1,1}$ bounded domain of $\mathbb{R}^n$, $n\ge 3$, with boundary $\Gamma$, and we equip the two spaces $H:=L^2(\Omega)$ and $V:=H^1(\Omega)$ with their usual scalar product. Put $p:=2n/(n+2)$ and let $p^\ast:=2n/(n-2)$ be its conjugate number, in such a way that $V$ is continuously embedded in $L^{p^\ast}(\Omega)$.
\subsection{The Robin Laplacian} \label{sec-RL} For $\alpha \in L^\infty (\Gamma,\mathbb{R})$ and $q\in L^{n/2} (\Omega,\mathbb{R})$, we introduce the following continuous sesquilinear form $\mathfrak{a} : V\times V\rightarrow \mathbb{C}$ \[ \mathfrak{a}(u,v)=\int_\Omega \nabla u\cdot \nabla \overline{v}dx+\int_\Omega qu\overline{v}dx+\int_\Gamma \alpha u\overline{v}ds(x),\quad u,v\in V. \]
Throughout the entire text, we assume that $\alpha \ge -\mathfrak{c}$ for some constant $\mathfrak{c} \in (0, \mathfrak{n}^{-2})$ almost everywhere on $\Gamma$, where $\mathfrak{n}$ denotes the norm of the (bounded) trace operator $u\in V\mapsto u_{|\Gamma}\in L^2(\Gamma)$. Set \[
\mathrm{Q}(\rho,\aleph):=\{q\in L^\rho(\Omega,\mathbb{R});\; \|q\|_{L^\rho(\Omega)}\le \aleph\},\quad \rho \ge n/2,\; \aleph >0. \] Then, arguing as in the derivation of \cite[Lemma A2]{Po}, we obtain that \begin{equation}\label{ii1}
\|qu^2\|_{L^1(\Omega)}\le \epsilon \|u\|_V^2+C_\epsilon\|u\|_H^2,\quad q\in \mathrm{Q}(n/2,\aleph),\; u\in V,\; \epsilon >0, \end{equation} for some constant $C_\epsilon>0$ depending only on $n$, $\Omega$, $\aleph$ and $\epsilon$. Further, applying \eqref{ii1} with $\epsilon =\kappa:=(1-\mathfrak{c}\mathfrak{n}^2)/2$ yields \begin{equation}\label{co}
\mathfrak{a}(u,u)+ \lambda ^\ast\|u\|_H^2\ge \kappa \|u\|_V^2,\quad u\in V, \end{equation} where $\lambda^\ast>0$ is a constant which depends only on $n$, $\Omega$, $\mathfrak{c}$ and $\aleph$. Let us consider the bounded operator $A:V\rightarrow V^\ast$ defined by \[ \langle Au,v\rangle=\mathfrak{a}(u,v),\quad u,v\in V, \] where $\langle \cdot,\cdot \rangle$ denotes the duality pairing between an arbitrary Banach space and its dual. Notice that $A$ is self-adjoint and coercive according to \eqref{co}.
\subsection{Boundary spectral data} \label{sec-BSD} With reference to \cite[Theorem 2.37]{Mc}, the spectrum of $A$ consists of its eigenvalues $\lambda_k$, $k \in \mathbb{N}:=\{1,2,\ldots \}$, arranged in non-decreasing order and repeated according to their (finite) multiplicities, \[ -\infty <\lambda_1\le \lambda_2\le \ldots \le \lambda_k\le \ldots, \quad \mbox{and\ such\ that}\quad \lim_{k \to \infty}\lambda_k = \infty. \] Moreover, there exists an orthonormal basis $\{ \phi_k,\ k \in \mathbb{N} \}$ of $H$, made of eigenfunctions $\phi_k\in V$ of $A$, satisfying \[ \mathfrak{a}(\phi_k,v)=\lambda_k(\phi_k,v),\quad v\in V,\quad k \in \mathbb{N}, \] where $(\cdot,\cdot)$ is the usual scalar product in $H$. For the sake of shortness, we write \[
\psi_k:=\phi_k{_{|\Gamma}},\quad k \in \mathbb{N}. \]
Recall that for $u\in V$, we have $\Delta u\in H^{-1}(\Omega)$, the space dual to $H_0^1(\Omega)$, but that it is not guaranteed that $\Delta u$ lies in $V^\ast$ (which is strictly embedded in $H^{-1}(\Omega)$). Thus, we introduce \[ W:=\{u\in V; \Delta u\in V^\ast\}. \] Endowed with its natural norm \[
\|u\|_{W}=\|u\|_V+\|\Delta u\|_{V^\ast},\quad u\in W, \] the space $W$ is a Banach space. Next, for $\varphi \in H^{1/2}(\Gamma)$, we set \[
\dot{\varphi}:=\{v\in V;\; v_{|\Gamma}=\varphi\}, \] and we equip the space $H^{1/2}(\Gamma)$ with its graph norm \[
\|\varphi\|_{H^{1/2}(\Gamma)}=\min\{\|v\|_V;\; v\in \dot{\varphi}\}. \] Now, for $u\in W$ fixed, we put \[ \Phi_u (v):=\langle \Delta u , v\rangle+(\nabla u,\nabla v),\quad v\in V, \] apply the Cauchy-Schwarz inequality, and get that \begin{equation}\label{0.1}
|\Phi_u (v)|\le \|\Delta u\|_{V^\ast}\|v\|_V+\|u\|_V\|v\|_V \leq \|u\|_W\|v\|_V. \end{equation}
Moreover, since $C_0^\infty (\Omega)$ is dense in $H_0^1(\Omega)$, it is easy to see that $H_0^1(\Omega)\subset \ker \Phi_u$ and consequently that $\Phi_u (v)$ depends only on $v_{|\Gamma}$. This enables us to define the normal derivative of $u$, denoted by $\partial_\nu u$, as the unique vector in $H^{-1/2}(\Gamma)$ satisfying \[ \langle \partial_\nu u , \varphi\rangle =\Phi_u (v),\quad v \in \dot{\varphi}\hskip.2cm \mbox{is arbitrary}. \] As a consequence we have \[
\|\partial_\nu u\|_{H^{-1/2}(\Gamma)}\le \|u\|_W, \] by \eqref{0.1}, and the following generalized Green formula: \begin{equation} \label{ggf}
\langle\Delta u , v\rangle+(\nabla u,\nabla v)=\langle \partial_\nu u , v_{|\Gamma}\rangle,\quad u\in W,\; v\in V. \end{equation}
Pick $f\in V^\ast$ and $\mu \in \mathbb{C}$, and let $u\in V$ satisfy \begin{equation}\label{vf} \mathfrak{a}(u,v)+\mu(u,v)=\langle f , v\rangle ,\quad v\in V. \end{equation} Using that $C_0^\infty(\Omega) \subset V$, we obtain that \[
\int_\Omega \nabla u\cdot \nabla \overline{v}dx +\int_\Omega qu\overline{v}dx+\mu\int_\Omega u\overline{v}dx=\langle f , v \rangle,\quad v\in C_0^\infty (\Omega),
\langle \partial_\nu u+\alpha u_{|\Gamma} , v_{|\Gamma}\rangle =0,\quad v\in V. \]
Since $v\in V\mapsto v_{|\Gamma}\in H^{1/2}(\Gamma)$ is surjective, the above line reads $\partial_\nu u+\alpha u_{|\Gamma}=0$, showing that \eqref{vf} is the variational formulation of the following boundary value problem (BVP): \[
(-\Delta +q+\mu)u=f\; \mathrm{in}\; \Omega,\quad \partial_\nu u+\alpha u_{|\Gamma}=0\; \mathrm{on}\; \Gamma. \] Thus, taking $\mu=\lambda_k$ for all $k \in \mathbb{N}$, we find that $\phi_k\in W$ satisfies \begin{equation}\label{ee}
(-\Delta +q-\lambda_k)\phi_k=0\; \mathrm{in}\; \Omega,\quad \partial_\nu \phi_k+\alpha \phi_k{_{|\Gamma}}=0\; \mathrm{on}\; \Gamma. \end{equation}
\subsection{Statement of the results} We stick to the notations of the previous sections, that is to say that we write $\tilde{\lambda}_k$ (resp., $\tilde{\phi}_k$, $\tilde{\psi}_k$), $k \in \mathbb{N}$, instead of $\lambda_k$ (resp., $\phi_k$, $\psi_k$) when the potential $\tilde{q}$ is substituted for $q$. Our first result is as follows.
\begin{theorem}\label{theorem1} Let $q$ and $\tilde{q}$ be in $L^{r}(\Omega,\mathbb{R})$, where $r=n/2$ when $n \ge 4$ and $r >n/2$ when $n=3$, and let $\ell \in \mathbb{N}$. Then, the conditions \[ \lambda_k=\tilde{\lambda}_k\ \mbox{for\ all}\ k \ge \ell\quad \mbox{and}\quad \psi_k=\tilde{\psi}_k\ \mbox{on}\ \Gamma\ \mbox{for\ all}\ k\ge 1, \] yield that $q=\tilde{q}$ in $\Omega$. \end{theorem} The claim of Theorem \ref{theorem1} was first established for smooth bounded potentials, in the peculiar case where $\ell=1$, by Nachman, Sylvester and Uhlmann in \cite{NSU}. In the same context (of smooth bounded potentials), their result was extended to $\ell \geq 1$ through a heuristic approach in \cite{Sm}.
In view of stating our stability results, we denote by $\ell^\infty$ (resp., $\ell^2$) the Banach (resp., Hilbert) space of bounded (resp., square summable) sequences of complex numbers $(z_k)$, equipped with the norm \[
\|(z_k)\|_{\ell^\infty}:=\sup_{k\ge 1}|z_k|\ \left({\rm resp.,}\ \|(z_k)\|_{\ell^2}:=\left(\sum_{k\ge 1}|z_k|^2\right)^{1/2}\right), \] and let \[
\ell^2(L^2(\Gamma)):=\left\{ (w_k) \in L^2(\Gamma)^{\mathbb N}\; \mbox{such that}\; (\|w_k\|_{L^2(\Gamma)})\in \ell^2 \right\} \] be endowed with its natural norm \[
\|(w_k)\|_{\ell^2(L^2(\Gamma))}:=\|(\|w_k\|_{L^2(\Gamma)})\|_{\ell^2}. \]
\begin{theorem}\label{theorem2} Fix $\aleph \in (0,\infty)$ and let
$(q,\tilde{q})\in \mathrm{Q}(r,\aleph)^2$, where $r=n/2$ when $n \ge 4$ and $r >n/2$ when $n = 3$, satisfy $q-\tilde{q} \in L^2(\Omega)$. Assume that $(\lambda_k-\tilde{\lambda}_k)\in \ell^\infty$ fulfills $\|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}\le \aleph$ and that $(\psi_k-\tilde{\psi}_k)\in \ell^2(L^2(\Gamma))$. Then, we have \[
\|q-\tilde{q}\|_{H^{-1}(\Omega)}\le C\left( \|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}+ \|(\psi_k-\tilde{\psi}_k)\|_{\ell^2(L^2(\Gamma))} \right)^{2(1-2\beta)/(3(n+2))}, \] where $\beta:=\max \left( 0,n(2-r)/(2r) \right)$ and $C$ is a positive constant depending only on $n$, $\Omega$, $\aleph$ and $\mathfrak{c}$. \end{theorem}
\begin{remark}\label{remark1} {\rm (i) It is worth noticing that we have $\beta=0$ when $n \ge 4$, whereas $\beta \in [0,1/2)$ when $n=3$. Moreover, in the latter case we see that $\beta$ converges to $1/2$ (resp., $0$) as $r$ approaches $3/2$ (resp., $2$) from above (resp., below). \\ (ii) We have $q-\tilde{q} \in L^2(\Omega)$ for all $(q,\tilde{q}) \in \mathrm{Q}(n/2,\aleph)^2$, provided that $n \ge 4$. Nevertheless, this is no longer true when $n=3$, even if $(q,\tilde{q})$ is taken in $\mathrm{Q}(r,\aleph)^2$ with $r \in (n/2,2)$. Hence the additional requirement of Theorem \ref{theorem2} that $q-\tilde{q} \in L^2(\Omega)$ in the three-dimensional case.\\
(iii) When $q-\tilde{q} \in L^\infty(\Omega)$, we have $(\lambda_k-\tilde{\lambda}_k)\in \ell^\infty$ and $\|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}\le \|q-\tilde{q}\|_{L^\infty(\Omega)}$, by the min-max principle. Thus, Theorem \ref{theorem2} remains valid by replacing the condition $\|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}\le \aleph$ by the stronger assumption $\|q-\tilde{q}\|_{L^\infty(\Omega)}\le \aleph$. } \end{remark}
To the best of our knowledge, there is no comparable stability result available in the mathematical literature for Robin boundary conditions, even when the potentials are assumed to be bounded. Nevertheless, it should be pointed out that the variable coefficients case was recently addressed by \cite{BCKPS} in the framework of Dirichlet boundary conditions.
Further downsizing the data needed for retrieving the unknown potential, we seek a stability inequality requesting a local Dirichlet boundary measurement of the eigenfunctions only, i.e. boundary observation of the $\psi_k$'s and $\tilde{\psi}_k$'s that is performed on a strict subset of $\Gamma$. For this purpose we consider a subdomain $\Omega_0$ of $\Omega$ such that $\overline{\Omega}_0$ is a neighborhood of $\Gamma$ in $\overline{\Omega}$, a fixed nonempty open subset $\Gamma_{\ast}$ of $\Gamma$, and for all $\vartheta \in (0,\infty)$ we introduce the function $\Phi_\vartheta : [0,\infty) \to \mathbb{R}$ as \begin{equation} \label{def-Phi} \Phi_\vartheta(t):= \left\{ \begin{array}{cl} 0 & \mbox{if}\ t=0 \\
|\ln t|^{-\vartheta} & \mbox{if}\ t \in (0,1/e) \\ t & \mbox{if}\ t \in [1/e,\infty). \end{array} \right. \end{equation} The corresponding local stability estimate can be stated as follows.
\begin{theorem} \label{theorem3} For $\aleph \in (0,\infty)$ fixed, let $(q,\tilde{q}) \in \mathrm{Q}(n,\aleph)^2$ satisfy $q=\tilde{q}$ on $\Omega_0$. Assume that $\alpha \in C^{0,1}(\Gamma)$, and suppose that $(\lambda_k-\tilde{\lambda}_k)\in \ell^\infty$ and that $(k^{\mathfrak{t}}(\psi_k-\tilde{\psi}_k))\in \ell^2(L^2(\Gamma))$ for some $\mathfrak{t}>4/n+1$, with \[
\|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}\le \aleph,\quad \|(k^\mathfrak{t}(\psi_k-\tilde{\psi}_k))\|_{\ell^2(L^2(\Gamma))}\le \aleph. \]
Then there exist two constants $C>0$ and $\vartheta>0$, both of them depending only on $n$, $\Omega$, $\Omega_0$, $\Gamma_{\ast}$, $\aleph$, $\mathfrak{c}$ and $\|\alpha\|_{C^{0,1}(\Gamma)}$, such that we have: \begin{equation} \label{thm3}
\|q-\tilde{q}\|_{H^{-1}(\Omega)}\le C\Phi_\vartheta\left( \|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}+ \|(k^{-\mathfrak{t}+2/n}(\psi_k-\tilde{\psi}_k))\|_{\ell^2(H^1(\Gamma_{\ast}))} \right). \end{equation} \end{theorem}
\begin{remark}\label{remark2} {\rm
Bearing in mind that the $k$-th eigenvalue, $k \ge 1$, of the unperturbed Neumann Laplacian (i.e. the operator $A$ associated with $q=0$ in $\Omega$ and $\alpha=0$ on $\Gamma$) scales like $k^{2/n}$ when $k$ becomes large, see e.g. \cite[Theorem III.36 and Remark III.37]{Be}, we obtain by combining the min-max principle with \eqref{ii1}, that for all $q\in \mathrm{Q}(n,\aleph)$, \begin{equation}\label{waf}
C^{-1}k^{2/n}\le 1+|\lambda_k|\le Ck^{2/n},\ k\ge 1, \end{equation}
where $C \in (1,\infty)$ is a constant depending only on $n$, $\Omega$, $\mathfrak{c}$ and $\aleph$. In light of Lemma \ref{lemma5.0} below, establishing the $H^2$-regularity of the eigenfunctions $\phi_k$, $k \ge 1$, of $A$, and the energy estimate \eqref{5.1}, it follows from \eqref{waf} that $(k^{-\mathfrak{t}+2/n}\psi_k)\in \ell^2(H^1(\Gamma))$. Therefore, we have $\|(k^{-\mathfrak{t}+2/n}(\psi_k-\tilde{\psi}_k))\|_{\ell^2(H^1(\Gamma_{\ast}))}<\infty$ on the right hand side of \eqref{thm3}. } \end{remark}
\subsection{A short bibliography of the existing literature} \label{sec-literature} The first published uniqueness result for the multidimensional Borg-Levinson problem can be found in \cite{NSU}. The breakthrough idea of the authors of this article was to relate the inverse spectral problem under analysis to the one of determining the bounded potential by the corresponding elliptic Dirichlet-to-Neumann map. This can be understood from the fact that, the Schwartz kernel of the elliptic Dirichlet-to-Neumann operator can be, at least heuristically, fully expressed in terms of the eigenvalues and the normal derivatives of the eigenfunctions. Later on, \cite{Is} proved that the result of \cite{NSU}, which assumes complete knowledge of the boundary spectral data, remains valid when finitely many of them remain unknown.
The stability issue for multidimensional Borg-Levinson type problems was first examined in \cite{AS}. The authors proceed by relating the spectral data to the corresponding hyperbolic Dirichlet-to-Neumann operator, which stably determines the bounded electric potential. We refer the reader to \cite{BCY1,BCY2,BD} for alternative inverse stability results based on this approach.
In all the aforementioned results, the number of unknown spectral data is at most finite (that is to say that the data are either complete or incomplete). Nevertheless, it was proved in \cite{CS} that asymptotic knowledge of the boundary spectral data is enough to H\"older stably retrieve the bounded potential. This result was improved in \cite{KKS,So} by removing all quantitative information on the eigenfunctions of the stability inequality, at the expense of an additional summability condition on their boundary measurements.
In all the articles cited above in this section, the unknown potential is supposed to be bounded. The unique determination of unbounded potentials by either complete or incomplete boundary spectral data is discussed in \cite{PS, Po}, whereas the stability issue for the same problem, but in the variable coefficients case, is examined in \cite{BCKPS}. As for the treatment of the inverse problem of determining the unbounded potential from asymptotic knowledge of the spectral data, we refer the reader to \cite{BKMS} for the uniqueness issue, and to \cite{KS} for the stability issue.
All the above mentioned results were obtained for multidimensional Laplace operators endowed with Dirichlet boundary conditions, except for \cite{NSU} which proved that full knowledge of the boundary spectral data of the Robin Laplacian uniquely determines the unknown electric potential. But, apart from the claim, based on a heuristic approach, of \cite{Sm}, that incomplete knowledge of the spectral data of the multidimensional Robin Laplacian uniquely determines the unknown bounded potential, it seems that, even for a bounded unknown potential $q$, there is no reconstruction result of $q$ by incomplete spectral data, available in the mathematical literature for such operators. In the present article we prove not only unique identification by incomplete spectral data, but also stable determination by either full or local boundary spectral data, of the singular potential of the multidimensional Robin Laplacian.
\subsection{Outline} The remaining part of this paper is structured as follows. In Section \ref{sec-pre} we gather several technical results which are needed by the proof of the three main results of this article. Then we proceed with the proof of Theorems \ref{theorem1}, \ref{theorem2} and \ref{theorem3} in Section \ref{sec-proof}.
\section{Preliminaries} \label{sec-pre}
In this section we collect several preliminary results that are needed by the proof of the main results of this article. We start by noticing, upon applying \eqref{co} with $u=\phi_k$, $k\ge 1$, that \begin{equation}\label{lb} \lambda_k>-\lambda^\ast,\quad k\ge 1. \end{equation}
\subsection{Resolvent estimates}
By \cite[Corollary 2.39]{Mc}, the operator $A-\lambda : V \to V^\ast$ has a bounded inverse whenever $\lambda \in \rho (A):=\mathbb{C}\setminus \sigma(A)$, the resolvent set of $A$. Furthermore, for all $f \in V^\ast$ we have \begin{equation}\label{rf} (A-\lambda)^{-1}f=\sum_{k\ge 1} \frac{\langle f , \phi_k \rangle}{\lambda_k-\lambda}\phi_k, \end{equation} where the series converges in $V$.
For further use, we now establish that the resolvent $(A-\lambda)^{-1}$ may be regarded as a bounded operator from $H$ into the space $K:=\{u\in H;\; Au \in H\}$ endowed with the norm \[
\|u\|_K:=\|u\|_H+\|Au\|_H,\quad u\in K. \]
\begin{lemma} \label{lemma1} For all $\lambda \in \rho(A)$, the operator $(A-\lambda)^{-1}$ is bounded from $H$ into $K$. \end{lemma} \begin{proof} Put $u:=(A-\lambda)^{-1}f$ where $f\in H$ is fixed. Then, we have $(u , \phi_k )=(f,\phi_k)/(\lambda_k-\lambda)$ for all $k \ge 1$, from \eqref{rf}, whence
\begin{equation} \label{es8} Au=\sum_{k\ge1} \frac{\lambda_k}{\lambda_k-\lambda} (f,\phi_k) \phi_k, \end{equation} according to \cite[Theorem 2.37]{Mc}, the series being convergent in $V^\ast$. Moreover, since
$$\sum_{k\ge 1} \frac{\lambda_k^2}{|\lambda_k-\lambda|^2} | (f,\phi_k) |^2 \le \| (\lambda_k / (\lambda_k-\lambda)) \|_{\ell^\infty}^2 \| f \|_H^2<\infty, $$
by the Parseval theorem, the right hand side of \eqref{es8} lies in $H$. Therefore, we have $Au\in H$ and $\| A u \|_H \le \| (\lambda_k / (\lambda_k-\lambda)) \|_{\ell^\infty} \| f \|_H$, and consequently
$u \in K$ and $\| u \|_K \le \| ((1+\lambda_k) / (\lambda_k-\lambda)) \|_{\ell^\infty} \| f \|_H$. \end{proof}
\begin{proposition}\label{proposition1} Let $q\in \mathrm{Q}(n/2,\aleph)$ and let $\lambda \in \rho(A)$. Then, for all $f \in V^\ast$, the following estimate \begin{equation} \label{re2}
\|(A-\lambda)^{-1}f\|_V \le C \|((\lambda_k+\lambda^\ast)/(\lambda_k-\lambda))\|_{\ell^\infty} \|f\|_{V^\ast} \end{equation}
holds with $C=\kappa^{-1/2} \| (A+\lambda^\ast)^{-1}\|_{\mathcal{B}(V^\ast,V)}$, where $\mathcal{B}(V^\ast,V)$ denotes the space of linear bounded operators from $V^\ast$ to $V$. Moreover, in the special case where $f \in H$, we have \begin{equation}
\label{re1}
\|(A-\lambda)^{-1}f\|_H\le \|(1/(\lambda_k-\lambda))\|_{\ell^\infty}\|f\|_H.
\end{equation}
\end{proposition}
\begin{proof} Since \eqref{re1} follows directly from \eqref{rf} and the Parseval formula, it is enough to prove \eqref{re2}. To this purpose we set $u:=(A-\lambda)^{-1} f$ and notice from the obvious identity $\Delta u = (q-\lambda) u - f \in V^\ast$ that $u \in W$. Therefore, by applying \eqref{ggf} with $v=u$, we infer from the coercivity estimate \eqref{co} that \begin{equation} \label{es7}
\kappa \|u\|_V^2\le \langle (A+\lambda^\ast) u , u \rangle_{V^\ast,V}. \end{equation} Let us assume for a while that $f \in H$. Then, with reference to \eqref{es8}, we have $$ (A+\lambda^\ast) u = \sum_{k \ge 1} \frac{\lambda_k+\lambda^\ast}{\lambda_k-\lambda} (f,\phi_k) \phi_k, $$ where the series converges in $H$. It follows from this, \eqref{rf} and \eqref{es7} that \begin{eqnarray} \label{es9}
\kappa \|u\|_V^2 & \le & \sum_{k \ge 1} \frac{\lambda_k+\lambda^\ast}{|\lambda_k-\lambda|^2} |(f,\phi_k)|^2 \\
& \le & \| ( (\lambda_k+\lambda^\ast) / (\lambda_k-\lambda) ) \|_{\ell^\infty}^2
\sum_{k \ge 1} \frac{|(f,\phi_k)|^2}{\lambda_k+\lambda^\ast}. \nonumber \end{eqnarray} Further, taking into account that
$$ \sum_{k \ge 1} \frac{|(f,\phi_k)|^2}{\lambda_k+\lambda^\ast} = \| (A+\lambda^\ast)^{-1} f \|_H^2$$
according to \eqref{rf} and the Parseval formula, and then using that $\| (A+\lambda^\ast)^{-1} f \|_H \le \| (A+\lambda^\ast)^{-1}\|_{\mathcal{B}(V^\ast,V)} \| f \|_{V^\ast}$, we infer from \eqref{es9} that \begin{equation} \label{es9b}
\|u\|_V \leq \kappa^{-1/2} \| (A+\lambda^\ast)^{-1}\|_{\mathcal{B}(V^\ast,V)} \| ( (\lambda_k+\lambda^\ast) / (\lambda_k-\lambda) ) \|_{\ell^\infty}\| f \|_{V^\ast}. \end{equation} Finally, keeping in mind that $u=(A-\lambda)^{-1} f$ and that $(A-\lambda)^{-1} \in \mathcal{B}(V^\ast,V)$, \eqref{re2} follows readily from \eqref{es9b} by density of $H$ in $V^\ast$. \end{proof}
As a byproduct of Proposition \ref{proposition1}, we have the following:
\begin{corollary}\label{corollary1} Let $q\in \mathrm{Q}(n/2,\aleph)$. Then, for all $\tau \in [1,+\infty)$ we have \begin{equation} \label{re4}
\|(A-(\tau+i)^2)^{-1}f\|_H\le (2\tau)^{-1}\|f\|_H,\ f \in H. \end{equation} Moreover, for all $\tau \ge \tau_\ast:=1+(\max(0,2-\lambda^\ast))^{1/2}$, we have \begin{equation} \label{re5}
\|(A-(\tau+i)^2)^{-1}f\|_V \le C (\tau+\lambda^\ast) \|f\|_{V^\ast},\ f \in V^{\ast}, \end{equation} where $C$ is the same constant as in \eqref{re2}. \end{corollary} \begin{proof} As \eqref{re4} is a straightforward consequence of \eqref{re1}, we shall only prove \eqref{re5}. To do that, we refer to \eqref{re2} and notice that \begin{equation} \label{es10}
\frac{\lambda_k+\lambda^\ast}{|\lambda_k-(\tau+i)^2|}= \frac{\lambda_k+\lambda^\ast}{\left( (\lambda_k-(\tau^2-1))^2+4\tau^2 \right)^{1/2}} \leq 2 \Theta(\lambda_k),\ k \geq 1, \end{equation} where we have set
$\Theta(t):=(t+\lambda^\ast)/ (|t-(\tau^2-1)|+ 2\tau)$ for all $t \in [-\lambda^\ast,\infty)$. Further, taking into account that $\Theta$ is a decreasing function on $[\tau^2-1,\infty)$, provided that $\tau \geq \tau_\ast$, we easily get that $$ \sup_{t \in [-\lambda^\ast,+\infty)} \Theta(t) \le \frac{\tau^2-1+\lambda^\ast}{2 \tau} \le \frac{\tau+\lambda^\ast}{2}, $$ which along with \eqref{re2} and \eqref{es10}, yields \eqref{re5}. \end{proof}
\begin{proposition}\label{proposition2} Let $q \in \mathrm{Q}(n/2,\aleph)$. Then, there exists a constant $C>0$, depending only on $n$, $\Omega$, $\mathfrak{c}$ and $\aleph$, such that for all $\sigma \in [0,1]$ and all $f \in L^{p_\sigma}(\Omega)$, we have \begin{equation}\label{re7}
\|(A-(\tau+i)^2)^{-1}f\|_{L^{p_\sigma^\ast}(\Omega)}\le C\tau ^{-1+2\sigma}\|f\|_{L^{p_\sigma}(\Omega)},\ \tau \in [\tau_\ast,\infty), \end{equation} where $p_\sigma:=2n / (n+2 \sigma)$ and $p_\sigma^\ast:=2n / (n-2 \sigma)$ is the conjugate exponent of $p_\sigma$.
\end{proposition}
\begin{proof} In light of \eqref{re5}, we have for all $f \in L^p(\Omega)$, $$
\| (A-(\tau+i)^2)^{-1} f \|_{L^{p^\ast}(\Omega)} \le C \tau \| f \|_{L^p(\Omega)},\ \tau \in [\tau_\ast,\infty), $$ by the Sobolev embedding theorem, where $C$ is a positive constant depending only on $n$, $\Omega$, $\mathfrak{c}$ and $\aleph$. Thus, \eqref{re7} follows from this and \eqref{re4} by interpolating between $H=L^{p_0}(\Omega)$ and $L^p(\Omega)=L^{p_1}(\Omega)$ with the aid of the Riesz-Thorin theorem (see, e.g., \cite[Theorem IX.17]{RS2}). \end{proof}
\subsection{Asymptotic spectral analysis}
Set $\mathfrak{H}:=H^2(\Omega)$ if $n\ne 4$ and put $\mathfrak{H}:=H^{2+\epsilon}(\Omega)$ for some arbitrary $\epsilon >0$, if $n=4$. We notice that $\mathfrak{H} \subset L^\infty(\Omega)$ and that the embedding is continuous, provided that $n=3$ or $n=4$, while $\mathfrak{H}$ is continuously embedded in $L^{2n/(n-4)}(\Omega)$ when $n>4$. The main purpose for bringing $\mathfrak{H}$ into the analysis here is the following useful property: $fu\in H$ whenever $f\in L^{\max(2,n/2)}(\Omega)$ and $u\in \mathfrak{H}$.
Next we introduce the subspace \[
\mathfrak{h}:=\{ g=\partial_\nu G +\alpha G_{|\Gamma};\; G\in \mathfrak{H}\} \] of $L^2(\Gamma)$, equipped with its natural quotient norm \[
\|g\|_{\mathfrak{h}}:=\min\{ \|G\|_{\mathfrak{H}};\; G\in \dot{g}\},\quad g\in \mathfrak{h}, \] where \[
\dot{g}:=\{ G\in \mathfrak{H};\; \partial_\nu G+\alpha G_{|\Gamma}=g\},\quad g\in \mathfrak{h}, \] and we consider the non-homogeneous BVP: \begin{equation}\label{bvp1}
(-\Delta +q-\lambda )u=0\; \mathrm{in}\; \Omega ,\quad \partial_\nu u+\alpha u_{|\Gamma} =g\; \mathrm{on}\; \Gamma . \end{equation}
We first examine the well-posedness of \eqref{bvp1}.
\begin{lemma}\label{lemma2} Let $\lambda\in \rho(A)$ and let $g\in \mathfrak{h}$. Then, the function \begin{equation}\label{sol1} u_\lambda (g):=(A-\lambda )^{-1}(\Delta -q+\lambda)G+G \end{equation} is independent of $G\in \dot{g}$. Moreover, $u_\lambda (g)\in W$ is the unique solution to \eqref{bvp1} and is expressed as \begin{equation}\label{rep1} u_\lambda (g)=\sum_{k\ge 1}\frac{(g,\psi_k)}{\lambda_k-\lambda}\phi_k \end{equation} in $H$, where $(\cdot,\cdot)$ denotes the usual scalar product in $L^2(\Gamma)$. \end{lemma}
\begin{proof} Since $G\in \mathfrak{H}$, it is clear that $(\Delta -q+\lambda)G\in H$. Thus, the right hand side of \eqref{sol1} lies in $W$ and it is obviously a solution to the BVP \eqref{bvp1}. Moreover, $\lambda$ being taken in the resolvent set of $A$, this solution is unique.
Further, for all $G_1$ and $G_2$ in $\dot{g}$, it is easy to check that $\partial_\nu (G_1-G_2) + \alpha (G_1-G_2)=0$ on $\Gamma$ and that $(A-\lambda )^{-1}(\Delta -q+\lambda)(G_1-G_2)=-(G_1-G_2)$ in $\Omega$. Therefore, the function $u_\lambda (g)$ given by \eqref{sol1}, is independent of $G\in \dot{g}$.
We turn now to showing \eqref{rep1}. To do that we apply the generalized Green formula \eqref{ggf} with $u=u_\lambda(g)$ and $v=\phi_k$, $k \geq 1$. We obtain \[
\langle \Delta u_\lambda(g) , \phi_k\rangle+(\nabla u_\lambda(g),\nabla \phi_k)=\langle \partial_\nu u_\lambda(g) , \psi_k\rangle, \] which may be equivalently rewritten as \begin{equation}\label{2.1}
((q-\lambda) u_\lambda(g),\phi_k)+(\nabla u_\lambda(g),\nabla \phi_k)=\langle g-\alpha u_\lambda(g)_{|\Gamma} ,\psi_k\rangle. \end{equation} Doing the same with $u=\phi_k$ and $v=u_\lambda(g)$, and taking the conjugate of both sides of the obtained equality, we find that
$$( u_\lambda(g), (q-\lambda_k) \phi_k)+( \nabla u_\lambda(g), \nabla \phi_k)=-\langle u_\lambda(g)_{|\Gamma} , \alpha \psi_k \rangle. $$ Bearing in mind that $q$ and $\alpha$ are real-valued, and that $\lambda_k \in \mathbb{R}$, this entails that \begin{equation}\label{2.2}
((q-\lambda_k)u_\lambda(g),\phi_k)+(\nabla u_\lambda(g),\nabla \phi_k)=-\langle \alpha u_\lambda(g)_{|\Gamma} , \psi_k\rangle. \end{equation} Now, taking the difference of \eqref{2.1} with \eqref{2.2}, we end up getting that \[ (\lambda_k-\lambda )(u_\lambda(g),\phi_k)=\langle g , \psi_k \rangle=(g,\psi_k). \] This and the basic identity \[ u_\lambda(g)=\sum_{k\ge 1}(u_\lambda(g),\phi_k)\phi_k \] yield \eqref{rep1}. \end{proof}
The series on the right hand side of \eqref{rep1} converges only in $H$ and thus we cannot deduce an expression of the trace $u_\lambda(g)_{| \Gamma}$ in terms of $\lambda_k$ and $\psi_k$, $k \geq 1$, directly from \eqref{rep1}. To circumvent this difficulty we establish the following lemma:
\begin{lemma}\label{lemma4} Let $g\in \mathfrak{h}$. Then, for all $\lambda$ and $\mu$ in $\rho(A)$, we have \begin{equation}\label{s2}
u_\lambda(g){_{|\Gamma}} -u_\mu(g){_{|\Gamma}}=(\lambda-\mu)\sum_{k\ge 1}\frac{(g,\psi_k)}{(\lambda_k-\lambda)(\lambda_k-\mu)}\psi_k, \end{equation} and the series converges in $H^{1/2}(\Gamma)$. \end{lemma}
\begin{proof} Notice that \[ (-\Delta +q-\lambda )(u_\lambda -u_\mu)=(\lambda-\mu)u_\mu \]
in $\Omega$ and that $\partial_\nu(u_\lambda -u_\mu)+\alpha(u_\lambda -u_\mu)_{|\Gamma}=0$ on $\Gamma$, where, for shortness sake, we write $u_\lambda =u_\lambda (g)$ and $u_\mu=u_\mu(g)$. Thus, we have \[ u_\lambda -u_\mu=(\lambda-\mu)(A-\lambda)^{-1}u_\mu =(\lambda-\mu)\sum_{k\ge 1}\frac{(u_\mu,\phi_k)}{\lambda_k-\lambda}\phi_k. \] On the other hand, since \[ (u_\mu,\phi_k)=\frac{(g,\psi_k)}{\lambda_k-\mu},\ k \geq 1, \] from \eqref{rep1}, we obtain that \begin{equation}\label{s1} u_\lambda -u_\mu=(\lambda-\mu)\sum_{k\ge 1}\frac{(g,\psi_k)}{(\lambda_k-\lambda)(\lambda_k-\mu)}\phi_k. \end{equation} Moreover, we have \[ \sum_{k\ge 1}\frac{(g,\psi_k)}{(\lambda_k-\lambda)(\lambda_k-\mu)}(A-\lambda)\phi_k=\sum_{k\ge 1}\frac{(g,\psi_k)}{\lambda_k-\mu}\phi_k, \] the series being convergent in $H$. It follows from this and \eqref{s1} that \[ u_\lambda -u_\mu=(\lambda-\mu) (A-\lambda)^{-1}\sum_{k\ge 1}\frac{(g,\psi_k)}{\lambda_k-\mu}\phi_k, \] where the series on the right hand side of \eqref{s1} converges in $V$. As a consequence we have \begin{equation}
u_\lambda{_{|\Gamma}} -u_\mu{_{|\Gamma}}=(\lambda-\mu)\sum_{k\ge 1}\frac{(g,\psi_k)}{(\lambda_k-\lambda)(\lambda_k-\mu)}\psi_k, \end{equation} the series being convergent in $H^{1/2}(\Gamma)$. \end{proof}
Next, we establish the following {\it a priori} estimate for the solution to \eqref{bvp1}.
\begin{lemma}\label{lemma3} Let $q\in \mathrm{Q}(n/2,\aleph)$. Then, there exist two constants $\lambda_+>0$ and $C>0$, depending only on $n$, $\Omega$, $\aleph$ and $\mathfrak{c}$, such that for all $\lambda \in (-\infty,-\lambda_+]$ and all $g\in \mathfrak{h}$, the solution $u_\lambda (g)$ to \eqref{bvp1} satisfies the estimate \begin{equation}\label{lim1}
|\lambda|^{1/2} \|u_\lambda(g)\|_H+\|u_\lambda (g)\|_V\le C\|g\|_{L^2(\Gamma)}. \end{equation} \end{lemma}
\begin{proof} Fix $\lambda \in \rho(A)\cap (-\infty ,0)$. We apply the generalized Green formula \eqref{ggf} with $u=v:=u_\lambda$, where we write $u_\lambda$ instead of $u_\lambda(g)$. We get that \begin{equation}\label{ae1}
|\lambda| \|u_\lambda\|_H^2+\|\nabla u_\lambda\|_H^2 \leq \|qu_\lambda ^2\|_{L^1(\Omega)}-(\alpha u_\lambda,u_\lambda)+(g,u_\lambda). \end{equation} Next, $\epsilon$ being fixed in $(0,+\infty)$, we combine \eqref{ii1} with \eqref{ae1} and obtain \begin{equation}\label{ae2}
|\lambda |\|u_\lambda\|_H^2+\|\nabla u_\lambda\|_H^2\le \epsilon\|u_\lambda\|_V^2+C_\epsilon \|u_\lambda\|_H^2+\mathfrak{c}\mathfrak{n} ^2\|u_\lambda\|_V^2+\mathfrak{n} \|g\|_{L^2(\Gamma)}\|u_\lambda\|_V, \end{equation} where $C_\epsilon$ is a positive constant depending only on $n$, $\Omega$, $\aleph$ and $\epsilon$. Taking $\epsilon = \kappa =(1- \mathfrak{c}\mathfrak{n}^2)/2$ in \eqref{ae2} then yields \[
(|\lambda |-1-C_\kappa)\|u_\lambda\|_H^2+\kappa\|u_\lambda\|_V^2\le \mathfrak{n} \|g\|_{L^2(\Gamma)}\|u_\lambda\|_V. \] As a consequence we have
$$ | \lambda | \| u_\lambda \|_H^2 + \| u_\lambda \|_V^2 \leq \frac{2\mathfrak{n}^2}{\kappa^2} \|g\|_{L^2(\Gamma)}^2, $$
whenever $|\lambda| \geq (1+C_\kappa) \slash (1 - \kappa \slash 4)$, and \eqref{lim1} follows readily from this. \end{proof}
Armed with Lemma \ref{lemma3} we can examine the dependence of (the trace of) the solution to the BVP \eqref{bvp1} with respect to $q$. More precisely, we shall establish that the influence of the potential on $u_\lambda(g)$ is, in some sense, dimmed as the spectral parameter $\lambda$ goes to $-\infty$. \begin{lemma}\label{lemma5.1} Let $q$ and $\tilde{q}$ be in $\mathrm{Q}(n/2,\aleph)$.
Then, for all $g\in \mathfrak{h}$, we have \begin{equation}\label{lim2}
\lim_{\lambda=\Re \lambda \rightarrow -\infty}\|u_\lambda(g)_{|\Gamma}-\tilde{u}_\lambda(g)_{|\Gamma}\|_{H^{1/2}(\Gamma)}=0. \end{equation} \end{lemma}
\begin{proof} Let $\lambda \in (-\infty,-\lambda_+]$, where $\lambda_+$ is the same as in Lemma \ref{lemma3}. We use the same notation as in the proof of Lemma \ref{lemma3} and write $u_\lambda$ (resp., $\tilde{u}_\lambda$) instead of $u_\lambda(g)$ (resp., $\tilde{u}_\lambda(g)$). Since \[ (-\Delta +q-\lambda )(u_\lambda - \tilde{u}_\lambda)=(\tilde{q}-q)\tilde{u}_\lambda\quad \mathrm{in}\; \Omega \] and \[
\partial_\nu(u_\lambda - \tilde{u}_\lambda)+\alpha (u_\lambda - \tilde{u}_\lambda)_{|\Gamma}=0\quad \mbox{on}\; \Gamma, \] we have $$ u_\lambda - \tilde{u}_\lambda=(A-\lambda)^{-1} ((\tilde{q}-q)\tilde{u}_\lambda), $$ whence \begin{equation} \label{a1}
\| u_\lambda - \tilde{u}_\lambda \|_V \le C \|((\lambda_k+\lambda^\ast)/(\lambda_k-\lambda))\|_{\ell^\infty} \| (\tilde{q}-q)\tilde{u}_\lambda\|_{V^\ast}, \end{equation} by \eqref{re2}, where $C$ is a positive constant which is independent of $\lambda$.
We are left with the task of estimating $\| (\tilde{q}-q)\tilde{u}_\lambda\|_{V^\ast}$. For this purpose, we notice from $\tilde{q}-q \in L^{n/2}(\Omega)$ and from $\tilde{u}_\lambda \in L^{p^\ast}(\Omega)$ that $(\tilde{q}-q)\tilde{u}_\lambda \in L^p(\Omega)$. Thus, bearing in mind that the embedding $V \subset L^{p^\ast}(\Omega)$ is continuous, we infer from H\"older's inequality that \begin{eqnarray*}
\| (\tilde{q}-q)\tilde{u}_\lambda\|_{V^\ast}
& \le & \| \tilde{q}-q \|_{L^{n/2}(\Omega)} \| \tilde{u}_\lambda \|_{L^{p^\ast}(\Omega)} \\
& \le & 2 \aleph \| \tilde{u}_\lambda \|_{V}. \end{eqnarray*} In light of \eqref{lim1}, this entails that
$$\| (\tilde{q}-q)\tilde{u}_\lambda\|_{V^\ast} \leq C \|g\|_{L^2(\Gamma)},$$
for some constant $C$ depending only on $n$, $\Omega$, $\aleph$ and $\mathfrak{c}$. From this, \eqref{a1} and the continuity of the trace operator $w\in V\mapsto w_{|\Gamma}\in H^{1/2}(\Gamma)$, we obtain that \[
\|(u_\lambda)_{| \Gamma}-(\tilde{u}_\lambda)_{|\Gamma}\|_{H^{1/2}(\Gamma)}\le C \|((\lambda_k+\lambda^\ast)/(\lambda_k-\lambda))\|_{\ell^\infty} \|g\|_{L^2(\Gamma)}, \] where $C$ is independent of $\lambda$. Now \eqref{lim2} follows immediately from this upon sending $\lambda$ to $-\infty$ on both sides of the above inequality. \end{proof}
\subsection{$H^2$-regularity of the eigenfunctions} For all $q \in L^{n/2}(\Omega)$, we have $\phi_k \in V$, $k \ge 1$, but it is not guaranteed in general that $\phi_k \in H^2(\Omega)$. Nevertheless, we shall establish that the regularity of the eigenfunctions of $A$ can be upgraded to $H^2$, provided that the potential $q$ is taken in $L^n(\Omega)$.
\begin{lemma}\label{lemma5.0} Let $q\in \mathrm{Q}(n,\aleph)$ and assume that $\alpha\in C^{0,1}(\Gamma)$. Then, for all $k\in \mathbb{N}$, we have $\phi_k\in H^2(\Omega)$ and the estimate \begin{equation}\label{5.1}
\|\phi_k\|_{H^2(\Omega)}\le C(1+|\lambda_k|), \end{equation}
where $C$ is a positive constant depending only on $n$, $\Omega$, $\aleph$ and $\|\alpha\|_{C^{0,1}(\Gamma)}$. \end{lemma}
\begin{proof} Let us start by noticing from \eqref{co} that \begin{equation}\label{5.0}
\|\phi_k\|_V\le \kappa^{-1/2}(\lambda_k+\lambda ^\ast)^{1/2},\ k \geq 1. \end{equation} On the other hand we have $q\phi_k\in H$ for all $k\in \mathbb{N}$, and the estimate
\begin{equation}\label{5.0.1}
\|q\phi_k\|_H\le \|q\|_{L^n(\Omega)}\|\phi_k\|_{L^{p^\ast}(\Omega)}\le C_0 \|\phi_k\|_V,
\end{equation} where $C_0$ is a positive constant depending only on $n$, $\Omega$, $\mathfrak{c}$ and $\aleph$.
Next, bearing in mind that $\alpha \phi_k{_{|\Gamma}} \in H^{1/2}(\Gamma)$, we pick
$\phi_k^0\in H^2(\Omega)$ such that $\partial_\nu \phi_k^0=\alpha \phi_k{_{|\Gamma}}$. Evidently, we have
\[
-\Delta (\phi_k+\phi_k^0)=(\lambda_k-q)\phi_k-\Delta\phi_k^0\; \mbox{in}\; \Omega \quad \mbox{and} \quad \partial_\nu(\phi_k+\phi_k^0)=0\; \mbox{on}\; \Gamma.
\] Since $(\lambda_k-q)\phi_k-\Delta\phi_k^0\in H$, \cite[Theorem 3.17]{Tr} then yields that $\phi_k+\phi_k^0\in H^2(\Omega)$. As a consequence we have $\phi_k=(\phi_k+\phi_k^0) -\phi_k^0 \in H^2(\Omega)$ and \[
\|\phi_k\|_{H^2(\Omega)}\le C_1(\|(\lambda_k-q)\phi_k\|_H+\|\phi_k\|_V) \]
for some constant $C_1>0$ which depends only on $n$, $\Omega$ and $\|\alpha\|_{C^{0,1}(\Gamma)}$, by \cite[Lemma 3.181]{Tr} (see also \cite[Theorem 2.3.3.6]{Gr}). Putting this together with \eqref{5.0}-\eqref{5.0.1}, we obtain \eqref{5.1}. \end{proof}
\section{Proof of Theorems \ref{theorem1}, \ref{theorem2} and \ref{theorem3}} \label{sec-proof}
\subsection{Proof of Theorem \ref{theorem1}} \label{sec-prthm1}
We use the same notations as in the previous sections. Namely, we denote by $\tilde{A}$ the operator generated in $H$ by $\mathfrak{a}$ where $\tilde{q}$ is substituted for $q$, and we write $u_\lambda$ (resp., $\tilde{u}_\lambda$) instead of $u_\lambda (g)$ (resp., $\tilde{u}_\lambda(g)$). Let $\lambda \in \mathbb{C}\setminus \mathbb{R}$ and pick $\mu$ in $\rho(A)\cap \rho(\tilde{A})$. Depending on whether $\ell=1$ or $\ell \ge 2$, we have either \[
(u_\lambda)_{|\Gamma} -(u_\mu)_{|\Gamma}=(\tilde{u}_\lambda)_{|\Gamma} - (\tilde{u}_\mu)_{|\Gamma} \] or \begin{eqnarray*}
& & (u_\lambda)_{|\Gamma} -(u_\mu)_{|\Gamma}-(\lambda-\mu) \sum_{k= 1}^{\ell-1}\frac{(g,\psi_k)}{(\lambda_k-\lambda)(\lambda_k-\mu)}\psi_k \\
&=& (\tilde{u}_\lambda)_{|\Gamma} -(\tilde{u}_\mu)_{|\Gamma}-(\lambda-\mu)\sum_{k= 1}^{\ell-1}\frac{(g,\psi_k)}{(\tilde{\lambda}_k-\lambda)(\tilde{\lambda}_k-\mu)}\psi_k, \end{eqnarray*} by virtue of \eqref{s2}. Sending $\Re \mu$ to $-\infty$ in these two identities, where $\Re \mu$ denotes the real part of $\mu$, we get with the help of \eqref{lim2} that \begin{equation}\label{RtoD}
(u_\lambda)_{|\Gamma}- (\tilde{u}_\lambda)_{|\Gamma}=R_\lambda^\ell, \end{equation} where \[ R_\lambda^\ell=R_\lambda^\ell (g) := \left\{ \begin{array}{ll} 0 & \mbox{if}\ \ell=1 \\ \sum_{k= 1}^{\ell-1}\frac{(\tilde{\lambda}_k -\lambda_k)(g,\psi_k)}{(\lambda_k-\lambda)(\tilde{\lambda}_k-\lambda)}\psi_k & \mbox{if}\ \ell \ge 2. \end{array} \right. \] Notice for further use that there exists $\lambda_*>0$ such that the estimate \begin{equation}\label{Re}
| \langle R_\lambda ^\ell ,h\rangle|\le \frac{C_\ell}{|\lambda|^2}\|g\|_{L^2(\Gamma)}\|h\|_{L^2(\Gamma)},\quad |\lambda |\ge \lambda_*,\quad g,h\in \mathfrak{h}, \end{equation} holds for some constant $C_\ell=C_\ell(q,\tilde{q})$ which is independent of $\lambda$.
Let us now consider two functions $G \in \mathfrak{H}$ and $H \in \mathfrak{H}$, that will be made precise below, and put $u:=(A-\lambda)^{-1}(\Delta -q+\lambda)G+G$, $g:=\partial_\nu G+\alpha G_{|\Gamma}$ and $h:=\partial_\nu H +\alpha H_{|\Gamma}$. Then, bearing in mind that $\partial_\nu u + \alpha u_{| \Gamma}=g$, the Green formula yields that \begin{equation} \label{eq-G} \int_\Gamma u\overline{h}ds(x)=\int_\Gamma g\overline{H}ds(x)+\int_\Omega (u\Delta\overline{H}-\Delta u\overline{H})dx. \end{equation} Further, taking into account that $\Delta u=(q-\lambda )u$ in $\Omega$, we see that \begin{eqnarray*} u\Delta\overline{H}-\Delta u\overline{H} & =& u (\Delta -q+\lambda )\overline{H} \\ & = & \left( (A-\lambda)^{-1}(\Delta -q+\lambda )G+G \right) (\Delta -q+\lambda )\overline{H}. \end{eqnarray*} Thus, assuming that $(\Delta +\lambda)G=(\Delta +\lambda)H=0$, the above identity reduces to \[ u\Delta\overline{H}-\Delta u\overline{H}= -\left( -(A-\lambda)^{-1}qG+G \right) q\overline{H}, \] and \eqref{eq-G} then reads \begin{equation}\label{id1} \int_\Gamma u\overline{h}ds(x)=\int_\Gamma g\overline{H}ds(x)-\int_\Omega \left( -(A-\lambda)^{-1}qG+G \right) q\overline{H} dx. \end{equation}
This being said, we set $\lambda_\tau:=(\tau+i)^2$ for some fixed $\tau \in [1,+\infty)$, pick two vectors $\omega$ and $\theta$ in $\mathbb{S}^{n-1}$, and we consider the special case where \[ G(x)=\mathfrak{e}_{\lambda_\tau,\omega}(x):=e^{i\sqrt{\lambda_\tau}\omega \cdot x},\quad \overline{H}(x)=\mathfrak{e}_{\lambda_\tau,-\theta}(x):= e^{-i\sqrt{\lambda_\tau}\theta \cdot x}. \] Next, we put $$ S(\lambda_\tau,\omega ,\theta) :=\int_\Gamma u_\lambda (g)\overline{h}ds(x),\ \quad \tilde{S}(\lambda_\tau,\omega ,\theta) :=\int_\Gamma \tilde{u}_\lambda (g)\overline{h}ds(x), $$ in such a way that \begin{equation} \label{eq-S} S(\lambda_\tau,\omega ,\theta)-\tilde{S}(\lambda_\tau,\omega ,\theta)=\langle R_{\lambda_\tau} ^\ell(g) , h\rangle. \end{equation} Then, taking into account that \[ g(x)=(i\sqrt{\lambda_\tau}\omega \cdot \nu+\alpha)e^{i\sqrt{\lambda_\tau}\omega \cdot x},\quad \overline{h}(x)= (-i\sqrt{\lambda_\tau}\theta \cdot \nu+\alpha)e^{-i\sqrt{\lambda_\tau}\theta \cdot x}, \]
we have $\|g\|_{L^2(\Gamma)}\|h\|_{L^2(\Gamma)}\le C\tau^2$ for some positive constant $C$ which is independent of $\omega$, $\theta$ and $\tau$, and we infer from \eqref{Re} and \eqref{eq-S} that \begin{equation} \label{S1} \lim_{\tau \rightarrow \infty}\sup_{\omega,\theta \in \mathbb{S}^{n-1}} \left( S(\lambda_\tau,\omega ,\theta)-\tilde{S}(\lambda_\tau,\omega ,\theta) \right)=0. \end{equation} On the other hand, \eqref{id1} reads \begin{equation} \label{S1b} S(\lambda_\tau,\omega ,\theta) =S_0(\lambda_\tau,\omega ,\theta) +\int_\Gamma(i\sqrt{\lambda_\tau}\omega \cdot\nu +\alpha)e^{-i\sqrt{\lambda_\tau}(\theta-\omega)\cdot x}ds(x), \end{equation} where \begin{equation} \label{id3} S_0(\lambda_\tau,\omega ,\theta):=\int_\Omega (A-\lambda_\tau)^{-1}(q\mathfrak{e}_{\lambda_\tau,\omega})q\mathfrak{e}_{\lambda_\tau,-\theta}dx-\int_\Omega qe^{-i\sqrt{\lambda_\tau}(\theta-\omega)\cdot x}dx. \end{equation}
Now, we fix $\xi$ in $\mathbb{R}^n$, pick $\eta \in \mathbb{S}^{n-1}$ such that $\xi \cdot \eta =0$, and for all $\tau \in \left( |\xi|/2,+\infty \right)$ we set \begin{equation} \label{es4}
\omega_\tau :=\left(1-|\xi|^2/(4\tau ^2)\right)^{1/2}\eta -\xi/(2\tau),\quad \theta_\tau :=\left(1-|\xi|^2/(4\tau ^2)\right)^{1/2}\eta +\xi/(2\tau) \end{equation} in such a way that \begin{equation} \label{id3b} \lim_{\tau \rightarrow +\infty} \sqrt{\lambda_\tau}(\theta_\tau-\omega_\tau) = \xi. \end{equation} Evidently, we have \begin{equation} \label{gs0}
\|\mathfrak{e}_{\lambda_\tau,\omega_\tau}\|_{L^\infty (\Omega)}\le \|e^{|x|} \|_{L^\infty (\Omega)},\quad \|\mathfrak{e}_{\lambda_\tau,-\theta_\tau}\|_{L^\infty (\Omega)}\le \|e^{|x|} \|_{L^\infty (\Omega)}. \end{equation} Next, with reference to the notations $\beta=\max \left( 0,n(2-r)/(2r) \right)$ and $p_\sigma=2n / (n+2\sigma)$, $\sigma \in [0,1]$, of Theorem \ref{theorem2} and Proposition \ref{proposition2}, respectively, we see that $\beta=0$ and hence that $p_\beta=p_0=2$, when $n \ge 4$, whereas $p_\beta=r \in (3/2,2)$, when $n=3$. Thus, we have $p_\beta\le r$ whenever $n \ge 3$, and consequently $q \in L^{p_\beta}(\Omega)$. It follows from this and \eqref{gs0} that $q\mathfrak{e}_{\lambda_\tau,\omega_\tau}$ and $q\mathfrak{e}_{\lambda_\tau,-\theta_\tau}$ lie in $L^{p_\beta}(\Omega)$ and satisfy the estimate \begin{equation} \label{gs1}
\|q\mathfrak{e}_{\lambda_\tau,\omega_\tau}\|_{L^{p_\beta}(\Omega)}+\|q\mathfrak{e}_{\lambda_\tau,-\theta_\tau} \|_{L^{p_\beta}(\Omega)}\le C \| q \|_{L^r(\Omega)},\quad \tau \in (|\xi|/2,\infty), \end{equation}
for some positive constant $C=C(n,\Omega)$ depending only on $n$ and $\Omega$. Moreover, for all $\tau \ge \max(|\xi|/2,\tau_\ast)$, we have \begin{eqnarray} \label{gm0}
& & \left| \int_\Omega (A-\lambda_\tau)^{-1}(q\mathfrak{e}_{\lambda_\tau,\omega_\tau})q\mathfrak{e}_{\lambda_\tau,-\theta_\tau}dx \right| \\
& \leq & \| (A-\lambda_\tau)^{-1}(q\mathfrak{e}_{\lambda_\tau,\omega_\tau}) \|_{L^{p_\beta^\ast}(\Omega)} \|q\mathfrak{e}_{\lambda_\tau,-\theta_\tau} \|_{L^{p_\beta}(\Omega)} \nonumber\\
& \le & C \tau^{-1+2\beta} \|q\mathfrak{e}_{\lambda_\tau,\omega_\tau}\|_{L^{p_\beta}(\Omega)} \|q\mathfrak{e}_{\lambda_\tau,-\theta_\tau} \|_{L^{p_\beta}(\Omega)},\ \nonumber \end{eqnarray} by \eqref{re7}, where $C>0$ is independent of $\tau$. Since $\beta \in [0,1/2)$ from its very definition, we infer from \eqref{gs1}-\eqref{gm0} that \begin{equation} \label{gs2}
\lim_{\tau \rightarrow \infty}\left| \int_\Omega (A-\lambda_\tau)^{-1}(q\mathfrak{e}_{\lambda_\tau,\omega_\tau})q\mathfrak{e}_{\lambda_\tau,-\theta_\tau}dx \right|=0, \end{equation} which together with \eqref{id3}-\eqref{id3b} yields that \[ \lim_{\tau \rightarrow \infty} S_0(\lambda_\tau ,\omega_\tau ,\theta_\tau) =-\int_\Omega qe^{-i\xi \cdot x}dx,\quad \xi \in \mathbb{R}^n. \] From this and the identity \[ \lim_{\tau \rightarrow \infty}\left( S_0(\lambda_\tau ,\omega_\tau ,\theta_\tau)-\tilde{S}_0(\lambda_\tau ,\omega_\tau ,\theta_\tau) \right)=\lim_{\tau \rightarrow \infty} \left( S(\lambda_\tau,\omega_\tau ,\theta_\tau)-\tilde{S}(\lambda_\tau,\omega_\tau ,\theta_\tau) \right)=0, \] arising from \eqref{S1}-\eqref{S1b}, it then follows that \[ \int_\Omega (q-\tilde{q})e^{-i\xi \cdot x}dx=0,\quad \xi \in \mathbb{R}^n. \] Otherwise stated, the Fourier transform of $(q-\tilde{q})\chi_\Omega$, where $\chi_\Omega$ is the characteristic function of $\Omega$, is identically zero in $\mathscr{S}'(\mathbb{R}^n)$. By the injectivity of the Fourier transformation, this entails that $q=\tilde{q}$ in $\Omega$.
\subsection{Proof of Theorem \ref{theorem2}} \label{sec-prthm2} Let $\omega$ and $\theta$ be in $\mathbb{S}^{n-1}$, and let $\lambda\in \mathbb{C}\setminus\mathbb{R}$. We use the same notations as in the proof of Theorem \ref{theorem1}. Namely, for all $x \in \Gamma$, we write $$ g(x)=g_\lambda(x)=(i\sqrt{\lambda}\omega \cdot \nu+\alpha)e^{i\sqrt{\lambda}\omega \cdot x},\ \overline{h}(x)=\overline{h}_\lambda(x)= (-i\sqrt{\lambda}\theta \cdot \nu+\alpha)e^{-i\sqrt{\lambda}\theta \cdot x}$$ and we recall that $S(\lambda,\omega ,\theta)=\int_\Gamma u_\lambda (g)\overline{h}ds(x)$. Next, for all $\mu \in \rho(A)\cap \rho(\tilde{A})$ we set \begin{equation} \label{4.0} T(\lambda ,\mu)=T(\lambda ,\mu,\omega ,\theta):=S(\lambda,\omega ,\theta)-S(\mu,\omega ,\theta)=\int_\Gamma \left(u_\lambda (g)-u_\mu (g) \right)\overline{h}ds(x). \end{equation} By Lemma \ref{lemma4}, we have
\[ T(\lambda ,\mu)= (\lambda-\mu)\sum_{k\ge 1}\frac{d_k}{(\lambda_k-\lambda)(\lambda_k-\mu)},\ d_k:=(g,\psi_k)(\psi_k,h), \]
and hence \begin{equation} \label{dt} T(\lambda ,\mu)-\tilde{T}(\lambda ,\mu)=U(\lambda ,\mu)+V(\lambda ,\mu), \end{equation} where \begin{eqnarray} U(\lambda ,\mu)&:=&\sum_{k\ge 1} \frac{\lambda-\mu}{\lambda_k-\mu} \frac{d_k-\tilde{d}_k}{\lambda_k-\lambda}, \label{du} \\ V(\lambda ,\mu)&:=&\sum_{k\ge 1}\left(\frac{\lambda-\mu}{(\lambda_k-\lambda)(\lambda_k-\mu)}-\frac{\lambda-\mu}{(\tilde{\lambda}_k-\lambda)(\tilde{\lambda}_k-\mu)}\right)\tilde{d}_k. \label{dv} \end{eqnarray}
Notice that for all $k \in \mathbb{N}$, we have $d_k-\tilde{d}_k=(g,\psi_k-\tilde{\psi}_k) (\psi_k,h)+ (g,\tilde{\psi}_k)(\psi_k-\tilde{\psi}_k,h)$, which immediately entails that
\begin{equation} \label{es0}
\frac{|d_k-\tilde{d}_k|}{|\lambda_k-\lambda|}\le \left(\frac{|(g,\psi_k)|}{|\lambda_k-\lambda|}\|h\|_{L^2(\Gamma)}+\rho_k(\lambda)\frac{|(\tilde{\psi}_k,h)|}{|\tilde{\lambda}_k-\lambda|}\|g\|_{L^2(\Gamma)}\right)\|\psi_k-\tilde{\psi}_k\|_{L^2(\Gamma)}, \end{equation}
where $\rho_k(\lambda):=|\tilde{\lambda}_k-\lambda|/|\lambda_k-\lambda|$. Further, since $0 \le \rho_k(\lambda) \le 1+|\lambda_k-\tilde{\lambda}_k| / |\lambda_k-\lambda|$ and $(\lambda_k-\tilde{\lambda}_k)\in \ell^\infty$ by assumption, with $\|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}\le \aleph$, it is apparent that $(\rho_k(\lambda ))\in \ell ^\infty$ and that $$
\|(\rho_k(\lambda ))\|_{\ell ^\infty}\le \zeta(\lambda):=1+\frac{\aleph}{|\Im \lambda|}, $$ where $\Im \lambda$ denotes the imaginary part of $\lambda$. Thus, applying the Cauchy-Schwarz inequality in \eqref{es0} and Parseval's theorem to the representation formula \eqref{rep1} in Lemma \ref{lemma2}, we get that \begin{equation} \label{es1}
\sum_{k=1}^N \frac{|d_k-\tilde{d}_k|}{|\lambda_k-\lambda|}\le M(\lambda)\|(\psi_k-\tilde{\psi}_k)\|_{\ell^2(L^2(\Gamma))},\ N \in \mathbb{N}, \end{equation} where \begin{equation} \label{def-M}
M(\lambda):=\|h\|_{L^2(\Gamma)}\|u_\lambda (g)\|_H +\zeta(\lambda)\|g\|_{L^2(\Gamma)}\|\tilde{u}_\lambda (h)\|_H. \end{equation}
As a consequence we have $\sum_{k\ge 1} |d_k-\tilde{d}_k| / |\lambda_k-\lambda|<\infty$. Furthermore, taking into account that
$$ \frac{|\lambda-\mu|}{|\lambda_k-\mu|} \le 1 + \frac{|\lambda|}{\lambda_1},\ \mu \in (-\infty,-\lambda_1], $$ we apply the dominated convergence theorem to \eqref{du} and find that \begin{equation}\label{4.1} \lim_{\mu=\Re \mu \rightarrow -\infty}U(\lambda ,\mu)=\sum_{k\ge 1}\frac{d_k-\tilde{d}_k}{\lambda_k-\lambda}=:\mathcal{U}(\lambda). \end{equation} Moreover, we have \begin{equation}\label{4.4}
|\mathcal{U}(\lambda)|
\le M(\lambda)\|(\psi_k-\tilde{\psi}_k)\|_{\ell^2(L^2(\Gamma))}, \end{equation} according to \eqref{es1}.
Arguing as before with $V$ defined by \eqref{dv} instead of $U$, we obtain in a similar fashion that \begin{equation}\label{4.3} \lim_{\mu=\Re \mu \rightarrow -\infty}V(\lambda ,\mu)=\sum_{k\ge 1} \frac{\tilde{\lambda}_k-\lambda_k}{(\lambda_k-\lambda)(\tilde{\lambda}_k-\lambda)} \tilde{d}_k=:\mathcal{V}(\lambda)
\end{equation} and that
\begin{equation}\label{4.7}
|\mathcal{V}(\lambda)|\le \zeta(\lambda)\|(\tilde{\lambda}_k-\lambda_k)\|_{\ell^\infty}\| \tilde{u}_\lambda(g)\|_H\|\tilde{u}_\lambda(h)\|_H. \end{equation}
Having seen this, we refer to \eqref{4.0}-\eqref{dt} and deduce from Lemma \ref{lemma5.1}, \eqref{4.1} and \eqref{4.3} that \begin{equation} \label{es3} \int_\Gamma \left( u_\lambda (g)-\tilde{u}_\lambda (g) \right) \overline{h}ds(x)=\mathcal{U}(\lambda)+\mathcal{V}(\lambda). \end{equation}
Now, taking $\lambda=\lambda_\tau=(\tau+i)^2$ for some fixed $\tau \in \left( |\xi|/2, \infty \right)$ and $(\omega,\theta)=(\omega_\tau,\theta_\tau)$, where $\omega_\tau$ and $\theta_\tau$ are the same as in \eqref{es4}, we combine \eqref{S1b}-\eqref{id3} with \eqref{es3}. We obtain that the Fourier transform $\hat{b}$ of $b:=(\tilde{q}-q)\chi_\Omega$ reads \begin{equation}\label{4.6} \hat{b}((1+i/\tau)\xi)= \mathcal{U}(\lambda_\tau )+\mathcal{V}(\lambda_\tau)+\mathfrak{R}(\lambda_\tau), \end{equation} where \[ \mathfrak{R}(\lambda_\tau):=\int_\Omega (\tilde{A}-\lambda_\tau)^{-1}(\tilde{q}\mathfrak{e}_{\lambda_\tau,\omega_\tau})\tilde{q}\mathfrak{e}_{\lambda_\tau,-\theta_\tau}dx-\int_\Omega (A-\lambda_\tau)^{-1}(q\mathfrak{e}_{\lambda_\tau,\omega_\tau})q\mathfrak{e}_{\lambda_\tau,-\theta_\tau}dx. \]
Moreover, for all $\tau \ge \max(|\xi|/2,\tau_\ast)$, we have \begin{equation}\label{4.8.1}
|\mathfrak{R}(\lambda_\tau)|\le C \tau^{-1+2\beta}, \end{equation} by \eqref{gs1}-\eqref{gm0}, where $\beta \in [0,1/2)$ is defined in Theorem \ref{theorem2} and $\tau_\ast$ is the same as in Corollary \ref{corollary1}. Here and in the remaining part of this proof, $C$ denotes a positive constant depending only on $n$, $\Omega$, $\aleph$ and $\mathfrak{c}$, which may change from line to line.
On the other hand, using that \begin{eqnarray*}
\left| \hat{b}((1+i/\tau)\xi) -\hat{b}(\xi) \right| & = & \left| \int_{\mathbb{R}^n} e^{-i \xi \cdot x} \left( e^{\frac{\xi}{\tau} \cdot x} - 1 \right) b(x) dx \right| \\
& \le & \frac{| \xi |}{\tau} \left( \sup_{x \in \Omega} e^{(| \xi | / \tau) |x|} \right) \| b \|_{L^1(\mathbb{R}^n)}, \end{eqnarray*}
we get in a similar way to \cite[Eq. (5.1)]{CS} that $$
|\hat{b}(\xi)|\le |\hat{b}((1+i/\tau)\xi)|+\frac{c|\xi|}{\tau}e^{c|\xi|/\tau}\aleph,\ \tau \in (|\xi|/2, \infty), $$ for some positive constant $c$ depending only on $\Omega$. Putting this together with
\eqref{4.6}-\eqref{4.8.1} we find that for all $\tau \ge \max(|\xi|/2,\tau_\ast)$, \begin{equation}\label{4.9}
|\hat{b}(\xi)|\le \frac{C}{\tau^{1-2\beta}}+\frac{c|\xi|}{\tau}e^{c|\xi|/\tau}\aleph+|\mathcal{U}(\lambda_\tau )|+|\mathcal{V}(\lambda_\tau)|. \end{equation}
To upper bound $|\mathcal{U}(\lambda_\tau )|+|\mathcal{V}(\lambda_\tau)|$ on the right hand side of \eqref{4.9}, we recall from \eqref{sol1} that $u_{\lambda_\tau}(g)=-(A-\lambda_\tau)^{-1} (q \mathfrak{e}_{\lambda_\tau,\omega_\tau}) + \mathfrak{e}_{\lambda_\tau,\omega_\tau}$ and that $\tilde{u}_{\lambda_\tau}(h)=-(\tilde{A}-\lambda_\tau)^{-1} (\tilde{q} \mathfrak{e}_{\lambda_\tau,-\theta_\tau})+\mathfrak{e}_{\lambda_\tau,-\theta_\tau}$, and we combine \eqref{re7} with
\eqref{gs0} and \eqref{gs1}: We get for all $\tau \ge \tau_{\xi}:=\max(1,|\xi|/2,\tau_\ast)$, that
$$\| u_{\lambda_\tau}(g) \|_H + \| \tilde{u}_{\lambda_\tau}(h) \|_H \leq C. $$
This together with the basic estimate $\| g \|_{L^2(\Gamma)} + \| h \|_{L^2(\Gamma)} \le C \tau$, \eqref{def-M}, \eqref{4.4} and \eqref{4.7}, yield that $$
|\mathcal{U}(\lambda_\tau )|+|\mathcal{V}(\lambda_\tau)|\le C\left(\tau\|(\psi_k-\tilde{\psi}_k)\|_{\ell^2(L^2(\Gamma))}+\|(\tilde{\lambda}_k-\lambda_k)\|_{\ell^\infty}\right),\ \tau \in [ \tau_{\xi},\infty ). $$ Inserting this into \eqref{4.9}, we find that \begin{equation}\label{4.12}
|\hat{b}(\xi)|\le \frac{C}{\tau^{1-2\beta}}+\frac{c|\xi|}{\tau}e^{c|\xi|/\tau}\aleph+C \tau \delta,\ \tau \in [ \tau_{\xi},\infty ), \end{equation} where we have set \begin{equation} \label{def-delta}
\delta:= \| (\psi_k-\tilde{\psi}_k)\|_{\ell^2(L^2(\Gamma))}+\|(\tilde{\lambda}_k-\lambda_k)\|_{\ell^\infty}. \end{equation}
Let $\varrho \in (0,1)$ be a constant to be made precise further on. For all $\tau \in [\tau_\ast,\infty)$, where $\tau_\ast$ is defined in Corollary \ref{corollary1}, it is apparent that the condition $\tau \ge \tau_{\xi}$ is automatically satisfied whenever $\xi \in B(0,\tau^\varrho):= \{ \xi \in \mathbb{R}^n,\ |\xi|< \tau^\varrho \}$. Thus, squaring both sides of \eqref{4.12} and integrating the obtained inequality over $B(0,\tau^\varrho)$, we get that $$
\|\hat{b}\|_{L^2(B(0,\tau ^\varrho))}^2\le C\left( \tau^{-2(1-2 \beta) + \varrho n}+ e^{2c\tau^{-(1-\varrho)}} \tau^{\varrho (n+2)-2}+\tau^{2+\varrho n}\delta ^2 \right),\ \tau \in [\tau_\ast,\infty). $$ Then, taking $\varrho=(1-2\beta)/(n+2)$ in the above line, we obtain that \begin{equation} \label{sta1}
\|\hat{b}\|_{L^2(B(0,\tau ^{(1-2\beta)/(n+2)}))}^2\le C \left( \tau^{-(1-2\beta)}+\tau^{(3n+4)/(n+2)}\delta ^2 \right), \tau \in [\tau_\ast,\infty). \end{equation} On the other hand, using that the Fourier transform is an isometry from $L^2(\mathbb{R}^n)$ to itself, we have for all $\tau \in [\tau_\ast,\infty)$, \begin{eqnarray*}
\int_{\mathbb{R}^n \setminus B(0,\tau^{(1-2\beta)/(n+2)})} (1+|\xi|^2)^{-1}|\hat{b}(\xi)|^2d\xi & \le &
\tau^{-2(1-2\beta)/(n+2)}\|b\|_{L^2(\mathbb{R}^n)}^2 \\ & \le & C \tau^{-2(1-2\beta)/(n+2)}, \end{eqnarray*} which together with \eqref{sta1} yields that \[
\|b\|_{H^{-1}(\mathbb{R}^n)}^2\le C\left( \tau^{-2(1-2\beta)/(n+2)}+\tau^{(3n+4)/(n+2)}\delta ^2 \right),\ \tau \in [\tau_\ast,\infty). \] Assuming that $\delta < \left( 2(1-2\beta)/(3n+4) \right)^{1 /2}=:\delta_0$, we get by minimizing the right hand side of the above estimate with respect to $\tau \in [\tau_\ast,\infty)$, that $$
\|b\|_{H^{-1}(\mathbb{R}^n)}\le C\delta^{2(1-2\beta)/(3(n+2))}, $$
and the desired stability inequality follows from this upon recalling that $\|q-\tilde{q}\|_{H^{-1}(\Omega)}\le \|b\|_{H^{-1}(\mathbb{R}^n)}$. Finally, we complete the proof by noticing that for all $\delta \ge \delta_0$, we have
$$ \|q-\tilde{q}\|_{H^{-1}(\Omega)}\le \|q-\tilde{q}\|_{L^2(\Omega)} \leq \left( 2 \aleph \delta_0^{-2(1-2\beta)/(3(n+2))} \right) \delta^{2(1-2\beta)/(3(n+2))}. $$
\subsection{Proof of Theorem \ref{theorem3}} Upon possibly substituting $q+\lambda^\ast+1$ (resp., $\tilde{q}+\lambda^\ast+1$) for $q$ (resp., $\tilde{q}$), we shall assume without loss of generality in the sequel, that $\lambda_k\ge 1$ (resp., $\tilde{\lambda}_k \ge 1$) for all $k \ge 1$.
Next, taking into account that $q=\tilde{q}$ in $\Omega_0$, we notice that the function $u_k:=\phi_k-\tilde{\phi}_k$, $k \ge 1$, satisfies \begin{equation} \label{es20} (-\Delta +q-\lambda_k)u_k=(\lambda_k-\tilde{\lambda}_k)\tilde{\phi}_k\; \mbox{in}\; \Omega_0,\quad \partial_\nu u_k+\alpha u_k=0\; \mbox{on}\; \Gamma. \end{equation} Now, let us recall from \cite[Theorem 2.2]{BCKPS} that for all $s \in (0,1/2)$ fixed, there exist three constants $C=C(n,\Omega_0,\Gamma_{\ast})>0$, $\mathfrak{b}=\mathfrak{b}(n,\Omega_0,\Gamma_{\ast},s)>0$ and $\gamma=\gamma(n,\Omega_0)>0$, such that for all $r \in (0,1)$ and all $\lambda \in [0,+\infty)$, we have \begin{equation}\label{UC1}
C\left(\|u\|_{H^1(\Gamma_0)}+ \|\partial_\nu u\|_{L^2(\Gamma_0)}\right)\le r^{s/4}\|u\|_{H^2(\Omega_0)}+e^{\mathfrak{b}r^{-\gamma}}\mathfrak{C}_\lambda (u),\ u\in H^2(\Omega_0), \end{equation}
where we have set $\Gamma_0:=\partial \Omega_0$ and \[
\mathfrak{C}_\lambda (u) :=(1+\lambda)\left(\|u\|_{H^1(\Gamma_{\ast})}+\| \partial_\nu u\|_{L^2(\Gamma_{\ast})}\right)+\|(\Delta -q+\lambda)u\|_{L^2(\Omega_0)}. \]
Thus, in light of \eqref{5.1} and the embedding $\Gamma \subset \Gamma_0$, we deduce from \eqref{es20} upon applying \eqref{UC1} with $(\lambda,u)=(\lambda_k,(u_k)_{| \Omega_0})$, $k \geq 1$, that for all $r \in (0,1)$, we have \begin{eqnarray*}
& & C\|\psi_k-\tilde{\psi}_k\|_{L^2(\Gamma)} \\
& \le & r^{s/4}( \lambda_k +\tilde{\lambda}_k )+e^{\mathfrak{b} r^{-\gamma}}\left((1+\|\alpha\|_{C^{0,1}(\Gamma)})\lambda_k\|\psi_k-\tilde{\psi}_k\|_{H^1(\Gamma_{\ast})}+|\lambda_k-\tilde{\lambda}_k|\right), \end{eqnarray*} for some constant $C>0$ depending only on $n$, $\Omega$, $\Omega_0$, $\Gamma_\ast$, $\aleph$ and $s$.
From this and Weyl's asymptotic formula \eqref{waf}, it then follows for all $k \ge 1$ and all $r \in (0,1)$, that \begin{equation} \label{es21}
C\|\psi_k-\tilde{\psi}_k\|_{L^2(\Gamma)}^2\le r^{s/2}k^{4/n}+e^{2\mathfrak{b}r^{-\gamma}}\left(k^{4/n}\|\psi_k-\tilde{\psi}_k\|_{H^1(\Gamma_{\ast})}^2 + |\lambda_k-\tilde{\lambda}_k|^2 \right). \end{equation} Here and in the remaining part of this proof, $C$ denotes a generic positive constant depending only on $n$, $\Omega$, $\Omega_0$, $\Gamma_\ast$, $\aleph$ and $\alpha$, which may change from one line to another. Since the constant $C$ is independent of $k \geq 1$ and since $\sum_{k\ge 1}k^{-2\mathfrak{t}+4/n}<\infty$ as we have $2\mathfrak{t}>1+4/n$, we find upon multiplying both sides of \eqref{es21} by $k^{-2\mathfrak{t}}$ and then summing up the result over $k \geq 1$, that \begin{eqnarray} \label{es22}
& & C \|(k^{-\mathfrak{t}} (\psi_k-\tilde{\psi}_k))\|_{\ell ^2(L^2(\Gamma))}^2 \\
& \le & r^{s/2}
+e^{2\mathfrak{b}r^{-\gamma}} \left( \| (k^{-\mathfrak{t}+2/n} (\psi_k-\tilde{\psi}_k))\|_{\ell^2(H^1(\Gamma_{\ast}))}^2 + \| (k^{-\mathfrak{t}} (\lambda_k-\tilde{\lambda}_k)) \|_{\ell^2}^2 \right), \nonumber
\end{eqnarray} uniformly in $r \in (0,1)$.
Further, taking into account that $(k^{\mathfrak{t}}(\psi_k-\tilde{\psi}_k)) \in \ell^2(L^2(\Gamma))$ and $\|(k^{\mathfrak{t}}(\psi_k-\tilde{\psi}_k))\|_{\ell ^2(L^2(\Gamma))}\le \aleph$, we have \begin{eqnarray*}
\|(\psi_k-\tilde{\psi}_k)\|_{\ell ^2(L^2(\Gamma))}^2
&\le &\|(k^{\mathfrak{t}}(\psi_k-\tilde{\psi}_k))\|_{\ell ^2(L^2(\Gamma))}\|(k^{-\mathfrak{t}}(\psi_k-\tilde{\psi}_k))\|_{\ell ^2(L^2(\Gamma))} \\
& \le & \aleph\|(k^{-\mathfrak{t}}(\psi_k-\tilde{\psi}_k))\|_{\ell ^2(L^2(\Gamma))}, \end{eqnarray*} by the Cauchy-Schwarz inequality, and hence \begin{eqnarray} \label{es23}
& & C \|(\psi_k-\tilde{\psi}_k)\|_{\ell ^2(L^2(\Gamma))}^2 \\ & \le & r^{s/4}
+e^{\mathfrak{b}r^{-\gamma}} \left( \| (k^{-\mathfrak{t}} (\lambda_k-\tilde{\lambda}_k)) \|_{\ell^2} + \| (k^{-\mathfrak{t}+2/n} (\psi_k-\tilde{\psi}_k))\|_{\ell^2(H^1(\Gamma_{\ast}))} \right), \nonumber \end{eqnarray} whenever $r \in (0,1)$, by \eqref{es22}. Moreover, since
$$\| (k^{-\mathfrak{t}} (\lambda_k-\tilde{\lambda}_k)) \|_{\ell^2} \le \left( \sum_{k\ge 1} k^{-2\mathfrak{t}}\right)^{1/2}
\| (\lambda_k-\tilde{\lambda}_k)) \|_{\ell^\infty}$$ and $\sum_{k\ge 1} k^{-2\mathfrak{t}}<\infty$ as we assumed that $2\mathfrak{t}>1+n/2$, \eqref{es23} then provides \begin{equation} \label{es24}
\|(\psi_k-\tilde{\psi}_k)\|_{\ell ^2(L^2(\Gamma))}^2 \le C \left( r^{s/4} +e^{\mathfrak{b}r^{-\gamma}} \delta_\ast \right),\ r \in (0,1), \end{equation} where we have set $$ \delta_\ast :=
\| (\lambda_k-\tilde{\lambda}_k) \|_{\ell^\infty} + \| (k^{-\mathfrak{t}+2/n} (\psi_k-\tilde{\psi}_k))\|_{\ell^2(H^1(\Gamma_{\ast}))}. $$ Next, with reference to \eqref{def-delta} we have \begin{eqnarray*}
\delta^2 & \le & 2 \left( \|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty}^2 + \|(\psi_k-\tilde{\psi}_k)\|_{\ell ^2(L^2(\Gamma))}^2 \right) \\
& \le & 2 \left( \aleph \|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty} + \|(\psi_k-\tilde{\psi}_k)\|_{\ell ^2(L^2(\Gamma))}^2 \right). \nonumber \end{eqnarray*}
Moreover, since $\|(\lambda_k-\tilde{\lambda}_k)\|_{\ell^\infty} \le e^{\mathfrak{b}r^{-\gamma}} \delta_\ast$ whenever $r \in (0,1)$, the above inequality combined with \eqref{es24} yield that \begin{equation} \label{e1} \delta^2 \leq C \left( r^{s/4}+e^{\mathfrak{b}r^{-\gamma}} \delta_\ast \right),\ r \in (0,1). \end{equation}
On the other hand, we have $$
\|q-\tilde{q}\|_{H^{-1}(\Omega)}\le C \delta^{2 (1-2\beta) / (3(n+2))}, $$ from Theorem \ref{theorem2}. Putting this together with \eqref{e1}, we obtain that \begin{equation}\label{e3}
\|q-\tilde{q}\|_{H^{-1}(\Omega)}\le C \left( r^{s/4}+e^{\mathfrak{b}r^{-\gamma}}\delta_\ast \right)^{(1-2\beta)/(3(n+2))},\ r \in (0,1). \end{equation}
Let us now examine the two cases $\delta_\ast \in (0,1/e)$ and $\delta_\ast \in [1/e,\infty)$ separately. We start with $\delta_\ast \in (0,1/e)$ and take $r=| \ln \delta_\ast |^{-1/\gamma} \in (0,1)$ in \eqref{e3}, getting that \begin{eqnarray*}
\|q-\tilde{q}\|_{H^{-1}(\Omega)} & \le & C \left( | \ln \delta_\ast |^{-s/(4\gamma)}+\delta_\ast^{(\mathfrak{b}+1)} \right)^{(1-2\beta)/(3(n+2))} \\
& \le & C \left( | \ln \delta_\ast |^{-s/(4\gamma)}+ e^{-(\mathfrak{b}+1)} | \ln \delta_\ast |^{-(\mathfrak{b}+1)} \right)^{(1-2\beta)/(3(n+2))}, \end{eqnarray*}
where we used in the last line that $\delta_\ast \le 1/(e | \ln \delta_\ast |)$. This immediately yields \begin{equation}\label{e4}
\|q-\tilde{q}\|_{H^{-1}(\Omega)} \le C | \ln \delta_\ast |^{-\vartheta},\ \delta_\ast \in (0,1/e), \end{equation} where $$\vartheta:=\min \left( s/(4\gamma) , \mathfrak{b}+1 \right)(1-2\beta)/(3(n+2)).$$ Next, for $\delta_\ast \in [1/e,\infty)$, we get upon choosing, say, $r=1/2$ in \eqref{e3}, and then taking into account that $r < 1 \le e \delta_\ast$ and $(1-2\beta)/(3(n+2)) \ge 0$, that \begin{eqnarray*}
\|q-\tilde{q}\|_{H^{-1}(\Omega)} & \le & C \left( (e\delta_\ast)^{s/4}+e^{2^\gamma \mathfrak{b}-1} e \delta_\ast \right)^{(1-2\beta)/(3(n+2))} \\ & \le & C (e\delta_\ast)^{(1-2\beta)/(3(n+2))} \\ & \le & C \delta_\ast. \end{eqnarray*} Now, with reference to \eqref{def-Phi}, the stability estimate \eqref{thm3} follows readily from this and \eqref{e4}.
\end{document} |
\begin{document}
\title{Triple correlations of Fourier coefficients of cusp forms}
\author[Y. Lin]{Yongxiao Lin} \address{Department of Mathematics, The Ohio State University\\ 231 W 18th Avenue\\ Columbus, Ohio 43210-1174} \email{[email protected]}
\begin{abstract} We treat an unbalanced shifted convolution sum of Fourier coefficients of cusp forms. As a consequence, we obtain an upper bound for correlation of three Hecke eigenvalues of holomorphic cusp forms $\sum_{H\leq h\leq 2H}W\big(\frac{h}{H}\big)\sum_{X\leq n\leq 2X}\lambda_{1}(n-h)\lambda_{2}(n)\lambda_{3}(n+h)$, which is nontrivial provided that $H\geq X^{2/3+\varepsilon}$. The result can be viewed as a cuspidal analogue of a recent result of Blomer \cite{blo} on triple correlations of divisor functions. \end{abstract}
\subjclass[2010]{Primary 11F30; Secondary 11F72}
\keywords{Triple correlation, cusp forms, circle method, Kuznetsov's trace formula, large sieve inequalities}
\maketitle
\section{Introduction}
Recently Blomer \cite{blo} established an asymptotic formula with power saving error term for certain types of triple correlations of divisor functions. Motivated by his work, we are going to prove a cuspidal analogue. While shifted convolutions of two Fourier coefficients have been an extensively studied topic, there seem to be few results available on the correlation of three Fourier coefficients, with power saving error term. For instance, one of the highly interesting and open problems in analytic number theory is to find an asymptotic formula for \begin{equation} D_h(X)=\sum_{n\leq X}\tau(n-h)\tau(n)\tau(n+h), \end{equation} where $\tau(n)$ denotes the divisor function. It is conjectured that \begin{equation}\label{D_h} D_h(X)\sim c_h X(\log X)^3, \end{equation} as $X\rightarrow \infty$, for some positive constant $c_h$. Browning \cite{bro} suggests that one should take
\begin{equation}
c_h=\frac{11}{8}f(h)\prod_{p}\left(1-\frac{1}{p}\right)^2\left(1+\frac{2}{p}\right),
\end{equation}
for an explicit multiplicative function $f(h)$, and is able to prove that \eqref{D_h} is true on average, namely
\begin{equation}
\sum_{h\leq H}\left(D_h(X)-c_h X(\log X)^3\right)=o(HX(\log X)^3)
\end{equation}
for $H\geq X^{3/4+\varepsilon}$.
Using spectral theory of automorphic forms, Blomer \cite{blo} improved the range of $H$ substantially to $H\geq X^{1/3+\varepsilon}$ and produced a power saving error term. Furthermore, Blomer's approach seems to be flexible enough to be adapted to the study of more general correlation sums.
\begin{Theorem}[Blomer \cite{blo}]\label{blomer's} Let $W$ be a smooth function with compact support in $[1,2]$ and Mellin transform $\widehat{W}$. Let $1\leq H\leq X/3$ and $r_d(n)$ be the Ramanujan sum. Let $a(n)$, for $X\leq n\leq 2X$, be any sequence of complex numbers. Then \begin{equation} \begin{split} \sum_{h}W\left(\frac{h}{H}\right)\sum_{X\leq n\leq 2X}a(n)\tau(n+h)\tau(n-h)&=H\widehat{W}(1)\sum_{X\leq n\leq 2X}a(n)\sum_{d}\frac{r_d(2n)}{d^2}(\log n+2\gamma-2\log d)^2
\\&+O\left(X^{\varepsilon}\left(\frac{H^2}{X^{1/2}}+HX^{1/4}+(XH)^{1/2}+\frac{X}{H^{1/2}}\right)\|a\|_2\right). \end{split} \end{equation} \end{Theorem}
Here $\|a\|_2=(\sum|a_n|^2)^{\frac{1}{2}}$ is the $\ell^2$-norm. The first term in the $O$-term above comes from the smooth approximation of the sum on the left. The second one comes from the treatment of the ``minus-case'' after application of Voronoi summation of the divisor function. The last two terms come from spectral methods.
As a corollary, Blomer obtains
\begin{Corollary}[Blomer \cite{blo}] Let $W$ be a smooth function with compact support in $[1,2]$ and Mellin transform $\widehat{W}$. Let $1\leq H\leq X/3$. Then \begin{equation} \begin{split} \sum_{h}W\left(\frac{h}{H}\right)\sum_{X\leq n\leq 2X}\tau_k(n)\tau(n+h)\tau(n-h)&=\widehat{W}(1)XHQ_{k+1}(\log X) \\&+O\left(X^{\varepsilon}(H^2+HX^{1-\frac{1}{k+2}}+XH^{1/2}+X^{3/2}H^{-1/2})\right), \end{split} \end{equation} where $\tau_k$ is the $k$-th fold divisor function, and $Q_{k+1}$ is a degree $k+1$ polynomial. \end{Corollary}
The result is non-trivial for $X^{\frac{1}{3}+\varepsilon}\leq H\leq X^{1-\varepsilon}$, which in the case of $k=2$, substantially improves Browning's result.
Note that since the divisor function can be viewed as the Fourier coefficient of Eisenstein series, one naturally would ask what will be the case if the divisor functions are replaced by Fourier coefficients of cusp forms. Blomer \cite{blo} remarked that if one uses Jutila's circle method and argues as in \cite{bl-mi}, then one might obtain an analogous result. The purpose of this note is to carry this out in detail. It turns out that new difficulties arise, making it difficult to obtain a range for $H$ as good as the divisor function case. Namely, we are not able to `open' the Fourier coefficient as Blomer does with the divisor function. We obtain the following result.
\begin{Theorem}\label{maintheorem} Let $1\leq H\leq X/3$. Let $W$ be a smooth function with compact support in $[1,2]$, and $a(n)$, $X\leq n\leq 2X$, be any sequence of complex numbers. Let $\lambda_1(n), \lambda_2(n)$ be Hecke eigenvalues of holomorphic Hecke eigencuspforms of weight $\kappa_1$, $\kappa_2$ for $\mathrm{SL}_2(\mathbb{Z})$ respectively. Then \begin{equation}\label{finalbdd}
\sum_{h}W\left(\frac{h}{H}\right)\sum_{X\leq n\leq 2X}a(n)\lambda_{1}(n+h)\lambda_{2}(n-h)\ll X^{\varepsilon}\frac{X}{H}\left((XH)^{1/2}+\frac{X}{H^{1/2}}\right)\|a\|_2. \end{equation} \end{Theorem}
One should compare our result with the third and fourth terms of the $O$-term in Blomer's theorem, as both of them naturally come from the spectral theory of automorphic forms. The result is nontrivial as long as $H\geq X^{\frac{2}{3}+\varepsilon}$.
As an immediate consequence, we have \begin{Corollary} Let $1\leq H\leq X/3$. Let $W$ be a smooth function with compact support in $[1,2]$. Let $\lambda_1(n), \lambda_2(n), \lambda_3(n)$ be Hecke eigenvalues of holomorphic Hecke eigencuspforms of weight $\kappa_1$, $\kappa_2$ and $\kappa_3$ for $\mathrm{SL}_2(\mathbb{Z})$ respectively. Then \begin{equation} \sum_{h}W\left(\frac{h}{H}\right)\sum_{X\leq n\leq 2X}\lambda_{1}(n-h)\lambda_{2}(n)\lambda_{3}(n+h)\ll X^{\varepsilon}\min\left(XH,\frac{X^2}{H^{1/2}}\right). \end{equation} \end{Corollary}
The result is nontrivial only for $H\geq X^{\frac{2}{3}+\varepsilon}$. One can remove the smooth function $W$ in the $h$-sum, as in \cite{blo}.
If, on the other hand, one allows one of the Fourier coefficients to be non-cuspidal, then the ability to open the divisor function enables us to enlarge the range of $H$ to $H\geq X^{\frac{1}{3}+\varepsilon}$. This feature of decomposable functions was pointed out by Meurman in \cite{Meu}. For instance, one has the following result, which follows along the same lines as the proof of Blomer \cite{blo}, although it is not explicitly stated as such in that work. \begin{Corollary}[Blomer \cite{blo}] Let $1\leq H\leq X/3$. Let $W$ be a smooth function with compact support in $[1,2]$, and $a(n)$, $X\leq n\leq 2X$, be any sequence of complex numbers. Let $\lambda(n)$ be Hecke eigenvalues of a holomorphic Hecke eigencuspform of weight $\kappa$ for $\mathrm{SL}_2(\mathbb{Z})$. Then \begin{equation}
\sum_{h}W\left(\frac{h}{H}\right)\sum_{X\leq n\leq 2X}a(n)\tau(n+h)\lambda(n-h)\ll X^{\varepsilon}\left(\frac{H^2}{X^{1/2}}+HX^{1/4}+(XH)^{1/2}+\frac{X}{H^{1/2}}\right)\|a\|_2. \end{equation} \end{Corollary}
\textbf{Notation.} $x\asymp X$ means $X\leq x\leq 2X$. We will use the $\varepsilon$-convention: $\varepsilon>0$ is arbitrarily small but not necessarily the same at each occurrence.
\section{Preliminaries} In this section we collect some lemmas that will be used in our proof. First let us recall the Voronoi summation formula for holomorphic Hecke eigenvalues. \begin{Lemma}[{\cite[Theorem A.4]{KMV1}}]\label{voronoi} Assume $(b,c)=1$. Let $V$ be a smooth compactly supported function. Let $N>0$ and let $\lambda(n)$ denote Hecke eigenvalues of a holomorphic Hecke eigencuspform of weight $\kappa$ for $\mathrm{SL}_2(\mathbb{Z})$. Then \begin{equation} \sum_{n}\lambda(n)e\left(\frac{bn}{c}\right)V\left(\frac{n}{N}\right)=\frac{N}{c}\sum_{n}\lambda(n)e\left(-\frac{\bar{b}n}{c}\right)\cdot 2\pi i^\kappa\int_{0}^{\infty}V(x)J_{\kappa-1}\left(\frac{4\pi\sqrt{nNx}}{c}\right)\mathrm{d}x. \end{equation} \end{Lemma}
We will use the following variant of Jutila's circle method \cite{jut1}, \cite{jut}.
\begin{Lemma}\label{jutila} Let $Q\geq 1$ and $Q^{-2}\leq \delta\leq Q^{-1}$ be two parameters. Let $w$ be a nonnegative function supported in $[Q,2Q]$ satisfying $\|w\|_\infty\leq1$ and $\sum_{c}w(c)>0$. Let $\mathbb{I}_S$ be the characteristic function of the set $S$. Define \begin{equation} \tilde{I}(\alpha)=\frac{1}{2\delta \Lambda}\sum_{c}w(c)\sumstar_{d(\mathrm{mod}\ c)}\mathbb{I}_{[\frac{d}{c}-\delta,\frac{d}{c}+\delta]}(\alpha), \end{equation} where $\Lambda=\sum_{c}w(c)\phi(c)$. Then $\tilde{I}(\alpha)$ is a good approximation of $\mathbb{I}_{[0,1]}$ in the sense that \begin{equation}
\int_{0}^{1}|1-\tilde{I}(\alpha)|^2\mathrm{d}\alpha\ll_{\varepsilon} \frac{Q^{2+\varepsilon}}{\delta\Lambda^2}. \end{equation} \end{Lemma}
The feature of the circle method in our application, as in \cite{bl-mi}, is that the parameter $Q$ turns out to be just a ``catalyst'', not entering into the final bound. This will become clear at the final stage of our argument.
In order to state Kuznetsov's trace formula and the large sieve inequalities, let us define the following integral transforms for a smooth function $\phi: [0,\infty)\rightarrow \mathbb{C}$ satisfying $\phi(0)=\phi'(0)=0$, $\phi^{(j)}(x)\ll (1+x)^{-3}$ for $0\leq j\leq 3$: $$\dot{\phi}(k)=4i^k\int_{0}^{\infty}\phi(x)J_{k-1}(x)\frac{\mathrm{d}x}{x},\ \ \tilde{\phi}(t)=2\pi i\int_{0}^{\infty}\phi(x)\frac{J_{2it}(x)-J_{-2it}(x)}{\sinh(\pi t)}\frac{\mathrm{d}x}{x}.$$
We use the notations in \cite{BHM} and \cite{blo}. Let $\mathcal{B}_k$ be an orthonormal basis of the space of holomorphic cusp forms of level $1$ and weight $k$. Let $\mathcal{B}$ be a fixed orthonormal basis of Hecke-Maass eigenforms of level 1. For $f\in \mathcal{B}_k$, we write its Fourier expansion at $\infty$ as $$f(z)=\sum_{n\geq 1}\rho_f(n)(4\pi n)^{k/2}e(nz).$$
For $f\in \mathcal{B}$ with spectral parameter $t$, we write its Fourier expansion as
$$f(z)=\sum_{n\neq 0}\rho_f(n)W_{0,it}(4\pi|n|y)e(nx),$$
where $W_{0,it}(y)=(y/\pi)^{1/2}K_{it}(y/2)$ is a Whittaker function.
Finally for the Eisenstein series $E(z,s)$, we write its Fourier expansion at $s=\frac{1}{2}+it$ as \begin{equation*}
E\left(z,\frac{1}{2}+it\right)=y^{\frac{1}{2}+it}+\varphi\left(1/2+it\right)y^{\frac{1}{2}-it}+\sum_{n\neq0}\rho(n,t)W_{0,it}(4\pi|n|y)e(nx). \end{equation*}
Then we have the following Kuznetsov's trace formula in the notation of \cite{BHM}. \begin{Lemma}\label{kuznetsov} Let $a, b>0$ be integers, then
\begin{equation} \begin{split} \sum_{c\geq 1}\frac{1}{c}S(a,b;c)\phi\left(\frac{4\pi\sqrt{ab}}{c}\right)&=\sum_{\substack{k\geq 2\\ k\ \mathrm{even}}}\sum_{f\in\mathcal{B}_k}\dot{\phi}(k)\Gamma(k)\sqrt{ab}\,\overline{\rho_f(a)}\rho_f(b) \\&+\sum_{f\in\mathcal{B}}\tilde{\phi}(t_f)\frac{\sqrt{ab}}{\cosh (\pi t_f)}\overline{\rho_f(a)}\rho_f(b)+\frac{1}{4\pi}\int_{-\infty}^{\infty}\tilde{\phi}(t)\frac{\sqrt{ab}}{\cosh (\pi t)}\overline{\rho(a,t)}\rho(b,t)\mathrm{d}t. \end{split} \end{equation} \end{Lemma}
We will use the following spectral large sieve inequalities of Deshouillers and Iwaniec \cite{de-iw}. \begin{Lemma}\label{largesieve} Let $T, M\geq 1$. Let $(a_m)$, $M\leq m\leq 2M$, be a sequence of complex numbers, then all of the three quantities\\
$$\sum_{\substack{2\leq k\leq T\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\big|\sum_{m}a_m\sqrt{m}\rho_f(m)\big|^2, \sum_{\substack{f\in\mathcal{B}\\ |t_f|\leq T}}\frac{1}{\cosh (\pi t_f)}\big|\sum_{m}a_m\sqrt{m}\rho_f(\pm m)\big|^2,$$
$$\int_{-T}^{T}\frac{1}{\cosh (\pi t)}\big|\sum_{m}a_m\sqrt{m}\rho(\pm m,t)\big|^2 \mathrm{d}t$$ are bounded by
$$M^{\varepsilon}(T^2+M)\sum_{m}|a_m|^2.$$ \end{Lemma}
We need the following lemma of Blomer-Mili{\'c}evi{\'c} \cite{bl-mi} for a certain type of Bessel transform, which provides the asymptotic behavior of the weight function. \begin{Lemma}\label{wstar} Let $W$ be a fixed smooth function with support in $[1,2]$ satisfying $W^{(j)}(x)\ll_j 1$ for all $j$. Let $\nu\in \mathbb{C}$ be a fixed number with $\Re \nu\geq 0$. Define\label{boundofw} \begin{equation} W^{\star}(z,w)=\int_{0}^{\infty}W(y)J_{\nu}(4\pi\sqrt{yw+z})\mathrm{d}y. \end{equation}
Fix $C\geq 1$ and $A,\varepsilon>0$. Then for $z\geq 4|w|>0$ we have \begin{equation} W^{\star}(z,w)=W_+(z,w)z^{-1/4}e(2\sqrt{z})+W_-(z,w)z^{-1/4}e(-2\sqrt{z})+O_A(C^{-A}) \end{equation} for suitable functions $W_\pm$ satisfying \begin{equation}\label{wplusminus} z^i w^j\frac{\partial^i}{\partial z^i}\frac{\partial^j}{\partial w^j}W_\pm(z,w)\begin{cases}=0, & \quad \sqrt{z}/w\leq C^{-\varepsilon}, \\ \ll C^{\varepsilon(i+j)}, & \quad \mathrm{otherwise},\\ \end{cases} \end{equation} for any $i,j\in \mathbb{N}_0$. The implied constants depend on $i, j$ and $\nu$. \end{Lemma}
The lemma holds true for both positive and negative $w$, as long as $z\geq 4|w|$. For a proof, see \cite[Lemma 17]{bl-mi} and the remark after that.
From Lemma \ref{wstar}, one has the following easy consequence. \begin{Corollary}[{\cite[Corollary 18]{bl-mi}}]\label{realpart} Let $\omega$ be a smooth function supported in a rectangle $[c_1,c_2]\times[c_1,c_2]$ for two constants $c_2>c_1>0$, and let $Z\gg 1$, $W>1$ be two parameters such that $c_1Z\geq 4c_2W$, then the double Mellin transform \begin{equation*} \widehat{W}_{\pm}(s,t)=\int_{0}^{\infty}\int_{0}^{\infty}W_{\pm}(z,w)\omega\left(\frac{z}{Z},\frac{w}{W}\right)z^sw^t\frac{\mathrm{d}z}{z}\frac{\mathrm{d}w}{w} \end{equation*} is holomorphic on $\mathbb{C}^2$, and satisfies \begin{equation*}
\widehat{W}_{\pm}(s,t)\ll_{A,B,\varepsilon,\Re s, \Re t} C^{\varepsilon}(1+|s|)^{-A}(1+|t|)^{-B} \end{equation*} on vertical lines.
\end{Corollary} We will use this corollary when we try to separate variables, after the application of Kuznetsov's trace formula. Then, the following lemma of \cite{blo} will help us to truncate the lengths of summations. See also \cite[Lemma 3]{jut}. \begin{Lemma}[{\cite[Lemma 5]{blo}}] \label{lemmaofphi} Let $\mathcal{Z}\gg1$,$\tau\in\mathbb{R}$, $\alpha\in[-4/5,4/5]$ and $w$ be a smooth compactly supported function. For \begin{equation} \phi(z)=e^{\pm iz\alpha}w\left(\frac{z}{\mathcal{Z}}\right)\left(\frac{z}{\mathcal{Z}}\right)^{i\tau}, \end{equation} we have \begin{equation}
\dot{\phi}(k)\ll_A\frac{1+|\tau|}{\mathcal{Z}}\left(1+\frac{k}{\mathcal{Z}}\right)^{-A}, \tilde{\phi}(t)\ll_A\left(1+\frac{|t|+\mathcal{Z}}{1+|\tau|}\right)^{-A} \end{equation} for $t\in\mathbb{R}$, $k\in\mathbb{N}$ and any $A\geq 0$. \end{Lemma}
\begin{Remark} From Lemma \ref{lemmaofphi} we see that $\dot{\phi}(k)$ is negligibly small as long as $|k|>\mathcal{Z}$, so that later one can truncate summation over $k$ at $\mathcal{Z}$, up to a negligible error. Moreover, in application, $\mathcal{Z}$ will usually be relatively larger than $\tau$, so that the contribution from $\tilde{\phi}(t)$ is always negligible. In particular, after the application of Kuznetsov's trace formula later, we only need to treat the contribution from the holomorphic spectrum, since the Maass forms and Eisenstein series parts will be negligible due to the rapid decay of the weight function $\tilde{\phi}(t)$. \end{Remark}
\section{Proof of the theorem} Let $\lambda_1(n), \lambda_2(n)$ be the Hecke eigenvalues of holomorphic Hecke eigencuspforms of weight $\kappa_1$, $\kappa_2$ for $\mathrm{SL}_2(\mathbb{Z})$. We change the order of summation of $n$ and $h$, fix $n\asymp X$, and first deal with the sum over $h$ \begin{equation}\label{originalsum} E(n):=\sum_{h\asymp H}\lambda_{1}\left(n+h\right)\lambda_{2}\left(n-h\right)W\left(\frac{h}{H}\right). \end{equation}
Let $n+h=m_1$ and $n-h=m_2$, then $m_1+m_2=2n$, and we can rewrite the summation above as
\begin{equation}\label{shiftedsum} \sum_{m_1+m_2=2n}\lambda_{1}(m_1)\lambda_{2}(m_2)W\left(\frac{m_1-n}{H}\right)V\left(\frac{n-m_2}{H'}\right). \end{equation} Here $V$ is a redundant smooth function used to keep track of the support of $m_2$, and $H'$ is a parameter to be determined later, satisfying $H\leq H'\leq X/3$. Later we will see that in our case, $H'=X/3$ will give the best result. Here one can take, say, $V=W$.
\begin{Remark} Let us make a comment on the difference between the shifted convolution sum in \eqref{shiftedsum} with the one treated by Blomer and Mili{\'c}evi{\'c}, \cite[(3.7)]{bl-mi}. In our case, we have localized both the variables $m_1$ and $m_2$ to vary in intervals \textbf{around} $n$. In Blomer and Mili{\'c}evi{\'c}'s case, one of the variables varies in an interval of length $2H$ around $n$, while the other variable varies, say, in $[H/2,2H]$. One should also note that for $f$ a holomorphic cusp form, the sum $\sum_{m_1+m_2=X}\lambda_{f}(m_1)\lambda_{f}(m_2)$ is just the $X$-th Fourier coefficient of the cusp form $f^2$ and thus one can apply Deligne's estimate. \end{Remark}
Now we follow the approach of Jutila \cite{jut} and Blomer and Mili{\'c}evi{\'c} \cite[sections 7 $\&$ 8]{bl-mi}. Let $C=X^{1000}$ be a large parameter. Apply Lemma \ref{jutila} with $Q=C$ and $\delta=C^{-1}$. Let $w_0$ be a fixed smooth function supported in $[1,2]$. Let $w(c)=w_0(c/C)$. In particular we have $\Lambda=\sum_{c}w_0(c/C)\phi(c)\gg C^{2-\varepsilon}$. Detecting the condition $m_1+m_2=2n$ by $\int_{0}^{1}e(\alpha(m_1+m_2-2n))\mathrm{d} \alpha$ and applying Jutila's circle method, we have \begin{equation*} E(n)=\widetilde{E}(n)+\mathrm{Error}, \end{equation*} where \begin{equation} \begin{split} \widetilde{E}(n)=\frac{1}{2\delta}\int_{-\delta}^{\delta}&\frac{1}{\Lambda}\sum_{c}w_0\left(\frac{c}{C}\right)\sumstar_{d(c)}e\left(\frac{-2nd}{c}\right)\sum_{m_1}\lambda_{1}(m_1)e\left(\frac{dm_1}{c}\right)W\left(\frac{m_1-n}{H}\right)e(\eta(m_1-n)) \\&\cdot \sum_{m_2}\lambda_{2}(m_2)e\left(\frac{dm_2}{c}\right)V\left(\frac{n-m_2}{H'}\right)e(-\eta(n-m_2))\mathrm{d}\eta, \end{split} \end{equation} and \begin{equation*} \begin{split} \mathrm{Error}&=\int_{0}^{1}\sum_{m_1}\sum_{m_2}\lambda_{1}(m_1)\lambda_{2}(m_2)W\left(\frac{m_1-n}{H}\right)V\left(\frac{n-m_2}{H'}\right)e(\alpha(m_1+m_2-2n))(1-\tilde{I}(\alpha))\mathrm{d} \alpha\\
&\ll\left(\sum_{m_1}\left|\lambda_{1}(m_1)W\big(\frac{m_1-n}{H}\big)\right|\right)\left(\sum_{m_2}\left|\lambda_{2}(m_2)V\big(\frac{n-m_2}{H'}\big)\right|\right)\cdot \frac{C^{1+\varepsilon}}{\delta^{1/2}\Lambda}\\ &\ll\frac{X^{2}C^{1+\varepsilon}}{\delta^{1/2}\Lambda}\ll C^{-2/5} \end{split} \end{equation*}
by the Cauchy-Schwarz inequality, Lemma \ref{jutila} and the bound $\sum_{n\leq x}|\lambda_f(n)|^2\ll_f x$.
Denote $W_\eta(x)=W(x)e(\eta x)$ and $V_{-\eta}(x)=V(x)e(-\eta x)$. Then \begin{equation} \begin{split} \widetilde{E}(n)=\frac{1}{2\delta}\int_{-\delta}^{\delta}&\frac{1}{\Lambda}\sum_{c}w_0\left(\frac{c}{C}\right)\sumstar_{d(c)}e\left(\frac{-2nd}{c}\right)\sum_{m_1}\lambda_{1}(m_1)e\left(\frac{dm_1}{c}\right)W_{\eta H}\left(\frac{m_1-n}{H}\right)\\ &\cdot \sum_{m_2}\lambda_{2}(m_2)e\left(\frac{dm_2}{c}\right)V_{-\eta H'}\left(\frac{n-m_2}{H'}\right)\mathrm{d}\eta. \end{split} \end{equation}
Note that since $|\eta|\leq C^{-1}=X^{-1000}$ is very small, the functions $W_{\eta H}$ and $V_{-\eta H'}$ have nice properties inherited from $W$. In particular, one has $W^{(j)}_{\eta H},\ V^{(j)}_{-\eta H'}\ll 1$ for any $j\geq 0$, supported in $[1,2]$, uniformly for $|\eta|\leq C^{-1}$.
Applying Voronoi summation formula to the $m_1$-sum, we have \begin{equation} \sum_{m_1}\lambda_{1}(m_1)e\left(\frac{dm_1}{c}\right)W_{\eta H}\left(\frac{m_1-n}{H}\right)=\frac{H}{c}\sum_{m_1}\lambda_{1}(m_1)e\left(-\frac{\bar{d}m_1}{c}\right)W^{\star}_{\eta H}\left(\frac{m_1n}{c^2},\frac{m_1H}{c^2}\right), \end{equation} where \begin{equation} W^{\star}_{\eta H}(z,w)=2\pi i^{\kappa_1}\int_{0}^{\infty}W_{\eta H}(y)J_{\kappa_1-1}(4\pi\sqrt{yw+z})\mathrm{d}y. \end{equation}
Similarly, \begin{equation} \sum_{m_2}\lambda_{2}(m_2)e\left(\frac{dm_2}{c}\right)V_{-\eta H'}\left(\frac{n-m_2}{H'}\right)=\frac{H'}{c}\sum_{m_2}\lambda_{2}(m_2)e\left(-\frac{\bar{d}m_2}{c}\right)V^{\star}_{-\eta H'}\left(\frac{m_2n}{c^2},\frac{m_2H'}{c^2}\right), \end{equation} with \begin{equation} V^{\star}_{-\eta H'}(z,w)=2\pi i^{\kappa_2}\int_{0}^{\infty}V_{-\eta H'}(y)J_{\kappa_2-1}(4\pi\sqrt{-yw+z})\mathrm{d}y. \end{equation}
Substituting these back into $\widetilde{E}(n)$, we get
\begin{equation} \widetilde{E}(n)=\frac{1}{2\delta}\int_{-\delta}^{\delta} \widetilde{E}_\eta(n)\mathrm{d}\eta, \end{equation} with \begin{equation} \begin{split} \widetilde{E}_\eta(n)=&\frac{HH'}{\Lambda}\sum_{c}\frac{w_0(c/C)}{c^2}\sum_{m_1}\sum_{m_2}\lambda_{1}(m_1)\lambda_{2}(m_2)S(m_1+m_2,2n;c)\\ &\cdot W^{\star}_{\eta H}\left(\frac{m_1n}{c^2},\frac{m_1H}{c^2}\right)V^{\star}_{-\eta H'}\left(\frac{m_2n}{c^2},\frac{m_2H'}{c^2}\right). \end{split} \end{equation}
If one can establish a bound for $\sum_{n\asymp X}a(n)\widetilde{E}_\eta(n)$, uniformly in $\eta$, then the same bound will hold true for $\sum_{n\asymp X}a(n)E(n)$, and we are done.
By Lemma \ref{boundofw}, we can write \begin{equation*} W^{\star}_{\eta H}\left(\frac{m_1n}{c^2},\frac{m_1H}{c^2}\right)=\sum_{\pm}W_{\pm}\left(\frac{m_1n}{c^2},\frac{m_1H}{c^2}\right)\left(\frac{m_1n}{c^2}\right)^{-\frac{1}{4}}e\left(\pm2\frac{\sqrt{m_1n}}{c}\right)+O\left(C^{-A}\right), \end{equation*} for some $W_{\pm}$ satisfying \eqref{wplusminus}.
Similarly we have \begin{equation*}V^{\star}_{-\eta H'}\left(\frac{m_2n}{c^2},\frac{m_2H'}{c^2}\right)=\sum^{\pm}V^{\pm}\left(\frac{m_2n}{c^2},\frac{m_2H'}{c^2}\right)\left(\frac{m_2n}{c^2}\right)^{-\frac{1}{4}}e\left(\pm2\frac{\sqrt{m_2n}}{c}\right)+O\left(C^{-A}\right), \end{equation*} for some $V^{\pm}$ satisfying similar conditions. Note that from now on we will not display dependence on the parameter $\eta$ anymore, since all of the estimations we obtain will be uniform in $\eta$, by our previous remark on the nice properties of $W_{\eta H}$ and $V_{-\eta H'}$.
By the bound \eqref{wplusminus}, we can restrict the lengths of the new sums over $m_1$, $m_2$ to \begin{equation*} m_1\leq T_1:=\frac{C^{2+\varepsilon}X}{H^2},\ m_2\leq T_2:=\frac{C^{2+\varepsilon}X}{H'^2}, \end{equation*} up to a negligible error.
Now we further restrict the lengths of summations to dyadic segments. That is, we assume
$$m_1\asymp \mathcal{M}_1,\ m_2\asymp \mathcal{M}_2,$$ where $\mathcal{M}_1\ll T_1,\ \mathcal{M}_2\ll T_2$.
Denote $b=m_1+m_2$. Then $$b\asymp \mathcal{M}_1+\mathcal{M}_2.$$
Then $\widetilde{E}(n)$ will be a sum of at most $O((\log C)^3)$ terms, of the following form. \begin{equation} \begin{split} \widetilde{E}(n,\mathcal{M}_1,\mathcal{M}_2)&=\frac{HH'n^{-\frac{1}{2}}}{\Lambda}\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)m_1^{-\frac{1}{4}}\lambda_{2}(m_2)m_2^{-\frac{1}{4}}\sum_{c}\frac{S(b,2n;c)}{c}w_0\left(\frac{c}{C}\right) \\& \cdot\sum_{\pm}\sum^{\pm}W_{\pm}\left(\frac{m_1n}{c^2},\frac{m_1H}{c^2}\right)V^{\pm}\left(\frac{m_2n}{c^2},\frac{m_2H'}{c^2}\right)e\left(\pm2\frac{\sqrt{m_1n}}{c}\right)e\left(\pm2\frac{\sqrt{m_2n}}{c}\right)+O(C^{-10}). \end{split} \end{equation}
To prepare for the application of Kuznetsov's trace formula, we combine the weight functions above and write \begin{equation} \widetilde{E}(n,\mathcal{M}_1,\mathcal{M}_2)=\frac{HH'n^{-\frac{1}{2}}}{\Lambda}\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)m_1^{-\frac{1}{4}}\lambda_{2}(m_2)m_2^{-\frac{1}{4}}\sum_{c}\frac{S(b,2n;c)}{c}\Phi\left(\frac{4\pi\sqrt{2nb}}{c}\right), \end{equation} where \begin{equation} \Phi(z)=\sum_{\pm}\sum^{\pm}w_0\left(\frac{4\pi\sqrt{2nb}}{zC}\right)W_{\pm}\left(\frac{m_1z^2}{32\pi^2b},\frac{m_1Hz^2}{32\pi^2nb}\right) V^{\pm}\left(\frac{m_2z^2}{32\pi^2b},\frac{m_2H'z^2}{32\pi^2nb}\right)e^{\pm i\sqrt{\frac{m_1}{2b}}z}e^{\pm i\sqrt{\frac{m_2}{2b}}z}. \end{equation}
Note that the support of $w_0$ implies that we can restrict $z$ to $$z\asymp \mathcal{Z}:=\frac{\sqrt{X(\mathcal{M}_1+\mathcal{M}_2)}}{C}.$$
We can attach to $\Phi(z)$ a redundant smooth weight function $w_2(\frac{z}{\mathcal{Z}})$ of compact support $[\mathcal{Z}/100,100\mathcal{Z}]$ that is identically $1$ on $[\mathcal{Z}/20,20\mathcal{Z}]$.
Now we separate the variables. We do this by Mellin inversion to the functions $w_0$, $W_{\pm}$ and $V^{\pm}$ (using Corollary \ref{realpart}). This can be done with almost no loss since these functions are non-oscillatory, similar to \cite{bl-mi} and \cite{bfkmm}. Thus we have \begin{equation} \begin{split} \Phi(z)=&\sum_{\pm}\sum^{\pm}w_0\left(\frac{4\pi\sqrt{2nb}}{zC}\right)W_{\pm}\left(\frac{m_1z^2}{32\pi^2b},\frac{m_1Hz^2}{32\pi^2nb}\right)V^{\pm}\left(\frac{m_2z^2}{32\pi^2b},\frac{m_2H'z^2}{32\pi^2nb}\right)\\ &\cdot e^{\pm iz(\sqrt{\frac{m_1}{2b}}\pm\sqrt{\frac{m_2}{2b}})}w_2\left(\frac{z}{\mathcal{Z}}\right)\\ =&\sum_{\pm}\sum^{\pm}\int_{\mathcal{C}}\widehat{w}_0(s_1)\widehat{W}_{\pm}(s_2,s_3)\widehat{V}^{\pm}(s_4, s_5) \bigg(\frac{4\pi\sqrt{2nb}}{zC}\bigg)^{-s_1}\bigg(\frac{m_1z^2}{32\pi^2b}\bigg)^{-s_2}\\ &\cdot\bigg(\frac{m_1Hz^2}{32\pi^2nb}\bigg)^{-s_3}\bigg(\frac{m_2z^2}{32\pi^2b}\bigg)^{-s_4}\bigg(\frac{m_2H'z^2}{32\pi^2nb}\bigg)^{-s_5}\mathrm{d} s\cdot e^{\pm iz(\sqrt{\frac{m_1}{2b}}\pm\sqrt{\frac{m_2}{2b}})}w_2\left(\frac{z}{\mathcal{Z}}\right)\\ =&\sum_{\pm}\sum^{\pm}\int_{\mathcal{C}}\widehat{w}_0(s_1)\widehat{W}_{\pm}(s_2,s_3)\widehat{V}^{\pm}(s_4, s_5)(4\pi\sqrt{2})^{-s_1}(32\pi^2)^{s_2+s_3+s_4+s_5}C^{s_1}\mathcal{Z}^{s_1-2s_2-2s_3-2s_4-2s_5}\\ &\cdot (\mathcal{M}_1+\mathcal{M}_2)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\mathcal{M}_1^{-s_2-s_3}\mathcal{M}_2^{-s_4-s_5}H^{-s_3}H'^{-s_5}n^{-\frac{s_1}{2}+s_3+s_5}\left(\frac{b}{\mathcal{M}_1+\mathcal{M}_2}\right)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\\ &\cdot\bigg(\frac{m_1}{\mathcal{M}_1}\bigg)^{-s_2-s_3}\bigg(\frac{m_2}{\mathcal{M}_2}\bigg)^{-s_4-s_5} \bigg(\frac{z}{\mathcal{Z}}\bigg)^{s_1-2s_2-2s_3-2s_4-2s_5}e^{\pm iz(\sqrt{\frac{m_1}{2b}}\pm\sqrt{\frac{m_2}{2b}})}w_2\left(\frac{z}{\mathcal{Z}}\right)\mathrm{d}s, \end{split} \end{equation} where $\mathcal{C}$ is the fivefold contour taken over the vertical lines $\Re s_1=\Re s_2=\Re s_3=\Re s_4=\Re s_5=0$.
Here we denote $\mathrm{d}s=\frac{1}{(2\pi i)^5}\prod_{j=1}^{5}\mathrm{d} s_j$. Note that due to the rapid decay of $\widehat{w}_0(s_1)\widehat{W}_{\pm}(s_2,s_3)\widehat{V}^{\pm}(s_4, s_5)$ along vertical lines, we can truncate the integrals above at $|\Im s_i|\leq C^\varepsilon, 1\leq i\leq 5$, at the cost of a negligible error. We denote the truncated contour by $\widetilde{\mathcal{C}}$.
We arrive at \begin{equation} \begin{split} \widetilde{E}(n,\mathcal{M}_1,\mathcal{M}_2)&=\frac{HH'}{\Lambda}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\sum_{\pm}\sum^{\pm}\int_{\widetilde{\mathcal{C}}}\widehat{w}_0(s_1)\widehat{W}_{\pm}(s_2,s_3)\widehat{V}^{\pm}(s_4, s_5)(4\pi\sqrt{2})^{-s_1}(32\pi^2)^{s_2+s_3+s_4+s_5} \\&\cdot C^{s_1}\mathcal{Z}^{s_1-2s_2-2s_3-2s_4-2s_5}(\mathcal{M}_1+\mathcal{M}_2)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\mathcal{M}_1^{-s_2-s_3}\mathcal{M}_2^{-s_4-s_5}H^{-s_3}H'^{-s_5}\\ &\cdot n^{-\frac{1}{2}-\frac{s_1}{2}+s_3+s_5}\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\left(\frac{b}{\mathcal{M}_1+\mathcal{M}_2}\right)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)\lambda_{2}(m_2) \\&\cdot\left(\frac{m_1}{\mathcal{M}_1}\right)^{-\frac{1}{4}-s_2-s_3}\left(\frac{m_2}{\mathcal{M}_2}\right)^{-\frac{1}{4}-s_4-s_5}\sum_{c}\frac{S(b,2n;c)}{c}\Theta\left(\frac{4\pi\sqrt{2nb}}{c}\right)\mathrm{d}s+O(C^{-10}), \end{split} \end{equation} where \begin{equation}\label{function} \Theta(z)=e^{\pm iz(\sqrt{\frac{m_1}{2b}}\pm\sqrt{\frac{m_2}{2b}})}w_2\left(\frac{z}{\mathcal{Z}}\right)\left(\frac{z}{\mathcal{Z}}\right)^{s_1-2s_2-2s_3-2s_4-2s_5}. \end{equation}
Now we apply Kuznetsov's trace formula to the $c$-sum. By Lemma \ref{lemmaofphi}, the spectral sums can be truncated at $C^{\varepsilon}\mathcal{Z}$. Hence we obtain \begin{equation} \sum_{c}\frac{S(b,2n;c)}{c}\Theta\left(\frac{4\pi\sqrt{2nb}}{c}\right)=\mathcal{H}(n)+\mathcal{M}(n)+\mathcal{E}(n)+\text{(negligible error)}, \end{equation} where \begin{equation}\label{ksum} \begin{split} & \mathcal{H}(n)=\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\sum_{f\in\mathcal{B}_k}\Gamma(k)\cdot4i^k\int_{0}^{\infty}\Theta(z)J_{k-1}(z)\frac{\mathrm{d}z}{z}\cdot \sqrt{2n}\rho_f(2n)\sqrt{b}\,\overline{\rho_f(b)}, \\
& \mathcal{M}(n)=\sum_{\substack{f\in\mathcal{B}\\|t_f|\leq C^{\varepsilon}\mathcal{Z}}}2\pi i\int_{0}^{\infty}\Theta(z)\frac{J_{2it_f}(z)-J_{-2it_f}(z)}{\sinh(\pi t_f)}\frac{\mathrm{d}z}{z}\cdot \frac{\sqrt{2nb}}{\cosh (\pi t_f)}\rho_f(2n)\overline{\rho_f(b)}, \\
& \mathcal{E}(n)=\frac{1}{4\pi}\int_{|t|\leq C^{\varepsilon}\mathcal{Z}}2\pi i\int_{0}^{\infty}\Theta(z)\frac{J_{2it}(z)-J_{-2it}(z)}{\sinh(\pi t)}\frac{\mathrm{d}z}{z}\cdot \frac{\sqrt{2nb}}{\cosh (\pi t)}\rho(2n,t)\overline{\rho(b,t)}\mathrm{d}t \end{split} \end{equation} are contributions from the holomorphic modular forms, Maass forms, and Eisenstein series respectively.
Now we are ready to sum over $a(n)$, $X\leq n\leq 2X$. Summing over $n$, we get
\begin{equation} \begin{split} &\sum_{n\asymp X}a(n)\widetilde{E}(n,\mathcal{M}_1,\mathcal{M}_2)\\ &=\frac{HH'}{\Lambda}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\sum_{\pm}\sum^{\pm}\int_{\widetilde{\mathcal{C}}}\widehat{w}_0(s_1)\widehat{W}_{\pm}(s_2,s_3)\widehat{V}^{\pm}(s_4, s_5)(4\pi\sqrt{2})^{-s_1}(32\pi^2)^{s_2+s_3+s_4+s_5}\\ &\cdot C^{s_1}\mathcal{Z}^{s_1-2s_2-2s_3-2s_4-2s_5}(\mathcal{M}_1+\mathcal{M}_2)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\mathcal{M}_1^{-s_2-s_3}\mathcal{M}_2^{-s_4-s_5}H^{-s_3}H'^{-s_5}\\ &\cdot\sum_{n\asymp X}a(n)n^{-\frac{1}{2}-\frac{s_1}{2}+s_3+s_5}\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\left(\frac{b}{\mathcal{M}_1+\mathcal{M}_2}\right)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)\lambda_{2}(m_2)\\ &\cdot\bigg(\frac{m_1}{\mathcal{M}_1}\bigg)^{-\frac{1}{4}-s_2-s_3}\bigg(\frac{m_2}{\mathcal{M}_2}\bigg)^{-\frac{1}{4}-s_4-s_5}\bigg(\mathcal{H}(n)+\mathcal{M}(n)+\mathcal{E}(n)\bigg)\mathrm{d}s+O(C^{-10})\\ &:=\mathcal{HH}+\mathcal{MM}+\mathcal{EE}+O(C^{-10}), \end{split} \end{equation} with $\mathcal{HH}$, $\mathcal{MM}$, $\mathcal{EE}$ being contributions from holomorphic forms, Maass forms and Eisenstein series respectively.
Note that for the function $\Theta(z)$ in \eqref{function}, the imaginary part $\Im (s_1-2s_2-2s_3-2s_4-2s_5)\ll C^{\varepsilon}$, which is relatively small compared to $\mathcal{Z}$. By Lemma \ref{lemmaofphi} and the remark after it, it suffices to deal with the contribution from the holomorphic spectrum, since the contributions from the other two parts will be similar or even smaller. Also note that since we are considering the full modular group case, we do not have exceptional eigenvalues contribution in the Maass spectrum.
Now we focus on the holomorphic contribution, which is \begin{equation} \begin{split} \mathcal{HH}&=\frac{C^{\varepsilon}HH'}{\Lambda}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\sum_{\pm}\sum^{\pm}\int_{\widetilde{\mathcal{C}}}\widehat{w}_0(s_1)\widehat{W}_{\pm}(s_2,s_3)\widehat{V}^{\pm}(s_4, s_5)(4\pi\sqrt{2})^{-s_1}(32\pi^2)^{s_2+s_3+s_4+s_5} \\&\cdot C^{s_1}\mathcal{Z}^{s_1-2s_2-2s_3-2s_4-2s_5}(\mathcal{M}_1+\mathcal{M}_2)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5}\mathcal{M}_1^{-s_2-s_3}\mathcal{M}_2^{-s_4-s_5}H^{-s_3}H'^{-s_5}\\ &\cdot\int_{0}^{\infty}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}4i^k\Gamma(k)\sum_{f\in\mathcal{B}_k}\sum_{n\asymp X}a(n)n^{-\frac{1}{2}-\frac{s_1}{2}+s_3+s_5}\sqrt{2n}\rho_f(2n)\cdot \sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sqrt{b}\,\overline{\rho_f(b)}\\ &\cdot\left(\frac{b}{\mathcal{M}_1+\mathcal{M}_2}\right)^{-\frac{s_1}{2}+s_2+s_3+s_4+s_5} \sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)\lambda_{2}(m_2)\left(\frac{m_1}{\mathcal{M}_1}\right)^{-\frac{1}{4}-s_2-s_3} \left(\frac{m_2}{\mathcal{M}_2}\right)^{-\frac{1}{4}-s_4-s_5}\\ &\cdot e^{\pm iz(\sqrt{m_1}\pm\sqrt{m_2})}w_2\left(\frac{\sqrt{2b}z}{\mathcal{Z}}\right)\left(\frac{\sqrt{2b}z}{\mathcal{Z}}\right)^{s_1-2s_2-2s_3-2s_4-2s_5}J_{k-1}(\sqrt{2b}z)\frac{\mathrm{d}z}{z}\mathrm{d}s, \end{split} \end{equation} after making the change of variable $z\mapsto \sqrt{2b}z$. The support of the smooth function $w_2$ implies that $z\asymp \frac{\sqrt{X}}{C}$ in the inner integral.
Bounding the $z$-integral and $s_i$-integrals trivially, we have \begin{equation} \begin{split} \mathcal{HH}&\ll\frac{C^{\varepsilon}HH'}{\Lambda}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\cdot
\sup_{\substack{|u_1|, |u_2|,|u_3|,|u_4|\leq C^{\varepsilon}\\z\asymp \frac{\sqrt{X}}{C}}}\bigg|\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\sum_{n\asymp X}a(n)n^{-\frac{1}{2}+iu_4}\sqrt{2n}\rho_f(2n)\\ &\cdot \sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sqrt{b}\,\overline{\rho_f(b)}J_{k-1}(\sqrt{2b}z)\cdot w_2\left(\frac{\sqrt{2b}z}{\mathcal{Z}}\right)\left(\frac{\sqrt{2b}z}{\mathcal{Z}}\right)^{-2iu_3}\left(\frac{b}{\mathcal{M}_1+\mathcal{M}_2}\right)^{iu_3}\\
&\cdot\sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)\lambda_{2}(m_2)\left(\frac{m_1}{\mathcal{M}_1}\right)^{-\frac{1}{4}+iu_1}\left(\frac{m_2}{\mathcal{M}_2}\right)^{-\frac{1}{4}+iu_2}e^{\pm iz(\sqrt{m_1}\pm\sqrt{m_2})}\bigg|. \end{split} \end{equation}
The Cauchy-Schwarz inequality further implies that \begin{equation}\label{twosup} \begin{split} \mathcal{HH}&\ll\frac{C^{\varepsilon}HH'}{\Lambda}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\cdot
\bigg(\sup_{|u_4|\leq C^{\varepsilon}}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\bigg |\sum_{n\asymp X}a(n)n^{-\frac{1}{2}+iu_4}\sqrt{2n}\rho_f(2n)\bigg|^2\bigg)^{\frac{1}{2}}
\\&\cdot\bigg(\sup_{\substack{|u_1|, |u_2|,|u_3|\leq C^{\varepsilon}\\z\asymp \frac{\sqrt{X}}{C}}}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\bigg|\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sqrt{b}\,\overline{\rho_f(b)}J_{k-1}(\sqrt{2b}z)\gamma^{\star}(b,z)\bigg|^2\bigg)^{\frac{1}{2}}, \end{split} \end{equation} where \begin{equation}\label{gammastar} \begin{split} \gamma^{\star}(b,z)&:=w_2\left(\frac{\sqrt{2b}z}{\mathcal{Z}}\right)\left(\frac{\sqrt{2b}z}{\mathcal{Z}}\right)^{-2iu_3}\left(\frac{b}{\mathcal{M}_1+\mathcal{M}_2}\right)^{iu_3}\\ &\cdot\sum_{\substack{m_1+m_2=b\\ m_1\asymp\mathcal{M}_1,\ m_2\asymp\mathcal{M}_2}}\lambda_{1}(m_1)\lambda_{2}(m_2)\left(\frac{m_1}{\mathcal{M}_1}\right)^{-\frac{1}{4}+iu_1}e^{\pm iz\sqrt{m_1}}\left(\frac{m_2}{\mathcal{M}_2}\right)^{-\frac{1}{4}+iu_2}e^{\pm iz\sqrt{m_2}}. \end{split} \end{equation}
The large sieve inequalities yield that \begin{equation*}
\bigg(\sup_{|u_4|\leq C^{\varepsilon}}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\bigg |\sum_{n\asymp X}a(n)n^{-\frac{1}{2}+iu_4}\sqrt{2n}\rho_f(2n)\bigg|^2\bigg)^{\frac{1}{2}}
\ll C^{\varepsilon}\left(\mathcal{Z}^2+X\right)^{\frac{1}{2}}X^{-\frac{1}{2}}\|a\|_2. \end{equation*}
Now it remains to deal with the second line of \eqref{twosup}. We denote \begin{equation*}
(\star\star):=\sup_{\substack{|u_1|, |u_2|,|u_3|\leq C^{\varepsilon}\\z\asymp \frac{\sqrt{X}}{C}}}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\bigg|\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sqrt{b}\,\overline{\rho_f(b)}J_{k-1}(\sqrt{2b}z)\gamma^{\star}(b,z)\bigg|^2. \end{equation*}
We want to separate the $k$-variable from the argument of the $J$-Bessel function, so that one can apply the large sieve inequalities. By the integral representation $J_{k-1}(x)=\frac{1}{\pi}\int_0^{\pi}\cos((k-1)\xi-x\sin\xi)\mathrm{d}\xi$, \cite[(8.411.1)]{GR07}, we have \begin{equation*} J_{k-1}(\sqrt{2b}z)=\frac{1}{2\pi}\int_0^{\pi}\left(e^{i(k-1)\xi}e^{-i\sqrt{2b}z\sin\xi}+e^{-i(k-1)\xi}e^{i\sqrt{2b}z\sin\xi}\right)\mathrm{d}\xi. \end{equation*}
Hence \begin{equation*} \begin{split}
(\star\star)&\ll\sup_{\substack{|u_1|, |u_2|,|u_3|\leq C^{\varepsilon}\\z\asymp \frac{\sqrt{X}}{C}}}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\bigg|\int_0^{\pi}e^{i(k-1)\xi}\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sqrt{b}\,\overline{\rho_f(b)}\cdot e^{-i\sqrt{2b}z\sin\xi}\gamma^{\star}(b,z)\mathrm{d}\xi\bigg|^2\\
&\ll\int_0^{\pi}\sup_{\substack{|u_1|, |u_2|,|u_3|\leq C^{\varepsilon}\\z\asymp \frac{\sqrt{X}}{C}}}\sum_{\substack{2\leq k\leq C^{\varepsilon}\mathcal{Z}\\ k\ \mathrm{even}}}\Gamma(k)\sum_{f\in\mathcal{B}_k}\bigg|\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\sqrt{b}\,\overline{\rho_f(b)}\cdot e^{-i\sqrt{2b}z\sin\xi}\gamma^{\star}(b,z)\bigg|^2\mathrm{d}\xi\\
&\ll C^{\varepsilon}\left(\mathcal{Z}^2+\mathcal{M}_1+\mathcal{M}_2\right)\sup_{z\asymp \frac{\sqrt{X}}{C}}\sum_{b}\big|\gamma^{\star}(b,z)\big|^2 \end{split} \end{equation*} by Lemma \ref{largesieve}.
In summary, we have arrived at \begin{equation*}
\mathcal{HH}\ll\frac{C^{\varepsilon}HH'}{\Lambda}X^{-\frac{1}{2}}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\cdot \left(\mathcal{Z}^2+X\right)^{\frac{1}{2}}\|a\|_2\cdot \left(\mathcal{Z}^2+\mathcal{M}_1+\mathcal{M}_2\right)^{\frac{1}{2}}\sup_{z\asymp \frac{\sqrt{X}}{C}}\|\gamma^{\star}\|_2. \end{equation*}
Now for our purpose it remains to deal with the $\ell^2$-norm $\|\gamma^{\star}\|_2=\left(\sum_{b}\big|\gamma^{\star}(b,z)\big|^2\right)^{\frac{1}{2}}$, where $\gamma^{\star}(b,z)$ is defined in \eqref{gammastar}. By Parseval,
\begin{equation} \begin{split}
\sum_{b\asymp \mathcal{M}_1+\mathcal{M}_2}\big|\gamma^{\star}(b,z)\big|^2\ll&
\int_{0}^{1}\left|\sum_{m_1\asymp\mathcal{M}_1}\lambda_{1}(m_1)\left(\frac{m_1}{\mathcal{M}_1}\right)^{-\frac{1}{4}+iu_1}e^{\pm iz\sqrt{m_1}}e(m_1\alpha)\right|^2\\
&\cdot\left|\sum_{m_2\asymp\mathcal{M}_2}\lambda_{2}(m_2)\left(\frac{m_2}{\mathcal{M}_2}\right)^{-\frac{1}{4}+iu_2}e^{\pm iz\sqrt{m_2}}e(m_2\alpha)\right|^2\mathrm{d}\alpha. \end{split} \end{equation}
Note that both $u_1$ and $u_2$ are of negligible size here. The sup-norm of the $m_2$-sum is bounded by $C^{\varepsilon}(\mathcal{M}_2^{1/2}+z\mathcal{M}_2)$, by Wilton's bound $\sum_{n\leq x}\lambda_f(n)e(\alpha n)\ll_f x^{1/2+\varepsilon}$ and partial summation. Next we open the square, getting \begin{equation*}
\sum_{b}\big|\gamma^{\star}(b,z)\big|^2\ll C^{\varepsilon}(\mathcal{M}_2^{1/2}+z\mathcal{M}_2)^2\sum_{m_1\asymp\mathcal{M}_1}|\lambda_{1}(m_1)|^2\ll C^{\varepsilon}(\mathcal{M}_2^{1/2}+z\mathcal{M}_2)^2\mathcal{M}_1. \end{equation*}
Similar sums without the twisted weight functions have appeared in the work of Jutila \cite{jut}. See also Blomer and Mili{\'c}evi{\'c} \cite[(8.5)]{bl-mi} for a similar sum.
Recall $\mathcal{M}_1\ll T_1=C^{\varepsilon}\frac{C^{2}X}{H^2}$, $\mathcal{M}_2\ll T_2=C^{\varepsilon}\frac{C^{2}X}{H'^2}$, $\mathcal{Z}=\frac{\sqrt{X(\mathcal{M}_1+\mathcal{M}_2)}}{C}\ll C^{\varepsilon}\frac{X}{H}$, $\Lambda\gg C^{2-\varepsilon}$, and that $C=X^{1000}$ is a large parameter. In particular, $\sup_{z\asymp \frac{\sqrt{X}}{C}}\sum_{b}\big|\gamma^{\star}(b,z)\big|^2\ll C^{\varepsilon}\big(\frac{X}{H'}\big)^2\mathcal{M}_1\mathcal{M}_2$.
Then \begin{equation*} \begin{split}
\mathcal{HH}&\ll\frac{C^{\varepsilon}HH'}{C^2}X^{-\frac{1}{2}}(\mathcal{M}_1\mathcal{M}_2)^{-\frac{1}{4}}\cdot \left(\mathcal{Z}^2+X\right)^{\frac{1}{2}}\|a\|_2\cdot \left(\mathcal{Z}^2+\mathcal{M}_1+\mathcal{M}_2\right)^{\frac{1}{2}}\cdot \frac{X}{H'}\left(\mathcal{M}_1\mathcal{M}_2\right)^{\frac{1}{2}}\\
&\ll\frac{C^{\varepsilon}HH'}{C^2} X^{-\frac{1}{2}}\left(\frac{C^{2}X}{H^2}\cdot\frac{C^{2}X}{H'^2}\right)^{\frac{1}{4}}\left(\frac{X^2}{H^2}+X\right)^{\frac{1}{2}}\cdot \left(\frac{X^2}{H^2}+\frac{C^{2}X}{H^2}\right)^{\frac{1}{2}}\cdot\frac{X}{H'}\cdot \|a\|_2\\
&\ll C^{\varepsilon}(HH')^{\frac{1}{2}}\left(\frac{X^2}{H^2}+X\right)^{\frac{1}{2}}\cdot\frac{X^{1/2}}{H}\cdot\frac{X}{H'}\|a\|_2\\
&\ll C^{\varepsilon}\frac{X^{3/2}}{(HH')^{\frac{1}{2}}}\left(\frac{X}{H}+X^{\frac{1}{2}}\right)\|a\|_2. \end{split} \end{equation*}
By taking $H'=X/3$, we have $\mathcal{HH}\ll C^{\varepsilon}\frac{X}{H}\left(\frac{X}{H^{1/2}}+(XH)^{\frac{1}{2}}\right)\|a\|_2$, and hence
\begin{equation*}\sum_{X\leq n\leq 2X}a(n)E(n)\ll C^{\varepsilon}\frac{X}{H}\left(\frac{X}{H^{1/2}}+(XH)^{\frac{1}{2}}\right)\|a\|_2. \end{equation*}
\subsection*{Acknowledgements.} The author is grateful to Valentin Blomer for several helpful comments and suggestions during the preparation of this article. He also thanks Sheng-Chi Liu for drawing his attention to \cite{blo} and Roman Holowinsky for his encouragement and comments.
\end{document} |
\begin{document}
\title[The Bochner-Riesz means for Fourier-Bessel expansions] {The Bochner-Riesz means for Fourier-Bessel expansions: norm inequalities for the maximal operator and almost everywhere convergence}
\author[\'O. Ciaurri and L. Roncal]{\'Oscar Ciaurri and Luz Roncal} \address{Departamento de Matem\'aticas y Computaci\'on\\ Universidad de La Rioja\\ 26004 Logro\~no, Spain} \email{[email protected], [email protected]}
\thanks{Research supported by the grant MTM2009-12740-C03-03 from Spanish Government.}
\keywords{Fourier-Bessel expansions, Bochner-Riesz means, almost everywhere convergence, maximal operators, weighted inequalities}
\subjclass[2010]{Primary: 42C10, Secondary: 42C20, 42A45}
\begin{abstract} In this paper, we develop a thorough analysis of the boundedness properties of the maximal operator for the Bochner-Riesz means related to the Fourier-Bessel expansions. For this operator, we study weighted and unweighted inequalities in the spaces $L^p((0,1),x^{2\nu+1}\, dx)$. Moreover, weak and restricted weak type inequalities are obtained for the critical values of~$p$. As a consequence, we deduce the almost everywhere pointwise convergence of these means. \end{abstract}
\maketitle
\section{Introduction and main results}
Let $J_{\nu}$ be the Bessel function of order $\nu$. For $\nu>-1$ we have that \[ \int_0^1 J_{\nu}(s_jx)J_{\nu}(s_kx)x\,dx= \frac12 (J_{\nu+1}(s_j))^2\delta_{j,k},\quad j,k=1,2,\dots \] where $\{s_j\}_{j\ge 1}$ denotes the sequence of successive positive zeros of $J_{\nu}$. From the previous identity we can check that the system of functions \begin{equation} \label{eq:FBesselSystemI}
\psi_j(x)=\frac{\sqrt{2}}{|J_{\nu+1}(s_j)|}x^{-\nu}J_{\nu}(s_jx),\quad j=1,2,\dots \end{equation} is orthonormal and complete in $L^2((0,1),d\mu_\nu)$, with $d\mu_\nu(x)=x^{2\nu+1}\, dx$ (for the completeness, see~\cite{Hochstadt}). Given a function $f$ on $(0,1)$, its Fourier series associated with this system, named as Fourier-Bessel series, is defined by \begin{equation}\label{SeriesCoeficientes} f\sim \sum_{j=1}^\infty a_j(f)\psi_j,\qquad\text{with}\qquad a_j(f)=\int_0^1 f(y)\psi_j(y)\,d\mu_{\nu}(y), \end{equation} provided the integral exists. When $\nu=n/2-1$, for $n\in \mathbb{N}$ and $n\ge 2$, the functions $\psi_j$ are the eigenfunctions of the radial Laplacian in the multidimensional ball $B^n$. The eigenvalues are the elements of the sequence $\{s_j^2\}_{j\ge 1}$. The Fourier-Bessel series corresponds with the radial case of the multidimensional Fourier-Bessel expansions analyzed in~\cite{Bal-Cor}.
For each $\delta>0$, we define the Bochner-Riesz means for Fourier-Bessel series as \begin{equation*}
\mathcal{B}_R^{\delta}(f,x)=\sum_{j\ge 1} \left(1-\frac{s_j^2}{R^2}\right)_+^{\delta}a_j(f)\psi_j(x), \end{equation*} where $R>0$ and $(1-s^2)_+=\max\{1-s^2,0\}$. Bochner-Riesz means are a regular summation method often used in harmonic analysis. It is very common to analyze regular summation methods for Fourier series when the convergence of the partial sum fails. Ces\`{a}ro means are another of the most common summation methods. B. Muckenhoupt and D. W. Webb \cite{Mu-We} give inequalities for Ces\`{a}ro means of Laguerre polynomial series and for the supremum of these means with certain parameters and $1<p\leq \infty$. For $p=1$, they prove a weak type result. They also obtain similar estimates for Ces\`{a}ro means of Hermite polynomial series and for the supremum of those means in \cite{Mu-We-Her}. An almost everywhere convergence result is obtained as a corollary in \cite{Mu-We} and \cite{Mu-We-Her}. The result about Laguerre polynomials is an extension of a previous result in \cite{Stem}. Questions of this kind have also been studied by the first author and J. L. Varona in \cite{Ciau-Var} for the Ces\`{a}ro means of generalized Hermite expansions. The Ces\`{a}ro means for Jacobi polynomials were analyzed by S. Chanillo and B. Muckenhoupt in \cite{Ch-Muc}. The Bochner-Riesz means themselves have been analyzed for the Fourier transform, and their boundedness properties in $L^p(\mathbb{R}^n)$ constitute an important unsolved problem for $n>2$ (the case $n=2$ is well understood, see \cite{Car-Sjo}).
The target of this paper is twofold. First we will analyze the almost everywhere (a. e.) convergence, for functions in $L^p((0,1),d\mu_\nu)$, of the Bochner-Riesz means for Fourier-Bessel expansions. By the general theory \cite[Ch. 2]{Duoa}, to obtain this result we need to estimate the maximal operator \[
\mathcal{B}^{\delta}(f,x)=\sup_{R>0}\left|\mathcal{B}_R^{\delta}(f,x)\right|, \] in the $L^p((0,1),d\mu_\nu)$ spaces. A deep analysis of the boundedness properties of this operator will be the second goal of our paper. This part of our work is strongly inspired by the results given in \cite{Ch-Muc} for the Fourier-Jacobi expansions.
Before giving our results we introduce some notation. Setting $p_0=\frac{4(\nu+1)}{2\nu+3+2\delta}$ and $p_1=\frac{4(\nu+1)}{2\nu+1-2\delta}$, we define \begin{align} \label{eq:endpoints0} p_0(\delta)&=\begin{cases} 1,& \delta> \nu+1/2\text{ or } -1<\nu\le -1/2,\\ p_0,& \delta\le \nu+1/2\text{ and } \nu>-1/2, \end{cases}\\
\notag p_1(\delta)&=\begin{cases} \infty,& \delta> \nu+1/2\text{ or } -1<\nu\le -1/2,\\ p_1,& \delta\le \nu+1/2\text{ and } \nu>-1/2. \end{cases} \end{align}
Concerning the a. e. convergence of the Bochner-Riesz means, our result reads as follows. \begin{Theo} \label{th:fin} Let $\nu>-1$, $\delta>0$, and $1\le p<\infty$. Then, \begin{equation*}
\mathcal{B}_R^\delta(f,x)\to f(x)\quad \text{a. e., for $f\in L^p((0,1),d\mu_\nu)$} \end{equation*} if and only if $p_0(\delta)\le p$, where $p_0(\delta)$ is as in \eqref{eq:endpoints0}. \end{Theo} Proof of Theorem \ref{th:fin} is contained in Section \ref{sec:Proof Th1} and is based on the following arguments. On one hand, to prove the necessity part, we will show the existence of functions in $L^p((0,1),d\mu_{\nu})$ for $p<p_0(\delta)$ such that $\mathcal{B}^\delta_R$ diverges for them. In order to do this, we will use a reasoning similar to the one given by C. Meaney in \cite{Meaney} that we describe in Section \ref{sec:Proof Th1}. On the other hand, for the sufficiency, observe that the convergence result follows from the study of the maximal operator $\mathcal{B}^\delta f$. Indeed, it is sufficient to get $(p_0(\delta),p_0(\delta))$-weak type estimates for this operator and this will be the content of Theorem \ref{th:AcDebilMaxRedonda}.
Regarding the boundedness properties of $\mathcal{B}^\delta f$ we have the following facts. First, a result containing the $(p,p)$-strong type inequality. \begin{Theo} \label{th:max} Let $\nu>-1$, $\delta>0$, and $1< p\le\infty$. Then, \begin{equation*}
\left\|\mathcal{B}^{\delta}f\right\|_{L^p((0,1),d\mu_{\nu})}\le C
\|f\|_{L^p((0,1),d\mu_{\nu})} \end{equation*} if and only if \[ \begin{cases} 1<p\le \infty, &\text{for $-1<\nu\le -1/2$ or $\delta>\nu+1/2$},\\ p_0<p<p_1, &\text{for $\delta\le \nu+1/2$ and $\nu>-1/2$.} \end{cases} \] \end{Theo} In the lower critical value of $p_0(\delta)$ we can prove a $(p_0(\delta),p_0(\delta))$-weak type estimate. \begin{Theo} \label{th:AcDebilMaxRedonda} Let $\nu>-1$, $\delta>0$, and $p_0(\delta)$ be the number in \eqref{eq:endpoints0}. Then, \[
\left\|\mathcal{B}^{\delta}f\right\|_{L^{p_0(\delta),\infty}((0,1),d\mu_{\nu})}\le C \|f\|_{L^{p_0(\delta)}((0,1),d\mu_{\nu})}, \] with $C$ independent of $f$. \end{Theo} Finally, for the upper critical value, when $0<\delta<\nu+1/2$ and $\nu>-1/2$, it is possible to obtain a $(p_1,p_1)$-restricted weak type estimate. \begin{Theo} \label{th:AcDebilRestMaxRedonda} Let $\nu>-1/2$ and $0<\delta<\nu+1/2$. Then, \[
\left\|\mathcal{B}^{\delta}\chi_E\right\|_{L^{p_1,\infty}((0,1),d\mu_{\nu})}\le C \|\chi_E\|_{L^{p_1}((0,1),d\mu_{\nu})}, \] for all measurable subsets $E$ of $(0,1)$ and $C$ independent of $E$. \end{Theo} The previous results about norm inequalities are summarized in Figure 1 (case $-1<\nu \le -1/2$) and Figure 2 (case $\nu>-1/2$).
\begin{center} \begin{tikzpicture}[scale=2.45] \fill[black!10!white] (0.1,0.1) -- (2.1,0.1) -- (2.1,2.1) -- (0.1,2.1) -- cycle; \draw[thick, dashed] (2.1,0.1) -- (2.1,2.1);
\draw[very thick] (0.1,2.1) -- (0.1,0.1); \draw[very thin] (0.1,0.1) -- (2.15,0.1); \node at (0.1,0) {$0$}; \node at (2.1,0) {$1$}; \node at (2.2,0.1) {$\frac{1}{p}$}; \node at (0.1,2.175) {$\delta$}; \draw (0,1.4) node [rotate=90] {\tiny{$(p,p)$-strong}}; \draw (2.2,1.4) node [rotate=90] {\tiny{$(p,p)$-weak}}; \draw (1.1,-0.2) node {Figure 1: case $-1<\nu\le-\tfrac{1}{2}$.};
\fill[black!10!white] (2.7,0.75) -- (3.35,0.1) -- (4.05,0.1) -- (4.7,0.75) -- (4.7,2.1) -- (2.7,2.1) -- cycle; \draw[thick, dashed] (4.7,0.75) -- (4.7,2.1);
\draw[very thick] (2.7,2.1) -- (2.7,0.78);
\filldraw[fill=white] (2.7,0.75) circle (0.8pt); \draw[thick, dotted] (2.72,0.73) -- (3.35,0.1); \draw[thick, dashed] (4.05,0.1) -- (4.7,0.75); \draw[very thin] (2.7,0.1) -- (4.75,0.1); \draw[very thin] (2.7,0.1) -- (2.7,0.72); \draw[very thin] (4.7,0.1) -- (4.7,0.13); \node at (2.7,0) {$0$}; \node at (4.7,0) {$1$}; \node at (4.8,0.1) {$\frac{1}{p}$}; \node at (2.7,2.175) {$\delta$}; \draw (2.43,0.75) node {\tiny{$\delta=\nu+\tfrac{1}{2}$}}; \draw (3.35,0) node {\tiny{$\tfrac{2\nu+1}{4(\nu+1)}$}}; \draw (4.05,0) node {\tiny{$\tfrac{2\nu+3}{4(\nu+1)}$}}; \draw (2.6,1.4) node [rotate=90] {\tiny{$(p,p)$-strong}}; \draw (3.1,0.45) node [rotate=-45] {\tiny{$(p,p)$-restric. weak}}; \draw (4.3,0.45) node [rotate=45] {\tiny{$(p,p)$-weak}}; \draw (4.8,1.4) node [rotate=90] {\tiny{$(p,p)$-weak}}; \draw (3.7,-0.2) node {Figure 2: case $\nu>-\tfrac{1}{2}$.}; \end{tikzpicture} \end{center}
At this point, a comment is in order. Note that J. E. Gilbert \cite{Gi} also proves weak type norm inequalities for maximal operators associated with orthogonal expansions. The method used there cannot be applied in our case, and the reason is the same one explained in \cite{Ch-Muc}, at the end of Sections 15 and 16 therein. Following the technique in \cite{Gi}, we would have to analyze some weak type inequalities with weights for the Hardy operator and its adjoint, and these inequalities do not hold for $p=p_0$ and $p=p_1$.
The proof of the sufficiency in Theorem \ref{th:max} will be deduced from a more general result in which we analyze the boundedness of the operator $\mathcal{B}^\delta f$ with potential weights. Before stating it, we need a previous definition. We say that the parameters $(b,B,\nu,\delta)$ satisfy the $C_p$ conditions if \begin{align}
b& > \frac{-2(\nu+1)}{p} \,\,\, (\ge \text{ if }p=\infty), \label{ec:con1B}\\
B& < 2(\nu+1)\left(1-\frac1p\right) \,\,\, (\le \text{ if }
p=1), \label{ec:con2B}\\
b& > 2(\nu+1)\left(\frac12-\frac1p\right)-\delta-\frac12\,\,\,
(\ge \text{ if }p=\infty), \label{ec:con3B}\\
B& \le 2(\nu+1)\left(\frac12-\frac1p\right)+\delta+\frac12, \label{ec:con4B}\\
B &\le b \label{ec:con5B}, \end{align} and in at least one of each of the following pairs the inequality is strict: \eqref{ec:con2B} and \eqref{ec:con5B}, \eqref{ec:con3B} and \eqref{ec:con5B}, and \eqref{ec:con4B} and \eqref{ec:con5B} except for $p=\infty$. The result concerning inequalities with potential weights is the following. \begin{Theo} \label{th:AcFuerteMaxRedonda} Let $\nu>-1$, $\delta>0$, and $1< p\le\infty$. If $(b,B,\nu,\delta)$ satisfy the $C_p$ conditions, then \begin{equation*}
\left\|x^b\mathcal{B}^{\delta}f\right\|_{L^p((0,1),d\mu_{\nu})}\le C \|x^Bf\|_{L^p((0,1),d\mu_{\nu})}, \end{equation*} with $C$ independent of $f$. \end{Theo}
A result similar to Theorem \ref{th:AcFuerteMaxRedonda} for the partial sum operator was proved in \cite[Theorem 1]{GuPeRuVa}. It followed from a weighted version of a general maximal transference theorem of Gilbert, see \cite[Theorem 1]{Gi}. The weighted extension of Gilbert's result given in \cite{GuPeRuVa} depended heavily on the $A_p$ theory and it cannot be used in our case because it does not capture all the information relative to the weights. On the other hand, the paper by K. Stempak \cite{Stem2}, in which maximal inequalities for the partial sum operator of Fourier-Bessel expansions as well as divergence and convergence results are discussed, is also remarkable.
The necessity in Theorem \ref{th:max} will follow by showing that the operator $\mathcal{B}^\delta f$ is neither $(p_1,p_1)$-weak nor $(p_0,p_0)$-strong for $\nu>-1/2$ and $0<\delta\le \nu+1/2$. This is the content of the next theorems. \begin{Theo} \label{th:noweak} Let $\nu>-1/2$. Then \[
\sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1}
\|\mathcal{B}^{\delta}_{R}f\|_{L^{p_1,\infty}((0,1),d\mu_\nu)}\ge C (\log R)^{1/p_0}, \] if $0<\delta<\nu+1/2$; and \[
\sup_{\|f\|_{L^\infty((0,1),d\mu_\nu)}=1}
\|\mathcal{B}^{\delta}_{R}f\|_{L^{\infty}((0,1),d\mu_\nu)}\ge C \log R, \] if $\delta= \nu+1/2$. \end{Theo}
\begin{Theo} \label{th:nostrong} Let $\nu>-1/2$. Then \[ \sup_{E\subset
(0,1)}\frac{\|\mathcal{B}^{\delta}_{R}\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}}{\|\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}}\ge C (\log R)^{1/p_0}, \] if $0<\delta<\nu+1/2$; and \[
\sup_{\|f\|_{L^1((0,1),d\mu_\nu)}=1}
\|\mathcal{B}^{\delta}_{R}f\|_{L^{1}((0,1),d\mu_\nu)}\ge C \log R, \] if $\delta= \nu+1/2$. \end{Theo}
The paper is organized as follows. In the next section, we give the proof of Theorem \ref{th:fin}. In Section \ref{sec:proofAcFuerte} we first relate the Bochner-Riesz means $\mathcal{B}_R^{\delta}$ to the Bochner-Riesz means operator associated with the Fourier-Bessel system in the Lebesgue measure setting. Then, we prove weighted inequalities for the supremum of this new operator. With the connection between these means and the operator $\mathcal{B}_R^{\delta}$, we obtain Theorem \ref{th:AcFuerteMaxRedonda} and, as a consequence, the sufficiency of Theorem \ref{th:max}. Sections \ref{sec:ProofThAcDebilMaxRedonda} and \ref{sec:acdelrest} will be devoted to the proofs of Theorems \ref{th:AcDebilMaxRedonda} and \ref{th:AcDebilRestMaxRedonda}, respectively. The proofs of Theorems \ref{th:noweak} and \ref{th:nostrong} are contained in Section \ref{sec:negativeths}. One of the main ingredients in the proofs of Theorems \ref{th:noweak} and \ref{th:nostrong} will be Lemma \ref{lem:pol}; this lemma is rather technical and it will be proved in Section \ref{sec:techlemma}.
Throughout the paper, we will use the following notation: for each $p\in[1,\infty]$, we will denote by $p'$ the conjugate of $p$, that is, $\tfrac{1}{p}+\tfrac{1}{p'}=1$. We shall write $X\simeq Y$ when simultaneously $X\le C Y$ and $Y \le C X$.
\section{Proof of Theorem \ref{th:fin}}\label{sec:Proof Th1}
The proof of the sufficiency follows from Theorem \ref{th:AcDebilMaxRedonda} and standard arguments.
In order to prove the necessity, let us see that, for $0<\delta<\nu+1/2$ and $\nu>-1/2$, there exists a function $f\in L^{p}((0,1),d\mu_{\nu})$, $p\in [1,p_0)$, for which $\mathcal{B}_R^{\delta}(f,x)$ diverges. We follow some ideas contained in \cite{Meaney} and \cite{Stem2}.
First, we need a few more ingredients. Recall the well-known asymptotics for the Bessel functions (see \cite[Chapter 7]{Wat}) \begin{equation}\label{zero} J_\nu(z) = \frac{z^\nu}{2^\nu \Gamma(\nu+1)} + O(z^{\nu+2}),
\quad |z|<1,\quad |\arg(z)|\leq\pi, \end{equation} and \begin{equation} \label{infty} J_\nu(z)=\sqrt{\frac{2}{\pi z}}\left[ \cos\left(z-\frac{\nu\pi}2
- \frac\pi4 \right) + O(e^{|\mathop{\rm Im}(z)|}z^{-1}) \right], \quad |z|
\ge 1,\quad |\arg(z)|\leq\pi-\theta, \end{equation} for any fixed $\theta>0$, where we write $D_{\nu}=-(\nu\pi/2+\pi/4)$, so that the cosine above equals $\cos(z+D_{\nu})$. The fact that (cf. \cite[(2.6)]{OScon}) \begin{equation} \label{eq:zerosCons} s_{j}=O(j) \end{equation} will also be useful. For our purposes, we need estimates for the $L^p$ norms of the functions $\psi_j$. These estimates are contained in the following lemma, whose proof can be read in \cite[Lemma 2.1]{Ci-RoWave}.
\|\psi_j\|_{L^p((0,1),d\mu_{\nu})}\simeq \begin{cases} j^{(\nu+1/2)-\frac{2(\nu+1)}{p}}, & \text{if $p>\frac{2(\nu+1)}{\nu+1/2}$},\\ (\log j)^{1/p}, & \text{if $p=\frac{2(\nu+1)}{\nu+1/2}$},\\ 1, & \text{if $p<\frac{2(\nu+1)}{\nu+1/2}$}, \end{cases} \end{equation*} and, for $-1<\nu\le-1/2$, \begin{equation*}
\|\psi_j\|_{L^p((0,1),d\mu_{\nu})}\simeq \begin{cases} 1, & \text{if $p<\infty$},\\ j^{\nu+1/2},& \text{if $p=\infty$}. \end{cases} \end{equation*} \end{Lem} We will also use a slight modification of a result by G. H. Hardy and M. Riesz for the Riesz means of order $\delta$, that is contained in \cite[Theorem 21]{HaRi}. We present here this
result, adapted to the Bochner-Riesz means. We denote by $S_R(f,x)$ the partial sum associated to the Fourier-Bessel expansion, namely \[ S_R(f,x)=\sum_{0<s_j\le R} a_j(f)\psi_j(x). \] The result reads as follows. \begin{Lem} \label{lem:HardyRiesz} Suppose that $f$ can be expressed as a Fourier-Bessel expansion and for some $\delta>0$ and $x\in(0,1)$ its Bochner-Riesz means $\mathcal{B}_R^\delta(f,x)$ converges to $c$ as $R\rightarrow \infty$. Then, for $s_n\le R < s_{n+1}$, \[
|S_R(f,x)-c|\le A_{\delta}n^{\delta}\sup_{0<t\le s_{n+1}}|\mathcal{B}_{t}^{\delta}(f,x)|. \] \end{Lem} By using this lemma, we can write \begin{equation} \label{ConsecuenciaLemaHardyRiesz}
|a_j(f)\psi_j(x)|=|(S_{s_{j}}(f,x)-c)-(S_{s_{j-1}}(f,x)-c)|\le A_{\delta}j^\delta\sup_{0<t\le s_{j+1}}|\mathcal{B}_{t}^{\delta}(f,x)|. \end{equation} Let us proceed with the proof of the necessity. Let $1\le p<p_0$. Note that $p_0'=p_1$. Therefore, $p'>p_0'>\tfrac{2(\nu+1)}{\nu+1/2}$, and $\delta<\nu+1/2-\frac{2(\nu+1)}{p'}:=\lambda$. By Lemma
\ref{Lem:NormaFunc}, $\|\psi_j\|_{L^{p'}((0,1),d\mu_{\nu})}\ge C j^{\lambda}$. Then, we have that the mapping $f\mapsto a_j(f)$, where $a_j(f)$ was given in \eqref{SeriesCoeficientes}, is a bounded linear functional on $L^{p}((0,1),d\mu_{\nu})$ with norm bounded below by a constant multiple of $j^\lambda$. By the uniform boundedness principle, for $p$ conjugate to $p'$ and each $0\le \varepsilon<\lambda$, there is a function $f_0\in L^p((0,1), d\mu_{\nu})$ so that $a_j(f_0)j^{-\varepsilon}\rightarrow \infty$ as $j\rightarrow \infty$.
Suppose now that $\mathcal{B}_R^\delta(f_0,x)$ converges. Then, by Egoroff's theorem, it converges on a subset $E$ of positive measure in $(0,1)$ and, clearly, we may assume that $E\subset (\eta, 1)$ for some fixed $\eta>0$. For each $x\in E$, we can consider $j$ such that $s_j x\ge 1$ and, by \eqref{infty}, \begin{align*}
|a_j(f_0)\psi_j(x)|&=\big|a_j(f_0)
\Big(\frac{\sqrt2}{|J_{\nu+1}(s_j)|}
x^{-\nu}J_{\nu}(s_jx)\\
&-\frac{\sqrt2}{|J_{\nu+1}(s_j)|}x^{-\nu}
\Big(\frac{2}{\pi s_jx}\Big)^{1/2}
\cos(s_jx+D_{\nu})\Big)\\
&+a_j(f_0) \frac{\sqrt2}{|J_{\nu+1}(s_j)|}x^{-\nu}
\Big(\frac{2}{\pi s_jx}\Big)^{1/2}
\cos(s_jx+D_{\nu})\big|\\
&=Cs_j^{-1/2}\frac{\sqrt 2}{|J_{\nu+1}(s_j)|}|a_j(f_0)x^{-\nu-1/2}
\big(O((s_jx)^{-1})+\cos(s_jx+D_{\nu})\big)|\\
&\simeq |a_j(f_0)x^{-\nu-1/2}(\cos(s_jx+D_{\nu})+O((s_jx)^{-1}))|. \end{align*} By \eqref{ConsecuenciaLemaHardyRiesz} on this set $E$, \[
|a_j(f_0)x^{-\nu-1/2}(\cos(s_jx+D_{\nu})+O(j^{-1}))|\le A_{\delta}j^{\delta}\sup_{0<t\le s_{j+1}}|\mathcal{B}_{t}^{\delta}(f_0,x)|\le K_Ej^{\delta}, \] uniformly in $x\in E$. We also used \eqref{eq:zerosCons} in the last step. The inequality above is equivalent to \[
|a_j(f_0)(\cos(s_jx+D_{\nu})+O(j^{-1}))|\le K_E x^{\nu+1/2}j^{\delta}\le K_{E}j^{\delta}. \] Therefore, \begin{equation} \label{ec:boundFj}
|a_j(f_0)j^{-\delta}(\cos(s_jx+D_{\nu})+O(j^{-1}))|\le K_E. \end{equation} Now, taking the functions \[ F_j(x)=a_j(f_0)j^{-\delta}(\cos(s_jx+D_{\nu})+O(j^{-1})), \qquad x\in E, \] and using an argument based on the Cantor-Lebesgue and Riemann-Lebesgue theorems, see \cite[Section 1.5]{Meaney} and \cite[Section IX.1]{Zyg}, we obtain that \[
\int_E |F_j(x)|^2\, dx\ge C |a_j(f_0)j^{-\delta}|^2|E|, \]
where, as usual, $|E|$ denotes the Lebesgue measure of the set $E$. On the other hand, by \eqref{ec:boundFj}, \[
\int_E |F_j(x)|^2\, dx\le K_E^2 |E|. \] Then, from the previous estimates, it follows that
$|a_j(f_0)j^{-\delta}|\le C$, which contradicts \eqref{coeficienteInfinito}.
\section{Bochner-Riesz means for Fourier-Bessel expansions in the Lebesgue measure setting. Proof of Theorem \ref{th:AcFuerteMaxRedonda}}\label{sec:proofAcFuerte}
For our convenience, we are going to introduce a new orthonormal system. We will take the functions \[
\phi_j(x)=\frac{\sqrt{2x}J_{\nu}(s_jx)}{|J_{\nu+1}(s_j)|},\quad j=1,2,\dots. \] These functions are a slight modification of the functions \eqref{eq:FBesselSystemI}; in fact, \begin{equation} \label{eq:Relation} \phi_j(x)=x^{\nu+1/2}\psi_j(x). \end{equation} The system $\{\phi_j(x)\}_{j\ge1}$ is a complete orthonormal basis of $L^2((0,1),dx)$.
In this case, the corresponding Fourier-Bessel expansion of a function $f$ is \[ f\sim\sum_{j=1}^{\infty}b_j(f) \phi_j(x), \qquad \text{with} \qquad b_j(f)=\left(\int_0^1 f(y)\phi_j(y)\, dy\right) \] provided the integral exists, and for $\delta>0$ the Bochner-Riesz means of this expansion are \[ B_R^{\delta}(f,x)=\sum_{j\ge 1} \left(1-\frac{s_j^2}{R^2}\right)_+^{\delta}b_j(f)\phi_j(x), \] where $R>0$ and $(1-s^2)_+=\max\{1-s^2,0\}$. It follows that \[ B_R^{\delta}(f,x)=\int_0^1 f(y)K_R^\delta(x,y)\, dy \] where \begin{equation} \label{ec:kern} K_R^\delta(x,y)=\sum_{j\ge 1}\left(1-\frac{s_j^2}{R^2}\right)_+^{\delta}\phi_j(x)\phi_j(y). \end{equation}
Our next target is the proof of Theorem \ref{th:AcFuerteMaxRedonda}. Taking into account that \[ \mathcal{B}_R^\delta f(x)=\int_0^1 f(y)\mathcal{K}_R^\delta(x,y) \, d\mu_\nu(y), \] where \[ \mathcal{K}_R^\delta(x,y)=\sum_{j\ge 1}\left(1-\frac{s_j^2}{R^2}\right)_+^{\delta}\psi_j(x)\psi_j(y), \] it is clear, from \eqref{eq:Relation}, that $\mathcal{K}_R^\delta(x,y)=(xy)^{-(\nu+1/2)}K_R^{\delta}(x,y)$. Then, it is verified that the inequality \[
\|x^b\mathcal{B}^{\delta}(f,x)\|_{L^p((0,1),d\mu_{\nu})}\le C\|x^Bf(x)\|_{L^p((0,1),d\mu_{\nu})} \] is equivalent to \begin{equation*}
\|x^{b+(\nu+1/2)(2/p-1)}B^{\delta}(f,x)\|_{L^p((0,1),dx)}\le C\|x^{B+(\nu+1/2)(2/p-1)}f(x)\|_{L^p((0,1),dx)}, \end{equation*} that is, we can focus on the study of a weighted inequality for the operator $B_R^{\delta}(f,x)$. The first results about convergence of this operator can be found in \cite{Ci-Ro}.
We are going to prove an inequality of the form \begin{equation*}
\|x^{a}B^{\delta}(f,x)\|_{L^p((0,1),dx)}\le C
\|x^{A}f(x)\|_{L^p((0,1),dx)} \end{equation*} for $\delta>0$, $1< p\leq \infty$, under certain conditions for $a, A,\nu$ and $\delta$. Besides, a weighted weak type result for
$\sup_{R>0}|B_R^{\delta}(f,x)|$ will be proved for $p=1$. The abovementioned conditions are the following. Let $\nu>-1$, $\delta>0$ and $1\leq p\leq \infty$; parameters $(a,A,\nu,\delta)$ will be said to satisfy the $c_p$ conditions provided \begin{align}
a & > -1/p-(\nu+1/2) \,\,\, (\ge \text{ if } p=\infty), \label{ec:con1}\\
A & < 1-1/p+(\nu+1/2)\,\,\, (\le \text{ if } p=1),\label{ec:con2}\\
a &> -\delta-1/p\,\,\, (\ge \text{ if }p=\infty),\label{ec:con3}\\
A &\le 1+\delta-1/p, \label{ec:con4}\\
A &\le a\label{ec:con5} \end{align} and in at least one of each of the following pairs the inequality is strict: \eqref{ec:con2} and \eqref{ec:con5}, \eqref{ec:con3} and \eqref{ec:con5}, and \eqref{ec:con4} and \eqref{ec:con5} except for $p=\infty$.
The main results in this section are the following: \begin{Theo} \label{th:main1} Let $\nu>-1$, $\delta>0$ and $1< p\le \infty$. If $(a, A, \nu, \delta)$ satisfy the $c_p$ conditions, then \[
\|x^{a}B^{\delta}(f,x)\|_{L^p((0,1),dx)}\le C
\|x^{A}f(x)\|_{L^p((0,1),dx)}, \] with $C$ independent of $f$. \end{Theo}
\begin{Theo} \label{th:main2} Let $\nu>-1$ and $\delta>0$. If $(a, A, \nu, \delta)$ satisfy the $c_1$ conditions and \[ E_{\lambda}=\left\{x\in (0,1)\colon x^{a}
\sup_{R>0}\left(|B_R^{\delta}(f,x)|\right)>\lambda \right\}, \]
then \[
|E_{\lambda}|\leq C \frac{\|x^{A}
f(x)\|_{L^1((0,1),dx)}}{\lambda}, \] with $C$ independent of $f$ and $\lambda$. \end{Theo}
Note that, taking $a=b+(\nu+1/2)(2/p-1)$ and $A=B+(\nu+1/2)(2/p-1)$, Theorem \ref{th:AcFuerteMaxRedonda} follows from Theorem \ref{th:main1}.
The proofs of Theorem \ref{th:main1} and Theorem \ref{th:main2} will be achieved by decomposing the square $(0,1)\times (0,1)$ into five regions and obtaining the estimates therein. The regions will be: \begin{align} \label{regions} \notag A_1&=\{(x,y):0 < x, y\leq 4/R\},\\
\notag A_2&=\{(x,y):4/R<\max\{x,y\}<1,\, |x-y|\le 2/R \},\\
A_3&=\{(x,y): 4/R \leq x < 1,\, 0 < y\leq x/2\},\\ \notag A_4&=\{(x,y):0 < x \leq y/2,\, 4/R \leq y< 1\}, \\ \notag A_5&=\{(x,y): 4/R < x < 1, \, x/2 < y< x- 2/R\}\\ \notag &\kern25pt\cup \{(x,y): y/2 < x \leq y-2/R,\, 4/R \leq y<1\}. \end{align}
Theorem~\ref{th:main1} and Theorem~\ref{th:main2} will follow by showing that, if $1\leq p\leq \infty$, then \begin{equation}
\label{eq:des_1} \left\|\sup_{R> 0}\int_0^1 y^{-A}x^a|K_R^\delta(x,y)||f(y)|\chi_{A_j}\,dy\right\|_{L^p((0,1),dx)}
\leq C\|f(x)\|_{L^p((0,1),dx)} \end{equation} holds for $j=1,3,4$ and that \begin{equation}
\label{eq:des_2} \int_0^1 y^{-A}x^a|K_R^\delta(x,y)||f(y)|\chi_{A_j}\,dy \leq C M (f,x), \end{equation} for $j=2,5$, where $M$ is the Hardy-Littlewood maximal function of $f$, and $C$ is independent of $R, x$ and $f$. These results and the fact that $M$ is $(1,1)$-weak and $(p,p)$-strong if $1<p\leq \infty$ complete the proofs.
To get \eqref{eq:des_1} and \eqref{eq:des_2} we will use a very precise pointwise estimate for the kernel $K_R^\delta(x,y)$, obtained in \cite{Ci-Ro}; there, it was shown that \begin{equation} \label{ec:kernel}
|K_R^\delta(x,y)|\le C \begin{cases} (xy)^{\nu+1/2}R^{2(\nu+1)}, & (x,y) \in A_1,\\ R, & (x,y) \in A_2\\
\frac{\Phi_\nu(Rx)\Phi_{\nu}(Ry)}{R^{\delta}|x-y|^{\delta+1}}, & (x,y) \in A_3\cup A_4 \cup A_5, \end{cases} \end{equation} with \begin{equation} \label{ec:aux} \Phi_\nu(t)=\begin{cases}t^{\nu+1/2}, & \text{ if $0<t<2$},\\ 1,& \text{ if $t\ge 2$}.\end{cases} \end{equation}
The proof of \eqref{eq:des_2} follows from the given estimate for the kernel $K_R^\delta(x,y)$ together with the bound $y^{-A}x^a\le C$ in $A_2\cup A_5$, which holds because $x\simeq y$ there and $A\le a$. In the case of $A_2$, from
$|K_R^\delta(x,y)|\le C R$ we deduce easily the required inequality. For $A_5$ the result is a consequence of
$\Phi_{\nu}(Rx)\Phi_\nu(Ry)\le C$ and of a decomposition of the region in strips such that $R|x-y|\simeq 2^{k}$, with $k=0,\dots, [\log_2 R]-1$; this can be seen in \cite[p.~109]{Ci-Ro}.
In this manner, to complete the proofs of Theorem~\ref{th:main1} and Theorem~\ref{th:main2} we only have to show \eqref{eq:des_1} for $j=1,3,4$ in the conditions $c_p$ for $1\le p \le \infty$, and this is the content of Corollary~\ref{cor:corolario2} in Subsection~\ref{subsec:reg1}. In its turn, Corollary~\ref{cor:corolario2} follows from Lemmas~\ref{lem:lema7} and~\ref{lem:lema8} in the same subsection. Previously, Subsection \ref{subsec:lemmas} contains some technical lemmas that will be used in the proofs of Lemmas~\ref{lem:lema7} and~\ref{lem:lema8}.
\subsection{Technical Lemmas} \label{subsec:lemmas}
To prove \eqref{eq:des_1} for $j=1,3,4$ we will use an interpolation argument based on six lemmas. These are stated below. They are small modifications of the six lemmas contained in Section 3 of \cite{Mu-We}, where a sketch of their proofs can be found.
\begin{Lem} \label{lem:lema1} Let $\xi_0>0$. If $r<-1$, $r+t\leq-1$ and $r+s+t\leq-1$, then for $p=1$ \[
\left\|x^r\chi_{[1,\infty)}(x) \sup_{\xi_0\leq\xi\leq x}\xi^s
\int_\xi^x y^t|f(y)|\,dy\right\|_{L^p((0,\infty),dx)}\leq C\|f(x)\|_{L^p((0,\infty),dx)} \] with $C$ independent of $f$. If $r\leq0$, $r+t\leq-1$ and $r+s+t\leq-1$ with equality holding in at most one of the first two inequalities, then this holds for $p=\infty$. \end{Lem}
\begin{Lem} \label{lem:lema2} Let $\xi_0>0$. If $t\leq0$, $r+t\leq-1$ and $r+s+t\leq-1$, with strict inequality in the last two in case of equality in the first, then for $p=1$ \[
\left\|x^r\chi_{[1,\infty)}(x) \sup_{\xi_0\leq\xi\leq x}\xi^s
\int_x^\infty y^t|f(y)|\,dy\right\|_{L^p((0,\infty),dx)}\leq C\|f(x)\|_{L^p((0,\infty),dx)} \] with $C$ independent of $f$. If $t<-1$, $r+t\leq-1$ and $r+s+t\leq-1$, then this holds for $p=\infty$. \end{Lem}
\begin{Lem} \label{lem:lema3} If $s<0$, $s+t\leq0$ and $r+s+t\leq-1$, with equality holding in at most one of the last two inequalities, then for $p=1$ \[
\left\|x^r\chi_{[1,\infty)}(x) \sup_{\xi\geq x}\xi^s \int_x^\xi y^t|f(y)|\,dy\right\|_{L^p((0,\infty),dx)}\leq C\|f(x)\|_{L^p((0,\infty),dx)} \] with $C$ independent of $f$. If $s<0$, $s+t\leq-1$ and $r+s+t\leq-1$ this holds for $p=\infty$. \end{Lem}
\begin{Lem} \label{lem:lema4} If $t\leq0$, $s+t\leq0$ and $r+s+t\leq-1$, with strict inequality holding in the first two in case the third is an equality, then for $p=1$ \[
\left\|x^r\chi_{[1,\infty)}(x) \sup_{\xi\geq x}\xi^s
\int_\xi^\infty y^t|f(y)|\,dy\right\|_{L^p((0,\infty),dx)}\leq C\|f(x)\|_{L^p((0,\infty),dx)} \] with $C$ independent of $f$. If $t<-1$, $s+t\leq-1$ and $r+s+t\leq-1$ then this holds for $p=\infty$. \end{Lem}
\begin{Lem} \label{lem:lema5} If $s<0$, $r+s<-1$ and $r+s+t\leq-1$, then for $p=1$ \[
\left\|x^r\chi_{[1,\infty)}(x) \sup_{\xi\geq x}\xi^s \int_1^x y^t|f(y)|\,dy\right\|_{L^p((0,\infty),dx)}\leq C\|f(x)\|_{L^p((0,\infty),dx)} \] with $C$ independent of $f$. If $s<0$, $r+s\leq 0$ and $r+s+t\leq-1$, with equality holding in at most one of the last two inequalities, this holds for $p=\infty$. \end{Lem}
\begin{Lem} \label{lem:lema6} If $r<-1$, $r+s<-1$ and $r+s+t\leq-1$, then for $p=1$ \[
\left\|x^r\chi_{[1,\infty)}(x) \sup_{1\leq\xi\leq x}\xi^s
\int_1^{\xi} y^t|f(y)|\,dy\right\|_{L^p((0,\infty),dx)}\leq C\|f(x)\|_{L^p((0,\infty),dx)} \] with $C$ independent of $f$. If $r\leq0$, $r+s\leq0$ and $r+s+t\leq-1$, with equality in at most one of the last two inequalities, this holds for $p=\infty$. \end{Lem}
\subsection{Proofs of Theorem~\ref{th:main1} and Theorem~\ref{th:main2} for regions $A_1$, $A_3$ and $A_4$}
\label{subsec:reg1}
This section contains the proofs of the inequality \eqref{eq:des_1} for regions $A_1$, $A_3$ and $A_4$. The results we will prove are included in the following \begin{Lem} \label{lem:lema7} If $\nu>-1$, $\delta>0$, $R>0$, $j=1, 3, 4$ and $(a, A, \nu, \delta)$ satisfy the $c_1$ conditions, then \eqref{eq:des_1} holds for $p=1$ with $C$ independent of $f$. \end{Lem}
\begin{Lem} \label{lem:lema8} If $\nu>-1$, $\delta>0$, $R>0$, $j=1, 3, 4$ and $(a, A, \nu, \delta)$ satisfy the $c_{\infty}$ conditions, then \eqref{eq:des_1} holds for $p=\infty$ with $C$ independent of $f$. \end{Lem}
\begin{Cor} \label{cor:corolario2} If $1\leq p\leq\infty$, $\nu>-1$, $\delta>0$, $R>0$, $(a, A, \nu, \delta)$ satisfy the $c_p$ conditions and $j=1,3,4$, then \eqref{eq:des_1} holds with $C$ independent of $f$. \end{Cor}
\textbf{Proof of Corollary~\ref{cor:corolario2}}. It is enough to observe that if $1< p<\infty$ and $(a, A, \nu, \delta)$ satisfy the $c_p$ conditions, then $(a-1+1/p, A-1+1/p, \nu, \delta)$ satisfy the $c_1$ conditions. So, by Lemma~\ref{lem:lema7} \begin{multline*}
\left\|\sup_{R\geq 0}\int_0^1 y^{-A+1-1/p}x^{a-1+1/p}|K_R^{\delta}(x,y)|\chi_{A_j}(x,y)|f(y)|\, dy \right\|_{L^1((0,1),dx)}\\\leq C\|f(x)\|_{L^1((0,1),dx)}, \end{multline*} and this is equivalent to \[ \int_0^1 x^{a+1/p}\left(\sup_{R\geq 0}\int_0^1
|K_R^{\delta}(x,y)|\chi_{A_j}(x,y)|f(y)|\, dy\right)
\frac{dx}{x}\leq C\int_0^1 x^{A+1/p}|f(x)|\frac{dx}{x}, \] where $j=1,3,4$. Similarly, if $(a,A,\nu,\delta)$ verify the $c_p$ conditions, then $(a+1/p, A+1/p, \nu, \delta )$ satisfy the $c_{\infty}$ conditions. Hence, by Lemma~\ref{lem:lema8} \begin{multline*}
\left\|x^{a+1/p}\sup_{R\geq 0}\int_0^1
|K_R^{\delta}(x,y)|\chi_{A_j}(x,y)|f(y)|\, dy\right\|_{L^{\infty}((0,1),dx)}
\\\leq C\|x^{A+1/p}f(x)\|_{L^{\infty}((0,1),dx)}. \end{multline*} Now, we can use the Marcinkiewicz interpolation theorem to obtain the inequality \begin{multline*}
\int_0^1 \left(x^{a+1/p} \left(\sup_{R\geq 0}\int_0^1
|K_R^{\delta}(x,y)|\chi_{A_j}(x,y)|f(y)|\, dy\right)\right)^p \frac{dx}{x}\\
\leq C\int_0^1 \left(x^{A+1/p}|f(x)|\right)^p\frac{dx}{x}, \end{multline*} for $1<p<\infty$ and the proof is finished.
Finally, we will prove Lemmas~\ref{lem:lema7} and~\ref{lem:lema8} for $A_j$, $j=1, 3$ and $4$, separately.
\textbf{Proof of Lemma~\ref{lem:lema7} and Lemma~\ref{lem:lema8} for $A_1$}. First of all, we have to note that $B_R^\delta (f,x)=0$ when $0<R<s_1$, where $s_1$ denotes the first positive zero of $J_\nu$. Using the estimate~\eqref{ec:kernel}, the left side of \eqref{eq:des_1} in this case is bounded by \[
C\left\|x^{a+\nu+1/2}\chi_{[0,1]}(x)\sup_{s_1<R\leq 4/x}R^{2(\nu+1)} \int_0^{4/R}y^{-A+\nu+1/2}|f(y)|\, dy\right\|_{L^p((0,1),dx)}. \] Making the change of variables $x=4/u$ and $y=4/v$, we have \[
C\left\|u^{-a-\nu-\frac12-\frac2p}\chi_{[4,\infty)}(u)
\sup_{s_1\leq R \leq u}R^{2(\nu+1)}\int_R^\infty v^{A-(\nu+\frac12)-2+\frac2p}g(v)\,dv\right\|_{L^p((0,\infty),du)}, \]
where $\|\cdot\|_{L^p((0,\infty),du)}$ denotes the $L^p$ norm in the variable $u$, and \[
g(v)=v^{-2/p}|f(4v^{-1})|. \] Note that the function $g(v)$ is supported in $(1,\infty)$ and
$\|g\|_{L^p((0,\infty),du)}=\|f\|_{L^p((0,1),dx)}$. The function $g$ will be used throughout the subsection, although at some points the value $4$ may be replaced by a different constant without further comment. Now, splitting the inner integral at $u$, we obtain the sum of \begin{equation} \label{ec:pa11}
C\left\|u^{-a-\nu-\frac12-\frac2p}\chi_{[4,\infty)}(u)
\sup_{s_1\leq R \leq u}R^{2(\nu+1)}\int_R^u v^{A-(\nu+\frac12)-2+\frac2p}g(v)\,dv\right\|_{L^p((0,\infty),du)} \end{equation} and \begin{equation} \label{ec:pa12}
C\left\|u^{-a-\nu-\frac12-\frac2p}\chi_{[4,\infty)}(u)
\sup_{s_1\leq R \leq u}R^{2(\nu+1)}\int_u^\infty v^{A-(\nu+\frac12)-2+\frac2p}g(v)\,dv\right\|_{L^p((0,\infty),du)}. \end{equation} From Lemma~\ref{lem:lema1} we get the required estimate for \eqref{ec:pa11}, using conditions \eqref{ec:con1} and \eqref{ec:con5}; Lemma~\ref{lem:lema2} is applied to inequality \eqref{ec:pa12}, there we need conditions \eqref{ec:con2} and \eqref{ec:con5} and the restriction on them. This completes the proof of Lemmas~\ref{lem:lema7} and~\ref{lem:lema8} for $j=1$.
\textbf{Proof of Lemma~\ref{lem:lema7} and Lemma~\ref{lem:lema8} for $A_3$}. Clearly, the left side of \eqref{eq:des_1} is bounded by \[
C\left\|x^a \chi_{[4/R,1]}(x)\sup_{4/x\leq R}\int_0^{x/2}y^{-A}
|K_R^{\delta}(x,y)||f(y)|\, dy\right\|_{L^p((0,1),dx)}. \] Splitting the inner integral at $2/R$, using the bound for the kernel given in \eqref{ec:kernel} and the definition of $\Phi_\nu$, we have this expression majorized by the sum of \begin{equation}
\label{ec:pa21} \left\|x^a \chi_{[0,1]}(x)\sup_{4/x\leq R}\int_0^{2/R}|f(y)|\frac{(Ry)^{\nu+1/2}y^{-A}}{R^{\delta}|x-y|^{\delta+1}}\, dy\right\|_{L^p((0,1),dx)} \end{equation} and \begin{equation}
\label{ec:pa22} \left\|x^a \chi_{[0,1]}(x)\sup_{4/x\leq R}\int_{2/R}^{x/2}\frac{|f(y)|y^{-A}}{R^{\delta}|x-y|^{\delta+1}}\, dy
\right\|_{L^p((0,1),dx)}. \end{equation}
For \eqref{ec:pa21}, taking into account that $|x-y|\simeq x$ in $A_3$, the changes of variables $x=4/u$, $y=2/v$ give us \[
\left\|u^{-a+(\delta+1)-\frac 2p}\chi_{[4,\infty)}(u)\sup_{u\leq R} R^{-\delta+(\nu+1/2)}\int_R^{\infty}v^{-(\nu+1/2)+A+\frac 2p-2}g(v)\, dv\right\|_{L^p((0,\infty),du)}. \] Lemma~\ref{lem:lema4} can be used here. The required conditions for $p=1$ are \eqref{ec:con2}, \eqref{ec:con4} and \eqref{ec:con5} with the restriction in the pairs therein. For $p=\infty$ the same inequalities are needed.
On the other hand, in \eqref{ec:pa22}, using again that $|x-y|\simeq x$, by the change of variables $x=4/u$ and $y=2/v$ we have \begin{multline*}
C\left\|u^{-a+(\delta+1)-\frac
2p}\chi_{[4,\infty)}(u)\sup_{u\leq R}R^{-\delta}\int_{2u}^R
v^{A+\frac 2p-2}g(v)\, dv\right\|_{L^p((0,\infty),du)}\\
\leq C\left\|u^{-a+(\delta+1)-\frac
2p}\chi_{[4,\infty)}(u)\sup_{u\leq R}R^{-\delta}\int_u^R
v^{A+\frac 2p-2}g(v)\, dv\right\|_{L^p((0,\infty),du)}. \end{multline*} Lemma~\ref{lem:lema3} can then be applied. For $p=1$, we need $\delta>0$, which is a hypothesis, and \eqref{ec:con4} and \eqref{ec:con5} with their corresponding restriction. For $p=\infty$ the inequalities are the same, with the requirement that \eqref{ec:con4} is strict. This completes the proof of Lemmas~\ref{lem:lema7} and~\ref{lem:lema8} for $j=3$.
\textbf{Proof of Lemma~\ref{lem:lema7} and Lemma~\ref{lem:lema8} for $A_4$}. In this case, the left hand side of \eqref{eq:des_1} is estimated by \[
C\left\|x^a \chi_{[0,1/2]}(x)\sup_{R>4}\int_{\max(4/R,2x)}^1 y^{-A}|K_R^{\delta}(x,y)||f(y)|\, dy\right\|_{L^p((0,1),dx)}. \] To majorize this, we decompose the $R$-range in two regions: $4<R\leq 2/x$ and $R\geq 2/x$. In this manner, with the bound for the kernel given in \eqref{ec:kernel} and the definition of $\Phi_\nu$, the previous norm is controlled by the sum of \[
C\left\|x^a \chi_{[0,1/2]}(x)\sup_{4<R\leq 2/x}
\int_{4/R}^1 |f(y)|\frac{(Rx)^{\nu+1/2}y^{-A}}{R^{\delta}|x-y|^{\delta+1}}\, dy
\right\|_{L^p((0,1),dx)} \] and \[
C\left\|x^a \chi_{[0,1/2]}(x)\sup_{R\geq 2/x}\int_{2x}^1
\frac{|f(y)|y^{-A}}{R^{\delta}|x-y|^{\delta+1}}\, dy\right\|_{L^p((0,1),dx)}. \]
Next, using that $|x-y|\simeq y$ in $A_4$, with the changes of variables $x=2/u$ and $y=1/v$ the previous norms are controlled by \begin{equation}
\label{ec:pa31} C\left\|u^{-a-\frac 2p-(\nu+\frac 12)}\chi_{[4,\infty)}(u)\sup_{4<R\leq u}R^{-\delta+(\nu +\frac 12)} \int_1^{R/4}v^{A+\frac 2p
-2+(\delta+1)}g(v)\,dv\right\|_{L^p((0,\infty),du)} \end{equation} and \begin{equation}
\label{ec:pa32} C\left\|u^{-a-\frac 2p}\chi_{[4,\infty)}(u)\sup_{R\geq u}R^{-\delta}\int_1^{u/4}
v^{A+\frac 2p -2+(\delta+1)}g(v)\,dv\right\|_{L^p((0,\infty),du)}. \end{equation} In \eqref{ec:pa31}, we use Lemma~\ref{lem:lema6}; for $p=1$, conditions \eqref{ec:con1}, \eqref{ec:con3} and \eqref{ec:con5} are needed; we need the same for $p=\infty$. For \eqref{ec:pa32}, Lemma~\ref{lem:lema5} requires the hypothesis $\delta>0$ and conditions \eqref{ec:con3} and \eqref{ec:con5} for $p=1$ and the same for $p=\infty$ with the restrictions in the pairs therein. This proves Lemmas~\ref{lem:lema7} and~\ref{lem:lema8} for $j=4$.
\section{Proof of Theorem \ref{th:AcDebilMaxRedonda}} \label{sec:ProofThAcDebilMaxRedonda}
Now we shall prove Theorem \ref{th:AcDebilMaxRedonda}. First note that, by \eqref{eq:Relation}, we can write \begin{equation*} \mathcal{B}_R^{\delta}(f,x) =\int_0^1f(y)\left(\frac{y}{x}\right)^{\nu+1/2}K_R^{\delta}(x,y)\,dy, \end{equation*} where $K_R^\delta$ is the kernel in \eqref{ec:kern}. By taking $g(y)=f(y)y^{\nu+1/2}$, to prove the result it is enough to check that \[
\int_{E}\,d\mu_\nu(x)\le \frac{C}{\lambda^{p}}
\int_0^1|g(x)|^{p}x^{(\nu+1/2)(2-p)}\,dx, \] where $E=\left\{x\in(0,1):
\sup_{R>0}x^{-(\nu+1/2)}\int_0^1|g(y)||K_R^{\delta}(x,y)|\,dy>\lambda\right\}$ and $p=p_0(\delta)$. We decompose $E$ into four regions, such that $E=\bigcup_{i=1}^{4}J_i$, where \begin{equation*}
J_i=\left\{x\in(0,1): \sup_{R>0}x^{-(\nu+1/2)}\int_0^{1}|g(y)|
\chi_{B_i}(x,y)|K_R^{\delta}(x,y)|\,dy>\lambda\right\} \end{equation*} for $i=1,\dots,4$, with $B_1=A_1$, $B_2=A_2\cup A_5$, $B_3=A_3$, and $B_4=A_4$ where the sets $A_i$ were defined in \eqref{regions}. Note also that $\int_{E}\,d\mu_\nu(x)\le\sum_{i=1}^4\int_{J_i}\,d\mu_\nu(x)$, then we need to prove that \begin{equation} \label{ec:boundweak}
\int_{J_i}\,d\mu_\nu(x)\le \frac{C}{\lambda^{p}}
\int_0^1|g(x)|^{p}x^{(\nu+1/2)(2-p)}\,dx, \end{equation} for $i=1,\dots,4$ and $p=p_0(\delta)$. At some points along the proof we will use the notation \begin{equation} \label{eq:integral}
I_p:=\int_0^{1}|g(y)|^{p}y^{(\nu+1/2)(2-p)}\,dy. \end{equation}
In $J_1$, by applying \eqref{ec:kernel} and H\"older inequality with $p=p_0$, we have \begin{multline*}
x^{-(\nu+1/2)}\int_0^{1}|g(y)|\chi_{B_1}(x,y)
|K_R^{\delta}(x,y)|\,dy\\
\begin{aligned}
&\le Cx^{-(\nu+1/2)}\int_0^{4/R}
|g(y)|(xy)^{\nu+1/2}R^{2(\nu+1)}\,dy\\
&\le C R^{2(\nu+1)}\left(\int_0^{4/R}
|g(y)|^{p_0}y^{(\nu+1/2)(2-p_0)}\,dy\right)^{1/p_0}
\left(\int_0^{4/R}y^{(2\nu+1)}\,dy\right)^{1/p'_0}\\
&=C R^{\frac{2(\nu+1)}{p_0}}
\left(\int_0^{4/R}|g(y)|^{p_0}y^{(\nu+1/2)(2-p_0)}\,dy\right)^{1/p_0}
\le C R^{\frac{2(\nu+1)}{p_0}}I_{p_0}^{1/p_0}.
\end{aligned} \end{multline*} Therefore, \begin{align*}
\sup_{R>0}x^{-(\nu+1/2)}
\int_0^{1}|g(y)|\chi_{B_1}(x,y)|K_R^{\delta}(x,y)|\,dy
&\le C\sup_{R>0}\chi_{[0,4/R]}(x)R^{\frac{2(\nu+1)}{p_0}}
I_{p_0}^{1/p_0}\\&\le C
x^{-\frac{2(\nu+1)}{p_0}}I_{p_0}^{1/p_0}. \end{align*} In the case $p=1$, it is clear that \[
x^{-(\nu+1/2)}\int_0^{1}|g(y)|\chi_{B_1}(x,y)|K_R^{\delta}(x,y)|\,dy\le C R^{2(\nu+1)}I_1 \] and \[ \sup_{R>0}x^{-(\nu+1/2)}
\int_0^{1}|g(y)|\chi_{B_1}(x,y)|K_R^{\delta}(x,y)|\,dy\le C x^{-2(\nu+1)}I_1. \] Hence, for $p=p_0(\delta)$, \[
J_1\subseteq \{x\in(0,1): C x^{-\frac{2(\nu+1)}{p}}I_{p}^{1/p} >\lambda\}, \] and this gives \eqref{ec:boundweak} for $i=1$.
In $J_3$, note first that \begin{multline*}
\sup_{R>0}x^{-(\nu+1/2)}
\int_0^{1}|g(y)|\chi_{B_3}(x,y)|K_R^{\delta}(x,y)|\,dy\\
=\sup_{R>0}x^{-(\nu+1/2)}\chi_{[4/R,1]}(x)
\left(\int_0^{2/R}|g(y)||K_R^{\delta}(x,y)|\,dy+
\int_{2/R}^{x/2}|g(y)||K_R^{\delta}(x,y)|\,dy\right)\\:=R_1+R_2. \end{multline*} For $R_1$, using \eqref{ec:kernel}, the inequality $x/2<x-y$, which holds in $B_3$, and H\"older inequality with $p=p_0$, \begin{align*}
R_1
&\le \sup_{R>0}x^{-(\nu+3/2+\delta)}\chi_{[4/R,1]}(x)
\int_0^{2/R}R^{\nu+1/2-\delta}y^{\nu+1/2}|g(y)|\,dy\\
&\le\sup_{R>0}x^{-(\nu+3/2+\delta)}\chi_{[4/R,1]}(x)
R^{\nu+1/2-\delta}R^{-\frac{2(\nu+1)}{p'_0}}I_{p_0}^{1/p_0}
\le C x^{-\frac{2(\nu+1)}{p_0}}I_{p_0}^{1/p_0}, \end{align*} where $I_{p_0}$ is the same as in \eqref{eq:integral}. In the case $p=1$, the estimate $R_1\le C x^{-2(\nu+1)}I_{1}$ can be obtained easily.
On the other hand, for $R_2$, by using \eqref{ec:kernel} and H\"older inequality with $p=p_0$ again, \begin{align*}
R_2
&\le \sup_{R>0}x^{-(\nu+3/2+\delta)}
\chi_{[4/R,1]}(x)I_{p_0}^{1/p_0}R^{-\delta}
\left(\int_{2/R}^{x/2}y^{-(\nu+1/2)\frac{(2-p_0)p'_0}{p_0}}\,dy\right)^{1/p'_0}\\
&\le \sup_{R>0}x^{-(\nu+3/2+\delta)}
\chi_{[4/R,1]}(x)I_{p_0}^{1/p_0}R^{-\delta}
\left(\int_{2/R}^{x/2}y^{(\nu+1/2)\frac{2-p_0}{1-p_0}}\,dy\right)^{1/p'_0}. \end{align*} Using that $(\nu+1/2)\frac{2-p_0}{1-p_0}<-1$ and $4/R<x<1$, we have that \begin{align*}
R^{-\delta}\left(\int_{2/R}^{x/2}
y^{(\nu+1/2)\frac{2-p_0}{1-p_0}}\,dy\right)^{1/p'_0}
\le C\left(R^{-(\nu+1/2)\frac{2-p_0}{1-p_0}-1}\right)^{1/p'_0}
R^{-\delta}= C \end{align*} and the last inequality is true because the exponent of $R$ is zero. Then \[ R_2\le C x^{\frac{-2(\nu+1)}{p_0}}I_{p_0}^{1/p_0}. \] In the case $p=1$, applying H\"older's inequality, we obtain \begin{equation*}
R_2\le \sup_{R>0}x^{-(\nu+3/2+\delta)}\chi_{[4/R,1]}(x)I_1
\,R^{-\delta}\sup_{y\in[2/R,x/2]}y^{-(\nu+1/2)}. \end{equation*} Now, if $\nu+1/2>0$ and $\nu+1/2<\delta$, \begin{multline*} \sup_{R>0}\chi_{[4/R,1]}(x)R^{-\delta}\sup_{y\in[2/R,x/2]}y^{-(\nu+1/2)}\\ =C\sup_{R>0}\chi_{[4/R,1]}(x)R^{\nu+1/2-\delta}\le Cx^{-\nu-1/2+\delta}; \end{multline*} and if $\nu+1/2\le0$, \begin{multline*} \sup_{R>0}\chi_{[4/R,1]}(x)R^{-\delta}\sup_{y\in[2/R,x/2]}y^{-(\nu+1/2)}\\ =C\sup_{R>0}\chi_{[4/R,1]}(x)R^{-\delta}x^{-(\nu+1/2)}\le Cx^{-\nu-1/2+\delta}. \end{multline*} In this manner \[ R_2\le C x^{-2(\nu+1)}I_{1}. \] Therefore, collecting the estimates for $R_1$ and $R_2$ for $p=p_0$ and $p=1$, we have shown that \[
J_3\subseteq \{x\in(0,1): C x^{\frac{-2(\nu+1)}{p}}I_{p}^{1/p}
>\lambda\}, \] hence we can deduce \eqref{ec:boundweak} for $i=3$.
For the region $J_4$, we proceed as follows \begin{align*}
\sup_{R>0}x^{-(\nu+1/2)}&
\int_{0}^1|g(y)|\chi_{B_4}(x,y)|K_R^\delta(x,y)|\,dy\\
&\le \sup_{R>0}x^{-(\nu+1/2)}\chi_{[0,2/R]}(x)
\int_{4/R}^1|g(y)||K_R^\delta(x,y)|\,dy\\
&\kern20pt+\sup_{R>0}x^{-(\nu+1/2)}\chi_{[2/R,1]}(x)
\int_{2x}^1|g(y)||K_R^\delta(x,y)|\,dy\\
&\le C\sup_{R>0}x^{-(\nu+1/2)}\chi_{[0,2/R]}(x)(Rx)^{\nu+1/2}
\int_{4/R}^1\frac{|g(y)|}{R^{\delta}|x-y|^{\delta+1}}\,dy\\
&\kern20pt+ C\sup_{R>0}x^{-(\nu+1/2)}\chi_{[2/R,1]}(x)
\int_{2x}^1\frac{|g(y)|}{R^{\delta}|x-y|^{\delta+1}}\,dy:=S_1+S_2. \end{align*} We first deal with $S_1$; using that $y-x>y/2$, we obtain \begin{align*}
S_1\le &C\sup_{R>0}\chi_{[0,2/R]}(x) R^{\nu+1/2-\delta}
\int_{4/R}^1\frac{|g(y)|}{y^{\delta+1}}\,dy\\
&\le C\sup_{R>0}\chi_{[0,2/R]}(x) R^{\nu+1}\int_{4/R}^1\frac{|g(y)|}{\sqrt{y}}\,dy
\le C x^{-(\nu+1)}\int_x^1\frac{|g(y)|}{\sqrt{y}}\,dy. \end{align*} Now for $p=p_0$ or $p=1$, we have that $2\nu+1-p(\nu+1)>-1$ and Hardy's inequality \cite[Lemma 3.14, p. 196]{SteinWeiss} is applied in the following estimate \begin{align*}
\int_0^1|S_1(x)|^{p}x^{2\nu+1}\,dx& \le C
\int_0^1\left(\int_x^1\frac{|g(y)|}{\sqrt{y}}\,dy\right)^{p}
x^{2\nu+1-p(\nu+1)}\,dx\\
&\le C \int_0^1\left|\frac{g(y)}{\sqrt y}\right|^{p}y^{2\nu+1-p\nu}\,dy
=C\int_0^1|g(y)|^{p}y^{(\nu+1/2)(2-p)}\,dy. \end{align*} Concerning $S_2$, observe that $\sup_{R>0}\chi_{[2/R,1]}(x)R^{-\delta}\le Cx^{\delta}$, thus \[
S_2\le C x^{-\nu-1/2+\delta}\int_x^1\frac{|g(y)|}{y^{\delta+1}}\,dy. \] Since for $p=p_0$ or $p=1$ we have that $2\nu+1-p(\nu+1/2-\delta)>-1$, we can use again Hardy's inequality to complete the required estimate. Indeed, \begin{align*}
\int_0^1|S_2(x)|^{p}x^{2\nu+1}\,dx&
\le C\int_0^1\left(\int_x^1\frac{|g(y)|}{y^{\delta+1}}\,dy\right)^{p}
x^{2\nu+1-p(\nu+1/2-\delta)}\,dx\\
&\le C\int_0^1\left|\frac{g(y)}{y^{\delta+1}}\right|^{p}
y^{2\nu+1-p(\nu+1/2-\delta)+p}\,dy\\&
=C\int_0^1|g(y)|^{p}y^{(\nu+1/2)(2-p)}\,dy. \end{align*} With the inequalities for $S_1$ and $S_2$, we can conclude \eqref{ec:boundweak} for $i=4$.
To prove \eqref{ec:boundweak} for $i=2$ we define, for $k$ a nonnegative integer, the intervals \[ I_k=[2^{-k-1},2^{-k}], \qquad N_k=[2^{-k-3},2^{-k+2}] \]
and the function $g_k(y)=|g(y)|\chi_{I_k}(y)$. By using \eqref{ec:kernel} for $x/2<y<2x$, with $x\in (0,1)$, we have the bound \[
|K_R^\delta (x,y)|\le \frac{C}{R^{\delta}(|x-y|+2/R)^{\delta+1}}. \] Then \[ J_{2}\subset \left\{x\in (0,1): \sup_{R>0}\sum_{k=0}^\infty \int_{x/2}^{\min{\{2x,1\}}}
\frac{g_k(y)}{R^{\delta}(|x-y|+2/R)^{\delta+1}}\, dy> C \lambda x^{\nu+1/2}\right\}. \] Since at most three of these integrals are not zero for each $x\in (0,1)$, \begin{align*} J_2&\subset \bigcup_{k=0}^\infty \left\{x\in (0,1): 3\sup_{R>0}\int_{x/2}^{\min{\{2x,1\}}}
\frac{g_k(y)}{R^{\delta}(|x-y|+2/R)^{\delta+1}}\, dy> C \lambda x^{\nu+1/2}\right\}\\ &\subset \bigcup_{k=0}^\infty \left\{x\in N_k : M(g_k,x)> C \lambda x^{\nu+1/2}\right\} \end{align*} where in the last step we have used that \[ \sup_{R>0}\int_{x/2}^{\min{\{2x,1\}}}
\frac{g_k(y)}{R^{\delta}(|x-y|+2/R)^{\delta+1}}\, dy\le C M(g_k,x). \] By using the estimate $x\simeq 2^{-k}$ for $x\in N_k$, we can check easily that \[ J_2\subset \bigcup_{k=0}^\infty \left\{x \in N_k : M(g_k,x)> C \lambda 2^{-k(\nu+1/2)}\right\}. \] Finally, by using again that $x\simeq 2^{-k}$ for $x\in I_k, N_k$ and the weak type norm inequality for the Hardy-Littlewood maximal function we have \begin{align*} \int_{J_2}x^{2\nu+1}\, dx &\le C \sum_{k=0}^\infty 2^{-k(2\nu+1)} \int_{\left\{x\in N_k : M(g_k,x)> C \lambda 2^{-k(\nu+1/2)}\right\}}\, dx\\&\le C \sum_{k=0}^\infty
\frac{2^{pk(\nu+1/2)-k(2\nu+1)}}{\lambda^p}\int_{I_k}|g(y)|^p\, dy\\&\le \frac{C}{\lambda^p}\int_0^1 |g(y)|^p y^{(\nu+1/2)(2-p)}\, dy \end{align*} and the proof is complete.
\section{Proof of Theorem \ref{th:AcDebilRestMaxRedonda}} \label{sec:acdelrest}
To conclude the result we have to prove \eqref{ec:boundweak} with $g(x)=\chi_E(x)$ and $p=p_1$. For $J_1$ and $J_2$ the result follows by using the steps given in the proof of Theorem \ref{th:AcDebilMaxRedonda} for the same intervals. To analyze $J_3$ we proceed as we did for $J_4$ in the proof of Theorem \ref{th:AcDebilMaxRedonda}. In this case we obtain that \begin{multline*} \sup_{R>0}x^{-(\nu+1/2)}\int_0^1
|g(y)|\chi_{B_3}(x,y)|K_R^\delta(x,y)|\,dy\\\le C\left(x^{-(\nu+1)}\int_0^x \frac{|g(y)|}{\sqrt{y}}\, dy+
x^{-(\nu+3/2+\delta)}\int_0^x |g(y)| y^\delta\, dy\right). \end{multline*} Now taking into account that for $p=p_1$ we have $2\nu+1-p(\nu+1)<-1$ and $2\nu+1-p(\nu+3/2+\delta)<-1$ we can apply Hardy's inequalities to obtain that \[
\int_0^1\left(x^{-(\nu+1)}\int_0^x \frac{|g(y)|}{\sqrt{y}}\, dy\right)^{p}x^{2\nu+1}\, dx\le C \int_0^1 |g(y)|^p y^{(\nu+1/2)(2-p)}\, dy \] and \[
\int_0^1\left(x^{-(\nu+3/2+\delta)}\int_0^x |g(y)| y^\delta\, dy\right)^{p}x^{2\nu+1}\, dx\le C \int_0^1 |g(y)|^p y^{(\nu+1/2)(2-p)}\, dy, \] with these two inequalities we can deduce that \eqref{ec:boundweak} holds for $J_3$ with $p=p_1$ in this case.
The main difference with the previous proof appears in the analysis of $J_4$. To deal with this case, we have to use the following lemma from \cite[Lemma 16.5]{Ch-Muc}. \begin{Lem} \label{lem:Muck} If $1<p<\infty$, $a>-1$, and $E\subset [0,\infty)$, then \[ \left(\int_{E}x^a\, dx\right)^p\le 2^p(a+1)^{1-p}\int_{E}x^{(a+1)p-1}\, dx. \] \end{Lem} In this case, it is enough to prove that \[ \int_{\mathcal{J}}\, d\mu_\nu(x)\le \frac{C}{\lambda^p}\int_{0}^1 \chi_E(y)\,d\mu_\nu(y), \] where \begin{equation*}
\mathcal{J}=\left\{x\in(0,1):
\sup_{R>0}x^{-(\nu+1/2)}\int_0^{1}\chi_E(y)
\chi_{B_4}(x,y)y^{\nu+1/2}|K_R^{\delta}(x,y)|\,dy>\lambda\right\}, \end{equation*} and this can be deduced immediately by using the inclusion \begin{equation} \label{ec:final} \mathcal{J}\subseteq [0,\min\{1,H\}] \end{equation} with \[ H^{2(\nu+1)}=\frac{C}{\lambda^p}\int_{0}^1 \chi_E(y)\,d\mu_\nu(y). \] Let's prove \eqref{ec:final}. By using \eqref{ec:kern} and the estimate $y-x>y/2$, we have \begin{multline*} \sup_{R>0}x^{-(\nu+1/2)}\int_0^{1}\chi_E(y)
\chi_{B_4}(x,y)y^{\nu+1/2}|K_R^{\delta}(x,y)|\,dy \\ \le C\sup_{R>0} R^{-\delta+\nu+1/2}\chi_{[0,2/R]}(x)\int_{4/R}^1\chi_E(y) y^{-\delta+\nu-1/2}\, dy\\ + C\sup_{R>0} R^{-\delta}x^{-(\nu+1/2)}\chi_{[2/R,1]}(x)\int_{2x}^1 \chi_E(y) y^{-\delta+\nu-1/2}\, dy. \end{multline*} In the first summand we can use that $R^{-\delta+\nu+1/2}\le C x^{\delta-\nu-1/2}$ and in the second one that $R^{-\delta}\le x^{\delta}$. Moreover observing that with $p=p_1$ it holds $-\delta+\nu+1/2=2(\nu+1)/p$ we obtain that \begin{align*}
\sup_{R>0}x^{-(\nu+1/2)}\int_0^{1}\chi_E(y)\chi_{B_4}y^{\nu+1/2}|K_R^{\delta}(x,y)|\,dy&\le C x^{-2(\nu+1)/p}\int_{E}y^{-1+2(\nu+1)/p}\, dy\\ &\le C x^{-2(\nu+1)/p}\int_{E}\,d\mu_\nu(y), \end{align*} where in the last step we have used Lemma \ref{lem:Muck}, and this is enough to deduce the inclusion in \eqref{ec:final}.
\section{Proofs of Theorem \ref{th:noweak} and Theorem \ref{th:nostrong}} \label{sec:negativeths}
This section will be devoted to the proofs of Theorem \ref{th:noweak} and Theorem \ref{th:nostrong}. To this end we need a suitable identity for the kernel and in order to do that we have to introduce some notation. $H_{\nu}^{(1)}$ will denote the Hankel function of the first kind, and it is defined as follows \[ H_{\nu}^{(1)}(z)=J_{\nu}(z)+iY_{\nu}(z), \] where $Y_{\nu}$ denotes Weber's function, given by \begin{equation*} Y_{\nu}(z)=\frac{J_{\nu}(z)\cos \nu \pi-J_{-\nu}(z)}{\sin \nu \pi},\,\, \nu\notin \mathbb{Z}, \text{ and } Y_n(z)=\lim_{\nu\to n}\frac{J_{\nu}(z)\cos \nu \pi-J_{-\nu}(z)}{\sin \nu \pi}. \end{equation*} From these definitions, we have \begin{equation*} H_{\nu}^{(1)}(z)=\frac{J_{-\nu}(z)-e^{-\nu \pi i}J_{\nu}(z)}{i\sin \nu \pi}, \,\, \nu\notin \mathbb{Z}, \text{ and } H_n^{(1)}(z)=\lim_{\nu\to n}\frac{J_{-\nu}(z)-e^{-\nu \pi i}J_{\nu}(z)}{i\sin \nu \pi}. \end{equation*} For the function $H_\nu^{(1)}$, the asymptotic \begin{equation}
\label{inftyH} H_{\nu}^{(1)}(z)=\sqrt{\frac{2}{\pi z}}e^{i(z-\nu\pi/2-\pi/4)}[A+O(z^{-1})], \quad |z|>1,\quad -\pi < \arg(z)<2\pi, \end{equation} holds for some constant $A$.
In \cite[Lemma 1]{Ci-Ro} the following lemma was proved. \begin{Lem} \label{lem:expresnucleo} For $R>0$ the following holds: \[K_R^\delta(x,y)=I_{R,1}^\delta(x,y)+I_{R,2}^\delta(x,y)\] with \[ I_{R,1}^{\delta}(x,y)=(xy)^{1/2}\int_0^{R}z\multi J_{\nu}(zx)J_{\nu}(zy)\, dz \] and \[ I_{R,2}^{\delta}(x,y)=\lim_{\varepsilon\to 0} \frac{(xy)^{1/2}}{2}\int_{\mathbf{S_\varepsilon}} \multi \frac{z H^{(1)}_{\nu}(z)J_{\nu}(zx)J_{\nu}(zy)}{J_{\nu}(z)}\,dz, \] where, for each $\varepsilon>0$, $\mathbf{S_\varepsilon}$ is the path of integration given by the interval $R+i[\varepsilon,\infty)$ in the direction of increasing imaginary part and the interval $-R+i[\varepsilon,\infty)$ in the opposite direction. \end{Lem}
Then, by Lemma \ref{lem:expresnucleo} we have \[ \mathcal{K}_R^\delta(x,y)=\mathcal{I}_{R,1}^\delta(x,y)+\mathcal{I}_{R,2}^\delta(x,y) \] where $\mathcal{I}_{R,j}^\delta(x,y)=(xy)^{-(\nu+1/2)}I_{R,j}^{\delta}(x,y)$ for $j=1,2$. The main tool to deduce our negative results will be the following lemma
\begin{Lem} \label{lem:zero} For $\nu>-1/2$, $\delta>0$, and $R>0$ it is verified that \[ \mathcal{K}_R^\delta(0,y)=\frac{2^{\delta-\nu}\Gamma(\delta+1)}{\Gamma(\nu+1)}R^{2(\nu+1)} \frac{J_{\nu+\delta+1}(yR)}{(yR)^{\nu+\delta+1}}+\mathcal{I}_{R,2}^\delta(0,y), \] where \begin{equation}
\label{ec:boundI} \left|\mathcal{I}_{R,2}^\delta(0,y)\right|\le C\begin{cases} R^{2\nu-\delta+1}, & yR\le 1,\\ R^{\nu-\delta+1/2}y^{-(\nu+1/2)}, & yR>1. \end{cases} \end{equation} \end{Lem} \begin{proof} From \eqref{zero}, it is clear that \[ \mathcal{I}_{R,1}^\delta(0,y)=\frac{y^{-\nu}}{2^\nu\Gamma(\nu+1)}\int_0^R z^{\nu+1}\multi J_{\nu}(zy)\, dz. \] Now, by using Sonine's identity \cite[Ch. 12, 12.11, p. 373]{Wat} \[ \int_0^1 s^{\nu+1}\left(1-s^2\right)^\delta J_{\nu}(sy)\, ds=2^{\delta}\Gamma(\delta+1)\frac{J_{\nu+\delta+1}(y)}{y^{\delta+1}}, \qquad \nu,\delta>-1, \] we deduce the leading term of the expression for $\mathcal{K}_{R}^\delta(0,y)$.
To control the term \[ \mathcal{I}_{R,2}^\delta(0,y)=\lim_{\varepsilon\to 0}\frac{y^{-(\nu+1/2)}}{2}\int_{\mathbf{S_\varepsilon}} \multi \frac{z^{\nu+1/2} H^{(1)}_{\nu}(z)(zy)^{1/2}J_{\nu}(zy)}{J_{\nu}(z)}\,dz, \] we start by using the asymptotic expansions given in \eqref{inftyH} and \eqref{infty} for $H_{\nu}^{(1)}(z)$ and $J_{\nu}(z)$. We see that on $\mathbf{S_\varepsilon}$, the path of integration described in Lemma \ref{lem:expresnucleo}, for $t=\mathop{\rm Im}(z)$ the estimate \[
\left|\frac{H_{\nu}^{(1)}(z)}{J_{\nu}(z)}\right|\leq C e^{-2t}, \] holds for $t>0$. Now, from \eqref{zero} and \eqref{infty}, it is clear that for $z=\pm R+it$ \[
|\sqrt{zy}J_{\nu}(zy)|\le Ce^{yt}\Phi_\nu((R+t)y) \] where $\Phi_\nu$ is the function in \eqref{ec:aux}. Then \[
|\mathcal{I}_{R,2}^\delta(0,y)|\le C y^{-(\nu+1/2)}R^{-2\delta}\int_0^\infty t^{\delta}(R+t)^{\nu+\delta+1/2}\Phi_\nu((R+t)y)e^{-(2-y)t}\, dt. \] If $y>1/R$ we have the inequality $\Phi_\nu((R+t)y)\le C$, then \begin{align*}
|\mathcal{I}_{R,2}^\delta(0,y)| &\le C y^{-(\nu+1/2)} R^{-2\delta} \int_0^\infty t^{\delta}(R+t)^{\nu+\delta+1/2} e^{-(2-y)t}\, dt\\ &\le C y^{-(\nu+1/2)}R^{-\delta}(R^{\nu+1/2}+R^{-\delta})\le C R^{\nu-\delta+1/2}y^{-(\nu+1/2)} \end{align*} and \eqref{ec:boundI} follows in this case. If $y\le 1/R$ we obtain the bound in \eqref{ec:boundI} with the estimate $\Phi_\nu((R+t)y)\le C (\Phi_\nu(yR)+(yt)^{\nu+1/2})$. Indeed, \begin{multline*}
|\mathcal{I}_{R,2}^\delta(0,y)| \le C y^{-(\nu+1/2)} R^{-2\delta} \Phi_\nu(yR) \int_0^\infty t^{\delta}(R+t)^{\nu+\delta+1/2} e^{-(2-y)t}\, dt\\+ C R^{-2\delta} \int_0^\infty t^{\nu+\delta+1/2}(R+t)^{\nu+\delta+1/2} e^{-(2-y)t}\, dt\\\le C (R^{2\nu-\delta+1}+R^{\nu-2\delta+1/2}+R^{\nu-\delta+1/2}+R^{-2\delta})\le C R^{2\nu-\delta+1}. \end{multline*} \end{proof}
\begin{Lem} \label{lem:cota0} For $\nu>-1/2$ and $0<\delta\le \nu+1/2$, the estimate \[
\|\mathcal{K}_R^\delta(0,y)\|_{L^{p_0}((0,1),d\mu_\nu)}\ge C R^{\nu-\delta+1/2}(\log R)^{1/p_0} \] holds. \end{Lem} \begin{proof} We will use the decomposition in Lemma \ref{lem:zero}. By using \eqref{zero} and \eqref{infty} as was done in \cite[Lemma 2.1]{Ci-RoWave} we obtain that \[
\left\|R^{2(\nu+1)}
\frac{J_{\nu+\delta+1}(yR)}{(yR)^{\nu+\delta+1}}\right\|_{L^{p_0}((0,1),d\mu_\nu)}\ge C R^{\nu-\delta+1/2}(\log R)^{1/p_0}. \] With the bound \eqref{ec:boundI} it can be deduced that \[
\left\|\mathcal{I}_{R,2}^\delta(0,y)\right\|_{L^{p_0}((0,1),d\mu_\nu)}\le C R^{\nu-\delta+1/2}. \] With the previous estimates the proof is completed. \end{proof}
Finally, the last element that we need to prove Theorems \ref{th:noweak} and \ref{th:nostrong} is the norm inequality for
finite linear combinations of the functions $\{\psi_j\}_{j\ge 1}$ contained in the next lemma. Its proof is long and technical and it will be done in the last section.
\begin{Lem} \label{lem:pol} For $\nu>-1/2$, $R>0$, $1<p<\infty$ and $f$ a linear combination of the functions $\{\psi_j\}_{1\le j\le N(R)}$ with $N(R)$ a positive integer such that $N(R)\simeq R$, the inequality \[
\|f\|_{L^\infty ((0,1),d\mu_\nu)}\le C R^{2(\nu+1)/p}\|f\|_{L^{p,\infty} ((0,1),d\mu_\nu)} \] holds. \end{Lem}
\begin{proof}[Proof of Theorem \ref{th:noweak}] With the bound in Lemma \ref{lem:cota0} we have \begin{align*} (\log R)^{1/p_0}&\le C R^{-2(\nu+1)/p_1}
\left\|\mathcal{K}_{R}^\delta(0,y)\right\|_{L^{p_0}((0,1),d\mu_\nu)}\\ & = C R^{-2(\nu+1)/p_1}
\sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1}\left|\int_0^1
\mathcal{K}_{R}^\delta(0,y) f(y)\, d\mu_\nu\right|\\ & = C R^{-2(\nu+1)/p_1}
\sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1}\left|\mathcal{B}_{R}^\delta f(0)\right|. \end{align*} From the previous estimate the result for $\delta=\nu+1/2$ follows. In the case $\delta<\nu+1/2$ it is obtained by using Lemma \ref{lem:pol} because \begin{multline*} R^{-2(\nu+1)/p_1}
\sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1}\left|\mathcal{B}_{R}^\delta f(0)\right|\\\le C
\sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1}\left\|\mathcal{B}_{R}^\delta f(x)\right\|_{L^{p_1,\infty}((0,1),d\mu_\nu)} \end{multline*} since $\mathcal{B}_{R}^\delta f(x)$ is a linear combination of the functions $\{\psi_j\}_{1\le j\le N(R)}$ with $N(R)\simeq R$. \end{proof}
\begin{proof}[Proof of Theorem \ref{th:nostrong}] In the case $\delta <\nu+1/2$, the result follows from Theorem \ref{th:noweak} by using a duality argument. Indeed, it is clear that \begin{align} \sup_{E\subset
(0,1)}\frac{\|\mathcal{B}^{\delta}_{R}\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}}
{\|\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}} &=\sup_{E\subset (0,1)}
\sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1}
\frac{\left|\int_0^1f(y)\mathcal{B}^{\delta}_{R}\chi_E(y)\, d\mu_\nu\right|}
{\|\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}}\notag\\
&= \sup_{\|f\|_{L^{p_1}((0,1),d\mu_\nu)}=1} \sup_{E\subset (0,1)}
\frac{\left|\int_0^1\chi_E(y)\mathcal{B}^{\delta}_{R}f(y)\, d\mu_\nu\right|}
{\|\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}}\label{ec:lambdacero}. \end{align} By Theorem \ref{th:noweak} it is possible to choose a function $g$
such that $\|g\|_{L^{p_1}((0,1),d\mu_\nu)}=1$ and \[
\|\mathcal{B}_{R}^\delta g(x)\|_{L^{p_1,\infty}((0,1),d\mu_\nu)}\ge C (\log R)^{1/p_0}. \] Then, with the notation \[ \mu_\nu(E)=\int_{E}\, d\mu_\nu, \] we have \begin{equation} \label{ec:lambda} \lambda^{p_1}\mu_\nu(A)\ge C (\log R)^{p_1/p_0}, \end{equation}
for some positive $\lambda$ and $A=\{x\in (0,1): |B_{R}^\delta g(x)|>\lambda\}$. Now, we consider the subsets of $A$ \[ A_1=\{x\in (0,1): B_{R}^\delta g(x)>\lambda\} \qquad\text{ and } \qquad A_2=\{x\in (0,1): B_{R}^\delta g(x)<-\lambda\} \] and we define $D=A_1$ if $\mu_\nu(A_1)\ge \mu_\nu(A)/2$ and $D=A_2$ otherwise. Then, by \eqref{ec:lambda}, we deduce that \begin{equation} \label{ec:lambda2} \lambda \ge C \frac{(\log R)^{1/p_0}}{\mu_\nu(D)^{1/p_1}}. \end{equation} Taking $f=g$ and $E=D$ in \eqref{ec:lambdacero} and using \eqref{ec:lambda2}, we see that \[ \sup_{E\subset
(0,1)}\frac{\|\mathcal{B}^{\delta}_{R}\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}}
{\|\chi_E\|_{L^{p_0}((0,1),d\mu_\nu)}} \ge C
\lambda\frac{\mu_\nu(D)}{\|\chi_D\|_{L^{p_0}((0,1),d\mu_\nu)}} \ge C (\log R)^{1/p_0} \] and the proof is complete in this case. For $\delta=\nu+1/2$ the result follows from Theorem \ref{th:noweak} with a standard duality argument. \end{proof}
\section{Proof of Lemma \ref{lem:pol}} \label{sec:techlemma}
To proceed with the proof of Lemma \ref{lem:pol} we need some auxiliary results that are included in this section.
We start by defining a new operator. For each non-negative integer $r$, we consider the vector of coefficients $\alpha=(\alpha_1,\dots,\alpha_{r+1})$ and we define \[ T_{r,R,\alpha}f(x)=\sum_{\ell=1}^{r+1}\alpha_\ell \mathcal{B}_{\ell R}^{r}f(x). \] This new operator is an analogue of the \textit{generalized delayed means} considered in \cite{SteinDuke}. In \cite{SteinDuke} the operator is defined in terms of the Ces\`{a}ro means instead of the Bochner-Riesz means. The properties of $T_{r,R,\alpha}$ that we need are summarized in the next lemma.
\begin{Lem} \label{lem:delay} For each non-negative integer $r$ and $\nu\ge -1/2$, the following statements hold \begin{enumerate} \item[a)] $T_{r,R,\alpha}f$ is a linear combination of the functions $\{\psi_j\}_{1\le j\le N((r+1)R)}$, where $N((r+1)R)$ is a non-negative integer such that $N((r+1)R)\simeq (r+1)R$;
\item[b)] there exists a vector of coefficients $\alpha$, verifying that $|\alpha_\ell|\le A$, for $\ell=1,\dots, r+1$, with $A$ independent of $R$ and such that $T_{r,R,\alpha}f(x)=f(x)$ for each linear combination of the functions $\{\psi_j\}_{1\le j\le N(R)}$ where $N(R)$ is a positive integer. Moreover, in this case, for $r>\nu+1/2$, \[
\|T_{r,R,\alpha}f\|_{L^1 ((0,1),d\mu_\nu)}\le C\|f\|_{L^1((0,1),d\mu_\nu)}\] and
\[\|T_{r,R,\alpha}f\|_{L^\infty ((0,1),d\mu_\nu)}\le C
\|f\|_{L^\infty((0,1),d\mu_\nu)}, \] with $C$ independent of $R$ and $f$. \end{enumerate} \end{Lem} \begin{proof} Part a) is a consequence of the definition of $T_{r,R,\alpha}$ and the fact that the $m$-th zero of the Bessel function $J_\nu$, with $\nu \ge-1/2$, is contained in the interval $(m\pi+\nu\pi/2+\pi/2,m\pi+\nu\pi/2+3\pi/4)$.
To prove b) we consider $f(x)=\sum_{j=1}^{N(R)} a_j \psi_j(x)$. In order to obtain the vector of coefficients such that $T_{r,R,\alpha}f(x)=f(x)$ the equations \[ \sum_{\ell=1}^{r+1}\alpha_\ell \left(1-\frac{s_{k}^2}{(\ell R)^2}\right)^r=1, \] for all $k=1,\dots,N(R)$, should be verified. After some elementary manipulations each one of the previous equations can be written as \[ \sum_{j=0}^r s_k^{2j}\binom{r}{j}\frac{(-1)^j}{R^{2j}}\sum_{\ell=1}^{r+1} \frac{\alpha_\ell}{\ell^{2j}}=1 \] and this can be considered as a polynomial in $s_k^2$ which must be equal to $1$; therefore we have the system of equations \[ \sum_{\ell=1}^{r+1}\frac{\alpha_\ell}{\ell^{2j}}=\delta_{j,0}, \qquad j=0,\dots,r. \] This system has a unique solution because the determinant of the matrix of coefficients is a Vandermonde determinant. Of course for each
$\ell=1,\dots,r+1$, it is verified that $|\alpha_\ell|\le A$, with $A$ a constant depending on $r$ but not on $N(R)$.
The norm estimates are a consequence of the uniform boundedness \[
\|\mathcal{B}_R^\delta f\|_{L^p((0,1),d\mu_\nu)}\le C
\|f\|_{L^p((0,1),d\mu_\nu)}, \] for $p=1$ and $p=\infty$ when $\delta > \nu+1/2$ (see \cite{Ci-Ro}). \end{proof}
In the next lemma we will control the $L^\infty$-norm of a finite linear combination of the functions $\{\psi_j\}_{j\ge 1}$ by its $L^1$-norm.
\begin{Lem} \label{lem:infty1} If $\nu>-1/2$ and $f(x)$ is a linear combination of the functions $\{\psi_j\}_{1\le j\le N(R)}$ with $N(R)$ a positive integer such that $N(R)\simeq R$, the inequality \[
\|f\|_{L^\infty ((0,1),d\mu_\nu)}\le C R^{2(\nu+1)}\|f\|_{L^1((0,1),d\mu_\nu)} \] holds. \end{Lem} \begin{proof} It is clear that \[ f(x)=\sum_{j=1}^{N(R)} \psi_j(x)\int_0^1 f(y) \psi_j(y)\, d\mu_\nu(y). \] Now, using H\"{o}lder inequality and Lemma \ref{Lem:NormaFunc} we have \begin{align*}
\|f\|_{L^\infty ((0,1),d\mu_\nu)}&\le C\sum_{j=1}^{N(R)}
\|\psi_j\|_{L^\infty
((0,1),d\mu_\nu)}^2\|f\|_{L^{1}((0,1),d\mu_\nu)}\\&\le C
\|f\|_{L^{1}((0,1),d\mu_\nu)} \sum_{j=1}^{N(R)}j^{2\nu+1}\le C R^{2(\nu+1)}\|f\|_{L^1((0,1),d\mu_\nu)}. \end{align*} \end{proof}
The following lemma is a version in the space $((0,1),d\mu_\nu)$ of Lemma 19.1 in \cite{Ch-Muc}. The proof can be done in the same way, with the appropriate changes, so we omit it.
\begin{Lem} \label{lem:fuerdeb} Let $\nu>-1$, $1<p<\infty$ and $T$ be a linear operator defined for functions in $L^1((0,1),d\mu_\nu)$ and such that \[
\|Tf\|_{L^\infty ((0,1),d\mu_\nu)}\le A
\|f\|_{L^1((0,1),d\mu_\nu)} \,\text{ and }\,\|Tf\|_{L^\infty
((0,1),d\mu_\nu)}\le B \|f\|_{L^\infty((0,1),d\mu_\nu)}, \] then \[
\|Tf\|_{L^\infty ((0,1),d\mu_\nu)}\le C A^{1/p}B^{1/p'}\|f\|_{L^{p,\infty}((0,1),d\mu_\nu)}. \] \end{Lem}
Now, we are prepared to conclude the proof of Lemma \ref{lem:pol}.
\begin{proof}[Proof of Lemma \ref{lem:pol}] We consider the operator $T_{r,R,\alpha}f$ given in Lemma \ref{lem:delay} b) with $r>\nu+1/2$. By Lemma \ref{lem:delay} and Lemma \ref{lem:infty1} we have \begin{align*}
\|T_{r,R,\alpha}f\|_{L^\infty((0,1),d\mu_\nu)}&\le C ((r+1)R)^{2(\nu+1)}
\|T_{r,R,\alpha}f\|_{L^1((0,1),d\mu_\nu)}\\&\le C R^{2(\nu+1)}\|f\|_{L^1((0,1),d\mu_\nu)}. \end{align*} From b) in Lemma \ref{lem:delay} we obtain the estimate \[
\|T_{r,R,\alpha}f\|_{L^\infty((0,1),d\mu_\nu)}\le C
\|f\|_{L^\infty((0,1),d\mu_\nu)}. \] So, by using Lemma \ref{lem:fuerdeb}, we obtain the inequality \[
\|T_{r,R,\alpha}f\|_{L^\infty((0,1),d\mu_\nu)}\le C
R^{2(\nu+1)/p}\|f\|_{L^{p,\infty}((0,1),d\mu_\nu)} \] for any $f\in L^1((0,1),d\mu_{\nu})$. Now, since $T_{r,R,\alpha}f(x)=f(x)$ for a linear combination of the functions $\{\psi_j\}_{1\le j\le N(R)}$, the proof is complete. \end{proof}
\end{document} |
\begin{document}
\baselineskip=26pt
\address[F\'abio~P.~Machado] {Institute of Mathematics and Statistics \\ University of S\~ao Paulo \\ Rua do Mat\~ao 1010, CEP 05508-090, S\~ao Paulo, SP, Brazil.}
\address[Valdivino~V.~Junior] {Federal University of Goias \\ Campus Samambaia, CEP 74001-970, Goi\^ania, GO, Brazil.}
\address[Alejandro Roldan-Correa] {Instituto de Matem\'aticas, Universidad de Antioquia, Calle 67, no 53-108, Medellin, Colombia}
\title[Colonization and collapse on Homogeneous Trees]{Colonization and collapse on Homogeneous Trees} \author{Valdivino~V.~Junior} \author{F\'abio~P.~Machado} \author{Alejandro Rold\'an-Correa}
\noindent \email{[email protected], [email protected], [email protected]}
\thanks{Research supported by CNPq (306927/2007-1), FAPESP (2010/50884-4) and Universidad de Antioquia (SUI XXXXX).}
\keywords{Branching processes, Coupling, Catastrophes, Population dynamics.}
\subjclass[2010]{60J80, 60J85, 92D25}
\date{\today}
\begin{abstract}
We investigate a basic immigration process where co\-lo\-nies grow, during a random time, according to a general counting process until collapse. Upon collapse a random number of indivi\-duals survive. These survivors independently try to establish new colonies at neighbour sites. Here we consider this general process subject to two schemes, Poisson growth with geometric catastrophe and Yule growth with binomial catastrophe. Independently of everything else, colonies grow, during an exponential time, as a Poisson (or Yule) process, and right after that exponential time their size is reduced according to a geometric (or binomial) law. Each survivor tries independently to start a new colony at a neighbour site of a homogeneous tree. That colony will thrive until its collapse, and so on. We study conditions on the set of parameters for these processes to survive, and present relevant bounds for the probability of survival, for the number of vertices that were colonized and for the reach of the colonies compared to the starting point. \end{abstract}
\singlespacing
\maketitle \section{Introduction} \label{S: Introduction} Biological populations are subject to disasters that can cause anything from a partial elimination of the individuals to their total extinction. When a disaster occurs, surviving individuals may react in different ways. A strategy adopted by some populations is dispersion. In this case, individuals migrate, trying to create new colonies in other locations; there may be competition or collaboration between individuals of the same colony. Once they settle a new colony in a new spot, another disaster can strike, which causes a new collapse.
In this type of population dynamics there are some issues to consider, such as: What is the duration of a colonization until the moment of the disaster? How much does the population grow until it is hit? How many individuals will survive? How do survivors react when facing a disaster?
In recent articles, the main variables considered in population modeling are (i) the spatial structure where the colonies are located and individuals can move, (ii) the lifetime of a colony until the moment of collapse, (iii) the evolution of the number of individuals in the colony (random or deterministic growth, possible deaths or migration), (iv) the way the catastrophes affect the size of the colony, allowing or not the survival of some individuals, and (v) whether the individuals that survive the catastrophe are able to spread out.
Brockwell \textit{et al.}~\cite{BGR1982} and later Artalejo \textit{et al.}~\cite{AEL2007} considered a model for the growth of a population subject to collapse. In their model, two types of effects when a disaster strikes are analyzed separately, \textit{binomial effect} and \textit{geometric effect}. After the collapse, the survivors remain together in the same colony (there is no dispersion). They carried out an extensive analysis including first extinction time, number of individuals removed, survival time of a tagged individual, and maximum population size reached between two consecutive extinctions.
More recently, Schinazi~\cite{S2014} and Machado \textit{et al.}~\cite{MRS2015} proposed stochastic models for this kind of population dynamics. For these models they concluded that dispersion is a good survival strategy. Later, Junior \textit{et al.}~\cite{VAF2016} exhibited combinations of a type of catastrophe, spatial restriction and individual survival probability when facing the catastrophe for which dispersion may not be a good strategy for group survival. For a comprehensive literature overview and motivation see Kapodistria \textit{et al.}~\cite{KPR2016}.
The paper is divided into four sections. In Section 2 we present a general model for the growth of populations subject to collapses, introduce the variables of interest, notation and two particular schemes: Poisson growth with geometric catastrophe and Yule growth with binomial catastrophe. In Section 3 we present the main results of the paper while their proofs are in Section 4.
\section{Colonization and Collapse models} \label{S: CCM}
In the beginning all vertices of $\mathbb{G}$, an infinite connected graph, are empty except for the origin, where there is one individual. Besides that, at any time each colony is started by a single individual. The number of individuals in each colony behaves as $\mathcal{C}$, a Counting Process. To each colony is associated a non-negative random variable $T$ which defines its lifetime. After a period of time $T$, that colony collapses and the vertex where it is placed becomes empty. At the time of collapse, with a random effect $\mathcal{E}$, some individuals in the colony are able to survive while others die. For simplicity we represent this quantity by $N$. Note that this random quantity depends on the Counting Process which defines the growth of the colony, on the distribution of $T$ and on how the collapse affects the group of individuals present in the colony at time $T$. Each individual that survives ($N$ individuals in total) tries to found a new colony on one of the nearest neighbour vertices by first picking one of them at random. If the chosen vertex is occupied, that individual dies; otherwise the individual founds there a new colony. We denote the Colonization and Collapse model generally described here either by ${\{{\mathbb G}; N\}}$ or ${\{\mathbb{G}; \mathcal{C}, \mathcal{E},T \}}$, a stochastic process whose state space is $\mathbb{N}^{\mathbb{G}}$. Throughout this paper we concentrate our attention on ${\mathbb T}^d$, a homogeneous tree where every vertex has $d+1$ nearest neighbours, and on ${\mathbb T}^d_+$, a tree whose only difference from ${\mathbb T}^d$ is that its origin has degree $d$.
\begin{defn} Let us consider the following random variables \begin{itemize} \item $I_d:$ the number of colonies created from the beginning to the end of the process; \item $M_d:$ the distance from the origin to the furthest vertex where a colony is created; \item $\{X_t\}_{0 \le t \le T}$ growth process for the amount of individuals in a colony. \end{itemize} \end{defn}
We work in details some specific cases.
\begin{itemize} \item $T:$ Lifetime of a colony \begin{itemize} \item $T \sim {\mathcal Exp}(1)$, Exponential with mean 1 \[ P[T<t] = 1- e^{- t}, \ t > 0 \] \end{itemize} \item $X_t:$ Growth of the number of individuals \begin{itemize} \item $X_t \sim {\mathcal Poisson}(\lambda t)$, a Poisson point process with rate $\lambda$ \[P[X_t = k] = \frac{e^{-\lambda t} (\lambda t)^{k-1}}{(k-1)!} , \ k \in \{1,2,...\} \] \item $X_t \sim {\mathcal Geom}(e^{-\lambda t})$, a Yule process with rate $\lambda$ \[P[X_t = k] = e^{-\lambda t} (1-e^{-\lambda t})^{k-1} , \ k \in \{1,2,...\} \] \end{itemize} \item $N:$ Number of individuals able to survive \begin{itemize}
\item $N | X_T \sim {\mathcal B}(X_T,p),$ Binomial catastrophe
\[P[N = m | X_T = k] = {k \choose m} p^m(1-p)^{k-m} , \ m \in \{0, 1,...,k\} \]
\item $N | X_T = X_T- \min\{{\mathcal Geom}(p)-1;X_T\} \sim {\mathcal G}_{X_T}(p),$ Geometric catastrophe
\[ P[N = m | X_T] = \left\{ \begin{array}{ll} p (1-p)^{X_T - m} & \mbox{if $m \in \{1,...,X_T$\}} \\ \\ (1-p)^{X_T} & \mbox{if $m = 0$} \end{array} \right. \] \end{itemize} \end{itemize}
In general it is true that \begin{align*}
\mathbb{P}(N = n) &= \int_0^{\infty} \mathbb{P}(N=n|T=t)f_T(t)dt \\
\mathbb{P}(N = n|T=t) &= \sum_{x=n}^{\infty} \mathbb{P}(X_T=x|T=t)\mathbb{P}(N=n|X_T=x ; T=t). \end{align*}
Suppose that individuals are born following a Poisson process at rate $\lambda$, that the collapse time follows an exponential random variable with average 1 ($T \sim {\mathcal Exp}(1)$) and that the individuals are exposed to the collapse effects, one by one, until the first individual survives, if any; then the collapse effects stop. If the collapse effects reach a fixed individual, it survives with probability $p$, meaning that $N_T \sim {\mathcal G}_{X_T}(p)$ (Geometric catastrophe) or ${\mathcal G}(p)$ for short. Let us consider the distribution of the number of survivors at collapse times \begin{align*} \mathbb{P}(N = 0) = \int_{0}^{\infty}e^{-t}\sum_{j=0}^{\infty}\frac {e^{-\lambda t}(\lambda t)^j}{j!}(1-p)^{j+1}dt = \frac{1-p}{1 +\lambda p } \end{align*} and for $n \geq 1$: \begin{align*} \mathbb{P}(N = n) = \int_{0}^{\infty}e^{-t}\sum_{j=n-1}^{\infty}\frac{e^{-\lambda t}(\lambda t)^j}{j!} p(1-p)^{j+1-n}dt = \left(\frac{\lambda }{\lambda + 1}\right )^{n-1} \frac{p}{\lambda p +1}. \end{align*}
\noindent In this case the probability generating function of $N$ is \begin{align} \label{eq: fgpP} \mathbb{E}(s^N) =& \frac{1-p}{1+\lambda p}+\sum_{n=1}^{\infty} s^n \left(\frac{\lambda }{1+\lambda }\right)^{n-1} \left(\frac{p}{\lambda p+1}\right) \\ =& \frac{1}{\lambda p +1}\left[1-p+\frac{(\lambda+1)ps}{1+\lambda-\lambda s}\right]. \end{align} Its average is $\displaystyle \mathbb{E}(N) = \frac{p (\lambda +1)^2}{(\lambda p + 1)}.$
Suppose now that individuals are born following a Yule process at rate $\lambda$, that $T \sim {\mathcal Exp}(1)$ and that the disaster reaches the individuals simultaneously and independently of everything else. Assuming that each individual survives with probability $p$, we have that $N_T \sim {\mathcal B}(X_T, p)$ (Binomial catastrophe) or ${\mathcal B}(p)$ for short. Let us consider the distribution of the number of survivors at collapse times. \begin{align*} \mathbb{P}(N = 0) =&\int_0^\infty e^{-t}\sum_{j= 1}^\infty e^{-\lambda t}(1-e^{-\lambda t})^{j-1}(1-p)^{j}dt \\ =&\frac{1-p}{\lambda+1}\ {_2F_1}\left(1,1;2+\frac{1}{\lambda};{1-p}\right). \end{align*} \noindent and for $n \geq 1$ \begin{align*} \mathbb{P}(N = n) =& \int_0^\infty e^{-t}\sum_{j= n}^\infty e^{-\lambda t}(1-e^{-\lambda t})^{j-1}{j \choose n}p^n(1-p)^{j-n}dt \\ =&\frac{p^n}{\lambda}B\left(n,1+\frac{1}{\lambda}\right){_2F_1}\left(n+1,n;n+1+\frac{1}{\lambda};{1-p}\right). \end{align*} In this setup the probability generating function of $N$ is \begin{eqnarray}\label{eq: fgpY} \mathbb{E}(s^N)&=&\sum_{n=0}^{\infty} s^n \int_{0}^{\infty} e^{-t} \sum_{k=n \vee 1}^{\infty} e^{-\lambda t}(1-e^{-\lambda t})^{k-1} {k \choose n} p^n(1-p)^{k-n} \ dt\nonumber \\ &=&\frac{ps+1-p}{\lambda +1}\ {_2F_1}\left(1,1;2+\frac{1}{\lambda}; p(s-1)+1\right) \end{eqnarray} and its average is \begin{eqnarray}\label{eq: averageN} \mathbb{E}(N)= \left\{\begin{array}{cl} \displaystyle\frac{p}{1-\lambda} & ,\text{ if } \lambda<1 \\ \\ \infty &, \text{ if } \lambda\geq 1. \end{array}\right.\nonumber \end{eqnarray}
\section{Main Results} \label{S: Homogeneous Trees}
$\{ {\mathbb T}^d, \mathcal{C}, \mathcal{E}, T \}$ is a stochastic process whose state space is $\mathbb{N}^{\mathbb{T}^d}$ and whose evolution (status at time $t$) is denoted by $\eta_t$. For a vertex $x \in \mathbb{T}^d$, $\{\eta_t(x)=i\}$ means that at the time $t$ there are $i$
individuals at the vertex $x$. We consider $|\eta_t| = \sum_{x \in \mathbb{T}^d} \eta_t(x)$.
\subsection{Phase Transition} \label{SS: PT}
\begin{defn} Let $\eta_t$ be the process ${\{\bbT^d; \mathcal{C}, \mathcal{E},T \}}$. Let us define the event
\[ V_d = \{ |\eta_t| > 0, \hbox{ for all } t \ge 0 \}. \]
If $\mathbb{P}(V_d) > 0$ we say that the process ${\{\bbT^d; \mathcal{C}, \mathcal{E},T \}}$ {\it survives}. Otherwise, we say that the process ${\{\bbT^d; \mathcal{C}, \mathcal{E},T \}}$ {\it dies out }. \end{defn}
\begin{teo} \label{T: MCC1H} Consider the process ${\{\bbT^d; N\}}$. Then $\mathbb{P}(V_d) = 0 $ if \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] \geq \frac{d}{d+1} \end{displaymath} and $\mathbb{P}(V_d) > 0 $ if \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] < \frac{d-1}{d}. \end{displaymath} \end{teo}
\begin{cor} \label{C: MCC1HP} Consider the process ${\{\bbT^d; \mathcal{P}(\lambda), \mathcal{G}(p) \}}$.
\begin{itemize} \item[$(i)$] $\mathbb{P}(V_d) = 0$ if \begin{equation}\label{C:tfp1} (\lambda^2d + \lambda d + \lambda +d + 1)p \leq \lambda + d + 1. \end{equation}
\item[$(ii)$] $\mathbb{P}(V_d) > 0$ if \begin{equation}\label{C:tfp2} (\lambda^2d - \lambda^2+ \lambda d - \lambda +d )p > \lambda + d + 1. \end{equation}
\end{itemize} \end{cor}
\begin{cor} \label{C: MCC1HPY} Consider the process ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$.
\begin{itemize} \item[$(i)$] $\mathbb{P}(V_d) = 0 $ if \begin{equation}\label{C:tfy1} _2 F_1 \left(1,1; 2 +\frac{1}{\lambda}; \frac{d(1-p) +1}{d+1} \right) \geq \frac{d(\lambda +1)}{d +1 - p}. \end{equation}
\item[$(ii)$] $\mathbb{P}(V_d) > 0 $ if \begin{equation}\label{C:tfy2} _2 F_1 \left(1,1; 2 +\frac{1}{\lambda}; \frac{d(1-p) +1}{d+1} \right) < \frac{(d^2-1)(\lambda +1)}{d(d+1-p)}. \end{equation} \end{itemize} \end{cor}
Observe that for the process ${\{\mathbb{G}; \mathcal{C}, \mathcal{E},T \}},\hbox{ when } \mathcal{C} \in \{\mathcal{Y}(\lambda), \mathcal{P}(\lambda)\} \hbox{ and } \mathcal{E} \in \{\mathcal{G}(p), \mathcal{B}(p)\},$ by a coupling argument one can see that $\mathbb{P}(V_d)$ is a non-decreasing function of $\lambda$ and also of $p$. Moreover, the function $\lambda_c(p)$, defined by $$\lambda_c(p):=\inf\{\lambda: \mathbb{P}(V_d) >0 \},$$ is a non-increasing function of $p$, with $\lambda_c(1)=0$ and $\lambda_c(0)=\infty$.
\begin{defn} Let $\eta_t$ be a ${\{\mathbb{G}; \mathcal{C}, \mathcal{E},T \}} \hbox{ for } \mathcal{C} \in \{\mathcal{Y}(\lambda), \mathcal{P}(\lambda)\} \hbox{ and } \mathcal{E} \in \{\mathcal{G}(p), \mathcal{B}(p)\},$ with $0<p<1$. We say that $\eta_t$ exhibits \textit{phase transition} on $\lambda$ if $0<\lambda_c(p)<\infty.$ \end{defn}
Machado \textit{et al.}(2016) proved phase transition on $\lambda $ for the process ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$. So, there exists a function $\lambda_c(\cdot):(0,1)\rightarrow \mathbb{R}^+$ whose graphic splits the parametric space $\lambda \times p$ into two regions. For those values of $(\lambda,p)$ above the curve $\lambda_c(p)$, there is survival in ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$ with positive probability. Moreover, for those values of $(\lambda,p)$ below the curve $\lambda_c(p)$ extinction occurs in ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$ with probability 1.
However, nothing is known about the continuity and strict monotonicity (in $p$) of the function $ \lambda_c(p) $. If there is continuity and strict monotonicity, then the process also has phase transition in $p$ for each fixed $\lambda \in (0, \infty)$.
In order to answer the question about phase transition on $p$ for the process ${\{\mathbb{G}; \mathcal{C}, \mathcal{E},T \}}, \\ \hbox{ when } \mathcal{C} \in \{\mathcal{Y}(\lambda), \mathcal{P}(\lambda)\} \hbox{ and } \mathcal{E} \in \{\mathcal{G}(p), \mathcal{B}(p)\},$ we start with the following definition \[p_c(\lambda):=\inf\{p: \mathbb{P}(V_d) > 0 \}.\]
\begin{defn} Let $\eta_t$ be a ${\{\mathbb{G}; \mathcal{C}, \mathcal{E},T \}} \hbox{ for } \mathcal{C} \in \{\mathcal{Y}(\lambda), \mathcal{P}(\lambda)\} \hbox{ and } \mathcal{E} \in \{\mathcal{G}(p), \mathcal{B}(p)\},$ with $\lambda \in (0, \infty)$ fixed. We say that $\eta_t$ exhibits \textit{phase transition} on $p$ if $0<p_c(\lambda)<1.$ \end{defn}
The item $(i)$ of Corollary~\ref{C: MCC1HPY} coincides with item $(iii)$ of Theorem 3.1 from Machado \textit{et al.}~\cite{MRS2015}. The novelty of Corollary~\ref{C: MCC1HPY} is its item $(ii)$, which provides a sufficient condition for survival. Corollary~\ref{C: MCC1HPY} guarantees phase transition in $p$ for ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$ for $\lambda$ large enough, and gives lower and upper bounds for $\lambda_c(p)$.
\begin{exa} Consider ${\{\bbT^4; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$. The equalities in (\ref{C:tfy1}) and (\ref{C:tfy2}) provide lower and upper bounds, respectively, for $\lambda_c(p)$. See Figure \ref{F: LimitanteTransicaoAnt}. These bounds guarantee phase transition in $p$ for $\lambda>\lambda_4^*$, where $\lambda_d^*$ is an upper bound for $\displaystyle\lim_{p\rightarrow 1^-}\lambda_c(p)$, obtained as the solution of
\begin{displaymath}
_2 F_1 \left(1,1; 2 +\frac{1}{\lambda}; \frac{1}{d+1} \right) = \frac{(d^2-1)(\lambda +1)}{d^2},
\end{displaymath} see Corollary \ref{C: MCC1HPY} $(ii)$. The following table shows computations for $\lambda_d^*$ for some values of $d$
\begin{center}
\begin{tabular}{|l|c|c|c|c|c|c|}\hline
$d$ & 2 &3 & 4 & 5 & 6 & 10 \\ \hline
$\lambda_d^*$ & 0.4555826 & 0.1613016& 0.08212601 & 0.04961835 & 0.03315455 & 0.01110147 \\ \hline
\end{tabular}
\end{center}
\begin{figure}\label{F: LimitanteTransicaoAnt}
\end{figure}
\end{exa}
\begin{exa} Consider $\{{\mathbb T}^{4}; \mathcal{P}(\lambda), \mathcal{G}(p) \}$. The equalities in (\ref{C:tfp1}) and (\ref{C:tfp2}) provide lower and upper bounds, respectively, for $\lambda_c(p)$. See Figure \ref{F:Poison-Gem-d4}. These bounds guarantee phase transition in $p$ for $\lambda>\lambda_4^*$, where $\lambda_d^*$ is an upper bound for $\displaystyle\lim_{p\rightarrow 1^-}\lambda_c(p)$, obtained as the solution of
\begin{displaymath}
(\lambda^2d - \lambda^2+ \lambda d - \lambda +d )p = \lambda + d + 1,
\end{displaymath}
when $p=1,$ see Corollary \ref{C: MCC1HP} $(ii)$. Thus, $$\lambda_d^*=\frac{1}{d-1}.$$
\begin{figure}\label{F:Poison-Gem-d4}
\end{figure}
\end{exa}
\subsection{Probability of Survival} \label{SS: SP}
We denote by $T(n,k)$ the number of surjective functions $f:A \to B $, where $|A| = n$ and $|B| = k$, whose value is given, by the inclusion-exclusion principle (see Tucker~\cite{Tucker} p. 319), by \begin{displaymath} T(n,k) = \sum_{i=0}^{k} \left [ (-1)^i \binom{k}{i}(k-i)^n \right], n \geq k. \end{displaymath}
\begin{teo} \label{T: MCCHPE1} Consider the process ${\{\bbT^d; N\}}$. We have that \begin{displaymath}
\sum_{r=1}^{d+1} \left [(1 - \rho^r)\binom{d+1}{r}\sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] \leq \mathbb{P}(V_d) \leq 1-\psi \end{displaymath} where $\psi$ and $\rho$ are, respectively, the smallest non-negative solutions of \begin{align*} &\sum_{y=0}^{d+1} \left[ s^y\binom{d+1}{y}\sum_{n=y}^{\infty}\frac{T(n,y)}{(d+1)^n}\mathbb{P}(N=n) \right] = s,\\ &\sum_{y=1}^{d} \left [ s^y\binom{d}{y}\sum_{n=y}^{\infty}\frac{T(n,y)+T(n,y+1)}{(d+1)^n}\mathbb{P}(N=n)\right ] = s - \sum_{n=0}^{\infty}\frac{\mathbb{P}(N=n)}{(d+1)^n} . \end{align*} \end{teo}
\begin{teo} \label{T: MCCHPE1L} Consider the process ${\{\bbT^d; N\}}$. We have that \[ \lim_{d \to \infty} \mathbb{P}(V_d) = 1 - \nu \] where $\nu$ is the smallest non-negative solution of $ \mathbb{E}(s^N) = s.$ \end{teo}
\begin{cor} \label{C: MCC1HPS} Consider the process ${\{\bbT^d; \mathcal{P}(\lambda), \mathcal{G}(p) \}}$. Then \[ \lim_{d \to \infty} \mathbb{P}(V_d) = \max \left \{ 0, \frac{p(\lambda^2 + \lambda +1)-1}{\lambda (1+ \lambda p)} \right \}. \] \end{cor}
\begin{exa} Consider the process ${\{\bbT^{d}; \mathcal{P}(5), \mathcal{G}(0.6) \}}$. If $d=10$ then \begin{displaymath} \mathbb{P}(N = n ) = \left\{
\begin{array}{ll}
\frac{9}{50}\left(\frac{5}{6} \right )^n, & \hbox{$n \geq 1$;} \\
\frac{1}{10}, & \hbox{$n=0$.}
\end{array}
\right. \end{displaymath} By using Theorem~\ref{T: MCCHPE1} we have that $\psi = 0.12226$ and $\rho = 0.143256$. Then \[ 0.8733 \leq \mathbb{P}(V_{10}) \leq 0.8778.\] Besides, \[ \displaystyle \lim_{d \to \infty} \mathbb{P}(V_d) = 0.88. \] \end{exa}
\begin{cor} \label{C: MCC1HPS2} Consider the process ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$. Then \[ \lim_{d \to \infty} \mathbb{P}(V_d) =1 - \nu \] where $\nu$ is the smallest non-negative solution of \[ _2 F_1 \left ( 1,1;2 + \frac{1}{\lambda}; p(s-1) + 1 \right ) = \frac{ s(\lambda +1)}{ps + 1-p}. \] \end{cor} \begin{exa} Consider the process ${\{\bbT^d; \mathcal{Y}(\lambda), \mathcal{B}(p) \}}$. If $\lambda = 2$ and $p = 0.5$ then, by using Corollary~\ref{C: MCC1HPS2}, \[ \displaystyle \lim_{d \to \infty} \mathbb{P}(V_d) = 0.680977.\] \end{exa}
\subsection{The reach of the process} \label{SS: RP}
In order to show results for the reach of the process, meaning the distance from the origin to the furthest vertex where a colony is created, let us define a few technical quantities
\begin{defn} \begin{align*} \alpha&= d \left [ 1 - \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] \right ] \\ \beta &= (d+1) \left [ 1 - \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] \right ] = \alpha + 1- \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right]\\ D &= \max \left \{ 2; \frac{\beta}{\beta - \mathbb{P}(N \neq 0)} \right\} \\ B& =d(d-1)\left[ 1 - 2\mathbb{E} \left ( \left(\frac{d }{d+1} \right)^N \right) + \mathbb{E} \left ( \left(\frac{d-1 }{d+1} \right)^N \right) \right] \end{align*} \end{defn}
\begin{teo}\label{T: LEIT} Consider the process ${\{\bbT^d; N\}}$. Assume that \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] > \frac{d-1}{d} \end{displaymath} We have that \begin{displaymath} \frac{[1+D(1-\beta)][1-\beta^{m+1}]}{1+ D(1-\beta)-\beta^{m+1}} \leq \mathbb{P}(M_d \leq m) \leq \frac{[1 + \frac{\alpha(1-\alpha)}{B}](1-\alpha^{m+1})}{ 1 + \frac{\alpha(1-\alpha)}{B} - \alpha^{m+1}} \end{displaymath} and \begin{displaymath}
\frac{\alpha^2}{2(B+ \alpha)} + \alpha(1-\alpha)\frac{\ln \left[1 - \frac{\alpha B}{B + \alpha(1-\alpha)}\right]}{B \ln \alpha} \leq \mathbb{E}(M_d) \leq \frac{D \beta}{D+1} + D(1-\beta)\frac{\ln \left[1 - \frac{\beta}{1 + D(1-\beta)}\right]}{\ln \beta}. \end{displaymath} \end{teo}
\begin{cor} \label{C: CTM1} Consider the process ${\{\bbT^d; \mathcal{P}(\lambda), \mathcal{G}(p) \}}$. If \[ (\lambda^2d + \lambda d + \lambda +d + 1)p < \lambda + d + 1\] then Theorem~\ref{T: LEIT} holds under the values \begin{align*} \alpha &= \frac{dp(\lambda + 1)^2}{(d + \lambda + 1)(\lambda p + 1)},\ \beta = \frac{(d+1)p(\lambda + 1)^2}{(d + \lambda + 1)(\lambda p + 1)},\ D = \max \left \{ 2; \frac{(d+1)(\lambda + 1)}{d \lambda} \right\},\\ B& = 2d(d-1) \left [ \frac{(\lambda +1)^2(2(\lambda p + 1) - 1) + (\lambda +1)(p-1)d - (\lambda p+1)d^2}{(d + 2\lambda + 1)(d + 2 \lambda + 1)(\lambda p + 1)} \right ]. \end{align*} \end{cor}
\begin{teo}\label{T: LEITL} Consider the process ${\{\bbT^d; N\}}$. We have that \begin{displaymath} M_d \overset{D}{\to} M, \end{displaymath} where $\mathbb{P}( M \leq m) = g_{m+1}(0)$, being $g(s) = \mathbb{E}(s^N) $ and $g_{m+1}(s) = \overset{ m+1 \textrm { times }} {g(g(\cdots g(s)) \cdots )}$. \end{teo}
\begin{cor} \label{C: LEITL} Consider the process ${\{\bbT^d; \mathcal{P}(\lambda), \mathcal{G}(p) \}}$. \begin{itemize} \item[$(i)$] If $ p \neq {(\lambda^2 + \lambda + 1)}^{-1}$ then \[ \mathbb{P}(M \leq m) = \frac{1 - \left (\frac{(\lambda + 1)^2 p}{\lambda p + 1} \right )^{m+1}}{1 - \frac{\lambda(\lambda p +1)}{(1-p)(\lambda p + 1)}\left (\frac{(\lambda +1)^2 p}{\lambda p + 1} \right )^{m+1}}, \ m \geq 0 \] and \[ \mathbb{E}(M)= \frac{(1- p(\lambda^2 + \lambda + 1))}{\lambda (\lambda p + 1)} \lim_{ s \to \infty} \left [ \psi_{\gamma} \left( 1 - \frac{\ln{\frac{(\lambda +1)(1-p)}{\lambda ( \lambda p + 1)}}}{ \ln {\gamma}} \right) - \psi_{\gamma} \left( s - \frac{\ln{\frac{(\lambda +1)(1-p)}{\lambda ( \lambda p + 1)}}}{ \ln {\gamma}} + 1 \right) \right ] \] where $\displaystyle \gamma = \frac{(\lambda +1)^2 p}{\lambda p + 1}$ and $\displaystyle \psi_a(z) = -\ln(1-a) + \ln(a) \sum_{n=0}^{\infty} \frac{a^{n+z}}{1-a^{n+z}},$ where $\psi_a(z)$ is known as the $a$-digamma function.\\ \item[$(ii)$] If $ p = {(\lambda^2 + \lambda + 1)}^{-1}$ then \[ \mathbb{P}(M \leq m) = \frac{(m+1)\lambda}{(m+1)\lambda +1}\] and \[ \mathbb{E}(M)= \infty.\] \end{itemize} \end{cor}
\subsection{Number of colonies in the process} \label{SS: NC}
\begin{teo} \label{T: NC} Consider the process ${\{\bbT^d; N\}}$. If \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] > \frac{d-1}{d} \end{displaymath} then \begin{displaymath} \mathbb{E}(I_d) \leq (1 - \beta)^{-1} \textrm { and } \end{displaymath} \begin{displaymath} \mathbb{E}(I_d) \geq \sum_{r=1}^{d+1} \left [[1 + r\theta] \dbinom{d+1}{r} \sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] + \mathbb{P}(N=0) \end{displaymath} \noindent where $\theta = (1 - \alpha)^{-1}.$ Besides that, if $\mathbb{E}(N) < 1$ (the subcritical case) \[ \lim_{d \to \infty} \mathbb{E}(I_d) = \frac{1}{1 - \mathbb{E}(N)}. \] \end{teo}
\section{Proofs} \label{S: Proofs}
In order to prove the main results we define auxiliary processes whose understanding will provide bounds for the processes defined in the introduction. In the first two auxiliary processes, denoted by ${U\{\bbT^d; N\}}$ and ${U\{\bbT^d_+; N\}}$, every time a colony collapses the survival individuals are only allowed to choose neighbour vertices which are further (compared to the origin) than the vertex where their colony was placed. In other words, an individual is not allowed to choose the neighbour vertex which has been already colonized. We refer to this process as \textit{Self Avoiding}. In the last two auxiliary processes, denoted by ${L\{\bbT^d; N\}}$ and ${L\{\bbT^d_+; N\}}$, the survival individuals are allowed to choose the neighbour vertex which has been already colonized, but those who do that are not able to colonize it as this place is considered hostile or infertile. We refer to this process as \textit{Move Forward or Die}. In both processes, $Y$, the number of new colonies at collapse times in a vertex $x$, equals the number of different neighbours chosen which are located further from the origin than $x$ is. Besides that, every new colony starts with only one individual.
\begin{prop} \label{P: CFGP} Consider a sequence of random variables $\{Y_d\}_{ d \in \mathbb{N}}$ whose sequence of probability generating functions is $\{g_{Y_d}(s)\}_{ d \in \mathbb{N}}$ and a random variable $Y$ such that $Y_d \overset{D}{\to} Y$. Then $g_{Y_d, m}(s)$, the $m-th$ composition of $g_{Y_d}(s)$, converges to $ g_{Y,m}(s)$, where $ g_{Y,m}(s)$ is the $m-th$ composition of $g_Y(s)$, the probability generating function of $Y$. \end{prop} \begin{proof}[Proof of Proposition~\ref{P: CFGP}] From the fact that $Y_d \overset{D}{\to} Y$ it follows that $\displaystyle g_Y(s) = \lim_{d \to \infty} g_{Y_d}(s)$.
\[ \lim_{d \to \infty} g_{Y_d, 2}(s) = \lim_{d \to \infty} g_{Y_d}(g_{Y_d}(s)) = \lim_{d \to \infty} \mathbb{E} \left [ \left ( \mathbb{E}(s^{Y_d}) \right)^{Y_d} \right ] \]
From the Dominated Convergence Theorem~\cite[Theorem 9.1 page 26]{Thorisson} (observe that $[\mathbb{E}(s^{Y_d})]^{Y_d} \in [0,1]$)
\[ \lim_{d \to \infty} \mathbb{E} \left [ [\mathbb{E}(s^{Y_d})]^{Y_d} \right ] = \mathbb{E} \left [ \lim_{d \to \infty} [\mathbb{E}(s^{Y_d})]^{Y_d} \right].
\]
Again, from the Dominated Convergence Theorem~\cite[Theorem 9.1 page 26]{Thorisson} (observe that $s^{Y_d} \in [0,1]$ and that $Y_d$ converges to $Y$ in distribution) $Y_d \ln \mathbb{E}(s^{Y_d})$ converges in distribution to $Y \ln \mathbb{E}(s^Y).$
So we conclude that \[ \mathbb{E} \left [ \lim_{d \to \infty} \left ( \mathbb{E}(s^{Y_d}) \right)^{Y_d} \right ] = \mathbb{E} \left [ [ \mathbb{E}(s^Y) ]^Y \right ]
\] and then
\[ \lim_{d \to \infty} g_{Y_d, 2}(s)= \lim_{d \to \infty} \mathbb{E} \left [ \left ( \mathbb{E}(s^{Y_d}) \right)^{Y_d} \right ] = \mathbb{E} \left [ [ \mathbb{E}(s^Y) ]^Y \right ] = g_{Y,2}(s).
\]
By induction one can prove that $\displaystyle\lim_{d \to \infty} g_{Y_d, m}(s) = g_{Y,m}(s).$ \end{proof}
\begin{prop} \label{P: ConvBranching} Let $\{Z_n \}_{n \geq 0}, \{Z_{1,n} \}_{n \geq 0}, \{Z_{2,n} \}_{n \geq 0}, \cdots$ be branching processes and $Y, Y_1, Y_2, \cdots $, respectively, their offspring distributions. Suppose that \begin{enumerate} \item[(i)] $Y_d \overset{D}{\to} Y$; \item[(ii)] $\mathbb{P}(Y_d \geq k) \leq \mathbb{P}(Y_{d+1} \geq k)$, for all $k$ and for all $d$; \end{enumerate} Then, if $\nu_d$ is the pro\-ba\-bi\-li\-ty of the extinction of the process $\{Z_{d,n} \}_{n \geq 0}$ and $\nu$ is the pro\-ba\-bi\-li\-ty of the extinction of the process $\{Z_{n} \}_{n \geq 0}$ we have that \[ \lim_{d \to \infty}\nu_d = \nu. \] \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: ConvBranching}] From \textit{(i)}, \textit{(ii)} and by using a coupling argument we have that
\begin{equation}\label{1}
\nu_d \ge \nu_{d+1} \ge \lim_{d \to \infty} \nu_d =: \nu_L \ge \nu. \end{equation}
From the fact that $Y_d \overset{D}{\to} Y$ and \cite[Theorem 25.8, page 335]{Billingsley} we have that
\begin{equation}\label{A} \phi_d(s):=\mathbb{E}[s^{Y_d}]\underset{d\to\infty}\longrightarrow\mathbb{E}[s^Y] := \phi(s). \end{equation}
Let $s\in[0,1]$ fixed and $f(y):=s^y, \ y\in\mathbb{N}$. Clearly, $f$ is non-increasing and therefore from \textit{(ii)} and \cite[equation (3.3), page 6]{Thorisson} we have that \begin{equation}\label{B} \phi_{d+1}(s)\leq \phi_{d}(s). \end{equation}
\noindent From (\ref{A}), (\ref{B}) and Dini's Theorem, we have that \begin{equation}\label{C} \phi_{d} (\cdot) \longrightarrow \phi(\cdot) \text{ uniformly.} \end{equation}
\noindent From (\ref{1}), (\ref{C}) and \cite[Exercise 9 - Chapter 7]{Rudin}: \begin{equation}\label{D} \lim_{d\to \infty}\phi_d(\nu_d)=\phi(\nu_L) \end{equation}
\noindent Finally, given that $\phi_d(\nu_d)=\nu_d$, from (\ref{D}) we obtain that \begin{equation}\label{E} \phi(\nu_L)=\nu_L. \end{equation}
\noindent From the convexity of $\phi(s)$ it follows that $\phi(s) = s$ (the fixed points of $\phi(\cdot)$) for at most two points in $[0,1]$. It is known that [see~\cite{Harris2002}, Theorem 6.1 and its proof] if $\nu<1$, the fixed points of $\phi(\cdot)$ are $s=\nu$ and $s=1$. If $\nu=1$, the unique solution is 1. So there are two cases to be considered.
\textbf{1.} If $\nu_d < 1$ for some $d\geq 1$, then from (\ref{1}) it follows that $\nu_L < 1$. If $\nu_L < 1$, it follows from (\ref{E}) that $\nu_L = \nu$.
\textbf{2.} If $\nu_d = 1$ for all $d\geq1$, then \[ \mathbb{E}(Y_d) \leq 1 \textrm { for all } d\geq1. \] Then, \begin{equation} \label{lim}
\lim_{d \to \infty} \mathbb{E}(Y_d) \leq 1. \end{equation}
From \textit{(ii)} we have that $\mathbb{P}(Y_d \geq k) \leq \mathbb{P}(Y \geq k)$, for all $k$ and all $d$. From \textit{(i)}, \textit{(ii)} and a non-standard version of Fatou's Lemma~\cite[page 230]{Ash} (applied to the sequence $a_{d,j} = j {\mathbb P}(Y_d=j)$), it follows that
\begin{equation} \label{TCD} \liminf_{d \to \infty} \mathbb{E}(Y_d) \ge \mathbb{E}(Y). \end{equation}
From (\ref{lim}) and (\ref{TCD}), it follows that $\nu=1.$ Then, from (\ref{E}) we have that $\nu_L=\nu.$ \end{proof}
\subsection{${U\{\bbT^d; N\}}$: The Self Avoiding model}
\begin{prop} \label{P: CCSRSR3} Consider the process ${U\{\bbT^d; N\}}$. $\mathbb{P}(V_d) > 0 $ if and only if \begin{displaymath} \mathbb{E} \left [ \left(\frac{d - 1}{d} \right)^N \right] < \frac {d-1}{d} \end{displaymath} \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: CCSRSR3}] First of all observe that for a fixed distribution for $N$, the processes ${U\{\bbT^d; N\}}$ and ${U\{\bbT^d_+; N\}}$ either both survive or both die. Next observe that the process ${U\{\bbT^d_+; N\}}$ behaves as a homogeneous branching process. Every vertex $x$ which is colonized produces $Y_d$ new colonies (whose distribution depends only on $N$) on the $d$ neighbour vertices which are located further from the origin than $x$ is. By conditioning one can see that \begin{equation} \label{E: MediaY}
\mathbb{E}(Y_d) = d \sum_{n=0}^{\infty} \left [\left(1 - \left(\frac{d-1}{d}\right)^n\right)\mathbb {P}(N = n) \right] = d \left[ 1- \mathbb{E}\left [\left(\frac{d-1}{d} \right)^N \right] \right]. \end{equation}
From the theory of homogeneous branching processes we see that ${U\{\bbT^d_+; N\}}$ (and also ${U\{\bbT^d; N\}}$) survives if and only if $\mathbb{E}[ (\frac{d - 1}{d} )^N ] < \frac {d-1}{d}.$ \end{proof}
\begin{prop} \label{P: CCSRSR1V} Consider the process ${U\{\bbT^d; N\}}$. Then \begin{displaymath} \mathbb{P}(V_d) = \sum_{r=1}^{d+1}\left [(1 - \psi^r)\binom{d+1} {r}\sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] \end{displaymath} where $\psi$, the extinction probability for the process ${U\{\bbT^d_+; N\}}$, is the smallest non-negative solution of \begin{displaymath} \sum_{y=0}^{d} \left[ s^y\binom{d}{y}\sum_{n=y}^{\infty}\frac{T(n,y)} {d^n}\mathbb{P}(N=n) \right] = s. \end{displaymath} On the sub critical regime, which means \begin{displaymath} \mathbb{E} \left [ \left(\frac{d - 1}{d} \right)^N \right] > \frac {d-1}{d}, \end{displaymath} it holds that \begin{displaymath} \mathbb{E}(I_d) = \sum_{r=1}^{d} \left [[1 + r\theta_u]\dbinom{d+1}{r} \sum_ {n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] + \mathbb{P}(N=0) \end{displaymath} where \begin{displaymath} \theta_u = \left \{ 1- d \left [ 1 - \mathbb{E} \left ( \left(\frac{d - 1}{d} \right)^N \right) \right ]\right\}^{-1}. \end{displaymath} \end{prop} \begin{proof}[Proof of Proposition~\ref{P: CCSRSR1V}] Let $Y_{d,R}$ be the number of colonies created at the neighbour vertices of the origin from its colony at the collapse time. Then \begin{displaymath}
\mathbb{P}(V_d) = \sum_{r=0}^{d+1} \mathbb{P}(V_d| Y_{d,R} = r) \mathbb{P} (Y_{d,R} = r) \end{displaymath} where \begin{displaymath} \mathbb{P}(Y_{d,R}=r) =\sum_{n=r}^{\infty} \left[ \mathbb{P}(N=n) \frac {\dbinom{d+1}{r}T(n,r)}{(d+1)^n} \right] \textrm { for } r =0,1,2, \cdots, d+1. \end{displaymath} because \begin{displaymath}
\mathbb{P}(Y_{d,R}=r | N = n) =\frac {\dbinom{d+1}{r}T(n,r)}{(d+1)^n}. \end{displaymath}
Given that $Y_{d,R} = r$ one has $r$ independent ${U\{\bbT^d_+; N\}}$ processes living on $r$ independent rooted trees. Every vertex $x$ which is colonized, on some of these trees, right after the collapse will have $N$ survival individuals. These individuals will produce $Y_d$ new colonies (whose distribution depends only on $N$) on the $d$ neighbour vertices which are located further from the origin than $x$ is. So we have that \begin{displaymath}
\mathbb{P}(Y_d=y | N = n) = \frac{\dbinom{d}{y}T(n,y)}{d^n}. \end{displaymath} From this, \begin{displaymath} \mathbb{P}(Y_d=y) =\sum_{n=y}^{\infty} \left[ \mathbb{P}(N=n) \frac {\dbinom{d}{y}T(n,y)}{d^n} \right] \textrm { for } y =0,1,2, \cdots, d, \end{displaymath} and \begin{displaymath} \mathbb{E}(s^{Y_d}) = \sum_{y=0}^{d} \left[ s^y\binom{d}{y}\sum_{n=y}^ {\infty}\frac{T(n,y)}{d^n}\mathbb{P}(N=n) \right]. \end{displaymath}
Then $\mathbb{P}(V_d^C | Y_{d,R} = r) = \psi^r \hbox{ for } r =0,1,2, \cdots, d+1$ and \begin{displaymath} \mathbb{P}(V_d) = \sum_{r=1}^{d+1}\left [(1 - \psi^r)\binom{d+1} {r}\sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] \end{displaymath} As for the second part of the proposition \begin{displaymath}
\mathbb{E}(I_d) = \sum_{r=0}^{d+1} \mathbb{E}(I_d| Y_{d,R} = r) \mathbb{P} (Y_{d,R}= r). \end{displaymath}
Besides that, $\mathbb{E}(I_d| Y_{d,R} = r) = r\theta_u + 1$ (see Stirzaker~\cite[Exercise 2b, page 280]{Stirzaker}). \end{proof}
\begin{prop} \label{P: CCSRSR1VL} Consider the process ${U\{\bbT^d; N\}}$. Then \begin{equation} \label{limPVD} \lim_{d \to \infty} \mathbb{P}(V_d) = 1 - \nu \end{equation} where $\nu$ is the smallest non-negative solution of $ \mathbb{E}(s^N) = s$. Besides that, if $\mathbb{E}(N) < 1$ (the subcritical case) then \begin{equation} \label{limEID} \lim_{d \to \infty} \mathbb{E}(I_d) = \frac{1}{1 - \mathbb{E}(N)}. \end{equation} \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: CCSRSR1VL}] In order to prove~(\ref{limPVD}) one has to apply Proposition~\ref{P: ConvBranching}, observing that $Y_d \overset{D}{\to} N$ and $Y_{d,R} \overset{D}{\to} N.$ Moreover to prove~(\ref{limEID}) observe that \[
\lim_{d \to \infty} \mathbb{E}(I_d) = \lim_{d \to \infty} \sum_{r=0}^{d+1} \mathbb{E}(I_d|Y_{d,R}=r){\mathbb P}(Y_{d,R}=r).\] As $Y_d \overset{D}{\to} N$ and $Y_{d,R} \overset{D}{\to} N $ then
\[ \lim_{d \to \infty} \mathbb{E}(I_d|Y_{d,R}=r) = \lim_{d \to \infty} r\theta_u +1 = \frac{r}{1-\mathbb{E}(N)}+1 \] and the result follows from the Dominated Convergence Theorem~\cite[Theorem 9.1 page 26]{Thorisson}. \end{proof}
\begin{prop} \label{P: LIMTSRSR1} Consider the process ${U\{\bbT^d_+; N\}}$. Assuming \begin{displaymath} \mathbb{E} \left [ \left(\frac{d - 1}{d} \right)^N \right] > \frac {d-1}{d} \end{displaymath} we have that \begin{displaymath} \frac{[1+D(1-\mu)][1-\mu^{m+1}]}{1+ D(1-\mu)-\mu^{m+1}} \leq \mathbb{P}(M_d \leq m) \leq \frac{[1 + \frac{\mu(1-\mu)}{B}](1-\mu^{m+1})}{ 1 + \frac{\mu(1-\mu)}{B} - \mu^{m+1}} \end{displaymath} and \begin{displaymath}
\frac{\mu^2}{2(B+ \mu)} + \mu(1-\mu)\frac{\ln \left[1 - \frac{\mu B}{B + \mu(1-\mu)}\right]}{B \ln \mu} \leq \mathbb{E}(M_d) \leq \frac{D \mu}{D+1} + D(1-\mu)\frac{\ln \left[1 - \frac{\mu}{1 + D(1-\mu)}\right]}{\ln \mu} \end{displaymath} where \begin{align*} \mu &= d \left [ 1 - \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] \right ] \\ D &= \max \left \{ 2; \frac{g^{\prime}(1)}{g^{\prime}(1) - \mathbb{P}(N \neq 0)} \right\} \\ B & = d(d-1)\left[ 1 - 2\mathbb{E} \left ( \left(\frac{d-1 }{d} \right)^N \right) + \mathbb{E} \left ( \left(\frac{d-2 }{d} \right)^N \right) \right]. \end{align*} Moreover, \begin{displaymath} M_d \overset{D}{\to} M, \end{displaymath} where $\mathbb{P}( M \leq m) = g_{m+1}(0)$, being $g(s) = \mathbb{E}(s^N) $ and $g_{m+1}(s) = \overset{ m+1 \textrm { times }} {g(g(\cdots g(s)) \cdots )}$. \end{prop} \begin{proof}[Proof of Proposition~\ref{P: LIMTSRSR1}]
Every vertex $x$ which is colonized produces $Y_d$ new colonies (whose distribution depends only on $N$) on the $d$ neighbour vertices which are located further from the origin than $x$ is. The random variable $Y_d$ can be seen as $Y_d = \sum_{i=1}^{d}I_i$ where for $i=1, \dots, d$ \begin{displaymath} I_i = \left\{ \begin{array}{ll} 1, & \hbox{the $i-th$ neighbour of $x$ is colonized} \\ 0, & \hbox{else.} \\ \end{array} \right. \end{displaymath} Defining $g_{Y_d}(s)$ as the generating function of $Y_d$ observe that equation~(\ref{E: MediaY}) gives $g_{Y_d}^{\prime}(1)$. Moreover \begin{displaymath} {Y_d}^2 = \left (\sum_{i=1}^{d}I_i \right )^2 = \sum_{i=1}^{d}I_i^2 + 2\sum_{1 \leq i < j \leq d}I_iI_j \end{displaymath} and \begin{displaymath}
\mathbb{E} \left({Y_d}^2 \right ) = d \mathbb{E} \left (I_1^2 \right ) + d(d-1) \mathbb{E}(I_1I_2) \end{displaymath} and finally \begin{displaymath}
\mathbb{E} \left({Y_d}^2 \right ) = d \left[ 1 - \mathbb{E} \left [\left( \frac{d-1}{d} \right)^N \right] \right] + d(d-1) \left [ 1 -2 \mathbb{E} \left [\left ( \frac{d-1}{d} \right )^N \right] + \mathbb{E} \left [\left ( \frac{d-2}{d} \right )^N \right] \right ]. \end{displaymath} Then \begin{align*} g_{Y_d}^{\prime \prime}(1) = \mathbb{E} \left(Y_d(Y_d-1) \right ) = d(d-1) \left [ 1 -2 \mathbb{E} \left [\left ( \frac{d-1}{d} \right )^N \right] + \mathbb{E} \left [\left ( \frac{d-2}{d} \right )^N \right] \right ]. \end{align*} Then the result follows from Theorem 1 page 331 in~\cite{AA}, where $ m = g_{Y_d}^{\prime}(1)$.
The convergence $M_d \overset{D}{\to} M$ follows from the fact that $Y_d \overset{D}{\to} N$ when $ d \to \infty$ and from Proposition \ref{P: CFGP}. \end{proof}
\subsection{{${L\{\bbT^d; N\}}$: Move Forward or Die}}
\begin{prop} \label{P: CCSRCR3} Consider the process ${L\{\bbT^d; N\}}$. $\mathbb{P}(V_d) > 0 $ if and only if \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] < \frac{d- 1}{d} \end{displaymath} \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: CCSRCR3}] First of all observe that for a fixed distribution for $N$, the processes ${L\{\bbT^d; N\}}$ and ${L\{\bbT^d_+; N\}}$ either both survive or both die. Next observe that the process ${L\{\bbT^d_+; N\}}$ behaves as a homogeneous branching process. Every vertex $x$ which is colonized produces a bunch of survival individuals right after the collapse which are willing to jump to one of the $d+1$ nearest neighbour vertices of $x$. All those which jump towards the origin get killed. So, $Y_d$ new colonies will be found on the $d$ neighbour vertices which are located further from the origin than $x$ is. By conditioning one can see that \begin{equation} \label{E: MediaYL}
\mathbb{E}(Y_d) = d\sum_{n=0}^{\infty} \left [\left(1 - \left(\frac{d}{d+1}\right)^n\right)\mathbb {P}(N = n) \right] = d \left[ 1- \mathbb{E} \left [\left(\frac{d}{d+1} \right)^N \right ]\right] \end{equation}
From the theory of homogeneous branching processes we see that ${L\{\bbT^d_+; N\}}$ (and also ${L\{\bbT^d; N\}}$) survives if and only if $\mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] < \frac{d-1}{d}.$ \end{proof}
\begin{prop} \label{P: CCSRCR1V} Consider the process ${L\{\bbT^d; N\}}$. Then \begin{displaymath} \mathbb{P}(V_d) = \sum_{r=1}^{d+1}\left [(1 - \rho^r)\binom{d+1} {r}\sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] \end{displaymath} where $\rho$, the extinction probability for the process ${L\{\bbT^d_+; N\}}$, is the smallest non-negative solution of \begin{displaymath} \sum_{y=0}^{d} \left[ s^y\binom{d}{y}\sum_{n=y}^{\infty}\frac{T(n,y)+ T(n,y+1)}{(d+1)^n}\mathbb{P}(N=n) \right] = s. \end{displaymath} On the subcritical regime, which means \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] > \frac{d- 1}{d}, \end{displaymath} it holds that \begin{displaymath} \mathbb{E}(I_d) = \sum_{r=1}^{d+1} \left [[1 + r\theta_l] \dbinom{d+1}{r} \sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] + \mathbb{P}(N=0) \end{displaymath} where \begin{displaymath} \theta_l = \left \{ 1- d \left [ 1 - \mathbb{E} \left ( \left(\frac{d }{d +1} \right)^N \right) \right ]\right\}^{-1}. \end{displaymath} \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: CCSRCR1V}] Let $Y_{d,R}$ be the number of colonies created at the neighbour vertices of the origin from its colony at the collapse time. Then \begin{displaymath}
\mathbb{P}(V_d) = \sum_{r=0}^{d+1} \mathbb{P}(V_d| Y_{d,R} = r) \mathbb{P} (Y_{d,R} = r) \end{displaymath} where \begin{displaymath} \mathbb{P}(Y_{d,R}=r) =\sum_{n=r}^{\infty} \left[ \mathbb{P}(N=n) \frac {\dbinom{d+1}{r}T(n,r)}{(d+1)^n} \right] \textrm { for } r =0,1,2, \cdots, d+1. \end{displaymath} because \begin{displaymath}
\mathbb{P}(Y_{d,R}=r | N = n) = \frac {\dbinom{d+1}{r}T(n,r)}{(d+1)^n}. \end{displaymath}
Given that $Y_{d,R} = r$ one has $r$ independent ${L\{\bbT^d_+; N\}}$ processes living on $r$ independent rooted trees. Every vertex $x$ which is colonized, on some of these trees, right after the collapse will have $N$ survival individuals. These individuals will produce $Y_d$ new colonies (whose distribution depends only on $N$) on the $d$ neighbour vertices which are located further from the origin than $x$ is. So we have that
\begin{displaymath}
\mathbb{P}(Y_d=y | N = n) = \frac{\dbinom{d}{y}[T(n,y)+ T(n,y+1)]}{(d +1)^n} \end{displaymath} From this, \begin{displaymath} \mathbb{P}(Y_d=y) =\sum_{n=y}^{\infty} \left[ \mathbb{P}(N=n) \frac {\dbinom{d}{y}[T(n,y)+ T(n,y+1)]}{(d+1)^n} \right] \textrm { for } y =0,1,2, \cdots, d. \end{displaymath} and \begin{displaymath} \mathbb{E}(s^{Y_d}) = \sum_{y=0}^{d}s^y \sum_{n=y}^{\infty} \left[ \mathbb{P}(N=n) \frac{\dbinom{d}{y}[T(n,y)+ T(n,y+1)]}{(d+1)^n} \right]. \end{displaymath}
Then $\mathbb{P}({V_d}^C | Y_{d,R} = r) = \rho^r,\ r =0,1,2, \cdots, d+1$ and \begin{displaymath} \mathbb{P}(V_d) = \sum_{r=1}^{d+1}\left [(1 - \rho^r)\binom{d+1} {r}\sum_{n=r}^{\infty}\frac{T(n,r)}{(d+1)^n}\mathbb{P}(N=n) \right] \end{displaymath} As for the second part of the proposition \begin{displaymath}
\mathbb{E}(I_d) = \sum_{r=0}^{d+1} \mathbb{E}(I_d| Y_{d,R} = r) \mathbb{P} (Y_{d,R} = r). \end{displaymath}
Besides that, $\mathbb{E}(I_d| Y_{d,R} = r) = r\theta_l + 1$ (see Stirzaker~\cite[Exercise 2b, page 280]{Stirzaker}). \end{proof}
\begin{prop} \label{P: RCR1VL2} Consider the process ${L\{\bbT^d; N\}}$. Then, \begin{equation} \label{limPVD2} \lim_{d \to \infty} \mathbb{P}(V_d) = 1 - \nu \end{equation} where $\nu$ is the smallest non-negative solution of $ \mathbb{E}(s^N) = s$. Besides that, if $\mathbb{E}(N) < 1$ (the subcritical case) then \begin{equation} \label{limEID2} \lim_{d \to \infty} \mathbb{E}(I_d) = \frac{1}{1 - \mathbb{E}(N)}. \end{equation} \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: RCR1VL2}] In order to prove~(\ref{limPVD2}) one has to apply Proposition~\ref{P: ConvBranching}, observing that $Y_d \overset{D}{\to} N$ and $Y_{d,R} \overset{D}{\to} N.$ For the proof of~(\ref{limEID2}) observe that \[
\lim_{d \to \infty} \mathbb{E}(I_d) = \lim_{d \to \infty} \sum_{r=0}^{d+1} \mathbb{E}(I_d|Y_{d,R}=r){\mathbb P}(Y_{d,R}=r).\] As $Y_d \overset{D}{\to} N$ and $Y_{d,R} \overset{D}{\to} N $ then
\[ \lim_{d \to \infty} \mathbb{E}(I_d|Y_{d,R}=r) = \lim_{d \to \infty} r \theta_l +1 = \frac{r}{1-\mathbb{E}(N)}+1. \] The result follows from the Dominated Convergence Theorem~\cite[Theorem 9.1 page 26]{Thorisson}. \end{proof}
\begin{prop} \label{P: LIMTSRCR1} Consider the process ${L\{\bbT^d_+; N\}}$. Assuming \begin{displaymath} \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] > \frac{d- 1}{d} \end{displaymath} We have that \begin{displaymath} \frac{[1+D(1-\mu)][1-\mu^{m+1}]}{1+ D(1-\mu)-\mu^{m+1}} \leq \mathbb{P}(M_d \leq m) \leq \frac{[1 + \frac{\mu(1-\mu)}{B}](1-\mu^{m+1})}{ 1 + \frac{\mu(1-\mu)}{B} - \mu^{m+1}} \end{displaymath} and \begin{displaymath}
\frac{\mu^2}{2(B+ \mu)} + \mu(1-\mu)\frac{\ln \left[1 - \frac{\mu B}{B + \mu(1-\mu)}\right]}{B \ln \mu} \leq \mathbb{E}(M_d) \leq \frac{D \mu}{D+1} + D(1-\mu)\frac{\ln \left[1 - \frac{\mu}{1 + D(1-\mu)}\right]}{\ln \mu} \end{displaymath} where \begin{align*} \mu &= d \left [ 1 - \mathbb{E} \left [ \left(\frac{d}{d+1} \right)^N \right] \right ] \\ D &= \max \left \{2; \frac{\mu}{\mu - \mathbb{P}(N \neq 0)} \right\} \\ B& = d(d-1)\left[ 1 - 2\mathbb{E} \left ( \left(\frac{d }{d+1} \right)^N \right) + \mathbb{E} \left ( \left(\frac{d-1 }{d+1} \right)^N \right) \right]. \end{align*} Besides that, \begin{displaymath} M_d \overset{D}{\to} M, \end{displaymath} where $\mathbb{P}( M \leq m) = g_{m+1}(0)$, being $g(s) = \mathbb{E}(s^N) $ and $g_{m+1}(s) = \overset{ m+1 \textrm { times }} {g(g(\cdots g(s)) \cdots )}$. \end{prop}
\begin{proof}[Proof of Proposition~\ref{P: LIMTSRCR1}] Every vertex $x$ which is colonized produces $Y_d$ new colonies (whose distribution depends only on $N$) on the $d$ neighbour vertices which are located further from the origin than $x$ is. The random variable $Y_d$ can be seen as $Y_d = \sum_{i=1}^{d}I_i$ where for $i=1, \dots, d$ \begin{displaymath} I_i = \left\{ \begin{array}{ll} 1, & \hbox{the $i-th$ neighbour of $x$ is colonized} \\ 0, & \hbox{else.} \\ \end{array} \right. \end{displaymath} Defining $g_{Y_d}(s)$ as the generating function of $Y_d$ observe that equation~(\ref{E: MediaYL}) gives $g_{Y_d}^{\prime}(1)$. Moreover \begin{displaymath}
{Y_d}^2 = \left (\sum_{i=1}^{d}I_i \right )^2 = \sum_{i=1}^{d}I_i^2 + 2\sum_{1 \leq i < j \leq d}I_iI_j \end{displaymath} and \begin{displaymath}
\mathbb{E} \left({Y_d}^2 \right ) = d \mathbb{E} \left (I_1^2 \right ) + d(d-1) \mathbb{E}(I_1I_2) \end{displaymath} and finally \begin{displaymath}
\mathbb{E} \left({Y_d}^2 \right ) = d \left[ 1 - \mathbb{E} \left [\left( \frac{d}{d+1} \right)^N \right] \right] + d(d-1) \left [ 1 -2 \mathbb{E} \left [\left ( \frac{d}{d+1} \right )^N \right] + \mathbb{E} \left [\left ( \frac{d-1}{d+1} \right )^N \right] \right ] \end{displaymath} Then \begin{align*} g_{Y_d}^{\prime \prime}(1) = \mathbb{E} \left(Y_d(Y_d-1) \right ) = d(d-1) \left [ 1 -2 \mathbb{E} \left [\left ( \frac{d}{d+1} \right )^N \right] + \mathbb{E} \left [\left ( \frac{d-1}{d+1} \right )^N \right] \right ] \end{align*} Then the result follows from Theorem 1 page 331 in~\cite{AA}, where $ m = g_{Y_d}^{\prime}(1)$.
The convergence $M_d \overset{D}{\to} M$ follows from the fact that $Y_d \overset{D}{\to} N$ when $ d \to \infty$ and from Proposition \ref{P: CFGP}. \end{proof}
\subsection{Proofs of the main results}
First we define a coupling between the processes ${\{\bbT^d; N\}}$ and ${L\{\bbT^d_+; N\}}$ in such a way that the latter is dominated by the former. Every colony in ${L\{\bbT^d_+; N\}}$ is associated to a colony in ${\{\bbT^d; N\}}$. As a consequence, if the process ${\{\bbT^d; N\}}$ dies out, the same happens to ${L\{\bbT^d_+; N\}}$.
At every collapse time at a vertex $x$ in the original model, a non-empty group of individuals that tries to colonize the neighbour vertex to $x$ which is closer to the origin than $x$ will create there a new colony provided that vertex is empty. In the model ${L\{\bbT^d_+; N\}}$ the same non-empty group of individuals that tries to colonize the same vertex, immediately dies.
Next we define a coupling between the processes ${\{\bbT^d; N\}}$ and ${U\{\bbT^{d+1}_+; N\}}$ in such a way that the former is dominated by the latter. Every colony in ${\{\bbT^d; N\}}$ can be associated to a colony in ${U\{\bbT^{d+1}_+; N\}}$. As a consequence if the process ${U\{\bbT^{d+1}_+; N\}}$ dies out, the same happens to ${\{\bbT^d; N\}}$.
At every collapse time at a vertex $x$ we associate the neighbour vertex to $x$ which is closer to the origin than $x$ to the extra vertex on the model ${U\{\bbT^{d+1}_+; N\}}$. In the original model, a non-empty group of individuals that tries to colonize the neighbour vertex to $x$ which is closer to the origin than $x$ will create there a new colony provided that vertex is empty. In the model ${U\{\bbT^{d+1}_+; N\}}$ the same non-empty group of individuals that tries to colonize the extra vertex, founds a new colony there.
\begin{proof}[Proof of Theorem~\ref{T: MCC1H}]
The result follows from the fact that the process ${\{\bbT^d; N\}}$ dominates the process ${L\{\bbT^d_+; N\}}$ and by its turn, is dominated by the process ${U\{\bbT^{d+1}_+; N\}}$, together with Propositions~\ref{P: CCSRSR3} and~\ref{P: CCSRCR3}.
\end{proof}
\begin{proof}[Proof of Corollary~\ref{C: MCC1HP}] Assuming $s=\frac{d}{d+1}$ in (\ref{eq: fgpP}) and applying Theorem~\ref{T: MCC1H} the result follows. \end{proof}
\begin{proof}[Proof of Corollary~\ref{C: MCC1HPY}] Assuming $s=\frac{d}{d+1}$ in (\ref{eq: fgpY}) and applying Theorem~\ref{T: MCC1H} the result follows. \end{proof}
\begin{proof}[Proof of Theorem~\ref{T: MCCHPE1}] The result follows from the fact that the process ${\{\bbT^d; N\}}$ dominates the process ${L\{\bbT^d_+; N\}}$ and by its turn, is dominated by the process ${U\{\bbT^{d+1}_+; N\}}$, together with Propositions~\ref{P: CCSRSR1V} and~\ref{P: CCSRCR1V}. \end{proof}
\begin{proof}[Proof of Theorem~\ref{T: MCCHPE1L}] The result follows from the fact that the process ${\{\bbT^d; N\}}$ dominates the process ${L\{\bbT^d_+; N\}}$ and by its turn, is dominated by the process ${U\{\bbT^{d+1}_+; N\}}$, together with Propositions~\ref{P: CCSRSR1VL} and~\ref{P: RCR1VL2}. \end{proof}
\begin{proof}[Proof of Corollary~\ref{C: MCC1HPS}] The proof is just a matter of computing the smallest positive fixed point for the generating function of $N$ (the smallest positive $s$ such that $\mathbb{E}(s^N) = s$) for $\mathbb{E}(s^N)$ given in~(\ref{eq: fgpP}). \end{proof}
\begin{proof}[Proof of Corollary~\ref{C: MCC1HPS2}] The proof is just a matter of computing the smallest positive fixed point for the generating function of $N$ (the smallest positive $s$ such that $\mathbb{E}(s^N) = s$) for $\mathbb{E}(s^N)$ given in~(\ref{eq: fgpY}).
\end{proof}
\begin{proof}[Proof of Theorem~\ref{T: LEIT}] The result follows from the fact that the process ${\{\bbT^d; N\}}$ dominates the process ${L\{\bbT^d_+; N\}}$ and by its turn, is dominated by the process ${U\{\bbT^{d+1}_+; N\}}$, together with Propositions~\ref{P: LIMTSRSR1} and~\ref{P: LIMTSRCR1}. \end{proof}
\begin{proof}[Proof of Corollary~\ref{C: CTM1}] The proof is just a matter of computing the generating function of $N$ (see Equation~(\ref{eq: fgpP})) on both values $s = \frac{d}{d+1}$ and $s = \frac{d-1}{d+1}$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{T: LEITL}] The result follows from the fact that the process ${\{\bbT^d; N\}}$ dominates the process ${L\{\bbT^d_+; N\}}$ and by its turn, is dominated by the process ${U\{\bbT^{d+1}_+; N\}}$, together with Propositions~\ref{P: LIMTSRSR1} and~\ref{P: LIMTSRCR1}. \end{proof}
\begin{defn} \label{fgpfl} A fractional linear generating function is a probability generating function of the form \begin{equation*} f(b,c;s) = 1 - \frac{b}{1-c} + \frac{bs}{1-cs}, \ 0 \leq s \leq 1. \end{equation*} where $0 \leq b \leq 1$, $0 \leq c \leq 1$, and $b+c \leq 1$. \end{defn}
\begin{proof}[Proof of Corollary~\ref{C: LEITL}] Observe that the generating function of $N$ given in (\ref{eq: fgpP}) is a fractional linear generating function. The results follow from equations (3.1) and (3.2) in \cite{AA} page 330 and from Theorem \ref{T: LEITL}. \end{proof}
\begin{proof}[Proof of Theorem~\ref{T: NC}] The result follows from the fact that the process ${\{\bbT^d; N\}}$ dominates the process ${L\{\bbT^d_+; N\}}$ and by its turn, is dominated by the process ${U\{\bbT^{d+1}_+; N\}}$, together with Propositions~\ref{P: CCSRSR1V} and~\ref{P: CCSRCR1V}. \end{proof}
\end{document} |
\begin{document}
\title{On Minimizing Tardy Processing Time, Max-Min Skewed Convolution, and Triangular Structured ILPs\thanks{Part of this research was done during the Discrete Optimization trimester program at Hausdorff Research Institute for Mathematics (HIM) in Bonn, Germany.}}
\begin{abstract} The starting point of this paper is the problem of scheduling $n$ jobs with processing times and due dates on a single machine so as to minimize the total processing time of tardy jobs, i.e., $1\,|\,|\,\sum p_j U_j$\xspace. This problem was identified by Bringmann et al.~(Algorithmica 2022) as a natural subquadratic-time special case of the classic $1\,|\,|\,\sum w_j U_j$\xspace problem, which likely requires time quadratic in the total processing time $P$, because of a fine-grained lower bound. Bringmann et al.~obtain their $\widetilde{O}(P^{7/4})$ time scheduling algorithm through a new variant of convolution, dubbed Max-Min Skewed Convolution, which they solve in $\widetilde{O}(n^{7/4})$ time. Our main technical contribution is a faster and simpler convolution algorithm running in $\widetilde{O}(n^{5/3})$ time. It implies an $\widetilde{O}(P^{5/3})$ time algorithm for $1\,|\,|\,\sum p_j U_j$\xspace, but may also be of independent interest.
Inspired by recent developments for the Subset Sum and Knapsack problems, we study $1\,|\,|\,\sum p_j U_j$\xspace parameterized by the maximum job processing time $p_{\max}$. With proximity techniques borrowed from integer linear programming (ILP), we show structural properties of the problem that, coupled with a new dynamic programming formulation, lead to an $\widetilde{O}(n+p_{\max}^3)$ time algorithm. Moreover, in the setting with multiple machines, we use similar techniques to get an $n \cdot p_{\max}^{O(m)}$ time algorithm for $Pm\,|\,|\,\sum p_j U_j$\xspace.
Finally, we point out that the considered problems exhibit a particular triangular block structure in the constraint matrices of their ILP formulations. In light of recent ILP research, a question that arises is whether one can devise a generic algorithm for such a class of ILPs. We give a negative answer to this question: we show that already a slight generalization of the structure of the scheduling ILP leads to a strongly NP-hard problem. \end{abstract} \pagebreak \section{Introduction} We consider the scheduling problem $1\,|\,|\,\sum p_j U_j$\xspace of minimizing the total sum of processing times of \emph{tardy} jobs, where we are given a set of $n$ jobs, numbered from $1$ to $n$, and each job $j$ has a processing time $p_j$ and a due date $d_j$. A schedule is defined by a permutation $\sigma: \{1, \ldots ,n \} \to \{1, \ldots ,n \}$ of the jobs, and, based on this schedule $\sigma$, the \emph{completion time} of jobs is defined. The completion time $C_j$ of a job $j$ is $C_j = \sum_{i \, : \, \sigma(i) \leq \sigma(j)} p_i$. The objective of the problem is to find a schedule $\sigma$ that minimizes the sum of processing times of jobs that miss their due date $d_j$ (called \emph{tardy}), i.e., \[
\min_{\sigma} \ \sum_{\mathclap{j \, : \, C_j > d_j}} p_j. \] Note that for tardy jobs we pay their penalty regardless of the actual completion time. Therefore, in the remainder of the paper, we will use the equivalent problem formulation that we have to select a subset of jobs $S\subseteq \{1,\ldots,n\}$ such that all selected jobs can be completed by their due dates. In this case it can be assumed that the jobs from $S$ are scheduled in earliest-due-date-first order~\cite{lawler1969functional_deadline}. One could also think of the problem as a scenario where the jobs that cannot be scheduled before their due dates on the available machine have to be outsourced somewhere else, and the cost of doing this is proportional to the total size of these outsourced jobs. These two properties are typical for hybrid cloud computing platforms.
For the case where all due dates are identical, the scheduling problem is equivalent to the classic Subset Sum problem. In the latter problem we are given a (multi-)set of numbers $\{a_1, \ldots , a_n\}$ and a target value $t$, and the objective is to find a subset of the numbers that sums up exactly to $t$, which is equivalent to the problem of finding the maximum subset sum that is at most $t$. With arbitrary due dates, $1\,|\,|\,\sum p_j U_j$\xspace behaves like a multi-level generalization of the Subset Sum problem, where upper bounds are imposed not only on the whole sum, but also on prefix sums.
In recent years there has been considerable attention on developing fast pseudopolynomial time algorithms solving the Subset Sum problem. Most prominent is the algorithm by Bringmann~\cite{bringmann2017subset_sum_near_linear} solving the problem in time $\widetilde{O}(t)$.\footnote{The $\widetilde O$ notation hides polylogarithmic factors.} The algorithm relies, among other techniques, on the use of Boolean convolution, which can be computed very efficiently in time $O(n \log n)$ by Fast Fourier Transform. Pseudopolynomial time algorithms have also been studied for a parameter $a_{\max} = \max_i a_i$, which is stronger, i.e., $a_{\max} \le t$. Eisenbrand and Weismantel~\cite{eisenbrand2018proximity} developed an algorithm for integer linear programming (ILP) that, when applied to Subset Sum, gives a running time of $O(n + a_{\max}^3)$, the first algorithm with a running time of the form $O(n + \mathrm{poly}(a_{\max}))$. Based on the Steinitz Lemma they developed proximity results, which can be used to reduce the size of $t$ and therefore solve the problem within a running time independent of the size of the target value $t$. The currently fastest algorithm in this regard is by Polak, Rohwedder and Węgrzycki~\cite{polak_subsetsum_an} with running time $\widetilde O(n+ a_{\max}^{5/3})$.
Given the recent attention on pseudopolynomial time algorithms for Subset Sum, it is not surprising that also~$1\,|\,|\,\sum p_j U_j$\xspace has been considered in this direction. Already in the late '60s, Lawler and Moore~\cite{lawler1969functional_deadline} considered this problem or, more precisely, the general form of $1\,|\,|\,\sum w_j U_j$\xspace, where the penalty for each job being tardy is not necessarily $p_j$, but can be an independent weight $w_j$. They solved this problem in time $O(nP)$, where $P$ is the sum of processing times over all jobs. Their algorithm follows from a (by now) standard dynamic programming approach. This general form of the problem is unlikely to admit better algorithms: under a common hardness assumption the running time of this algorithm is essentially the best possible. Indeed, assuming that $(\min,+)$-convolution cannot be solved in subquadratic time (a common hardness assumption in fine-grained complexity), there is no algorithm that solves $1\,|\,|\,\sum w_j U_j$\xspace in time $O(P^{2-\epsilon})$, for any $\epsilon>0$. This follows from the fact that $1\,|\,|\,\sum w_j U_j$\xspace generalizes Knapsack and the hardness already holds for Knapsack~\cite{cygan2019_minconv,KunnemannPS17_finegrainedDP}. Since the hardness does not hold for Subset Sum (the case of Knapsack where profits equal weights), one could hope that it also does not hold either for the special case of $1\,|\,|\,\sum w_j U_j$\xspace, where penalties equal processing times, which is precisely~$1\,|\,|\,\sum p_j U_j$\xspace. Indeed, Bringmann, Fischer, Hermelin, Shabtay, and Wellnitz~\cite{bringmann_deadline_scheduling} recently obtained a subquadratic algorithm with running time $\widetilde O(P^{7/4})$ for the problem. Along with this main result, they also consider other parameters: the sum of distinct due dates and the number of distinct due dates. See also Hermelin et al.~\cite{hermelin_deadline_scheduling} for more related results.
\paragraph*{Max-min skewed convolution.} Typically, a convolution has two input vectors $a$ and $b$ and outputs a vector $c$, where $c[k] = \oplus_{i + j = k} (a[i] \otimes a[j])$. Different kinds of operators $\oplus$ and $\otimes$ have been studied, and, if the operators require only constant time, then a quadratic time algorithm is trivial. However, if, for example, ``$\oplus$'' $=$ ``$+$'' (standard addition) and ``$\otimes$'' $=$ ``$\cdot$'' (standard multiplication), then Fast Fourier Transform can solve convolution very efficiently in time $O(n\log n)$. If, on the other hand, ``$\oplus$'' $=$ ``$\max$'' and ``$\otimes$'' $=$ ``$+$'', it is generally believed that no algorithm can solve the problem in time $O(n^{2 - \epsilon})$ for some fixed $\epsilon > 0$~\cite{cygan2019_minconv}. Convolution problems have been studied extensively in fine-grained complexity, partly because they serve as good subroutines for other problems, see also~\cite{BringmannKW19, Lincoln0W20} for other recent examples. The scheduling algorithm by Bringmann et al.~\cite{bringmann_deadline_scheduling} works by first reducing the problem to a new variant of convolution, called max-min skewed convolution, and then solving this problem in subquadratic time. Max-min skewed convolution is defined as the problem where we are given vectors $(a[0],a[1],\dotsc,a[n-1])$ and $(b[0],b[1],\dotsc,b[n-1])$ as well as a third vector $(d[0],d[1],\dotsc,d[2n-1])$ and our goal is to compute, for each $k = 0,1,\dotsc,2n-1$, the value \begin{equation*}
c[k] = \max_{i + j = k} \min\{a[i], b[j] + d[k]\}. \end{equation*} This extends the standard max-min convolution, where $d[k] = 0$ for all $k$. Max-min convolution can be solved non-trivially in time $\widetilde O(n^{3/2})$~\cite{maxminconv}. Bringmann et al.~develop an algorithm with running time $\widetilde O(n^{7/4})$ for max-min skewed convolution, when $d[k] = k$ for all $k$ (which is the relevant case for $1\,|\,|\,\sum p_j U_j$\xspace). By their reduction this implies an $\widetilde O(P^{7/4})$ time algorithm for~$1\,|\,|\,\sum p_j U_j$\xspace.
Our first result and our main technical contribution is a faster, simpler and more general algorithm for max-min skewed convolution. \begin{theorem} Max-min skewed convolution can be computed in time $O(n^{5/3} \log n)$. \end{theorem} As a direct consequence, we obtain an improved algorithm for~$1\,|\,|\,\sum p_j U_j$\xspace. \begin{corollary} The problem $1\,|\,|\,\sum p_j U_j$\xspace can be solved in $\widetilde O(P^{5/3})$ time, where $P$ is the sum of processing times. \end{corollary} Since convolution algorithms are often used as building blocks for other problems, we believe that this result is of independent interest, in particular, since our algorithm also works for arbitrary $d[k]$. From a technical point of view, the algorithm can be seen as a two-dimensional generalization of the approach used in the $\widetilde O(n^{3/2})$ time algorithm for max-min convolution~\cite{maxminconv}. This is quite different and more compact than the algorithm by Bringmann et al.~\cite{bringmann_deadline_scheduling}, which only relies on the ideas in~\cite{maxminconv} indirectly by invoking max-min convolution as a black box.
\paragraph*{Parameterization by the maximum processing time.} As mentioned before, Subset Sum has been extensively studied with respect to running times in the maximum value $a_{\max}$. Our second contribution is that we show that a running time in a similar spirit can also be achieved for~$1\,|\,|\,\sum p_j U_j$\xspace. Based on techniques from integer programming, namely, Steinitz Lemma type of arguments that have also been crucial for Subset Sum, we show new structural properties of solutions for the problem. Based on this structural understanding, we develop an algorithm that leads to the following result. \begin{theorem} The problem~$1\,|\,|\,\sum p_j U_j$\xspace can be solved in time $\widetilde O(n + p^3_{\max})$. \end{theorem} For the generalized problem $Pm\,|\,|\,\sum p_j U_j$\xspace with multiple machines, we present an algorithm relying on a different structural property. \begin{theorem} The problem~$Pm\,|\,|\,\sum p_j U_j$\xspace can be solved in time $n \cdot p_{\max}^{O(m)}$. \end{theorem} Similar algorithmic results with running times depending on the parameter $p_{\max}$ have been developed for makespan scheduling and minimizing weighted completion time (without due dates)~\cite{knop2017scheduling}.
\paragraph*{Integer programming generalization and lower bounds.}
The problem $1\,|\,|\,\sum p_j U_j$\xspace can be formulated as an integer linear program (ILP) with binary variables as follows:
\begin{equation} \text{maximize} \quad \sum_j p_jx_j \quad \text{subject to} \quad \begin{pmatrix} p_1 & 0 & \cdots & 0\\ p_1 & p_2 & \ddots & \\ \vdots & & \ddots & 0 \\ p_1 & p_2 & \cdots & p_n \end{pmatrix} \cdot x \le \begin{pmatrix}d_1\\ d_2 \\ \vdots \\ d_n\end{pmatrix}, \quad x \in \{0, 1\}^n. \label{ILP_problem} \end{equation} Here, variable $x_j$ indicates whether job $j$ is selected in the solution (or, in the alternative formulation, finished within its due date). The objective is to maximize the processing time of the selected jobs. The necessary and sufficient conditions are that the total volume of selected jobs with due date at most some time $t$ does not exceed $t$. It suffices to consider only constraints for $t$ equal to one of the jobs' due dates.
Clearly, the shape of the non-zeros in the constraint matrix of the ILP exhibits a very special structure, a certain triangular shape. In recent years there has been significant attention in the area of structured integer programming on identifying parameters and structures for which integer programming is tractable~\cite{DBLP:conf/soda/CslovjecsekEHRW21, CslovjecsekEPVW21, JansenKL21, klein_multistage, KouteckyLO18, EisenbrandHK18}. The most prominent example in this line of work are so-called $n$-fold integer programs (see~\cite{DBLP:conf/soda/CslovjecsekEHRW21} and references therein), which are of the form \[ \text{maximize} \quad c^T x \quad \text{subject to} \quad \begin{pmatrix} A_1 & A_2 & \cdots & A_n \\ B_1 & 0 & & 0\\ 0 & B_2 & \ddots & \\ \vdots & \ddots & \ddots & 0 \\ 0 & \cdots & 0 & B_n \end{pmatrix} \cdot x = b \quad \text{and} \quad \forall_i \ x_i \in \mathbb{Z}_{\geqslant 0}. \]
Here $A_i$ and $B_i$ are ``small'' matrices in the sense that it is considered acceptable for the running time to depend superpolynomially (e.g., exponentially) on their parameters. It is known that these types of integer programs can be solved in FPT time $\max_i f(A_i, B_i) \cdot \mathrm{poly}(|I|)$, where $f(A_i, B_i)$ is a function that depends only on the matrices $A_i$ and $B_i$ (potentially superpolynomially), but not on $n$ or any other parameters, and $\mathrm{poly}(|I|)$ is some polynomial in the encoding length of the input~\cite{DBLP:conf/soda/CslovjecsekEHRW21}. There exist generalizations of this result and also other tractable structures, but none of them captures the triangular shape of~(\ref{ILP_problem}).
We now consider a natural generalization of $n$-fold ILPs that also contains the triangle structure~(\ref{ILP_problem}), in the remainder referred to as \emph{triangle-fold} ILPs. \[ \text{maximize} \quad c^T x \quad \text{subject to} \quad \begin{pmatrix} A_1 & 0 & \cdots & 0\\ A_1 & A_2 & \ddots & \\ \vdots & & \ddots & 0 \\ A_1 & A_2 & \cdots & A_n \\ B_1 & 0 & & 0\\ 0 & B_2 & \ddots & \\ \vdots & \ddots & \ddots & 0 \\ 0 & \cdots & 0 & B_n \end{pmatrix} \cdot x \le b \quad \text{and} \quad \forall_i \ x_i \in \mathbb{Z}_{\geqslant 0}. \]
Let us elaborate on some of the design choices and why they come naturally. First, it is obvious that this structure generalizes $n$-fold, because constraints can be ``disabled'' by selecting a right-hand side of $\infty$ (or some sufficiently large number). After disabling a prefix of constraints, we end up with exactly the $n$-fold structure except that we have inequality ($\le$) constraints instead of equality constraints. Clearly, equality constraints can easily be emulated by duplicating and negating the constraints. The reason we chose inequality is that with equality constraints this ILP would directly decompose into independent small subproblems, which would be uninteresting from an algorithmic point of view and also not general enough to capture~(\ref{ILP_problem}). The matrices $B_1, \dotsc, B_n$ can, for example, be used to express lower and upper bounds on variables, such as the constraints $x_i \in \{0, 1\}$ in~(\ref{ILP_problem}).
With this formulation it is also notable that the scheduling problem on multiple machines, $Pm\,|\,|\,\sum p_j U_j$\xspace, can be modelled easily: instead of one decision variable $x_j \in \{0, 1\}$ for each job $j$, we introduce variables $x_{j,1},x_{j,2},\dotsc,x_{j,m} \in \{0, 1\}$, one for each machine. Then we use the constraints $p_1 x_{1,i} + p_2 x_{2,i} + \cdots + p_j x_{j,i} \le d_j$ for each machine $i$ and job $j$, which ensure that on each machine the selected jobs can be finished within their respective due date. Finally, we use constraints of the form $x_{j,1} + x_{j,2} + \cdots + x_{j,m} \le 1$ to guarantee that each job is scheduled on at most one machine. It can easily be verified that these constraints have the form of a triangle-fold where the small submatrices have dimension only dependent on $m$.
Given the positive results for $1\,|\,|\,\sum p_j U_j$\xspace and $Pm\,|\,|\,\sum p_j U_j$\xspace, one may hope to develop a general theory for triangle-folds. Instead of specific techniques for these (and potentially other) problems, in this way one could create general techniques that apply to many problems. For example, having an algorithm with the running time of the form $\max_i f(A_i, B_i) \cdot \mathrm{poly}(|I|)$ (like we have for $n$-folds), one would directly get an FPT algorithm for $Pm\,|\,|\,\sum p_j U_j$\xspace with parameters $p_{\max}$ and $m$. However, we show strong hardness results, which indicate that such a generalization is not possible. \begin{theorem}\label{th:trianglefold}
There exist fixed matrices $A_i, B_i$ of constant dimensions for which
testing feasibility of triangle-fold ILPs is NP-hard.
This holds even in the case when $A_1 = A_2 = \cdots = A_n$ and
$B_1 = B_2 = \cdots = B_n$. \end{theorem} This hardness result has a surprising quality: not only are there no FPT algorithms for triangle-folds, but also no XP algorithms, which is in stark contrast to other similarly shaped cases.
\section{Max-min skewed convolution} Recall that max-min skewed convolution is defined as the problem where, given vectors $a$, $b$ and $d$, we want to compute, for each $k = 0,1,\dotsc,2n-1$, the value \begin{equation*}
c[k] = \max_{i + j = k} \min\{a[i], b[j] + d[k]\}. \end{equation*} In this section we give an algorithm that solves this problem in time $O(n^{5/3}\log n)$. \begin{figure}
\caption{Matrix $M_k$ for $k = 6$ and vectors $a$ and $b$, which it is derived from.
The entry in cell for row $a[i]$ and column $b[j]$ describes whether there exist $i', j'$ with $i' + j' = k$, $a[i'] \ge a[i]$ and $b[j'] \ge b[j]$. For example, the entry for $a[2]$ and $b[2]$ is $1$ because $a[3] \geq a[2]$ and $b[3] \geq b[2]$.}
\label{fig:conv-preprocessing}
\end{figure} Assume without loss of generality that the values in $a$ and $b$ are all different. This can easily be achieved by multiplying all values (including those in $d$) by $4 n$ and adding a different number from $0,1,\dotsc,2n-1$ to each entry in $a$ and $b$. After computing the convolution we then divide the output numbers by $4n$ and round down to reverse the effect of these changes.
The algorithm consists of two phases. In the first phase we build a data structure to aid the computation of each $c[k]$. Then in the second phase we compute each element $c[k]$ separately with a binary search. The goal of the data structure is thus to efficiently answer queries of the form ``is $c[k] > v$?'' for some given $k$ and $v$.
First we introduce a quite excessive data structure. Building it explicitly is impossible within our running time goals, but it will help gain intuition. Suppose for every $k$ we have a matrix $M_k$ (see Figure~\ref{fig:conv-preprocessing}), where the rows correspond to the values $a[0],a[1],\dotsc,a[n-1]$, the columns correspond to $b[0],b[1],\dotsc,b[n-1]$, and the entries tell us whether, for some $a[i], b[j]$, there exist $i'$ and $j'$ with $i' + j' = k$, $a[i'] \ge a[i]$ and $b[j'] \ge b[j]$. This matrix contains enough information to answer the queries above: To determine whether $c[k] > v$, we need to check if there are $i', j'$ with $i' + j' = k$ and $a[i'] > v$ and $b[j'] > v - d[k]$. Choose the smallest $a[i]$ such that $a[i] > v$ and the smallest $b[j]$ such that $b[j] > v - d[k]$. If such $a[i]$ or $b[j]$ does not exist, then we already know that $c[k] \le v$. Otherwise, if both elements exist, the entry $M_k[a[i], b[j]]$ directly gives us the answer: If it is $0$, then for all $i' + j' = k$ either (1) $a[i'] < a[i]$ and hence (since $a[i]$ is the smallest element greater than $v$) $a[i'] \le v$, or (2) $b[j'] < b[j]$ and hence $b[j'] \le v - d[k]$; thus $c[k] \le v$. The converse is also true: Suppose that $c[k] \le v$. Then for all $i'$, $j'$ with $i' + j' = k$ we have that either $a[i'] \le v < a[i]$ or $b[j'] \le v - d[k] < b[j]$, and thus the matrix entry at $(a[i], b[j])$ is $0$.
It is obvious that we cannot afford to explicitly construct the matrices described above; their space alone would be cubic in $n$. Instead, we are only going to compute a few carefully chosen values of these matrices, that allow us to recover any other value in sublinear time. First, reorder the columns and rows so that the corresponding values ($a[i]$ or $b[j]$) are increasing. We call the resulting matrix $M^{\mathrm{sort}}_k$, see also Figure~\ref{fig:conv-preprocessing2}. Clearly, this does not change the information stored in it, but one can observe that now the rows and columns are each nonincreasing. We will compute only the values at intersections of every $\lfloor n/p \rfloor$-th row and every $\lfloor n / p \rfloor$-th column, for a parameter $p \in \mathbb N$, which is going to be specified later. This means we are computing a total of $O(n p^2)$ many values. Although computing a single entry of one of the matrices $M^{\mathrm{sort}}_k$ would require linear time, we will show that computing the same entry for all matrices (all $k$) can be done much more efficiently than in $O(n^2)$ time.
Indeed, for fixed $u, w \in \mathbb Z$, it takes only $O(n \log n)$ time to compute, for all $k$, whether there exist $i'$ and $j'$ with $i' + j' = k$, $a[i'] \ge u$ and $b[j'] \ge w$. This fact follows from a standard application of the Fast Fourier Transform (FFT): we construct two vectors $a', b'$ where \begin{equation*}
a'[i] = \begin{cases}
1 &\text{ if } a[i] \ge u, \\
0 &\text{ otherwise,}
\end{cases}
\quad
b'[i] = \begin{cases}
1 &\text{ if } b[j] \ge w, \\
0 &\text{ otherwise,}
\end{cases} \end{equation*} and compute their $(+, \cdot)$-convolution with FFT. For non-zero output entries there exist $i', j'$ as above, and for zero entries they do not. It follows that computing the selected $O(n p^2)$ values of $M^{\mathrm{sort}}_0, M^{\mathrm{sort}}_1, \ldots, M^{\mathrm{sort}}_{2n-2}$ can be done in time $O(p^2 \cdot n \log n)$. \begin{figure}\label{fig:conv-preprocessing2}
\end{figure}
We now consider the second phase, where the algorithm computes $c[k]$, for each $k$ separately, using binary search. To this end, we design a procedure (see Algorithm~\ref{alg:bs}) to determine, given $k \in \{0,1,2,\ldots,2n-2\}$ and $v \in \mathbb{Z}$, whether $c[k] > v$. The procedure will run in $O(n/p)$ time. As described in the beginning of the proof, this corresponds to computing the value of a specific cell $(a[i], b[j])$ in the matrix $M^{\mathrm{sort}}_k$. If this cell happens to be among the precomputed values, we are done. Otherwise, consider the $\lfloor n/p \rfloor \times \lfloor n/p \rfloor$ submatrix that encloses $(a[i], b[j])$ and whose corners are among the precomputed values. If the lower right corner $(a[i''], b[j''])$ is equal to one, then entry $(a[i], b[j])$ must also be one by monotonicity. Hence, assume otherwise. The entry $(a[i], b[j])$ could still be one, but this happens only if there is a \emph{witness} $(i', j')$ that satisfies $i' + j' = k$ and \begin{enumerate}
\item $a[i''] > a[i'] \ge a[i]$ and $b[j'] \ge b[j]$, or
\item $b[j''] > b[j'] \ge b[j]$ and $a[i'] \ge a[i]$. \end{enumerate} The number of possible witnesses for the first case is bounded by $n / p$, since there are only $\lfloor n/p \rfloor$ many values $a[i']$ between $a[i]$ and $a[i'']$ (since they are in the same $\lfloor n/p \rfloor \times \lfloor n/p \rfloor$ submatrix) and the corresponding $j'$ is fully determined by $i'$. Likewise, there are at most $n / p$ many possible witnesses for the second case. Hence, we can compute the value of the cell $(a[i], b[j])$ by exhaustively checking all these candidates for a witness, i.e., \[\{ (i', k-i') \mid a[i'] \in [a[i], a[i'']) \} \cup \{ (k-j', j') \mid b[j'] \in [b[j], b[j'']) \}.\]
\begin{algorithm} \caption{Procedure used in binary search to check whether $c[k] > v$.} \label{alg:bs} $\mathrlap{i}\hphantom{j} \longleftarrow \argmin \{a[i] \mid a[i] > v\}$\; $j \longleftarrow \argmin \{b[j] \mid b[j] > v - d[k]\}$\; \lIf{$i$ \rm{or} $j$ \rm{does not exist}}{\KwRet{\textsc{no}}} $(a[i''], b[j'']) \longleftarrow$ the closest precomputed cell below and to the right of $M^{\mathrm{sort}}_k[a[i], b[j]]$\; \lIf{$M^{\mathrm{sort}}_k[a[i''],b[j'']] = 1$}{\KwRet{\textsc{yes}}} \ForEach{$i' \in \{i' \mid a[i] \le a[i'] < a[i'']\}$}{
\lIf{$b[k - i'] \ge b[j]$}{\KwRet{\textsc{yes}}}} \ForEach{$j' \in \{j' \mid b[j] \le b[j'] < b[j'']\}$}{
\lIf{$a[k - j'] \ge a[i]$}{\KwRet{\textsc{yes}}}} \KwRet{\textsc{no}}\; \end{algorithm}
Finally, let us note that, for a fixed $k$, the value of $c[k]$ has to be among the following $2n$ values: $a[0], a[1], \ldots, a[n-1], b[0] + d[k], b[1] + d[k], \ldots, b[n-1] + d[k]$. A careful binary search only over these values makes the number of iterations logarithmic in $n$, and not in the maximum possible output value. Indeed, after the preprocessing, we already have access to sorted versions of the lists $a[0],a[1],\dotsc,a[n-1]$ and $b[0],b[1],\dotsc,b[n-1]$ and, in particular, to a sorted version of $b[0] + d[k], b[1] + d[k], \ldots, b[n-1] + d[k]$ (since this is just a constant offset of the latter list). We then first binary search for the lowest upper bound of $c[k]$ in $a[0],a[1],\dotsc,a[n-1]$ and then in $b[0] + d[k], b[1] + d[k], \ldots, b[n-1] + d[k]$ in order to determine the exact value of $c[k]$.
The total running time of both phases is $O(p^2 \cdot n \log(n) + n \cdot \log(n) \cdot n/p)$. We set $p = n^{1/3}$ in order to balance the two terms, and this gives the desired $O(n^{5/3} \log n)$ running time.
\section{Parameterizing by the maximum processing time} In this section we study algorithms for~$1\,|\,|\,\sum p_j U_j$\xspace with running time optimized for $p_{\max}$ and $n$ instead of $P$. We present an algorithm with a running time of $\widetilde O(n + p_{\max}^3)$. Such a running time is particularly appealing when $n$ is much larger than $p_{\max}$. This complements Lawler and Moore's algorithm with complexity $O(nP) \le O(n^2 p_{\max})$, which is fast when $n$ is small. Interestingly, in both cases we have roughly cubic dependence on $n + p_{\max}$.
Our result is based on a central structural property that we prove for an optimal solution and a sophisticated dynamic programming algorithm that recovers solutions of this form. The structural property uses exchange arguments that are similar to an approach used by Eisenbrand and Weismantel~\cite{eisenbrand2018proximity} to prove proximity results in integer programming via the Steinitz Lemma.
In the following, a solution is characterized by the subset of jobs that are finished within their due date. Once such a subset is selected, jobs in this subset can be scheduled in non-decreasing order sorted by their due date. In the remainder we assume that $d_1 \le d_2 \le \cdots \le d_n$. This sorting can be done efficiently: we may assume without loss of generality that all due dates are at most $np_{\max}$. Then radix sort requires only time $O(n \log_n (n p_{\max})) \le O(n + p_{\max})$. \begin{lemma}\label{lem:pmax-structur}
There exists an optimal solution $S\subseteq \{1,2,\dotsc,n\}$
such that for all $i=1,2,\dotsc,n$ it holds that either
\begin{enumerate}
\item $|\{1,2,\dotsc,i\} \cap S| < 2 p_{\max}$, or
\item $|\{i + 1, i + 2,\dotsc, n\} \setminus S| < 2 p_{\max}$.
\end{enumerate} \end{lemma} \begin{proof} Let $S$ be an optimal solution that does not satisfy this property for some $i$. It is easy to see that if we find some $A \subseteq S \cap \{1,2,\dotsc,i\}$ and $B \subseteq \{i+1,i+2,\dotsc,n\} \setminus S$ with the same volumes (that is, $\sum_{j\in A} p_j = \sum_{j\in B} p_j$), then $(S \setminus A) \cup B$ would form an optimal solution that is closer to satisfying the property. Note that the solution $(S \setminus A) \cup B$ is feasible since the due dates of the jobs in $B$ are strictly larger than the due dates of the jobs in $A$.
Since both 1.~and 2.~are false for $i$, we have that $A' = S \cap \{1,2,\dotsc,i\}$ and $B' = \{i+1, i+2,\dotsc,n\} \setminus S$ both have cardinality at least $2 p_{\max}$. We construct $A$ and $B$ algorithmically as follows. Starting with $A_1 = B_1 = \emptyset$ we iterate over $k = 1,2,\dotsc, 2 p_{\max}$. In each iteration we check whether $\sum_{j\in A_k} p_j - \sum_{j\in B_k} p_j$ is positive or not. If it is positive, we set $B_{k+1} = B_k \cup \{j\}$ for some $j\in B'\setminus B_k$ and $A_{k+1} = A_k$; otherwise we set $A_{k+1} = A_k \cup \{j\}$ for some $j \in A'\setminus A_k$. The difference $\sum_{j\in A_k} p_j - \sum_{j\in B_k} p_j$ is always between $-p_{\max}$ and $p_{\max} - 1$. Hence, by the pigeonhole principle there are two indices $k < h$ such that $\sum_{j\in A_k} p_j - \sum_{j\in B_k} p_j = \sum_{j\in A_h} p_j - \sum_{j\in B_h} p_j$. Since $A_k \subseteq A_h$ and $B_k \subseteq B_h$ by construction, we can simply set $A = A_h \setminus A_k$ and $B = B_h \setminus B_k$ and it follows that $\sum_{j\in A} p_j = \sum_{j\in B} p_j$. \end{proof}
\begin{corollary}\label{cor:pmax-structur}
There exists an optimal solution $S\subseteq \{1,2,\dotsc,n\}$ and an index $i\in\{1,2,\dotsc,n\}$
such that
\begin{enumerate}
\item $|\{1,2,\dotsc,i\} \cap S| \le 2 p_{\max}$, and
\item $|\{i + 1, i + 2,\dotsc, n\} \setminus S| < 2 p_{\max}$.
\end{enumerate} \end{corollary} \begin{proof}
Consider the solution $S$ as in Lemma~\ref{lem:pmax-structur} and let $i$ be the maximum index such that $|S \cap \{1,2,\dotsc,i\}| < 2 p_{\max}$. If $i = n$ the corollary's statement follows. Otherwise, we set $i' = i+1$. Then
$|S \cap \{1,2,\dotsc,i'\}| = 2 p_{\max}$ and by virtue of Lemma~\ref{lem:pmax-structur} it must hold that
$|\{i' + 1, i' + 2,\dotsc, n\} \setminus S| < 2 p_{\max}$. \end{proof}
Although we do not know the index $i$ from Corollary~\ref{cor:pmax-structur}, we can compute efficiently an index, which is equally good for our purposes. \begin{lemma}\label{lem:pmax-idx} In time $O(n)$ we can compute an index $\ell$ such that there exists an optimal solution $S$ with \begin{enumerate}
\item $p(\{1,2,\dotsc,\ell\} \cap S) \le O(p^2_{\max})$, and
\item $p(\{\ell+1,\ell+2,\dotsc,n\} \setminus S) \le O(p^2_{\max})$, \end{enumerate} where $p(X) = \sum_{i \in X} p_i$, for a subset $X \subseteq \{1, \ldots , n \}$. \end{lemma} \begin{proof} For $j=1,2,\dotsc,n$ let $t_j$ denote the (possibly negative) maximum time such that we can schedule all jobs $j,j+1,\dotsc,n$ after $t_j$ and before their respective due dates. It is easy to see that \begin{equation*}
t_j = \min_{j' \ge j} \bigg( d_{j'} - \sum_{j \le k \le j'} p_k\bigg) \ . \end{equation*} It follows that $t_{j} = \min\{t_{j+1}, d_j\} - p_{j}$ and thus we can compute all values $t_j$ in time $O(n)$. Now let $h$ be the biggest index such that $t_h < 0$. If such an index does not exist, it is trivial to compute the optimal solution by scheduling all jobs. Further, let $k < h$ be the biggest index such that $\sum_{j=k}^h p_j > 2 p_{\max}^2$ or $k = 1$ if no such index exists. Similarly, let $\ell > h$ be the smallest index such that $\sum_{j=h}^{\ell} p_j > 4 p_{\max}^2$ or $\ell = n$ if this does not exist. Let $i$ be the index as in Corollary~\ref{cor:pmax-structur}. We will now argue that either $k \le i \le \ell$ or $\ell$ satisfies the claim trivially. This finishes the proof, since $p(\{i, i+1, \dotsc, \ell\}) \le p(\{k, k+1, \dotsc, \ell\}) \le O(p_{\max}^2)$ and thus the properties in this lemma, which $i$ satisfies due to Corollary~\ref{cor:pmax-structur}, transfer to $\ell$.
As for $k\le i$, we may assume that $k > 1$ and therefore $\sum_{j=k}^{h} p_j > 2 p^2_{\max}$. Notice that there must be jobs in $\{k,k+1,\dotsc,n\}$ of total volume more than $2 p_{\max}^2$, which are not in $S$. This is because $t_k < t_h - 2 p^2_{\max} < - 2 p^2_{\max}$. On the other hand, from Property~2 of Corollary~\ref{cor:pmax-structur} it follows that $p(\{i+1,i+2,\dotsc,n\}\setminus S) < 2 p_{\max}^2$. This implies that $k\le i$.
We will now show that $i \le \ell$ or $\ell$ satisfies the lemma's statement trivially. Assume w.l.o.g.\ that $\ell < n$ and thus $\sum_{j=h}^{\ell} p_j > 4 p^2_{\max}$. Notice that $t_\ell \ge t_h + 4 p_{\max}^2 \ge 2 p_{\max}^2 + p_{\max}$. If $S \cap \{1,2,\dotsc,\ell\}$ contains jobs of a total processing time more than $2 p_{\max}^2$, then $i\le \ell$ follows directly because of Corollary~\ref{cor:pmax-structur}. Conversely, if the total processing time is at most $2 p_{\max}^2$, then we can schedule all these jobs and also all jobs in $\{\ell + 1, \ell + 2, \dotsc, n\}$ (since $t_{\ell} \ge 2 p_{\max}^2$), which must be optimal. Hence, $\ell$ satisfies the properties of the lemma. \end{proof} An immediate consequence of the previous lemma is that we can estimate the optimum up to an additive error of $O(p_{\max}^2)$. We use this to perform a binary search with $O(\log(p_{\max}))$ iterations. In each iteration we need to check if there is a solution of at least a given value $v$. For this it suffices to devise an algorithm that decides whether there exists a solution that runs a subset of jobs without any idle time (between $0$ and $d_n$) or not. To reduce to this problem, we add dummy jobs with due date $d_n$ and total processing time $d_n - v$; more precisely, $\min \{p_{\max}, d_n - v\}$ many jobs with processing time $1$ and $\max\{0, \lceil (d_n - v - p_{\max}) / p_{\max} \rceil\}$ many jobs with processing time $p_{\max}$. Using that w.l.o.g.\ $d_n \le n p_{\max}$, we can bound the total number of added jobs by $O(n + p_{\max})$, which is insignificant for our target running time. If there exists a schedule without any idle time, we remove the dummy jobs and obtain a schedule where jobs with total processing time at least $d_n - (d_n - v) = v$ finish within their due dates. If on the other hand there is a schedule for a total processing time $v' \ge v$, we can add dummy jobs of size exactly $d_n - v'$ to arrive at a schedule without idle time. 
For the remainder of the section we consider this decision problem. We will create two data structures that allow us to efficiently query information about the two partial solutions from Lemma~\ref{lem:pmax-idx}, that is, the solution for jobs $1,2,\dotsc,\ell$ and that for $\ell+1,\ell+2,\dotsc,n$.
\begin{lemma}\label{lem:pmax-dyn1}
In time $O(n + p_{\max}^3 \log(p_{\max}))$ we can compute
for each $T = 1,2,\dotsc, O(p_{\max}^2)$ whether there is a subset
of $\{1,2,\dotsc,\ell\}$ which can be run exactly during $[0, T]$. \end{lemma} \begin{proof} We will efficiently compute for each $T = 0,1,\dotsc,O(p_{\max}^2)$ the smallest index $i_T$ such that there exists some $A_T\subseteq \{1,2,\dotsc,i_T\}$, which runs on the machine exactly for the time interval $[0,T]$. Then it only remains to check if $i_T \le \ell$ for each $T$.
Consider $i_T$ for some fixed $T$. Then $i_T\in A_T$, since otherwise $i_T$ would not be minimal. Further, we can assume that job $i_T$ is processed exactly during $[T - p_{i_T}, T]$, since $i_T$ has the highest due date among jobs in $A_T$. It follows that $i_{T'} < i_T$ for $T' = T - p_{i_T}$. Using this idea we can calculate each $i_T$ using dynamic programming. Assume that we have already computed $i_{T'}$ for all $T' < T$. Then to compute $i_T$ we iterate over all processing times $k=1,2,\dotsc,p_{\max}$. We find the smallest index $i$ such that $p_i = k$, $i_{T - k} < i$, and $d_i \ge T$. Among the indices we get for each $k$ we set $i_T$ as the smallest, that is, \[i_T = \min_{k \in [p_{\max}]} \min \, \{ i \mid p_i = k, i > i_{T-k}, d_i \ge T\} . \] The inner minimum can be computed using a binary search over the $O(p_{\max}^2)$ jobs with $p_i = k$ and smallest due date, since the values of $i_T$ do not change when restricting to the $O(p_{\max}^2) \ge T$ jobs with smallest due date. It follows that computing $i_T$ can be done in $O(p_{\max} \cdot \log(p_{\max}))$. Computing $i_T$ for all $T$ requires time $O(p_{\max}^3 \cdot \log(p_{\max}))$. \end{proof}
\begin{lemma}\label{lem:pmax-dyn2}
In time $O(n + p_{\max}^3 \log(p_{\max}))$ we can compute
for each $T = 1,2,\dotsc, O(p_{\max}^2)$ whether there is some
$A\subseteq\{\ell+1,\ell+2,\dotsc,n\}$,
such that $\{\ell+1,\ell+2,\dotsc,n\} \setminus A$
can be run exactly during $[d_n + T - \sum_{j=\ell+1}^n p_j, d_n]$. \end{lemma} \begin{proof} For every $T = 1,\dotsc,O(p_{\max}^2)$ determine $i_T$, which is the largest value such that there exists a set $A_T \subseteq \{i_T,i_T + 1,\dotsc,n\}$ with $\sum_{j\in A_T} p_j = T$, such that $\{i_T+1,i_T+2,\dotsc,n\} \setminus A_T$ can be run exactly during the interval $[d_n + T - \sum_{j=i_T+1}^n p_j, d_n]$. Consider one such $i_T$ and corresponding $A_T$. Let $j\in A_T$ have the minimal due date. Then for $T' = T - p_j$ we have $i_{T'} > i_T$. To compute $i_T$ we can proceed as follows. Guess the processing time $k$ of the job in $A_T$ with the smallest due date. Then find the maximum $j$ with $p_j = k$ and $j < i_{T - k}$. Finally, verify that there is indeed a schedule. For this consider the schedule of all jobs $\{\ell+1,\ell+2,\dotsc,n\}$, where we run them as late as possible (a schedule that is not idle in $[d_n - \sum_{i=\ell+1}^n p_i, d_n]$). Here, we look at the ``earliness'' of each job in $\{\ell+1,\dotsc,j-1\}$. This needs to be at least $T$ for each of the jobs.
We can check the earliness efficiently by precomputing the mentioned schedule and building a Cartesian tree with the earliness values. This data structure can be built in $O(n)$ and allows us to query for the minimum value of an interval in constant time. \end{proof} We can now combine Lemmas~\ref{lem:pmax-idx}-\ref{lem:pmax-dyn2} to conclude the algorithm's description. Suppose that $S$ is a set of jobs corresponding to a solution, where the machine is busy for all of $[0, d_n]$. By Lemma~\ref{lem:pmax-idx} there exist some $T, T' \le O(p_{\max}^2)$ such that $p(\{1,2,\dotsc,\ell\} \cap S) = T$ and $p(\{\ell+1,\ell+2,\dotsc,n\} \setminus S) = T'$. In particular, the machine will be busy with jobs of $\{1,2,\dotsc,\ell\} \cap S$ during $[0, T]$ and with jobs of $\{\ell + 1, \ell + 2,\dotsc,n\} \cap S$ during $[T, d_n]$. The choice of $T$ and $T'$ implies that \begin{equation*}
d_n = p(S) = p(\{1,\dotsc,\ell\} \cap S) + p(\{\ell + 1,\dotsc,n\} \cap S) = T + p(\{\ell + 1, \dotsc, n\}) - T' . \end{equation*} Hence, $T = d_n + T' - p(\{\ell + 1, \dotsc, n\})$. In order to find a schedule where the machine is busy between $[0, d_n]$ we proceed as follows. We iterate over every potential $T = 0,1,\dotsc,O(p_{\max}^2)$. Then we check using Lemma~\ref{lem:pmax-dyn1} whether there exists a schedule of jobs in $\{1,2,\dotsc,\ell\}$ where the machine is busy during $[0, T]$. For the correct choice of $T$ this is satisfied. Finally, using Lemma~\ref{lem:pmax-dyn2} we check whether there is a subset of jobs in $\{\ell+1,\ell+2, \dotsc, n\}$ that can be run during $[T, d_n] = [d_n + T' - p(\{\ell + 1, \dotsc, n\}), d_n]$. The preprocessing of Lemmas~\ref{lem:pmax-idx}-\ref{lem:pmax-dyn2} requires time $O(n + p^3_{\max} \log(p_{\max}))$. Then determining the solution requires only $O(p_{\max}^2)$, which is dominated by the preprocessing. Finally, notice that we lose another factor of $\log(p_{\max})$ due to the binary search.
\section{Multiple machines} In this section we present an algorithm that solves~$Pm\,|\,|\,\sum p_j U_j$\xspace in $n \cdot p^{O(m)}_{\max}$ time. We assume that jobs are ordered non-decreasingly by due dates (using radix sort as in the previous section), and that all considered schedules use earliest-due-date-first on each machine. We will now prove a structural lemma that enables our result. \begin{lemma}\label{lem:Pm}
There is an optimal schedule such that
for every job $j$ and every pair of machines $i, i'$ we have
\begin{equation}\label{eq:machines-balance}
|\ell_i(j) - \ell_{i'}(j)| \le O(p_{\max}^2) ,
\end{equation}
where $\ell_i(j)$ denotes the total volume of jobs $1,2,\dotsc,j$ scheduled on machine $i$.
Furthermore, the schedule satisfies, for every $j$ and $i$, that
\begin{equation}\label{eq:machines-lb}
\ell_i(j) \le \min_{j' > j}\left\{ d_{j'} - \frac{1}{m} \sum_{j''=j+1}^{j'} p_{j''} \right\} + O(p_{\max}^2) .
\end{equation} \end{lemma} \begin{proof}
The proof relies on swapping arguments.
As a first step we make sure that between $\ell_1(n),\dotsc,\ell_m(n)$
the imbalance is at most $p_{\max}$. If it is bigger, we simply move the
last job from the higher loaded machine to the lower loaded machine.
Now we augment this solution to obtain that, for every two machines $i$, $i'$ and every~$j$, we satisfy~\eqref{eq:machines-balance}.
Let $j$, $i$, and $i'$ be such that $\ell_i(j) > \ell_{i'}(j) + 3 p_{\max}^2$.
This implies that on $i'$ there is a set of jobs $A$
with due dates greater than $d_j$ that is processed between $\ell_{i'}(j)$
and at least $\ell_i(j) - p_{\max}$ (the latter because of the balance of total machine loads). Thus, $p(A) \ge 3 p^2_{\max} - p_{\max}$ and consequently $|A| \ge 2 p_{\max}$.
On $i$ let $B$ be the set of jobs with due dates at most
$d_j$ that are fully processed
between $\ell_{i'}(j)$ and $\ell_i(j)$. Then also
$p(B) \ge 3 p^2_{\max} - p_{\max}$ and $|B| \ge 2 p_{\max}$.
By the same arguments
as in Lemma~\ref{lem:pmax-structur}, there are non-empty
subsets $A'\subseteq A$ and $B' \subseteq B$ with equal processing time.
We swap $A'$ and $B'$, which improves the balance between $\ell_i(j)$
and $\ell_{i'}(j)$.
We now need to carefully apply these swaps so that after a finite number of them we have no large imbalances anymore.
We start with low values of $j$, balance all $\ell_i(j)$, and then increase $j$.
However, it might be that balancing
$\ell_i(j)$ and $\ell_{i'}(j)$ affects the values $\ell_i(j')$ for $j' < j$. To avoid this, we will use some buffer space. Notice that because
\begin{equation*}
\sum_i \ell_i(j) = \sum_{j' : \text{$j'$ is scheduled and } j' \le j} p_{j'} ,
\end{equation*}
it follows that a swap does not change the average $\ell_i(j)$ over all $i$.
If we already balanced for all $j' \le j$,
then we will skip some values $j'' \ge j$
and only establish (\ref{eq:machines-balance})
again for the first $j''$ such that
\begin{equation*}
\frac 1 m \sum_i \ell_i(j'') > \frac 1 m \sum_i[\ell_i(j)] + 6 p^2_{\max} .
\end{equation*}
It is not hard to see that the balance for job prefixes between $j$ and $j''$ then also holds (with a slightly worse constant).
To balance the values for $j''$, a careful look at the swapping procedure reveals that it suffices to move jobs, which are scheduled after
$\frac 1 m \sum_i[\ell_i(j'')] - 3 p^2_{\max}$.
Such jobs cannot have a due date lower than $d_j$,
since $\max_i \ell_i(j) \le \frac 1 m \sum_i [\ell_i(j)] + 3 p^2_{\max} \le \frac 1 m \sum_i \ell_i(j'') - 3 p^2_{\max}$. Hence, the swaps do not affect the values $\ell_i(j')$ for $j' \le j$.
Continuing these swaps, we can establish~\eqref{eq:machines-balance}.
Let us now consider~\eqref{eq:machines-lb}. Suppose that for some job $j$ and machine $i$ we have
\begin{equation*}
\ell_i(j) > \min_{j' > j}\left\{ d_{j'} - \frac{1}{m} \sum_{j''=j+1}^{j'} p_{j''} \right\} + \Omega(p_{\max}^2) \ .
\end{equation*}
With a sufficiently large hidden constant (compared to~\eqref{eq:machines-balance}) it follows that
there is a volume of at least $3p_{\max}^2$ of jobs $j'$ with $d_{j'} > d_j$ that are not scheduled
by this optimal solution. This follows simply from
considering the available space on the machines.
Also with a sufficiently large constant it holds that $\ell_i(j) > 3 p^2_{\max}$.
In the same way as earlier in the proof, we can find two non-empty sets of jobs $A'$ and $B'$ where
$A'$ consists only of jobs $j'$ with $d_{j'} > d_j$ that are not scheduled and $B'$ consists only of
jobs $j'$ with $d_{j'} \le d_j$, which are scheduled on machine $i$. We can swap these two sets and
retain an optimal solution. However, this may lead to a new violation of~\eqref{eq:machines-balance}.
In an alternating manner we establish~\eqref{eq:machines-balance} and then perform a swap for~\eqref{eq:machines-lb}.
Since every time the latter swap is performed the average due date of the scheduled jobs increases, the process
must terminate eventually. \end{proof} Having this structural property, we solve the problem by dynamic programming: for every $j = 1,2,\dotsc,n$ and every potential machine-load pattern $(\ell_1(j),\ell_2(j),\dotsc,\ell_m(j))$ we store the highest volume of jobs in $1,2,\dotsc,j$ that achieves these machine loads or lower machine loads on all machines. By Lemma~\ref{lem:Pm}, we may restrict ourselves to patterns where machine loads differ pairwise by only $O(p_{\max}^2)$, but this is not sufficient to obtain our desired running time. For each job $j$ we will only look at patterns where all machines $i$ satisfy \begin{equation*}
\ell_i(j) = \min_{j'>j} \left\{d_{j'} - \frac{1}{m}\sum_{j''=j+1}^{j'} p_{j''} \right\} + k, \qquad \text{for } k \in \{-O(p_{\max}^2),\dotsc,O(p_{\max}^2)\}. \end{equation*} By the second part of Lemma~\ref{lem:Pm} it is clear that we can ignore larger values of $k$, but it needs to be clarified why we can ignore smaller values of $k$ as well. In the case that $\ell_i(j) < \min_{j'>j} \{d_{j'} - \frac{1}{m}\sum_{j''=j+1}^{j'} p_{j''} \} - \Omega(p^2_{\max})$ for some $i$ and $j$ (with sufficiently large hidden constants), we may assume with~\eqref{eq:machines-balance} that all machines $i'$ satisfy $\ell_{i'}(j) \le \min_{j'>j} \{d_{j'} - \frac{1}{m}\sum_{j''=j+1}^{j'} p_{j''} \} - p_{\max}$. It is not hard to see that starting with such a solution we can greedily add all remaining jobs $j'>j$ to the schedule without violating any due date. This is still true if we increase the machine loads until we reach $k = -O(p_{\max}^2)$. It is therefore not necessary to remember any lower load.
For each $j = 1,2,\dotsc,n$, the number of patterns $(\ell_1(j), \ell_2(j),\dotsc,\ell_m(j))$ can now be bounded by $p_{\max}^{O(m)}$, so there are in total $n \cdot p_{\max}^{O(m)}$ states in the dynamic program. Calculations for each state take $O(m)$ time, because job $j$ is either scheduled as the last job on one of the $m$ machines, or not scheduled at all. Hence, we obtain an $n \cdot p_{\max}^{O(m)}$ running time.
\section{Hardness of ILP with triangular block structure}
In this section we will show that it is NP-hard to decide if there is a feasible solution to an integer linear program of the form
\[ \begin{pmatrix} A & 0 & \cdots & 0\\ A & A & \ddots & \vdots \\ \vdots & & \ddots & 0 \\ A & A & \cdots & A \end{pmatrix} \cdot \begin{pmatrix} x_1 \\ x_2 \\ \vdots \\ x_n \end{pmatrix} \leqslant \begin{pmatrix} b_1 \\ b_2 \\ \vdots \\ b_m \end{pmatrix}, \quad \forall_i \ 0 \leq x_i \leq u_i, \quad \forall_i \ x_i \in \mathbb{Z}, \] even when $A$ is a matrix of constant size with integer entries of at most a constant absolute value, $b_i \in \{0, 1, +\infty\}$ for $i = 1,2,\dotsc,m$, and $u_i \in \{0, +\infty\}$ for $i=1,2,\dotsc,n$. This implies Theorem~\ref{th:trianglefold} by taking $B_1, B_2, \dotsc, B_n$ each as an identity matrix and a negated identity matrix on top of each other, which allows us to easily implement the bounds on the variables.
We will give a reduction from the Subset Sum problem, which asks, given $n$ nonnegative integers $a_1, a_2, \ldots, a_n \in \mathbb{Z}_{\geqslant 0}$, and a target value $t \in \mathbb{Z}_{\geqslant 0}$, whether there exists a subset of $\{a_1, a_2, \ldots, a_n\}$ that sums up to $t$. The Subset Sum problem is weakly NP-hard~\cite{Karp72}, so the reduction will have to deal with $n$-bit input numbers.
Before we describe the actual reduction, let us start with two general observations that we are going to use multiple times later in the proof.
First, even though the theorem statement speaks, for clarity, about an ILP structure with only ``$\leqslant$'' constraints, we can actually have all three types of constraints, i.e., ``$\leqslant$'', ``$=$'', and ``$\geqslant$''. Indeed, it suffices to append to the matrix $A$ a negated copy of each row, in order to be able to specify for each constraint not only an upper bound but also a lower bound (and use $+\infty$ when only one bound is needed). An equality constraint can then be expressed as an upper bound and a lower bound with the same value.
Second, even though the constraint matrix has a very rigid repetitive structure, we can selectively cancel each individual row, by setting the corresponding constraint to ``$\leq +\infty$'', or each individual column -- by setting the upper bound of the corresponding variable to $0$.
For now, let us consider matrix $A$ composed of four submatrices $B$, $C$, $D$, $E$, arranged in a $2 \times 2$ grid, as follows: \[ A = \begin{pmatrix} B & C \\ D & E \end{pmatrix}; \quad \begin{pmatrix} A & & & & \\ A & A & & & \\ A & A & A & & \\ \vdots & & &\ddots \end{pmatrix} = \begin{pmatrix} B & C & & & & & \\ D & E & & & & & \\ B & C & B & C & & & \\ D & E & D & E & & & \\ B & C & B & C & B & C & \\ D & E & D & E & D & E & \\ \vdots & & & & & & \ddots \\ \end{pmatrix}. \]
In every odd row of $A$'s we will cancel the bottom $(D, E)$ row, and in every even row -- the upper $(B, C)$ row, and similarly for columns, so that we obtain the following structure of the constraint matrix:
\[ \begin{pmatrix} B & \tikzmark{ta} C & \tikzmark{tb} & & & \tikzmark{tc} & \tikzmark{td} & & \\ \tikzmark{la} D & E & & & & & & & \tikzmark{ra} \\ \tikzmark{lb} B & C & B & C & & & & & \tikzmark{rb} \\ D & E & D & E & & & & & \\ B & C & B & C & B & C & & & \\ \tikzmark{lc}D & E & D & E & D & E & & & \tikzmark{rc} \\ \tikzmark{ld}B & C & B & C & B & C & B & C & \tikzmark{rd} \\ D & E & D & E & D & E & D & E & \\ \vdots & \tikzmark{ba} & \tikzmark{bb} & & & \tikzmark{bc} & \tikzmark{bd} & & \ddots \\ \end{pmatrix} \cong \begin{pmatrix} B & & & & \\ D & E & & & \\ B & C & B & & \\ D & E & D & E & \\ \vdots & & & & \ddots \end{pmatrix} \]
\begin{tikzpicture}[overlay,remember picture] \draw ($(pic cs:la)+(0pt,5pt)$) -- ($(pic cs:ra)+(0pt,5pt)$); \draw ($(pic cs:lb)+(0pt,5pt)$) -- ($(pic cs:rb)+(0pt,5pt)$); \draw ($(pic cs:lc)+(0pt,5pt)$) -- ($(pic cs:rc)+(0pt,5pt)$); \draw ($(pic cs:ld)+(0pt,5pt)$) -- ($(pic cs:rd)+(0pt,5pt)$); \draw ($(pic cs:ta)+(4pt,11pt)$) -- ($(pic cs:ba)+(0pt,0pt)$); \draw ($(pic cs:tb)+(0pt,11pt)$) -- ($(pic cs:bb)+(0pt,0pt)$); \draw ($(pic cs:tc)+(0pt,11pt)$) -- ($(pic cs:bc)+(0pt,0pt)$); \draw ($(pic cs:td)+(0pt,11pt)$) -- ($(pic cs:bd)+(0pt,0pt)$); \end{tikzpicture}
Now, let us set the four submatrices of $A$ to be $1 \times 1$ matrices with the following values. \[ B = \begin{pmatrix} 1 \end{pmatrix}, \quad C = \begin{pmatrix} -2 \end{pmatrix}, \quad D = \begin{pmatrix} 1 \end{pmatrix}, \quad E = \begin{pmatrix} -1 \end{pmatrix}. \]
Let us consider a constraint matrix composed of $2n$ block-rows and $2n$ block-columns. We set the first constraint to ``$\leq 1$'', and all the remaining constraints to ``$=0$''. Let us denote the variables corresponding to the first column of $A$ by $y_1, y_2, \ldots, y_n$, and those to the second column by $z_1, z_2, \ldots, z_n$. We have the following ILP:
\[ \begin{pmatrix} 1 & & & & & & \\ 1 & -1 & & & & & \\ 1 & -2 & 1 & & & & \\ 1 & -1 & 1 & -1 & & & \\ 1 & -2 & 1 & -2 & 1 & & \\ 1 & -1 & 1 & -1 & 1 & -1 & \\ \vdots & & & & & & \ddots \end{pmatrix} \cdot \begin{pmatrix} y_1 \\ z_1 \\ y_2 \\ z_2 \\ y_3 \\ z_3 \\ \vdots \end{pmatrix}\quad\begin{matrix} \leqslant \\ = \\ = \\ = \\ = \\ = \\ \vdots \end{matrix}\begin{pmatrix} 1 \\ 0 \\ 0 \\ 0 \\ 0 \\ 0 \\ \vdots \end{pmatrix} \]
Observe that $z_i = y_i$ and $y_{i+1} = y_1 + \cdots + y_i$ for every $i$. Since $y_1 \in \{0, 1\}$, it is easy to verify that there are exactly two solutions to this ILP. Indeed, either $y_i = z_i = 0$ for every $i$, or $y_1 = z_1 = 1$ and $y_{i+1}=z_{i+1} = 2^{i-1}$ for every $i$. In other words, either $z = (0,0,0,\ldots)$, or $z = (1, 1, 2, 4, 8, \ldots)$. We will call these two solutions \emph{all-zeros} and \emph{powers-of-two}, respectively.
Now, let us add one more column, namely $(1, 0)$, to matrix $A$, which therefore looks now as follows: \[ A = \begin{pmatrix} 1 & -2 & 1 \\ 1 & -1 & 0 \end{pmatrix}. \] The newly added column (and the corresponding variable) shall be cancelled (by setting the corresponding upper bound to $0$) in all but the last copy of $A$, which in turn shall have the other two columns cancelled. Let us call $w$ the variable corresponding to the only non-cancelled copy of the $(1, 0)$ column. Both solutions to the previous ILP extend to the current one, with $w = \sum_i z_i$. Note that, in both solutions, both $\sum_i (y_i - 2 z_i) + w = 0$, and $\sum_i (y_i - z_i) = 0$. Therefore, if we append another copy of the ILP to itself, as follows, \[ \begin{pmatrix} \tikzmark{startofmatrix} 1 & & & & & & & & & \\ 1 & -1 & & & & & & & & \\ 1 & -2 & 1 & & & & & & &\\ 1 & -1 & 1 & -1 & & & & & &\\ \vdots & & & & \ddots & & & & & \\ 1 & -2 & 1 & -2 & \cdots & 1 & & &\\ 1 & -1 & 1 & -1 & \cdots & 0 \tikzmark{endoffirst} & & &\\ 1 & -2 & 1 & -2 & \cdots & 1 & \tikzmark{startofsecond} 1 & &\\ 1 & -1 & 1 & -1 & \cdots & 0 & 1 & -1 &\\ \vdots & & & & & & & & \ddots \tikzmark{endofmatrix} \end{pmatrix} \cdot \begin{pmatrix} \tikzmark{startofvars} y_1 \\ z_1 \\ y_2 \\ z_2 \\ \vdots \\ w \tikzmark{endoffirstvars} \\ \tikzmark{startofsecondvars} y'_1 \\ z'_1 \\ \vdots \tikzmark{endofvars} \end{pmatrix}\quad\begin{matrix} \tikzmark{startofcons} \leqslant \\ = \\ = \\ = \\ \vdots \\ = \\ = \\ \tikzmark{startofsecondcons} \leqslant \\ = \\ \vdots \end{matrix}\begin{pmatrix} 1 \\ 0 \\ 0 \\ 0 \\ \vdots \\ 0 \\ 0 \tikzmark{endoffirstcons} \\ 1 \\ 0 \\ \vdots \tikzmark{endofcons} \end{pmatrix}, \] the two copies are independent from each other, and we get an ILP that has exactly four feasible solutions: both $z$ and $z'$ can be either all-zeros or powers-of-two, independently, giving four choices in total. 
\begin{tikzpicture}[overlay,remember picture] \draw[dashed] ($(pic cs:startofmatrix)+(-2pt,.9em)$) rectangle ($(pic cs:endoffirst)+(2pt,-2pt)$); \draw[dashed] ($(pic cs:startofsecond)+(-2pt,.9em)$) rectangle ($(pic cs:endofmatrix)+(2pt,-2pt)$);
\draw[dashed] ($(pic cs:startofvars)+(-10pt,.85em)$) rectangle ($(pic cs:endoffirstvars)+(10pt,-1pt)$); \draw[dashed] ($(pic cs:startofsecondvars)+(-10pt,.85em)$) rectangle ($(pic cs:endofvars)+(13pt,-1pt)$);
\draw[dashed] ($(pic cs:startofcons)+(-3pt,.85em)$) rectangle ($(pic cs:endoffirstcons)+(10pt,-1pt)$); \draw[dashed] ($(pic cs:startofsecondcons)+(-3pt,.85em)$) rectangle ($(pic cs:endofcons)+(13pt,-1pt)$); \end{tikzpicture}
Let $n$ be the number of elements in the Subset Sum instance we reduce from. We copy the above construction $n+1$ times, and we will call each copy a \emph{super-block}. In the last super-block we change the first constraint from ``$\leqslant 1$'' to ``$=1$'', effectively forcing the powers-of-two solution. Therefore, the resulting ILP has exactly $2^n$ feasible solutions -- two choices for each of the first $n$ super-blocks, one choice for the last super-block. We will denote by $z_{i,j}$ the $j$-th $z$-variable in the $i$-th super-block.
Now, we replace the $z$-column of $A$ with three identical copies of it, and each variable $z_{i,j}$ with three variables $p_{i,j}$, $q_{i,j}$, $r_{i,j}$.
For each $i$, $j$ we will set to $0$ exactly two out of the three upper bounds of $p_{i,j}$, $q_{i,j}$, $r_{i,j}$. Therefore, the solutions of the ILP after the replacement map one-to-one to the solutions of the ILP before the replacement, with $z_{i,j} = p_{i,j} + q_{i,j} + r_{i,j}$. Let $a_1, a_2, \ldots, a_n \in \mathbb{Z}_{\geqslant 0}$ be the elements in the Subset Sum instance we reduce from, and let $t \in \mathbb{Z}_{\geqslant 0}$ be the target value. The upper bounds are set as follows. For every $i$ and for $j=1$, we set $p_{i,1} \leqslant +\infty$ and $q_{i,1}, r_{i,1} \leqslant 0$. For $i = 1, 2, \ldots, n$, we set \begin{align*} &p_{i,j} \leqslant + \infty &&\text{and} &&q_{i,j} \leqslant 0 &&\text{if the $(j-1)$-th bit of $a_i$ is zero, and}\\ &p_{i,j} \leqslant 0 &&\text{and} &&q_{i,j} \leqslant +\infty &&\text{if the $(j-1)$-th bit of $a_i$ is one};\end{align*} in both cases $r_{i,j} \leqslant 0$. For $i = n + 1$ we look at $t$ instead of $a_i$, and we swap the roles of the $q$-variables and $r$-variables, i.e., we set \begin{align*} &p_{n+1,j} \leqslant + \infty &&\text{and} &&r_{n+1,j} \leqslant 0 &&\text{if the $(j-1)$-th bit of $t$ is zero, and}\\ &p_{n+1,j} \leqslant 0 &&\text{and} &&r_{n+1,j} \leqslant +\infty &&\text{if the $(j-1)$-th bit of $t$ is one};\end{align*} and in both cases $q_{n+1,j} \leqslant 0$.
Note that, for $i = 1, 2, \ldots, n$, depending on whether the part of the solution corresponding to the $i$-th super-block is the all-zeros or powers-of-two, $\sum_j q_{i,j}$ equals either $0$ or $a_i$. Hence, the set of the sums of all $q$-variables over all feasible solutions to the ILP is exactly the set of Subset Sums of $\{a_1, a_2, \ldots, a_n\}$. Moreover, $\sum_j r_{n+1,j} = t$.
We need one last step to finish the description of the ILP. We add to matrix $A$ the row $(0, 0, 1, -1, 0)$, so it looks as follows: \[ A = \begin{pmatrix} 1 & -2 & -2 & -2 & 1 \\ 1 & -1 & -1 & -1 & 0 \\ 0 & 0 & 1 & -1 & 0 \end{pmatrix}. \]
The newly added row (and the corresponding constraint) shall be cancelled (by setting the constraint to ``$\leqslant +\infty$'') in all but the last row of the whole constraint matrix. That last constraint in turn shall be set to ``$= 0$'', so that we will have $\sum_i \sum_j q_{i,j} - \sum_i \sum_j r_{i,j} = 0$, i.e., $\sum_i \sum_j q_{i,j} = t$. Hence, the final constructed ILP has a feasible solution if and only if there is a choice of all-zeros and powers-of-two solutions for each super-block that corresponds to a choice of a subset of $\{a_1, a_2, \ldots, a_n\}$ that sums up to $t$. In other words, the ILP has a feasible solution if and only if the Subset Sum instance has a feasible solution.
Finally, let us note that the ILP has $O(n^2)$ variables, and the desired structure.
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{State mapping and discontinuous entanglement transfer in a multipartite open system} \author{Matteo Bina}
\affiliation{Dipartimento di Fisica, Universit\`{a} di Milano, I-20133 Milano, Italy}
\affiliation{CNISM, UdR Milano, I-20133, Milano, Italy} \author{Federico Casagrande} \affiliation{Dipartimento di Fisica, Universit\`{a} di Milano, I-20133 Milano, Italy} \affiliation{CNISM, UdR Milano, I-20133, Milano, Italy} \email{[email protected]} \author{Marco G. Genoni} \affiliation{Dipartimento di Fisica, Universit\`{a} di Milano, I-20133 Milano, Italy} \affiliation{CNISM, UdR Milano, I-20133, Milano, Italy} \author{Alfredo Lulli} \affiliation{Dipartimento di Fisica, Universit\`{a} di Milano, I-20133 Milano, Italy} \affiliation{CNISM, UdR Milano, I-20133, Milano, Italy} \author{Matteo G. A. Paris} \affiliation{Dipartimento di Fisica, Universit\`{a} di Milano, I-20133 Milano, Italy} \affiliation{CNISM, UdR Milano, I-20133, Milano, Italy} \affiliation{ISI Foundation, I-10133, Torino, Italy}
\date{\today}
\begin{abstract} We describe the transfer of quantum information and correlations from an entangled tripartite bosonic system to three separate qubits through their local environments also in the presence of various dissipative effects. Optimal state mapping and entanglement transfer are shown in the framework of optical cavity quantum electrodynamics involving qubit-like radiation states and two-level atoms via the mediation of cavity modes. For an input GHZ state mixed with white noise we show the occurrence of sudden death and birth of entanglement, that is discontinuously exchanged among the tripartite subsystems. \end{abstract} \pacs{03.67.Mn,42.50.Pq} \maketitle
\section{Introduction} As early as in 1935 Einstein, Podolski, and Rosen \cite{EPR} as well as Schr\"{o}dinger \cite{Sch} drew the attention on the correlations in quantum composite systems and the problems raised by their properties. Much later, theoretical \cite{Bell} and experimental \cite{Aspect} cornerstones elucidated the issue of nonlocality. Entanglement is currently viewed as the key resource for quantum information (QI) processing \cite{Nielsen}, where it allowed a number of achievements such as teleportation \cite{Bennett}, cryptography \cite{Gisin} and enhanced measurements \cite{entame}. The deep meaning of multipartite entanglement, its quantification and detection \cite{Guhne1}, the possible applications, are the object of massive investigation. As a matter of fact, optical systems have been a privileged framework for encoding and manipulating quantum information, since bipartite and multipartite entanglement may be effectively generated either in the discrete or continuous variable regime. On the other hand, the development of QI also requires localized registers, e.g. for the storage of entanglement in quantum memories. \\\indent Cavity quantum electrodynamics (CQED) \cite{Haroche_Raimond} is a relevant scenario for this kind of investigations and has been addressed for the analysis of entanglement transfer of bipartite \cite{P04,P204,P304,mem,Hald00,Son02,Zou06,Cas07,CLP2008} and multipartite \cite{Paternostro,Casagrande,Cirac,Ser06} entanglement. In this framework we present a complete study on the entanglement dynamics of a nine parties open system whose implementation could be feasible in the optical regime of CQED \cite{Nussmann,ions}. In particular we describe a system where three radiation modes, prepared in qubit-like entangled states, are coupled by optical fibers to three separate optical cavities each of them containing a trapped two-level atom. 
This paradigmatic example allows us to investigate multipartite entanglement transfer and swapping in a more realistic way than in \cite{Paternostro,Casagrande}, shedding light on fundamental processes related to quantum interfaces and memories in quantum networks \cite{Cirac,Ser06}. \\ \indent We demonstrate that a complete mapping of pure entangled states onto the tripartite atomic subsystem occurs when the external field has fed the cavities. If this field is then switched off, the quantum correlations can be periodically mapped onto the tripartite atomic and cavity mode subsystems according to a triple Jaynes-Cummings (JC) dynamics \cite{JC}. In the case of external radiation prepared in a mixed Werner state we deal with the recently observed phenomenon of entanglement sudden death (ESD) (and birth (ESB)) \cite{YuPRL,Almeida}, that in our case involves the abrupt vanishing (and rising) of quantum correlations in tripartite systems. Though this is in general a still open problem, nevertheless we can show the occurrence of discontinuous exchange of tripartite entanglement via ESD effects. We also describe the dissipative effects introduced by the presence of external environments, such as the decay of cavity modes, atomic excitations, and fiber modes. Some results are then reported in the case that the coupling between external and cavity mode fields is generalized from monomode to multimode.\\ \indent In Sec. II we introduce the model of the physical system. In Sec. III we derive all main results concerning state mapping, entanglement transfer, and entanglement sudden death, also adding the presence of external environments. The case of multimode external-cavity field coupling is addressed in Sec. IV. Some conclusive remarks are reported in Sec. V.
\section{Model of the physical system} \indent We consider an entangled tripartite bosonic system (f), prepared in general in a mixed state, interacting with three qubits (a) through their local environments (c) also in the presence of dissipative effects. In the interaction picture the system Hamiltonian has the form: \begin{eqnarray} \label{H_int} \hat{\mathcal{H}}^I=\hbar \big\{\sum_{J=A,B,C}\big[ g_J(\hat{c}_J\hat{\sigma}^{\dag}_J+\hat{c}^{\dag}_J\hat{\sigma}_J)\big]+\,\nonumber\\ +\sum_{J,K=A,B,C}\big[\nu_{J,K}(t)(\hat{c}_J\hat{f}^{\dag}_K+\hat{c}^{\dag}_J\hat{f}_K)\big]\big\} \end{eqnarray} The operators $\hat{c}_J,\hat{c}^{\dag}_J$ ($\hat{f}_J,\hat{f}_J^{\dag}$) are the annihilation and creation operators for the cavity (external radiation) modes, while $\hat{\sigma}_J,\hat{\sigma}_J^{\dag}$ are the raising and lowering operators for the atomic qubits in each subsystem ($J=$A,B,C). We consider real coupling constant $g_J$ for the atom-cavity mode interaction and $\nu_{J,K}(t)$ for the interaction of each cavity mode with the three modes of driving external radiation. We take time dependent constants in order to simulate the interaction switching-off at a suitable time $\tau_{off}$ for the external field. Now we take into account three processes of dissipation: the cavity losses at rate $\kappa_c$ due to interaction with a thermal bath with a mean photon number $\bar{n}$, the atomic spontaneous emission with a decay rate $\gamma_a$ for the upper level, and a loss of photons inside the fibers at a rate $\kappa_f$. 
All dissipative effects can be described under the Markovian limit by standard Liouville superoperators so that the time evolution of the whole system density operator $\hat{\rho}(t)$ can be described by the following ME written in the Lindblad form: \begin{eqnarray}\label{ME} \dot{\hat{\rho}}&=&-\frac{i}{\hbar}\left [\hat{\mathcal{H}}_e,\hat{\rho}\right ]+\sum_{J=A,B,C} \big[\hat{C}_{f,J}\hat{\rho}\hat{C}_{f,J}^{\dag}+\hat{C}^{(g)}_{c,J}\hat{\rho}\hat{C}_{c,J}^{(g)\dag}+\,\nonumber\\ &+&\hat{C}^{(l)}_{c,J}\hat{\rho}\hat{C}_{c,J}^{(l)\dag}+\hat{C}_{a,J}\rho\hat{C}_{a,J}^{\dag}\big] \end{eqnarray} where the non-Hermitian effective Hamiltonian is \begin{eqnarray}\label{He} \hat{\mathcal{H}}_e&=&\hat{\mathcal{H}}^I-\frac{i\hbar}{2}\sum_{J}\big[\hat{C}^{\dag}_{f,J}\hat{C}_{f,J}+\hat{C}^{(g)\dag}_{c,J}\hat{C}^{(g)}_{c,J}+\,\nonumber\\ &+&\hat{C}^{(l)\dag}_{c,J}\hat{C}^{(l)}_{c,J}+\hat{C}^{\dag}_{a,J}\hat{C}_{a,J}\big]. \end{eqnarray} The jump operators for the atoms are $\hat{C}_{a,J}=\sqrt{\gamma_a}\hat{\sigma}_J$, for the fiber losses $\hat{C}_{f,J}=\sqrt{\kappa_f}\hat{f}_J$, and for the cavity modes $\hat{C}_{c,J}^{(l)}=\sqrt{\kappa_c(\bar{n}+1)}\hat{c}_J$ (loss of a photon) and $\hat{C}_{c,J}^{(g)}=\sqrt{\kappa_c\bar{n}}\hat{c}^{\dag}_J$ (gain of a photon). The above ME can be solved numerically by the Monte Carlo Wave Function method \cite{MCWF}. From now on we consider dimensionless parameters, all scaled to the coupling constant $g_A$, and times $\tau=g_At$.\\ \indent As a significative example, the implementation of our scheme may be realized in the optical regime of CQED by choosing a continuous variable (CV) entangled field for the subsystem (f) and two-level atoms as qubits (a). Each qubit is trapped in a one-sided optical cavity (c), where the radiation modes can be coupled to the cavity modes via optical fibers as in \cite{SMB}. 
In optical cavities thermal noise is negligible ($\bar{n}\cong 0$), spontaneous emission can be effectively suppressed, and single atoms can remain trapped even for several seconds \cite{Nussmann}.\\ \indent Here we focus on external field prepared in a qubit-like entangled state $\hat{\rho}_f(0)$ because this is the condition for high entanglement transfer for CV field \cite{Casagrande}. The generation of photon number multimode entangled radiation was recently demonstrated \cite{Papp}. Under qubit-like behavior we can describe the entanglement of all three-qubit subsystems (a, c, f) by combining the information from tripartite negativity \cite{Sabin}, entanglement witnesses \cite{Acin} for the two inequivalent classes GHZ and W \cite{Dur}, and recently proposed criteria for separability \cite{Ghune2}. In fact, the tripartite negativity $E^{(\alpha)} (\tau)$($\alpha$=a, c, f), defined as the geometric mean of the three bipartite negativities \cite{Vidal}, is an entanglement measure providing only a sufficient condition for entanglement detection, though its positivity guarantees the GHZ-distillability that is an important feature in QI.
\section{State mapping and tripartite entanglement transfer for single mode coupling} \subsection{Hamiltonian regime for external field in qubit-like pure states} \indent We first illustrate the Hamiltonian dynamics ($\{\tilde{\kappa}_f,\tilde{\kappa}_c,\tilde{\gamma}_a\}\ll 1$) for the external field prepared in a qubit-like entangled pure state $\ket{\Psi(0)}_f$, atoms prepared in the lower state $\ket{ggg}_a$, and cavities in the vacuum state $\ket{000}_c$ . By choosing $\nu_{JK}(\tau)=0$ if $J\neq K$ and $\nu_{J,J}=g_A$ we describe single-mode fibers each one propagating a mode of the entangled radiation. We show that under optimal conditions for mode matching it is possible to map $\ket{\Psi(0)}_f$ onto atomic and cavity mode states for suitable interaction times. Overall we are dealing with an interacting 9-qubit system, though the input field will be switched off at a time $\tau_{off}$ such that the atomic probability of excited state $p_e(\tau)$ reaches the maximum. Injected field switch-off can be obtained, e.g., by rotating fiber polarization.\\ \indent In Fig.~\ref{fig:fig1} we show numerical results for the external field prepared in the GHZ state $\ket{\Psi(0)}_f=(\ket{000}_f+\ket{111}_f)/\sqrt{2}$. In the time interval $0<\tau \leq\tau_{off}$ (transient regime) each flying qubit transfers its excitation to the cavity which in turn passes it onto the atom (see Fig.~\ref{fig:fig1}a). The cavity mode, simultaneously coupled to the external field and to the atom, exchanges energy according to a Tavis-Cummings dynamics at an effective frequency $g\sqrt{2}$ \cite{Haroche_Raimond,TC}. During the transient up to time $\tau_{off}=\pi/\sqrt{2}$ the mean photon number $N^{(c)}(\tau)\equiv\langle \hat{c}^\dag \hat{c}\rangle(\tau)$ in each cavity completes a cycle. 
In the same period the atomic excitation probability $p_e(\tau)$ reaches its maximum value, while the input field has completely fed the cavity, i.e., its mean photon number $N^{(f)}(\tau)\equiv\langle \hat{f}^\dag \hat{f}\rangle(\tau)$ vanishes. \begin{figure}
\caption{Dynamics for the external field in a GHZ state: (a) $N^{(c)}$(dashed), $N^{(f)}$(dotted) and $p_e$(solid); (b) $E^{(\alpha)}$ for atoms (solid), cavity modes (dashed) and external field (dotted); (c) $\mu^{(a)}$ (solid) and $F_{\phi}^{(a)}$ with $\phi=0$ (dashed), $\phi=\pi$ (dotted); (d) $\mu^{(c)}$ (solid) and $F_{\phi}^{(c)}$ with $\phi=-\pi/2$ (dashed), $\phi=+\pi/2$ (dotted).}
\label{fig:fig1}
\end{figure} In Fig.~\ref{fig:fig1}b we show that in the transient the atomic tripartite negativity is always positive and $E^{(a)}(\tau_{off})=1$, that is the value of the injected GHZ state. Until $\tau_{off}$ the dynamics maps the whole initial state $\ket{\Psi(0)}_f\otimes\ket{000}_c\otimes\ket{ggg}_a$ onto the pure state $\ket{000}_f\otimes\ket{000}_c\otimes \ket{\Psi(0)}_a$, where $\ket{\Psi(0)}_a$ is obtained from $\ket{\Psi(0)}_f$ by the correspondence $\ket{0}_f\rightarrow\ket{g}_a$ and $\ket{1}_f\rightarrow\ket{e}_a$. This is confirmed in Fig.~\ref{fig:fig1}c by the time evolution of the purity $\mu^{(a)}(\tau)=\hbox{Tr}_{a}[\hat{\rho}^2_{\alpha}(\tau)]$ and the fidelity $F^{(a)}(\tau)={}_{a} \meanvalue{\Psi(0)}{\hat{\rho}_{a}(\tau)}{\Psi(0)}_{a}$, where $\hat{\rho}_{a}(\tau)$ is the atomic reduced density operator. As for the cavity mode dynamics we note that (see Fig.~\ref{fig:fig1}b,d) the local maximum of $E^{(c)}(\tau_{off}/2)$ does not correspond to a pure state, i.e. the initial state $\ket{\Psi(0)}_f$ cannot be mapped onto the cavity modes during the transient regime. The entanglement is only partially transferred to the cavity modes nevertheless allowing the building up of full atomic entanglement later on. This dynamics is quite different than in \cite{Casagrande} where the entangled field was mapped onto the cavity modes before the interaction with the atoms.\\ \indent At the end of the transient regime the external radiation is turned off and the subsequent dynamics is described by a triple JC ruled by oscillations at the vacuum Rabi frequency $2g$, hence with a dimensionless period $\pi$ as shown by cavity mean photon number and atomic probability in Fig.~\ref{fig:fig1}a. The purities $\mu^{(a,c)}(\tau)$ in Figs.~\ref{fig:fig1}c,d oscillate at a double frequency between pure entangled (maximum negativity) and separable (zero negativity) states. 
In particular, at times $\tau_m=\tau_{off}+m\pi$ ($m=0,1,2...)$ the atoms are in the entangled states $\hat{U}^{(a)}_{\phi}\ket{\Psi(0)}_a$, where $\hat{U}^{(a)}_{\phi}=\bigotimes_{J}e^{-i\phi\hat{\sigma}_J^{\dag}\hat{\sigma}_{J}}$ is a local phase operator where $\phi=0$ ($\phi=\pi$) applies for even (odd) values of $m$, that are the peaks of $E^{(a)}(\tau)$ in Fig.~\ref{fig:fig1}b. At times $\tau_n=\tau_{off}+(n+\frac{1}{2})\pi$ ($n=0,1,2...$) the cavity mode states are obtained by applying $\hat{U}^{(c)}_{\phi}=\bigotimes_{J}e^{-i\phi\hat{c}_J^{\dag}\hat{c}_{J}}$, where $\phi=-\frac{\pi}{2}$ $(+\frac{\pi}{2})$ for even (odd) values of $n$, to the state $\ket{\Psi(0)}_c$ derived from $\ket{\Psi(0)}_f$ by the correspondence $\ket{0}_f\leftrightarrow\ket{0}_c$ and $\ket{1}_f\leftrightarrow\ket{1}_c$. By choosing to turn off the external field at times shorter than $\tau_{off}$ we find a progressive reduction in the entanglement transfer to the atomic and cavity subsystems, simulating the effect of non perfect cavity mirror transmittance. Up to 10\% changes in the value of $\tau_{off}$, the fidelity $F^{(a)}(\tau_{off})$ remains above 99.9\%. We remark that the state mapping process can be obtained for any $\ket{\Psi(0)}_f$ written in a generalized Schmidt decomposition \cite{Dur}, as well as for mixed states as described below.\\ \subsection{Tripartite entanglement sudden death for external field in a Werner state} \indent For the injected field we consider the Werner state $\hat{\rho}_f(0)=(1-p)\ketbra{GHZ}{GHZ}+\frac{p}{8} \hat{I}$, $(0\leq p\leq 1)$, because it is relevant in QI and it is possible to fully classify its entanglement as a function of parameter $p$. In fact, for $0\leq p<\frac{2}{7}$ the state belongs to the GHZ class and to the W class up to $p=\frac{4}{7}$. The tripartite negativity is positive up to $4/5$, and due to the structure of $\hat{\rho}_f(0)$, the state is clearly inseparable under all bipartitions (INS), i.e. 
it cannot be written as convex combination of biseparable states. For $4/5\leq p\leq1$ it is known that the state is fully separable \cite{Pittenger,Ghune2}.\\ \indent The system dynamics can be divided into a transient and an oscillatory regime, and the state mapping of $\hat{\rho}_f(0)$ onto atoms (cavity modes) still occurs at times $\tau_m$ ($\tau_n$). Out of these times the density matrices of all subsystems lose the form of a GHZ state mixed with white noise but still preserve invariance under all permutations of the three qubits and present only one non vanishing coherence as in $\hat{\rho}_f(0)$. This greatly helps us in the entanglement classifications in the plane $(\tau,p)$ shown in Fig.~\ref{fig:fig2}. In fact, in the regions where $E^{(\alpha)}(\tau)>0$ but out of W class we can exclude the biseparability. The fully separability criteria in \cite{Ghune2} are violated only where $E^{(a)}(\tau)>0$ so that if $E^{(a)}(\tau)=0$ the state may be fully separable or biseparable. Nevertheless, in the latter case the state should be symmetric and biseparable under all bipartitions and hence it is fully separable \cite{Kraus}. For any fixed value of $p$ in the range $0<p<4/5$ we thus show the occurrence of entanglement sudden death and birth at the boundaries between fully separable and INS states. In particular, for $0<p<4/7$ we find genuine tripartite ESD and ESB phenomena. Note that, for a fixed value of $p$, the passage of atomic state during time evolution from W-class to GHZ-class (or viceversa) entangled states is permitted by the non-unitarity of the partial trace over non-atomic degrees of freedom (so that the overall operation on the initial three qubits is not SLOCC). 
We also notice that for times $\tau\geq\tau_{off}$ we can solve exactly the triple JC dynamics, confirming our numerical results and generalizing \cite{ESDChina} to mixed states.\\ \indent In Fig.~\ref{fig:fig2}b we see that, for increasing values of $p$, there is an increase of both the slope of $E^{(a)}(\tau)$ and the time interval of fully separability. \begin{figure}
\caption{ ESD/ESB for external field in a GHZ state mixed with white noise. a) Regions in the plane ($\tau,p$) for atomic entanglement of type GHZ, W, INS, and fully separable (black). b) Sections $E^{(a)}(\tau)$ for selected values of $p$. c) Zoom on $E^{(\alpha)}(\tau_{off}/2)$ for field (dotted), cavity modes (dashed), and atoms (solid) with $p=0$ (black), $p=0.2$ (blue), $p=0.4$ (green), $p=0.6$ (red). d) Classification for cavity mode entanglement.}
\label{fig:fig2}
\end{figure} In Fig.~\ref{fig:fig2}c we show in detail the transient dynamics of the tripartite negativities $E^{(\alpha)}(\tau,p)$ ($\alpha=a,c,f$) in the crucial region around $\tau_{off}/2$. We consider some values of $p$ where the atoms exhibit in times different classes of entanglement. We see that for $p=0.2$, where the input state has GHZ class entanglement, the ESB of subsystems (c),(a) anticipates the ESD of (f),(c), and there is an interval around $\tau_{off}/2$ where all three subsystems are entangled (of INS-type). As $p$ grows, hence the initial state becomes more noisy, the effects of ESD occur earlier and those of ESB later. For $p=0.4$, involving W-class entanglement, only at most two subsystems are simultaneously entangled (first (f),(c) and then (c),(a)). For $p=0.6$, involving only entanglement of INS-type, the cavity modes do not entangle at all (see Fig.~\ref{fig:fig2}d). They physically mediate the discontinuous entanglement transfer from (f) to (a), where for $p\rightarrow 4/5$ the time interval without any entanglement increases while the entanglement level vanishes.\\ \subsection{Effect of dissipation on state mapping} \indent In the perspective of experimental implementation for QI purposes an important issue is the effect of dissipation on both state mapping and entanglement transfer. For external field prepared in a GHZ pure state we first evaluated the effect of cavity decay rates in the range $0<\tilde{\kappa}_c\leq0.5$ for negligible values of all other decay rates. We consider as function of $\tilde{\kappa}_c$ the behavior of the fidelities $F^{(\alpha)}(\tau_{m,n})$ and the tripartite negativities $E^{(\alpha)}(\tau_{m,n})$ ($\alpha=c,f$) at the first peaks $(m=n=0)$. \begin{figure}
\caption{ Effect of cavity mode dissipation. At the first peaks $\tau_{m,n}$ with $m=n=0$ we consider the tripartite negativities as function of $\tilde{\kappa}_c\equiv\frac{\kappa_c}{g_A}$: $F^{(a)}(\tau_0)$ (1), $E^{(a)}(\tau_0)$ (2), $F^{(c)}(\tau_0)$ (3), $E^{(c)}(\tau_0)$ (4).}
\label{fig:fig3}
\end{figure} In Fig.~\ref{fig:fig3} we see that the above functions of $\tilde{\kappa}_c$ can be well fitted by exponential functions, whose decay rates for the atomic subsystem are $\beta^{(a)}_{F}=0.75$, $\beta^{(a)}_{E}=1.09$, and for the cavity modes $\beta^{(c)}_{F}=1.80$, $\beta^{(c)}_{E}=2.94$. As expected, quantum state mapping and entanglement transfer are by far more efficient onto atomic than cavity qubits. For instance, if $\tilde{\kappa}_c = 0.1$ we obtain a state mapping onto the atoms (cavity modes) with a fidelity of $\cong0.93$ ($\cong0.83$).\\ \indent We can now add the further dissipative effect of atomic decay. For instance we find that, for an atomic decay rate $\tilde{\gamma}_a=0.03$ and in the presence of cavity decay with a rate $\tilde{\kappa}_c = 0.1$, the fidelity of the atomic (cavity mode) subsystem reduces by $4.4\%$ ($8.9\%$).\\ \indent Finally, we evaluate the effect of losses in the fibers used to inject the external field into each cavity. Clearly, this effect is relevant only up to the time $\tau_{off}=2.22$. We evaluated the effects of fiber decay rates $\tilde{\kappa}_f$ up to 1.0 for negligible values of atomic and cavity decay rates ($\tilde{\kappa}_c<<1$, $\tilde{\gamma}_a<<1$) (see Fig.~\ref{fig:fig4}). We show the effect of parameter $\tilde{\kappa}_f$ on cavity field mean photon number $N^{(c)}(\tau_{off}/2)$ and atomic excitation probability $p_e(\tau_{off})$ and we see that the amount of energy transferred to the atoms and to the cavity modes decreases exponentially for increasing values of $\tilde{\kappa}_f$; the decay rates are $\cong 0.42$ and $\cong 0.82$, respectively. Also the behavior of the tripartite negativity $E^{(a)}(\tau_{0})$ and fidelity $F^{(a)}(\tau_{0})$ at the first peak can be described versus $\tilde{\kappa}_f$ by exponential functions whose decay rates are $\cong1.51$ and $\cong 1.95$, respectively. \begin{figure}
\caption{ Effect of fiber mode decay rate $\tilde{\kappa}_f\equiv\kappa_f/g_A$ for $\tilde{\kappa}_c<<1$, $\tilde{\gamma}_a<<1$. We evaluate at time $\tau_{off}$ the atomic tripartite negativity $E^{a}$(solid), the fidelity $F^{a}$(dash-dot), and the atomic probability $p_e$(dot), and at time $\tau_{off}/2$ the cavity mean photon number $N^{(c)}$(dash).}
\label{fig:fig4}
\end{figure} \section{State mapping and tripartite entanglement transfer for multi-mode coupling} \indent Finally, we consider multi-mode coupling of the external field to each cavity mode. For simplicity we choose equal coupling constants $\tilde{\nu}_{J,K}\equiv\nu_{J,K}/g_A\neq 0$ if $K\neq J$ and we consider values in the range $0-1.4$. In the transient regime the dynamics is sharply modified with respect to the case of single mode fiber shown in Fig.~\ref{fig:fig1}. By increasing the values of $\tilde{\nu}_{J,K}$ the period of energy exchange decreases from $2\pi/\sqrt{2}$ to $\cong2.6$. The maximum of cavity mode mean photon number grows up to $N^{(c)}\cong 0.41$ whereas the maximum of atomic excitation probability decreases to $p_{e}\cong 0.24$. The external field mean photon number does not vanish but it reaches a minimum, that can be always found between the two maxima of $N^{(c)}(\tau)$ and $p_{e}(\tau)$, such that $ 0.002<N^{(f)}<0.02$ changing $\tilde{\nu}_{J,K}$ from 0.1 to 1.4. We investigate the differences in the entanglement transfer for three selections of switching-off time $\tau_{off}$ corresponding to the maximum of $p_{e}(\tau)$, the minimum of $N^{(f)}(\tau)$, and the maximum of $N^{(c)}(\tau)$. In Fig.~\ref{fig:fig5}a we show the dependence of $\tau_{off}$ on $\tilde{\nu}_{J,K\neq J}$. Switching off the external field at times $\tau_{off}$ corresponding to the maxima of $p_{e}(\tau)$, as in the previous case with single-mode fibers, we find (Fig.~\ref{fig:fig5}b,c) that the maxima of tripartite negativities $E^{(\alpha)}(\tau)$ after the transient regime reduce for increasing values of $\tilde{\nu}_{J,K}$ for both atomic and cavity mode subsystems. \begin{figure}
\caption{ Effect of multimode coupling. a) Dependence of $\tau_{off}$ on the coupling constants $\tilde{\nu}_{J,K}$ for different choices of switching-off the external field: maximum of $p_{e}(\tau)$ (o), minimum of $N^{(f)}(\tau)$ (x), and maximum of $N^{(c)}(\tau)$ (+). Tripartite negativities $E^{(\alpha)}$ ($\alpha=a,c$) for $\tilde{\nu}_{J,K}=0$ (solid gray), 0.3 (dashed), 0.6 (dotted), 1.0 (dashed-dotted), and 1.4 (solid black): b,c) $\tau_{off}$ in the maximum of $p_{e}(\tau)$; d,e) $\tau_{off}$ in the minimum of $N^{(f)}(\tau)$; f,g) $\tau_{off}$ in the maximum of $N^{(c)}(\tau)$.}
\label{fig:fig5}
\end{figure} If we consider $\tau_{off}$ corresponding to the minimum of $N^{(f)}(\tau)$ (Fig.~\ref{fig:fig5}d,e) we observe a small reduction of the peak values of $E^{(\alpha)}(\tau)$. Finally, if we turn off the external field at the first maximum of the cavity field mean photon number we note that by increasing the values of $\tilde{\nu}_{J,K}$ it is possible to improve the entanglement transfer (Fig.~\ref{fig:fig5}f,g). The peak value of tripartite negativity grows up to $\cong0.93$ for $\tilde{\nu}_{J,K}=1.4$ and the fidelity up to $\cong0.95$ for both subsystems (a) and (c). We remark that these values cannot be significantly increased for larger values of $\tilde{\nu}_{J,K}$. In conclusion, for all the above choices of switching-off time $\tau_{off}$ we observe that, by increasing the values of $\tilde{\nu}_{J,K}$, the amount of entanglement that can be transferred to the cavity modes in the transient regime also increases. This is due to the fact that the amount of energy transferred to each cavity mode increases: in fact, the peak value of $N^{(c)}(\tau)$ progressively grows up from $\cong 0.25$ to $\cong 0.41$. Nevertheless, multimode coupling for larger values of $\tau_{off}$ results in a less favorite condition for entanglement transfer.\\ \section{conclusions}
\indent In this paper we have addressed the transfer of quantum information and entanglement from a tripartite bosonic system to three localized qubits through their environments, also in the presence of external environments. We considered an implementation in the optical regime of CQED based on CV photon number entangled fields and atomic qubits trapped in one-sided optical cavities, where the radiation modes can couple to cavity modes by optical fibers.\\ \indent In the nine-qubit transient regime the quantum state is mapped from tripartite entangled radiation to tripartite atomic system via the cavity modes. After the transient we switch off the external field. The subsequent triple JC dynamics, that we solved analytically (numerically) for pure (mixed) input states, shows how the effect of mapping can further affect the atom-cavity six qubits system. Its relevance is in the possible manipulation for QI purposes of entanglement stored in separate qubits of atomic or bosonic nature. Hence the interest to put quantitative limits dictated by cavity, atomic and fiber mode decays, that we evaluated at the times where the transfer protocol is optimal.\\ \indent In the case of a GHZ input state mixed with white noise, we provide a full characterization of the separability properties of the tripartite subsystems. We can then show the occurrence of entanglement sudden death effects at the tripartite level, deriving the conditions for the repeated occurrence of discontinuous exchange of quantum correlations among the tripartite subsystems. 
This is an issue of fundamental interest as well as worth investigating for all applications in quantum information processing, remarkably computing and error correction, where disentanglement, which may be faster than decoherence, has to be carefully controlled.\\ \indent An extension and comparison to other types of entangled qubit-like input fields and experimentally available CV fields \cite{CV} will be presented elsewhere \cite{Bina}.
\acknowledgements This work has been partially supported by the CNR-CNISM convention.
\end{document} |
\begin{document}
\title[Fubini-Tonelli type theorem]{Fubini-Tonelli type theorem for non product measures in a product space} \author{ Jorge Salazar}
\address{DMAT, Universidade de \'Evora, \'Evora - Portugal} \email{[email protected]}
\subjclass{ } \keywords{}
\date{}
\begin{abstract} I prove a theorem about iterated integrals for non-product measures in a product space. The first task is to show the existence of a family of measures on the second space, indexed by the points of the first space (outside a negligible set), such that integrating the measures on the index against the first marginal gives back the original measure (see Theorem \ref{teo}). At the end, I give a simple application in Optimal Transport. \end{abstract} \maketitle
\section{Introduction} \noindent The Fubini-Tonelli theorem states that the integral of a function defined on a product space, against a measure which is a product of measures on the factor spaces, can be obtained by iterated integration, i.e. integrating one variable (against its marginal measure) at the time. \vskip 2mm \noindent
If a measure on a product space is not a product measure, is it still possible to decompose the measure and evaluate the integral using iterated integration? To better understand the problem, imagine we are dealing with a measure $ \zeta $ which is absolutely continuous with respect to the product measure $ \mu\otimes\nu $, i.e. there is a function $ \delta: X\times Y\rightarrow \left[ 0, \infty\right] $, such that for every measurable set $C\subseteq X\times Y$,
\[ \zeta\left( C\right) =\int_{C} \delta(x,y)\,\mu\otimes\nu\left( dx, dy \right) . \]
\vskip 2mm
\noindent
Then, using the classical Fubini-Tonelli theorem, we can decompose this integral into
\[ \zeta\left( C\right) =\int_X\left( \int_{C_x} \delta(x,y)\, \nu\left( dy \right) \right) \mu\left( dx \right) ,\]
where $C_x :=\left\lbrace y\in Y;\, \left( x,y\right)\in C \right\rbrace $
is the slice of $C$ at the point $x$.
Accordingly, $\nu$ decomposes into the measures
\[ \nu_x(dy):=\delta(x,y)\, \nu\left( dy \right) , \]
which integrates against $\mu$ to give $\zeta$. Symbolically,
\[ \zeta\left( dx, dy \right) =\nu_x(dy)\, \mu\left( dx \right) . \]
With this decomposition the order of integration is not interchangeable, since the measure $\nu_x$ in the inner integral depends on the outer variable $x$.
To interchange the order of integration, we must decompose $\mu$ in a similar way,
\[ \mu_y(dx):=\delta(x,y)\, \mu\left( dx \right) , \]
which integrates against $\nu$ to give $\zeta$. Symbolically,
\[ \zeta\left( dx, dy \right) =\mu_y(dx)\, \nu\left( dy \right) . \]
\noindent
In this paper, the existence of this kind of decomposition is established for arbitrary Borel probability measures on
the product of two complete, separable, locally compact,
metric spaces (see Theorem \ref{teo}). I restricted myself to the case of probability measures to simplify the discourse, although the results stay valid for $\sigma-$finite measures. \vskip 2mm \noindent
To the best of my knowledge, there is nothing of the kind in the literature on foundations of measure theory that describes similar results. Nonetheless, this question is natural and I believe it may provide a useful calculation and/or analytical tool as much as the classical Fubini-Tonelli theorem does.
\vskip 2mm
\noindent
Optimal Transport, for example, deals with fixed marginal probability measures and a minimal cost is sought among all the couplings of the given marginal probabilities, i.e. among all the probabilities on the product space, such that the marginal measures are the ones given.
It would be a nice research project to look for a new characterization of the optimal transport plans in terms of the measures along the ``fibers'', obtained from the decomposition described in Theorem \ref{teo}. In section \ref{OT}, I give a simple application of Theorem \ref{teo}, showing that a pair of competitive price functions, whose integral with respect to some transference plan matches the transport cost, are conjugate to each other almost surely. This complements the Kantorovich duality theorem on the nature, regarding convexity/concavity, of pair of competitive prices maximizing the profit. See Villani's book \cite{vill}, page 70, for a very detailed discussion of the Kantorovich theorem.
\vskip 2mm
\noindent Another interesting project is the application of Theorem \ref{teo} to the study of measures on the Tangent bundle of Riemannian manifolds. Indeed, the local charts of the tangent bundle are Cartesian products of Euclidean open sets. Using local charts, we can transport the measure to this product to be decomposed and then sent back the family of measures fiber-wise. In the literature, the measures on tangent bundles are a kind of product measures, as is the volume obtained from the Sasaki \cite{sasaki} metric, or the measure on the unit sphere on the tangent space, integrated against the volume element of the base manifold. I believe Theorem \ref{teo} is a tool that could help exploring general integration on tangent bundles.
\section{Main theorem}
{\thm\label{teo} Let $X\times Y$ be the product of two complete, separable, locally compact,
metric spaces. We equip $X$, $Y$, and $X\times Y$ with their Borel $\sigma-$algebras, denoted by $\mathfrak{B}_X$, $\mathfrak{B}_Y$, and $\mathfrak{B}_{X\times Y}$ respectively.
\vskip 3pt
\noindent
Let $\zeta$ be a probability measure on $\mathfrak{B}_{X\times Y}$, and denote $\mu$ and $\nu$ the marginal probabilities on $\mathfrak{B}_X$, $\mathfrak{B}_Y$ respectively. i.e.
\[ \forall A\in \mathfrak{B}_X, \ \mu(A)= \zeta(A\times Y) \]
and
\[ \forall B\in \mathfrak{B}_Y, \ \nu(B)= \zeta(X\times B) .\] \vskip 3pt \noindent Then, outside an exceptional $\mu-$negligible set $E_1 \in \mathfrak{B}_X$ ($\mu\left( E_1\right) = 0$), for all
$ x \in X\setminus E_1 $, there is a measure $\nu_x$ defined on $\mathfrak{B}_Y$, such that for all $C\in \mathfrak{B}_{X\times Y}$, the function \begin{equation}\label{meas0}
x \in X\setminus E_1 \longrightarrow \nu_x\left( C_x \right) ,
\end{equation} where $ C_x = C\cap\left( \left\lbrace x \right\rbrace\times Y \right) $, is $\mathfrak{B}_X-$measurable and \begin{equation}\label{int0}
\zeta(C)= \int_X\nu_x\left(C_x \right) \mu(dx).
\end{equation}
In particular,
\[ \forall B\in \mathfrak{B}_Y, \ \nu(B)= \int_{X} \nu_x\left(B \right) \mu(dx) .\] Moreover, for all positive $\mathfrak{B}_{X\times Y}-$measurable function, $f: X\times Y\rightarrow \mathbb{R}$, \begin{equation}\label{meas0f} x \in X\setminus E_1 \longrightarrow \int_Y f(x,y)\, \nu_x\left(dy\right) \end{equation}
is $\mathfrak{B}_X-$measurable and
\begin{equation}\label{int0f}
\int_{X\times Y} f(x,y)\, \zeta\left(dx,dy\right)= \int_X \left( \int_Y f(x,y)\, \nu_x\left(dy\right) \right) \mu(dx).
\end{equation}
\vskip 3mm \noindent Likewise, there is a $\nu-$negligible set $E_2 \in \mathfrak{B}_Y$, such that for every $\, y\in Y\setminus E_2 $
there is a measure $\mu_y$ on $\mathfrak{B}_X$, such that for all $C\in \mathfrak{B}_{X\times Y}$, the function \[ y\in Y\setminus E_2 \longrightarrow \mu_y\left(C_y \right) ,\] where $ C_y = C\cap\left( X \times\left\lbrace y \right\rbrace \right) $, is $\mathfrak{B}_{Y}-$measurable and \[ \zeta(C)= \int_{Y} \mu_y\left(C_y \right) \nu(dy). \] In particular, \[ \forall A\in \mathfrak{B}_X, \ \mu(A)=\int_{Y} \mu_y\left(A \right) \nu(dy) .\] Moreover, for all positive $\mathfrak{B}_{X\times Y}-$measurable function, $f: X\times Y\rightarrow \mathbb{R}$, \[ y\in Y\setminus E_2 \rightarrow \int_{X} f(x,y)\, \mu_y\left(dx\right) \] is $\mathfrak{B}_{Y}-$measurable and \[ \int_{X\times Y} f(x,y)\, \zeta \left(dx, dy\right) = \int_{Y} \left( \int_{X} f(x,y)\, \mu_y \left(dx\right) \right) \nu(dy). \]
\vskip 3mm \noindent As a consequence, given a $\mathfrak{B}_{X\times Y}-$measurable function, $f: X\times Y\rightarrow \mathbb{R}$, the following affirmations are equivalent
\vskip 2mm
\begin{enumerate}
\item $f: X\times Y\rightarrow \mathbb{R}$ is $\zeta-$integrable. \vskip 3mm
\item $ x \in X\setminus E_1 \rightarrow \int_Y \left| f(x,y)\right| \, \nu_x\left( dy\right) $ is $\mu-$integrable. \vskip 3mm
\item $ y\in Y\setminus E_2 \rightarrow \int_{X}\left| f(x,y)\right| \, \mu_y\left( dx\right) $ is $\nu-$integrable. \end{enumerate} \vskip 2mm\noindent And \[ \int_{X\times Y} f(x,y)\, \zeta\left( dx, dy\right)= \int_X \left( \int_Y f(x,y)\, \nu_x\left( dy\right) \right) \mu\left( dx\right) . \] \[ \qquad \qquad \qquad \qquad \qquad = \int_{Y} \left( \int_{X} f(x,y)\, \mu_y \left( dx\right) \right) \nu\left( dy\right) . \] }
\section{Proof of Theorem \ref{teo}}
\noindent \emph{Note about the notation}: We will use $x$ and $y$ to denote generic points in $X$ and $Y$ respectively. In this way, $B_r\left( x\right) $ automatically refers to a ball in $X$, of center $x$ and and radius $r$, while $B_r\left( y\right)$ represents a ball in $Y$ (different space, different metric). \vskip 2mm \noindent The proof of Theorem \ref{teo} will be given in several steps.
\subsection{Definition of $ \mathit{l}_x $}
Let $\mathcal{Y} $ be a dense subset of $Y$. Denote by $ \mathcal{B} $ the set of open balls $B_r\left( y\right) $ with center $y\in \mathcal{Y} $ and radius $ r\in\mathbb{Q} $. i.e. \begin{equation}\label{balls} \mathcal{B}:= \left\lbrace B_r \left( y\right) ;\, y\in\mathcal{Y},\ r\in \mathbb{Q} \right\rbrace . \end{equation}
\noindent Consider also the complement of the closed balls, \[ \mathcal{B}_{\mathrm{c}} :=\left\lbrace Y\setminus \overline{B}_r\left( y\right) ; {B}_r\left( y\right) \in\mathcal{B} \right\rbrace . \]
\noindent Finally, let $ \mathcal{L} $ be the set of finite unions of finite intersections of elements of $ \mathcal{B}\cup \mathcal{B}_{\mathrm{c}} $ (note that $\emptyset\in \mathcal{L}$). \vskip 2mm \noindent For each $O\in \mathcal{L}$, define the measure \[ A\in \mathfrak{B}_X\rightarrow \mu_O \left( A\right) =\zeta\left( A\times O\right) .\]
\noindent Since $\mu_O \left( A\right) \le \mu\left( A\right) $, $\mu_O $ is absolutely continuous with respect to $ \mu $. By Radon-Nikodym's Theorem, there is a density function $ \frac{d\mu_O}{d\mu} $, defined $\mu-$almost surely, such that $\mu_O $ is represented as an integral of this density against $\mu$. \vskip 2mm \noindent To obtain a common exceptional $\mu-$negligible set outside which $ \frac{d\mu_O}{d\mu} $ is well defined (by a formula) for all $ O\in \mathcal{L} $, we choose the version of $ \frac{d\mu_O}{ d\mu} $ given by the limit of the quotient of balls. To avoid talking about measurability issues, we fix once and for all a sequence $\rho_k$ decreasing to $0$. Given $ O\in \mathcal{L} $, define \[ \overline{\mathit{l}}_x\left( O\right) :=\limsup_{k\rightarrow \infty} \frac{\mu_O \left( B_{\rho_k}(x)\right) }{\mu \left( B_{\rho_k}(x)\right) },\] and \[ \underline{\mathit{l}}_x\left( O\right) :=\liminf_{k\rightarrow \infty} \frac{\mu_O \left( B_{\rho_k}(x)\right) }{\mu \left( B_{\rho_k}(x)\right) },\] \vskip 2mm \noindent It is well known, by a generalization of Lebesgue differentiation theorem (see for example Federer \cite{fed}, section 2.9),
that $ \overline{\mathit{l}}_x $ and $ \underline{\mathit{l}}_x $ are versions of
$ \frac{d\mu_O}{d\mu} $. i.e. For all $ A\in \mathfrak{B}_X$, \begin{equation}\label{int} \mu_O \left( A\right) =\int_A \overline{\mathit{l}}_x\left( O\right) \mu\left( dx\right) =\int_A \underline{\mathit{l}}_x \left( O\right) \mu\left( dx\right) . \end{equation} In particular, $$ \overline{\mathit{l}}_x\left( O\right) = \underline{\mathit{l}}_x\left( O\right) ,\ \mu-\mathrm{a.s.}.$$ \noindent Let $E_O$ be the exceptional set where the limit does not exist. i.e. $$E_O:=\left\lbrace x\in X;\ \overline{\mathit{l}}_x\left( O\right) -\underline{\mathit{l}}_x \left( O\right) >0\right\rbrace \in \mathfrak{B}_X$$
\noindent Put \[ E:=\bigcup_{O\in \mathcal{L} } E_O. \]
\noindent Since $\mu\left( E_O\right) =0$ for all $ O\in \mathcal{L} $, and $\mathcal{L}$ is countable, we have $$\mu\left( E\right) =0 .$$
For $O\in \mathcal{L}$, and $x\in X\setminus E$, put
$$ \mathit{l}_x\left( O\right) = \overline{\mathit{l}}_x \left( O\right) =\underline{\mathit{l}}_x \left( O\right) .$$
\vskip 2pt \noindent Recapitulating, for all $O\in \mathcal{L}$ and every $ A\in \mathfrak{B}_Y $, by (\ref{int}) we have \begin{equation}\label{int1}
\zeta\left( A\times O\right) =\int_A {\mathit{l}}_x\left( O\right) \mu\left( dx\right) . \end{equation}
\subsection{Outer measure $ \nu^*_x $}
Changing the standpoint, we fix $x\in X\setminus E$ and consider the set function $$ O\in \mathcal{L}\rightarrow \mathit{l}_x\left( O\right) .$$ For future reference, observe that $ \mathit{l}_x $ has the following properties: For all $ O$ and $ \tilde{O}\in \mathcal{L} $, \vskip 3pt \noindent \emph{Finite additivity} \begin{equation}\label{add}
\mathit{l}_x\left( O\right) +\mathit{l}_x\left( \tilde{O}\right) = \mathit{l}_x\left( O\cup \tilde{O}\right) + \mathit{l}_x\left( O\cap \tilde{O}\right) , \end{equation} \vskip 2pt \noindent \emph{Finite subadditivity}
\begin{equation}\label{subadd} \mathit{l}_x\left( O\cup \tilde{O}\right)\le \mathit{l}_x\left( O\right) +\mathit{l}_x\left( \tilde{O}\right)
\end{equation} \vskip 2pt \noindent
\emph{Monotonicity} \begin{equation}\label{mono}
O\subseteq \tilde{O} \, \Rightarrow \, \mathit{l}_x\left( O\right) \le \mathit{l}_x\left( \tilde{O}\right) \end{equation} \vskip 3pt \noindent We need a measure on $\mathfrak{B}_Y$, capable of fulfilling the role of $\mathit{l}_x$ in equation (\ref{int1}). Let us start by defining the outer measure \begin{equation} \label{outer} C\subseteq Y\rightarrow \nu^*_x\left( C\right) :=\inf\sum_{i=1}^{\infty}\mathit{l}_x\left( O_i\right) , \end{equation} where the infimum is taken over all the covers $ \left\lbrace O_i \right\rbrace _{i\in \mathbb{N}} \subseteq \mathcal{L} $ of $C$. i.e. \[ C\subseteq \bigcup_{i=1}^\infty O_i \, ,\ \mathrm{and}\ \forall i\in \mathbb{N}, \, O_i\in \mathcal{L}\]
\noindent It is well known that $\nu^*_x$, restricted to the set of $\nu^*_x-$measurable sets, is a measure (denoted $\nu_x$). What we need to prove are: Firstly, that every Borel subset of $Y$ (i.e. in $\mathfrak{B}_Y$) is $\nu^*_x-$measurable and secondly that the integration property (\ref{int1}) is preserved when $\mathit{l}_x$ is replaced by $\nu_x$ (and therefore valid for any set in $\mathfrak{B}_Y$). \vskip 2mm \noindent
Unfortunately, $\mathit{l}_x$ is not countably subadditive; as a result, $\nu^*_x$ is not an extension of $\mathit{l}_x$, which makes our task a little harder. Indeed, for all $ O\in \mathcal{L} $, we clearly have $ \nu^*_x(O)\le \mathit{l}_x(O)$, but the reverse inequality may fail, as the following example shows. \vskip 3mm \noindent \emph{Example}: Let $X=Y=\left[ 0,1\right] $. Let $\mu=\nu$ be the Lebesgue measure on $ \left[ 0,1\right] $ and $\zeta$ the normalized length on the diagonal $\left\lbrace \left(x,x \right) ;\, x\in \left[ 0,1\right] \right\rbrace $. \vskip 3mm \noindent Observe that for every $ x\in \left] 0,1\right[ \, $ and $\rho_k$ small enough, \[ \frac{\mu_{\left[ 0,x\right[} \left( B_{\rho_k}(x)\right) }{\mu \left( B_{\rho_k}(x)\right) }=\frac{1}{2}.\]
\noindent So, $ \mathit{l}_x \left( \left[ 0,x\right[\right) = 1/2 $, while $ \nu_x^* \left( \left[ 0,x\right[\right) = 0 $. In fact, we can cover $ \left[ 0,x\right[ $ with a sequence of intervals $ \left[ 0,x_n\right[ $, where $x_n \in \mathbb{Q}$ increases to $x$. Each one of the intervals $ \left[ 0,x_n\right[ $ verifies \[ \frac{\mu_{\left[ 0,x_n\right[} \left( B_{\rho_k}(x)\right) }{\mu \left( B_{\rho_k}(x)\right) }=0, \] for all $\rho_k$ small enough. Therefore, for all $n\in \mathbb{N}$, $ \mathit{l}_x \left( \left[ 0,x_n\right[\right) =0 $ and \[ \nu_x^* \left( \left[ 0,x\right[\right) \le \sum_{n=1}^\infty \mathit{l}_x \left( \left[ 0,x_n\right[\right) = 0 .\]
\vskip 3mm \noindent \subsection{Borel subsets of $Y$ are $\nu^*_x-$measurable} Let's prove first that any open ball $B_r(y)\in\mathcal{L}$ is $\nu^*_x-$measurable. To this end, fix $C\subseteq Y$ and a cover $ \left\lbrace O_i\right\rbrace_{i\in\mathbb{N}} \subseteq \mathcal{L}$ of $C$. We must show that \begin{equation}\label{measurable} \nu^*_x \left( C \cap B_r(y) \right) + \nu^*_x \left( C\setminus B_r(y)\right) \le \sum_{i=1}^\infty \mathit{l}_x\left( O_i\right). \end{equation} \noindent Since $ O_i\cap B_r(y) \in \mathcal{L}$ and $ \left\lbrace O_i\cap B_r(y) \right\rbrace_{i\in\mathbb{N}} $ is a covering of $ C\cap B_r(y) $, \begin{equation}\label{uno}
\nu^*_x\left( C\cap B_r(y)\right) \le \sum_{i=1}^\infty \mathit{l}_x\left( O_i\cap B_r(y) \right) . \end{equation} \noindent Now, let $\alpha_{i}<1$, $\alpha_{i}\in\mathbb{Q}$. Then, $O_i\setminus \overline{B}_{ \alpha_{i} r}(y)\in \mathcal{L}$ and $ \left\lbrace O_i\setminus \overline{B}_{ \alpha_{i} r}(y) \right\rbrace_{i\in\mathbb{N}} $ is a covering of $C\setminus B_r(y) $. So,
\begin{equation}\label{dos}
\nu^*_x\left( C\setminus B_r(y)\right) \le \sum_{i=1}^\infty \mathit{l}_x\left( O_i\setminus \overline{B}_{ \alpha_i r}(y) \right) .
\end{equation}
By (\ref{add}) and (\ref{mono}), we have
\begin{equation}\label{tres}
\mathit{l}_x\left( O_i\cap B_r(y) \right) + \mathit{l}_x\left( O_i\setminus \overline{B}_{ \alpha_i r}(y) \right) \le \mathit{l}_x\left( O_i \right) + \mathit{l}_x \left( B_r(y) \setminus \overline{B}_{ \alpha_i r}(y) \right).
\end{equation} \vskip 2mm \noindent Adding (\ref{uno}) and (\ref{dos}), and using (\ref{tres}), we obtain \[\nu^*_x\left( C\cap B_r(y)\right) +\nu^*_x\left( C\setminus B_r(y)\right) \] \[\le \sum_{i=1}^\infty \mathit{l}_x\left( O_i \right) +\sum_{i=1}^\infty \mathit{l}_x\left( B_r(y)\setminus \overline{B}_{ \alpha_i r}(y) \right) .\] \vskip 2mm \noindent The result follows if we can make the second sum as small as we want. Unfortunately, for a fixed $x\in E$, we might fail to do so, even though \begin{equation}\label{empty} \bigcap_{i=1}^\infty B_r(y) \setminus \overline{B}_{ \alpha_i r}(y) =\emptyset , \end{equation} for any sequence $\alpha_i\rightarrow 1$. In fact, we can not switch the limits in \[ \lim_{i\rightarrow \infty}\mathit{l}_x\left( B_r(y)\setminus \overline{B}_{ \alpha_i r}(y) \right) = \lim_{i\rightarrow \infty}\lim_{k\rightarrow \infty} \frac{\zeta \left( B_{\rho_k}\left( x\right) \times \left( B_r(y)\setminus \overline{B}_{ \alpha_i r}(y) \right) \right) }{\mu\left( B_{\rho_k}\left( x\right) \right)} .\] So, we need to look back at what happens for $x$ variable and check whether we can solve the problem by throwing away a few more points (meaning to enlarge $E$). \vskip 2mm \noindent
By (\ref{int1}) and (\ref{empty}), for $\gamma < 1$, \[ \int_X \mathit{l}_x\left( B_r(y) \setminus \overline{B}_{ \gamma r}(y)\right) \mu\left( dx\right) =\nu\left( B_r(y) \setminus \overline{B}_{ \gamma r}(y)\right) \longrightarrow_{\gamma \nearrow 1} 0. \] Therefore, \begin{equation}\label{null-a.e.} \mathit{l}_x\left( B_r(y) \setminus \overline{B}_{ \gamma r}(y)\right) \longrightarrow_{\gamma \nearrow 1} 0,\ \mu-\mathrm{a.s.} \end{equation} \vskip 2mm \noindent Now, fix an increasing sequence $ \left\lbrace \gamma_j\right\rbrace_{j\in\mathbb{N}} \subseteq \mathbb{Q}$, $ \gamma_j \rightarrow 1$. Define, for all $y\in\mathcal{Y}$ and $r\in\mathbb{Q}$, \[ {E}_{r,y}:=\left\lbrace x\in X\setminus E ;\ \liminf_{j\rightarrow \infty}\mathit{l}_x\left( B_r(y) \setminus \overline{B}_{ \gamma_j r}(y)\right) > 0 \right\rbrace .\] By (\ref{null-a.e.}), $\mu\left( {E}_{r,y}\right) =0 $. Since \[ {E}_1:= E\cup\bigcup_{r\in\mathbb{Q} ,\, y\in\mathcal{Y} }{E}_{r,y} \] is a countable union of sets of $\mu-$measure 0, we have $$\mu\left( {E}_1\right) =0 . $$ For all $x\in X\setminus E_1 $, and every $\epsilon>0$, we can choose a subsequence $ \left\lbrace \alpha_i\right\rbrace_{i\in\mathbb{N}} $ of $ \left\lbrace \gamma_j\right\rbrace_{j\in\mathbb{N}} $, such that \[ \sum_{i=1}^\infty \mathit{l}_x\left( B_r(y)\setminus \overline{B}_{ \alpha_i r}(y) \right) < \epsilon .\] This completes the proof of (\ref{measurable}). \vskip 2mm \noindent Consequently, for all $x \in X\setminus E_1$, $\nu_x$ is a measure defined at least in the $\sigma-$field generated by $\mathcal{B}$, i.e. $\mathfrak{B}_Y$.
\subsection{Measurability and integrability for compact sets} Our task now is to prove that given $B\in \mathfrak{B}_Y$, the function \begin{equation}\label{meas}
x\in X\setminus E_1 \longrightarrow \nu_x\left( B\right) \end{equation} is measurable and, for all $A\in \mathfrak{B}_X$ \begin{equation}\label{int2} \zeta\left( A\times B\right) =\int_A \nu_x\left( B\right) \mu\left( dx\right) . \end{equation} \vskip 2pt \noindent Let's consider first a finite intersection of closed, compact balls $$ \overline{B}=\overline{B}_{r_1}(y_1) \cap\cdots\cap \overline{B}_{r_n}(y_n) ,$$ with $y_1,\cdots,y_n\in\mathcal{Y}$ and $ r_1,\cdots,r_n\in\mathbb{Q} $.
By the measurability of $\mathit{l}_x$ and (\ref{int1}), the properties (\ref{meas}) and (\ref{int2}) are proven at once if we show \begin{equation}\label{l_x} \nu_x \left( \overline{B} \right) =1-\mathit{l}_x\left( Y\setminus \overline{B}\right) ,\ \mu-\mathrm{a.s.} \end{equation} (We use $\mathit{l}_x\left( Y\setminus \overline{B}\right) $ just because $\mathit{l}_x $ is not defined for $ \overline{B}$.) \vskip 2pt \noindent Let $O_1,\, O_2, \cdots, O_m\subseteq \mathcal{L}$ be a covering of $ \overline{B} $. We can assume the covering is finite, since $\overline{B}$ is compact and the sets in $\mathcal{L}$ are open. \vskip 3pt \noindent Clearly, for all $ x\in X\setminus E $, \[ \lim_{k\rightarrow \infty} \frac{\zeta\left( {B}_{\rho_k}(x)\times \overline{B}\right) }{\mu\left( {B}_{\rho_k}(x)\right) }=\lim_{k\rightarrow \infty} \left( 1-\frac{\zeta\left( {B}_{\rho_k}(x)\times \left( Y\setminus \overline{B} \right)\right) }{\mu\left( {B}_{\rho_k} (x)\right) }\right) =1-\mathit{l}_x\left( Y\setminus \overline{B}\right) . \] Since the covering is finite, by (\ref{subadd}),
\[ \lim_{k\rightarrow \infty} \frac{\zeta\left( {B}_{\rho_k}(x)\times \overline{B}\right) }{\mu\left( {B}_{\rho_k}(x)\right) }\le \mathit{l}_x\left( \bigcup_{i=1}^{m} O_i\right) \le \mathit{l}_x\left( O_1\right) + \cdots + \mathit{l}_x\left( O_m\right). \] Then, \[ 1-\mathit{l}_x\left( Y\setminus \overline{B}\right) \le \nu_x \left( \overline{B} \right) .\] \vskip 2pt \noindent On the other hand, given $ \eta >1 $, $ \eta\in\mathbb{Q} $, and denoting \[ B_\eta={B}_{\eta r_1}(y_1) \cap\cdots\cap {B}_{\eta r_n}(y_n) , \] we have \[ \nu_x \left( \overline{B}\right) \le \mathit{l}_x\left( {B}_\eta \right) .\] Consequently, taking any sequence $\eta_k\searrow 1$, $ \eta_k \in\mathbb{Q} $, we have \[ 1-\mathit{l}_x\left( Y\setminus \overline{B}\right) \le \nu_x \left( \overline{B}\right) \le \lim_{k\rightarrow\infty}\mathit{l}_x\left( {B}_{\eta_k}\right) .\] \vskip 2pt \noindent Since the functions on the left and on the right of the above inequalities are $\mathfrak{B}_{X}-$measurable, and equal to each other $\mu-$almost-surely, using (\ref{int1}), we have proved (\ref{l_x}) and, a fortiori, (\ref{meas}) and (\ref{int2}), at least for a finite intersection of compact balls.
\subsection{Measurability and integrability for Borel sets}
Let $\mathcal{M}$ be the collection of sets $B\in \mathfrak{B}_Y$ actually verifying (\ref{meas}) and (\ref{int2}).
\vskip 3pt
\noindent
Clearly, $\emptyset\in \mathcal{M} $ and $ Y\setminus B\in \mathcal{M} $, whenever $B \in \mathcal{M} $. \vskip 3pt \noindent Now, take a disjoint sequence $B_1,B_2,\, \cdots \, \in \mathcal{M}$ ($B_i\cap B_j= \emptyset $, $ i\neq j $). Since, for all $x\in X\setminus E_1 $, $\nu_x$ is a measure on $\mathfrak{B}_Y$, \begin{equation}\label{suma}
\nu_x\left( \bigcup_{i=1}^{\infty} B_i\right) = \sum_{i=1}^{\infty}\nu_x\left( B_i\right) . \end{equation} By (\ref{meas}), $ x\rightarrow \nu_x\left( \bigcup_{i=1}^{\infty} B_i\right) $ is $\mathfrak{B}_{X}-$measurable, being a countable sum of $\mathfrak{B}_{X}-$measurable functions.
\vskip 2pt \noindent By (\ref{suma}), the monotone convergence theorem, and (\ref{int2}) (remember $B_i \in \mathcal{M}$, for all $i\in \mathbb{N}$), given $ A\in\mathfrak{B}_X $, \[ \int_A \nu_x\left( \bigcup_{i=1}^{\infty} B_i\right) \mu\left( dx\right) = \int_A \sum_{i=1}^{\infty}\nu_x\left( B_i\right) \mu\left( dx\right) \] \[ \ \quad \qquad \qquad \qquad \qquad = \sum_{i=1}^{\infty} \int_A \nu_x\left( B_i\right) \mu\left( dx\right) \] \[ \qquad \quad \qquad \qquad = \sum_{i=1}^{\infty} \zeta\left( A\times B_i\right) \] \[ \qquad \qquad \qquad \qquad = \zeta \left( \bigcup_{i=1}^{\infty} A\times B_i\right) . \] \[ \qquad \qquad \qquad \qquad = \zeta \left( A\times\bigcup_{i=1}^{\infty} B_i\right) . \]
Then,
\[ \bigcup_{i=1}^{\infty} B_i\in \mathcal{M}. \] Since $\mathcal{M}$ contains all the finite intersections of compact balls with center in $\mathcal{Y}$ and rational radius, by the $\pi-\lambda$ theorem (see \cite{Bi}, page 36),
$$ \mathcal{M}=\mathfrak{B}_Y. $$
\subsection{Proof of the theorem} We have proven so far (\ref{meas0}) and (\ref{int0}) for sets of the form $C=A\times B$, with $ A\in \mathfrak{B}_X $ and $ B\in \mathfrak{B}_Y $. \vskip 2mm \noindent Arguing as before, let $\tilde{\mathcal{M}}$ denote the collection of sets $C\in \mathfrak{B}_{X\times Y} $ verifying (\ref{meas0}) and (\ref{int0}). We readily see that $\emptyset\in \tilde{\mathcal{M}} $.
\vskip 2mm \noindent Now, let $C\in\tilde{\mathcal{M}} $. Since $\left( \left( X\times Y\right) \setminus C\right)_x = Y \setminus C_x $, the function \[ x\rightarrow \nu_x\left(\left( \left( X\times Y\right) \setminus C\right) _x \right) = 1- \nu_x\left( C_x\right) \]
is $ \mathfrak{B}_{X}-$measurable, and \[ \int_X \nu_x\left(\left( \left( X\times Y\right) \setminus C\right)_x \right) \mu\left( dx\right) = 1- \int_X \nu_x\left(C_x \right) \mu\left( dx \right) \] \[ \ \qquad \qquad \qquad \qquad = 1-\zeta\left( C\right)\] \[\ \qquad \qquad \qquad \qquad \qquad \ \quad = \zeta\left( \left( X\times Y\right) \setminus C\right). \]
\noindent Then, for all $C\in\tilde{\mathcal{M}} $, we have $\left( X\times Y\right) \setminus C \in\tilde{\mathcal{M}} $. \vskip 3pt \noindent Finally, let $C_1,C_2,\, \cdots \, \in \tilde{\mathcal{M}} $ a sequence of disjoint sets ($C_i\cap C_j= \emptyset $, for all $ i\neq j $). Since $\nu_x$ is a measure on $\mathfrak{B}_Y$, \begin{equation}\label{suma1} \nu_x\left( \left( \bigcup_{i=1}^{\infty} C_i\right)_x\,\right) = \nu_x\left( \bigcup_{i=1}^{\infty} \left( C_i\right) _x\right) = \sum_{i=1}^{\infty}\nu_x\left( \left( C_i\right)_x \right) . \end{equation} Then, $ x\rightarrow \nu_x\left( \left( \bigcup_{i=1}^{\infty} C_i\right)_x \,\right) $ is $\mathfrak{B}_{X}-$measurable, being a sum of $\mathfrak{B}_{X}-$measurable functions, since $C_i \in \tilde{\mathcal{M}}$, for all $i\in \mathbb{N}$.
\vskip 3pt\noindent By (\ref{suma1}), the monotone convergence theorem, and (\ref{int0}) ($C_i \in \tilde{\mathcal{M}}$), \[ \int_X \nu_x\left( \left( \bigcup_{i=1}^{\infty} C_i\right)_x\,\right) \mu\left( dx \right) = \int_X \sum_{i=1}^{\infty}\nu_x\left( \left( C_i\right) _x\right) \mu\left( dx \right) \ \quad \quad \] \[ \ \quad \qquad \qquad \qquad \qquad = \sum_{i=1}^{\infty} \int_X \nu_x\left( \left( C_i\right) _x\right) \mu\left( dx \right) \] \[ \qquad \qquad = \sum_{i=1}^{\infty} \zeta\left( C_i\right) \] \[ \ \qquad \qquad = \zeta \left( \bigcup_{i=1}^{\infty} C_i\right) \] Then, \[ \bigcup_{i=1}^{\infty} C_i\in \tilde{\mathcal{M}}. \] \vskip 2mm \noindent Since \[ \left\lbrace A\times B;\, A\in \mathfrak{B}_X ,\, B\in \mathfrak{B}_Y \right\rbrace \subseteq \tilde{\mathcal{M}} ,\]
by the $\pi-\lambda$ theorem, \[ \tilde{\mathcal{M}} = {\mathfrak{B}_{X\times Y}} .\] \vskip 3mm \noindent The remainder of the proof is standard. Assume $f :{X\times Y}\rightarrow \mathbb{R}$ is a positive, $\mathfrak{B}_{X\times Y}-$measurable function. Then, $f$ can be approximated by an increasing sequence of simple, $\mathfrak{B}_{X\times Y}-$measurable functions (linear combinations of characteristic functions) \[ f_k(x,y)=\sum_{i=1}^{n_k} \lambda_{k,i}\,\mathbf{1}_{C_{k,i}}(x,y),\] where $\lambda_{k,i}\in\mathbb{R}$ and $C_{k,i} \in \mathfrak{B}_{X \times Y}$ ($i=1,\cdots, n_k $, $ k\in \mathbb{N}$). \vskip 3mm \noindent By linearity, for each $ f_k$, properties (\ref{meas0f}) and (\ref{int0f}) are a direct consequence of (\ref{meas0}) and (\ref{int0}). Passing to the limit as $k\rightarrow \infty$, equations (\ref{meas0f}) and (\ref{int0f}) are preserved, and are therefore valid for any positive function such as $f$. \vskip 3mm \noindent To establish the integrability equivalence, we apply the preceding result to the positive and negative parts of the given function. In this way, the proof of Theorem \ref{teo} is complete.
\section{An application to Optimal Transport} \label{OT}
\noindent In this section we use Theorem \ref{teo} to show that a pair of competitive price functions, whose integral with respect to some transference plan equals the transport cost, are conjugate to each other almost surely, complementing Kantorovich's duality theorem on the nature of a pair of competitive prices maximizing the profit. See Villani's book \cite{vill}, page 70, for a very detailed discussion on Kantorovich's theorem, in particular
Theorem 5.1, part \textit{(ii)}, item \textit{(d)}. For this result, we do not need lower semicontinuity of the cost or the other assumptions used to prove Kantorovich's theorem, so we state the following lemma in its simplest form, using the notation of Theorem \ref{teo}.
{\lem\label{ot} Let $ c : X \times Y \rightarrow \mathbb{R} \cup \left\lbrace +\infty\right\rbrace $ be a ${\mathfrak{B}_{X\times Y}}-$measurable
cost function, and let
$ \psi :X\rightarrow \mathbb{R}\cup \left\lbrace +\infty\right\rbrace \ \mathrm{and}\ \phi :Y\rightarrow \mathbb{R}\cup \left\lbrace -\infty\right\rbrace $
be a pair of competitive prices, i.e.
\[ \forall \, \left( x,y\right) \in X\times Y, \ \phi \left( y\right) -\psi\left( x\right) \le c\left( x,y\right) . \] Let $\pi$ be a probability measure on ${\mathfrak{B}_{X\times Y}}$, with marginal measures $\mu$ on ${\mathfrak{B}_X}$ and $\nu$ on ${\mathfrak{B}_Y}$. (i.e. $\pi$ is a transference plan between $\mu$ and $\nu$.) Assume that $ \phi -\psi$ and $ c $ are $\pi-$integrable and \[ \phi \left( y\right) -\psi\left( x\right) = c\left( x,y\right) , \ \pi-\mathrm{a.s.} . \] Then \begin{equation}\label{intes01} \psi\left( x\right) = \sup_{y\in Y}\left(\phi \left( y\right) - c\left( x,y\right) \right) , \ \mu-\mathrm{a.s.} \end{equation} and \begin{equation}\label{intes02}
\phi \left( y\right) = \inf_{x\in X}\left( \psi \left( x\right) + c\left( x,y\right)\right) , \ \nu-\mathrm{a.s.}
\end{equation} } \vskip 2mm \noindent \emph{Proof}: Since $ \psi+ c-\phi = 0 $, $\pi-$a.s., \[\int_{X\times Y} \left( \psi(x)+ c(x,y)-\phi(y) \right) \pi\left(dx,dy\right)= 0. \] Using the decomposition given by Theorem \ref{teo}, equation (\ref{int0f}), \begin{equation}\label{nullint} \int_X \left( \int_Y \left( \psi(x)+ c(x,y)-\phi(y) \right) \nu_x\left(dy\right) \right) \mu(dx)=0. \end{equation} Since $ \psi+ c-\phi \ge 0 $, \begin{equation}\label{sup} \psi(x)\ge \sup_{y\in Y} \left(\phi(y) - c(x,y) \right) \end{equation} and, for all $x$ where it is defined, the function \[ x\longrightarrow \int_Y \left( \psi(x)+ c(x,y)-\phi(y) \right) \nu_x\left(dy\right)
\] is nonnegative. By (\ref{nullint}), \[ \int_Y \left( \psi(x)+ c(x,y)-\phi(y) \right) \nu_x\left(dy\right) = 0,\ \mu-\mathrm{a.s.} \] Then,
\begin{equation}\label{intas} \psi(x)= \int_Y \left(\phi(y) - c(x,y)\right) \nu_x\left(dy\right)\le \sup_{y\in Y} \left(\phi(y) - c(x,y) \right) ,\ \mu-\mathrm{a.s.} \end{equation} Combining (\ref{sup}) and (\ref{intas}), we obtain (\ref{intes01}). Equation (\ref{intes02}) is validated in a similar way.
\end{document}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
\end{document} |
\begin{document}
\setlength{\unitlength}{0.01in} \linethickness{0.01in} \begin{center} \begin{picture}(474,66)(0,0) \multiput(0,66)(1,0){40}{\line(0,-1){24}} \multiput(43,65)(1,-1){24}{\line(0,-1){40}} \multiput(1,39)(1,-1){40}{\line(1,0){24}} \multiput(70,2)(1,1){24}{\line(0,1){40}} \multiput(72,0)(1,1){24}{\line(1,0){40}} \multiput(97,66)(1,0){40}{\line(0,-1){40}} \put(143,66){\makebox(0,0)[tl]{\footnotesize Proceedings of the Ninth Prague Topological Symposium}} \put(143,50){\makebox(0,0)[tl]{\footnotesize Contributed papers from the symposium held in}} \put(143,34){\makebox(0,0)[tl]{\footnotesize Prague, Czech Republic, August 19--25, 2001}} \end{picture} \end{center}
\setcounter{page}{271} \title[Fuzzy functions and $L$-Top]{Fuzzy functions and an extension of the category $L$-Top of Chang-Goguen $L$-topological spaces} \author{Alexander P. \v{S}ostak} \address{Department of Mathematics\\ University of Latvia\\ Riga\\ Latvia} \email{[email protected]} \thanks{Partly supported by grant 01.0530 of Latvijas Zin\=atnes Padome} \subjclass[2000]{03E72, 18A05, 54A40} \keywords{Fuzzy category} \thanks{This article will be revised and submitted for publication elsewhere.} \thanks{Alexander P. \v{S}ostak, {\em Fuzzy functions and an extension of the category $L$-Top of Chang-Goguen $L$-topological spaces}, Proceedings of the Ninth Prague Topological Symposium, (Prague, 2001), pp.~271--294, Topology Atlas, Toronto, 2002} \begin{abstract} We study $\mathcal{F}TOP(L)$, a fuzzy category with fuzzy functions in the role of morphisms. This category has the same objects as the category L-TOP of Chang-Goguen L-topological spaces, but an essentially wider class of morphisms---so called fuzzy functions introduced earlier in our joint work with U. H\"ohle and H. Porst. \end{abstract} \maketitle
\section*{Introduction}
In research works where fuzzy sets are involved, in particular, in Fuzzy Topology, mostly certain usual functions are taken as morphisms: they can be certain mappings between corresponding sets, or between the fuzzy powersets of these sets, etc. On the other hand, in our joint works with U.~H\"ohle and H.E.~Porst \cite{HPS1}, \cite{HPS2} a certain class of $L$-relations (i.e.\ mappings $F: X\times Y \to L$) was distinguished which we view as ($L$-){\it fuzzy functions} from a set $X$ to a set $Y$; these fuzzy functions play the role of morphisms in an {\it $L$-fuzzy category} of sets {$\mathcal{F}SET(L)$}, introduced in \cite{HPS2}.
Later on we constructed a fuzzy category {$\mathcal{F}TOP(L)$}\ related to topology with fuzzy functions in the role of morphisms, see \cite{So2000}. Further, in \cite{So2001} a certain uniform counterpart of {$\mathcal{F}TOP(L)$}\ was introduced. Our aim here is to continue the study of {$\mathcal{F}TOP(L)$}. In particular, we show that the top frame {$\mathcal{F}TOP(L)$}$^\top$ of the fuzzy category {$\mathcal{F}TOP(L)$}\ is a topological category (in H. Herrlich's sense \cite{AHS}) over the top frame {$\mathcal{F}SET(L)$}$^\top$ of the fuzzy category {$\mathcal{F}SET(L)$}.
In order to make the exposition self-contained, we start with Section 1 (Prerequisites), where we briefly recall the three basic concepts which are essentially used in this work: they are the concepts of a $GL$-monoid (see e.g.\ \cite{Ho91}, \cite{Ho94}, etc.); of an $L$-valued set (see e.g.\ \cite{Ho92}, etc.), and of an $L$-fuzzy category (see e.g.\ \cite{So91}, \cite{So92}, \cite{So97}, etc.). In Section 2 we consider basic facts about fuzzy functions and introduce the $L$-fuzzy category {$\mathcal{F}SET(L)$}\ \cite {HPS1}, \cite{HPS2}. The properties of this fuzzy category and some related categories are the subject of Section 3. {$\mathcal{F}SET(L)$}\ is used as the ground category for the $L$-fuzzy category {$\mathcal{F}TOP(L)$}\ whose objects are Chang-Goguen $L$-topological spaces \cite{Ch}, \cite{Go73}, and whose morphisms are certain fuzzy functions, i.e.\ morphisms from {$\mathcal{F}SET(L)$}. Fuzzy category {$\mathcal{F}TOP(L)$}\ is considered in Section 4. Its crisp top frame {$\mathcal{F}TOP(L)^\top$}\ is studied in Section 5. In particular, it is shown that {$\mathcal{F}TOP(L)^\top$}\ is a topological category over {$\mathcal{F}SET(L)$}$^\top$. Finally, in Section 6 we consider the behaviour of the topological property of compactness with respect to fuzzy functions --- in other words in the context of the fuzzy category {$\mathcal{F}TOP(L)$}\ and, specifically, in the context of the category {$\mathcal{F}TOP(L)^\top$}.
\section{Prerequisites}
\subsection{$GL$-monoids}
Let $(L, \leq)$ be a complete infinitely distributive lattice, i.e.\ $(L, \leq)$ is a partially ordered set such that for every subset $A \subset L$ the join $\bigvee A$ and the meet $\bigwedge A$ are defined and $(\bigvee A) \wedge \alpha = \bigvee \{ a\wedge \alpha \mid a \in A \}$ and $(\bigwedge A) \vee \alpha = \bigwedge \{a\vee \alpha \mid a \in A \}$ for every $\alpha \in L$. In particular, $\bigvee L =: \top$ and $\bigwedge L =: \bot$ are respectively the universal upper and the universal lower bounds in $L$. We assume that $\bot \ne \top$, i.e.\ $L$ has at least two elements.
A $GL-$monoid (see \cite{Ho91}, \cite{Ho92}, \cite{Ho94}) is a complete lattice enriched with a further binary operation $*$, i.e.\ a triple $(L, \leq, *)$ such that: \begin{enumerate} \item[(1)] $*$ is monotone, i.e.\ $\alpha \leq \beta$ implies $\alpha * \gamma \leq \beta * \gamma$, $\forall \alpha, \beta, \gamma \in {\it L}$; \item[(2)] $*$ is commutative, i.e.\ $\alpha * \beta = \beta * \alpha$, $\forall \alpha, \beta \in {\it L}$; \item[(3)] $*$ is associative, i.e.\ $\alpha * (\beta * \gamma) = (\alpha * \beta) * \gamma$, $\forall \alpha, \beta, \gamma \in L$; \item[(4)] $(L,\leq,*)$ is integral, i.e.\ $\top$ acts as the unity: $\alpha * \top = \alpha$, $\forall \alpha \in {\it L}$; \item[(5)] $\bot$ acts as the zero element in $(L, \leq, *)$, i.e.\ $\alpha * \bot = \bot$, $\forall \alpha \in {\it L}$; \item[(6)] $*$ is distributive over arbitrary joins, i.e.\ $\alpha * (\bigvee_j \beta_j) = \bigvee_j (\alpha * \beta_j)$, $\forall \alpha \in {\it L}, \forall \{ \beta_j : j \in J \} \subset {\it L}$; \item[(7)] $(L, \leq, *)$ is divisible, i.e.\ $\alpha \leq \beta$ implies existence of $\gamma \in L$ such that $\alpha = \beta * \gamma$. \end{enumerate}
It is known that every $GL-$monoid is residuated, i.e.\ there exists a further binary operation ``$\longmapsto$'' (implication) on $L$ satisfying the following condition: $$\alpha * \beta \leq \gamma \Longleftrightarrow \alpha \leq (\beta \longmapsto \gamma) \qquad \forall \alpha, \beta, \gamma \in L.$$ Explicitly implication is given by $$\alpha \longmapsto \beta = \bigvee \{ \lambda \in L \mid \alpha * \lambda \leq \beta \}.$$
Below we list some useful properties of $GL-$monoids (see e.g.\ \cite{Ho91}, \cite{Ho92}, \cite{Ho94}): \begin{enumerate} \item[(i)] $\alpha \longmapsto \beta = \top \Longleftrightarrow \alpha \leq \beta$; \item[(ii)] $\alpha \longmapsto (\bigwedge_i \beta_i) = \bigwedge_i (\alpha \longmapsto \beta_i)$; \item[(iii)] $(\bigvee_i \alpha_i) \longmapsto \beta = \bigwedge_i (\alpha_i \longmapsto \beta)$; \item[(v)] $\alpha * (\bigwedge_i \beta_i) = \bigwedge_i (\alpha * \beta_i)$; \item[(vi)] $(\alpha \longmapsto \gamma ) * (\gamma \longmapsto \beta) \leq \alpha \longmapsto \beta$; \item[(vii)] $\alpha * \beta \leq (\alpha * \alpha) \vee (\beta * \beta)$. \end{enumerate}
Important examples of $GL$-monoids are Heyting algebras and $MV$-alg\-ebras. Namely, a {\it Heyting algebra} is $GL$-monoid of the type $(L,\leq,\wedge,\vee,\wedge)$ (i.e.\ in case of a Heyting algebra $\wedge = *$), cf.\ e.g.\ \cite{Jhst}. A $GL$-monoid is called an {\it $MV$-algebra} if $(\alpha \longmapsto \bot) \longmapsto \bot = \alpha \quad \forall \alpha \in L$, \cite{Ch58}, \cite{Ch59}, see also \cite[Lemma 2.14]{Ho94}. Thus in an $MV$-algebra an order reversing involution $^c: L \to L$ can be naturally defined by setting $\alpha^c := \alpha \longmapsto \bot \quad \forall \alpha \in L$.
If $X$ is a set and $L$ is a $GL$-monoid, then the fuzzy powerset $L^X$ in an obvious way can be pointwise endowed with a structure of a $GL$-monoid. In particular the $L$-sets $1_X$ and $0_X$ defined by $1_X (x):= \top$ and $0_X (x) := \bot$ $\forall x \in X$ are respectively the universal upper and lower bounds in $L^X$.
In the sequel $L$ denotes an arbitrary $GL$-monoid.
\subsection{$L$-valued sets}
Following U.~H\"ohle (cf.\ e.g.\ \cite{Ho92}) by a (global) {\it $L$-valued set} we call a pair $(X,E)$ where $X$ is a set and $E$ is an {\it $L$-valued equality}, i.e.\ a mapping $E: X \times X \to L$ such that \begin{enumerate} \item[(1eq)] $E(x,x) = \top$ \item[(2eq)] $E(x,y) = E(y,x) \quad \forall x, y \in X$; \item[(3eq)] $E(x,y)*E(y,z) \leq E(x,z) \quad \forall x, y, z \in X$. \end{enumerate}
A mapping $f: (X,E_X) \to (Y,E_Y)$ is called {\it extensional} if $$E_X(x,x') \leq E_Y(f(x),f(x'))\ \forall x,x' \in X.$$ Let $SET(L)$ denote the category whose objects are $L$-valued sets and whose morphisms are extensional mappings between the corresponding $L$-valued sets. Further, recall that an $L$-set, or more precisely, an $L$-subset of a set $X$ is just a mapping $A: X \to L$. In case $(X,E)$ is an $L$-valued set, its $L$-subset $A$ is called {\it extensional} if $$\bigvee_{x\in X} A(x) * E(x,x') \leq A(x') \quad \forall x' \in X.$$
\subsection{{\it L}-fuzzy categories}
\begin{definition}[\cite{So91}, \cite{So92}, \cite{So97}, \cite{So99}] An {\it L}-fuzzy category is a quintuple $\mathcal{C} = (\OBC, \omega, \MC, \mu, \circ)$ where $\mathcal{C}_\bot = (\mathcal{O}b(\mathcal{C}), \mathcal{M}(\mathcal{C}), \circ)$ is a usual (classical) category called {\em the bottom frame} of the fuzzy category $\mathcal{C}$; $\omega: \mathcal{O}b(\mathcal{C}) \longrightarrow {\it L} $ is an $L$-subclass of the class of objects $\mathcal{O}b(\mathcal{C})$ of $\mathcal{C}_\bot$ and $\mu : \mathcal{M}(\mathcal{C}) \longrightarrow {\it L} $ is an $L$-subclass of the class of morphisms $\mathcal{M}(\mathcal{C})$ of $\mathcal{C}_\bot$. Besides $\omega$ and $\mu$ must satisfy the following conditions: \begin{enumerate} \item[(1)] if $f: X \to Y$, then $ \mu (f) \leq \omega (X) \wedge \omega (Y)$; \item[(2)] $\mu (g \circ f) \geq \mu (g) * \mu (f)$ whenever composition $g \circ f$ is defined; \item[(3)] if $e_X: X \to X$ is the identity morphism, then $\mu(e_X) = \omega(X)$. \end{enumerate} \end{definition}
Given an {\it L}-fuzzy category $ \mathcal{C} = (\OBC, \omega, \MC, \mu, \circ)$ and $X \in \mathcal{O}b(\mathcal{C})$, the intuitive meaning of the value $\omega (X)$ is the {\it degree} to which a potential object $X$ of the {\it L}-fuzzy category $\mathcal{C}$ is indeed its object; similarly, for $f \in \mathcal{M}(\mathcal{C})$ the intuitive meaning of $\mu (f)$ is the degree to which a potential morphism $f$ of $\mathcal{C}$ is indeed its morphism.
\begin{definition} Let $\mathcal{C} = (\OBC, \omega, \MC, \mu, \circ)$ be an {\it L}-fuzzy category. By an ({\it L}-fuzzy) subcategory of $\mathcal{C}$ we call an {\it L}-fuzzy category $$\mathcal{C}' = (\OBC, \omega', \MC, \mu', \circ)$$ where $\omega' \leq \omega$ and $\mu' \leq \mu$. A subcategory $\mathcal{C}'$ of the category $\mathcal{C}$ is called full if $\mu'(f) = \mu(f) \wedge \omega'(X) \wedge \omega'(Y)$ for every $f \in \mathcal{M}_\mathcal{C} (X,Y)$, and all $X$,$Y \in \mathcal{O}b(\mathcal{C})$. \end{definition}
Thus an {\it L}-fuzzy category and its subcategory have the same classes of potential objects and morphisms. The only difference of a subcategory from the whole category is in {\it L}-fuzzy classes of objects and morphisms, i.e.\ in the belongness degrees of potential objects and morphisms.
Let $\mathcal{C} = (\mathcal{O}b(\mathcal{C}), \mathcal{M}(\mathcal{C}), \circ)$ be a crisp category and $\mathcal{D} = (\mathcal{O}b(\mathcal{D}), \mathcal{M}(\mathcal{D}), \circ)$ be its subcategory. Then for every $GL$-monoid {\it L}\ the category $\mathcal{D}$ can be identified with the {\it L}-fuzzy subcategory $$\tilde{\mathcal{D}} = (\mathcal{O}b(\mathcal{C}), \omega', \mathcal{M}(\mathcal{C}), \mu', \circ)$$ of $\mathcal{C}$ such that $\omega'(X) = \top$ if $X \in \mathcal{O}b(\mathcal{D})$ and $\omega'(X) = \bot$ otherwise; $\mu' (f) = \top$ if $f \in \mathcal{M}(\mathcal{D})$ and $\mu' (f) = \bot$ otherwise. In particular, ${\tilde{\mathcal{D}}}_\top = \mathcal{D}$.
On the other hand sometimes it is convenient to identify a fuzzy subcategory $$\mathcal{C}' = (\OBC, \omega', \MC, \mu', \circ)$$ of the fuzzy category $$\mathcal{C} = (\OBC, \omega, \MC, \mu, \circ)$$ with the fuzzy category $$\mathcal{D} = (\OBD, \omega_\mathcal{D}, \MD, \mu_\mathcal{D},\circ)$$ where $$\mathcal{O}b(\mathcal{D}) := \{X \in \mathcal{O}b(\mathcal{C}) \mid \omega'(X) \ne \bot \}$$ and $$\mathcal{M}(\mathcal{D}) := \{f \in \mathcal{M}(\mathcal{C}) \mid \mu'(f) \ne \bot \}$$ and $\omega_\mathcal{D}$ and $\mu_\mathcal{D}$ are restrictions of $\omega'$ and $\mu'$ to $\mathcal{O}b(\mathcal{D})$ and $\mathcal{M}(\mathcal{D})$ respectively.
\section{Fuzzy functions and fuzzy category {$\mathcal{F}SET(L)$}}
As was already mentioned above, the concept of a fuzzy function and the corresponding fuzzy category {$\mathcal{F}SET(L)$}\ were introduced in \cite{HPS1}, \cite{HPS2}, where basic properties of fuzzy functions were also studied. In this section we recall those definitions and results from \cite{HPS2} which will be needed in the sequel\footnote{Actually, the subject of \cite{HPS1}, \cite{HPS2} was a more general fuzzy category $L$-{$\mathcal{F}SET(L)$}\ containing {$\mathcal{F}SET(L)$}\ as a full subcategory. However, since for the merits of this work the category {$\mathcal{F}SET(L)$}\ is of importance, when discussing results from \cite{HPS1}, \cite{HPS2} we reformulate (simplify) them for the case of {$\mathcal{F}SET(L)$}\ without mentioning this every time explicitly}. Besides, some new facts about fuzzy functions that will be needed later are established here, too.
\subsection{Fuzzy functions and category {$\mathcal{F}SET(L)$}}
\begin{definition}[cf.\ {\cite[2.1]{HPS1}}] A fuzzy function\footnote{Probably, the name {\it an $L$-fuzzy function} would be more adequate here. However, since the $GL$-monoid $L$ is considered to be fixed, and since the prefix ``$L$'' appears in the text very often, we prefer to say just {\it a fuzzy function}} $F$ from an $L$-valued set $(X,E_X)$ to $(Y,E_Y)$ (in symbols $F: (X,E_X) \rightarrowtail (Y,E_Y))$) is a mapping $F: X \times Y \to L$ such that \begin{enumerate} \item[(1ff)] $F(x,y) * E_Y(y,y') \leq F(x,y') \quad \forall x \in X, \forall y,y' \in Y$; \item[(2ff)] $E_X(x,x') * F(x,y) \leq F(x',y) \quad \forall x,x' \in X, \forall y \in Y$; \item[(3ff)] $F(x,y) * F(x,y') \leq E_Y(y,y') \quad \forall x \in X, \forall y,y' \in Y$. \end{enumerate} \end{definition}
{\small Notice that conditions (1ff)--(2ff) say that $F$ is a certain $L-$relation, while axiom (3ff) together with evaluation $\mu(F)$ (see Subsection \ref{fsetl}) specify that the $L$-relation $F$ is a fuzzy {\it function}}.
\begin{remark} Let $F: (X,E_X) \rightarrowtail (Y,E_Y)$ be a fuzzy function, $X' \subset X$, $Y' \subset Y$, and let the $L$-valued equalities $E_{X'}$ and $E_{Y'}$ on $X'$ and $Y'$ be defined as the restrictions of the equalities $E_X$ and $E_Y$ respectively. Then defining a mapping $F': X'\times Y' \to L$ by the equality $F'(x,y) = F(x,y)\ \forall x \in X', \forall y \in Y'$ a fuzzy function $F': (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ is obtained. We refer to it as the {\it restriction of $F$} to the subspaces $(X',E_{X'})$ and $(Y',E_{Y'})$. \end{remark}
Given two fuzzy functions $F: (X,E_X) \rightarrowtail (Y,E_Y)$ and $G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ we define their {\it composition} $G \circ F: (X,E_X) \rightarrowtail (Z,E_Z)$ by the formula $$(G\circ F)(x,z) = \bigvee_{y\in Y} \bigl( F(x,y) * G(y,z))\bigr).$$
In \cite{HPS2} it was shown that the composition $G \circ F$ is indeed a fuzzy function and that the operation of composition is associative. Further, if we define the identity morphism by the corresponding $L$-valued equality: $E_X: (X,E_X) \rightarrowtail (X,E_X),$ we come to a category {$\mathcal{F}SET(L)$}\ whose objects are $L$-valued sets and whose morphisms are fuzzy functions $F: (X,E_X) \rightarrowtail (Y,E_Y)$.
\subsection{Fuzzy category {$\mathcal{F}SET(L)$}}\label{fsetl}
Given a fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ let $$\mu(F) = \inf_x \sup_y F(x,y).$$ Thus we define an $L$-subclass $\mu$ of the class of all morphisms of {$\mathcal{F}SET(L)$}. In case $\mu(F) \geq \alpha$ we refer to $F$ as a {\it fuzzy $\alpha$-function}.
If $F: (X,E_X) \rightarrowtail (Y,E_Y) \mbox{ and } G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ are fuzzy functions, then $\mu (G \circ F) \geq \mu(G) * \mu(F)$ \cite{HPS2}. Further, given an $L$-valued set $(X,E)$ let $\omega(X,E) := \mu(E) = \inf_x E(x,x) = \top$. Thus a {\it fuzzy category} {$\mathcal{F}SET(L)$} = $(FSET(L), \omega, \mu)$ is obtained.
\begin{example}\label{ex-crisp} Let $* = \wedge$ and $E_Y$ be a crisp equality on $Y$, i.e.\ $E_Y(y,y') = \top \mbox{ iff } y = y'$, and $E_Y(y,y') = \bot$ otherwise. Then every fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ such that $\mu(F) = \top$ is uniquely determined by a usual function $f: X \to Y$. Indeed, let $f(x) = y \mbox{ iff } F(x,y) = \top$. Then condition (3ff) implies that there cannot be $f(x)=y$, $f(x) = y'$ for two different $y$, $y' \in Y$ and condition $\mu(F) = \top$ guarantees that for every $x \in X$ one can find $y \in Y$ such that $f(x)=y$. If besides $E_X$ is crisp, then, vice versa, every mapping $f: X \to Y$ can be viewed as a fuzzy mapping $F: (X,E_X) \rightarrowtail (Y,E_Y)$ (since the conditions of extensionality (2ff) and (3ff) are automatically fulfilled in this case) \end{example}
\begin{remark} If $F'\colon (X',E_{X'}) \rightarrowtail (Y,E_{Y})$ is the restriction of $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ (see Remark above) and $\mu(F) \geq \alpha$, then $\mu(F') \geq \alpha$. However, generally the restriction $F'\colon (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ of $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ may fail to satisfy condition $\mu(F') \geq \alpha$. \end{remark}
\subsection{Images and preimages of $L$-sets under fuzzy functions}\label{impref}
Given a fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$ and $L$-subsets $A: X \to L$ and $B: Y \to L$ of $X$ and $Y$ respectively, we define the fuzzy set $F^{\rightarrow}(A): Y \to L$ (the image of $A$ under $F$) by the equality $F^{\rightarrow}(A)(y) = \bigvee_x F(x,y) * A(x)$ and the fuzzy set $F^{\leftarrow}(B): X \to L$ (the preimage of $B$ under $F$) by the equality $F^{\leftarrow}(B)(x) = \bigvee_y F(x,y) * B(y)$.
Note that if $A \in L^X$ is extensional, then $F^{\rightarrow}(A) \in L^Y$ is extensional (by (2ff)) and if $B \in L^Y$ is extensional, then $F^{\leftarrow}(B) \in L^X$ is extensional (by (3ff)).
\begin{proposition}[Basic properties of images and preimages of $L$-sets under fuzzy functions] \label{im-pr} \mbox{} \begin{enumerate} \item $F^{\rightarrow}(\bigvee_{i \in \mathcal{I}} A_i) = \bigvee_{i \in \mathcal{I}} F^{\rightarrow}(A_i) \qquad \forall \{A_i: i \in {\mathcal{I}} \} \subset L^X$; \item $F^{\rightarrow}(A_1\bigwedge A_2) \leq F^{\rightarrow}(A_1) \bigwedge F^{\rightarrow}(A_2) \qquad \forall A_1, A_2 \in L^X$; \item If $L$-sets $B_i$ are {\em extensional}, then $$ \bigwedge_{i \in {\mathcal{I}}} F^{\leftarrow}(B_i)*\mu(F)^2 \leq F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \leq \bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i) \qquad \forall \{B_i: i \in \mathcal{I} \} \subset L^Y. $$ In particular, if $\mu(F) = \top$, then $F^{\leftarrow}(\bigwedge_{i\in \mathcal{I}} B_i) = \bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i)$ for every family of extensional $L$-sets $\{B_i: i \in \mathcal{I} \} \subset L^Y$. \item $F^{\leftarrow}(\bigvee_{i \in \mathcal{I}} B_i) = \bigvee_{i \in \mathcal{I}} F^{\leftarrow}(B_i) \qquad \forall \{B_i: i \in {\mathcal{I}} \} \subset L^Y$. \item $A*\mu(F)^2 \leq F^{\leftarrow}(F^{\rightarrow}(A))$ for every $A \in L^X$. \item $F^{\to}\bigl(F^{\gets}(B)\bigr) \leq B$ for every {\em extensional} $L$-set $B\in L^Y$. \item $F^{\leftarrow}(c_Y) \geq \mu(F) * c$ where $c_Y: Y \to L$ is the constant function taking value $c \in L$. In particular, $F^{\leftarrow}(c_Y) = c$ if $\mu(F) = \top$. \end{enumerate} \end{proposition}
\begin{proof} (1). $$ \begin{array}{lll} \bigl(\bigvee_i F^{\rightarrow}(A_i)\bigr)(y)& =& \bigvee_i \bigvee_x \bigl(F(x,y) * A_i(x) \bigr)\\ & =& \bigvee_x \bigvee_i \bigl(F(x,y) * A_i (x)\bigr)\\ & =& \bigvee_x (F(x,y) * (\bigvee_i A_i)(x))\\ & =& F^{\rightarrow}(\bigvee_i A_i)(y). \end{array} $$
(2). The validity of (2) follows from the monotonicity of $F^{\to}$.
(3). To prove property 3 we first establish the following inequality \begin{equation}\label{basic1} \bigvee_{y\in Y}\bigl(F(x,y)\bigr)^2 \geq \bigl(\bigvee_{y\in Y} F(x,y)\bigr)^2. \end{equation} Indeed, by a property (vii) of a GL-monoid $$ \begin{array}{lll} \Bigl(\bigvee_{y\in Y} F(x,y)\Bigr)^2& =& \Bigl(\bigvee_{y\in Y} F(x,y)\Bigr) * \Bigl(\bigvee_{y'\in Y} F(x,y')\Bigr)\\ & =& \bigvee_{y,y'\in Y} \Bigl(F(x,y) * F(x,y') \Bigr)\\ & \leq& \bigvee_{y,y'\in Y} \Bigl(F(x,y)^2 \vee F(x,y')^2\Bigr)\\ & =& \bigvee_{y\in Y} \Bigl(F(x,y)\Bigr)^2. \end{array}$$ In particular, it follows from (\ref{basic1}) that \begin{equation}\label{basic2} \forall x \in X\ \bigvee_{y\in Y} \bigl(F(x,y)\bigr)^2 \geq \mu(F)^2. \end{equation} Now, applying (\ref{basic2}) and taking into account extensionality of $L$-sets $B_i$, we proceed as follows: $$ \begin{array}{lll} \multicolumn{3}{l}{ \Bigl(\bigwedge_i F^{\gets}(B_i)\Bigr)(x) * \bigl(\mu(F)\bigr)^2 } \\ & \leq& \Bigl(\bigwedge_i \bigl(\bigvee_{y_i \in Y} F(x,y_i)*B_i(y_i)\bigr)\Bigr) * \bigvee_{y\in Y}\bigl(F(x,y)\bigr)^2\\ & =& \bigvee_{y\in Y} \Bigl(\bigl(F(x,y)\bigr)^2 * \bigwedge_i\bigl(\bigvee_{y_i \in Y} F(x,y_i) * B_i(y_i)\bigr)\Bigr)\\ & =& \bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i \bigl(F(x,y) * \bigl(\bigvee_{y_i \in Y} F(x,y_i) * B_i(y_i)\bigr)\bigr)\Bigr)\\ & =& \bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{y_i\in Y} \bigl(F(x,y) * F(x,y_i)\bigr) * B_i(y_i)\bigr)\Bigr)\\ & \leq& \bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{y_i \in Y} E_Y(y,y_i) * B_i(y_i)\bigr)\Bigr)\\ & \leq& \bigvee_{y\in Y}\bigl(F(x,y) * \bigl(\bigwedge_i B_i(y)\bigr)\bigr)\\ & =& F^{\gets}\bigl(\bigwedge_i B_i\bigr)(x), \end{array} $$ and hence $$\Bigl(\bigwedge_i F^{\gets}(B_i)\Bigr)* \bigl(\mu(F)\bigr)^2 \leq F^{\gets}\bigl(\bigwedge_i B_i\bigr).$$ To complete the proof notice that the inequality $$F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \leq \bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i)$$ is obvious.
(4). The proof of (4) is similar to the proof of (1) and is therefore omitted.
(5). Let $A \in L^X$, then $$ \begin{array}{lll} F^{\leftarrow}(F^{\rightarrow}(A))(x)& =& \bigvee_y (F(x,y)*F^{\rightarrow}(A)(y))\\ & =& \bigvee_y \bigl(F(x,y)*\bigl(\bigvee_{x'} F(x',y) * A(x') \bigr)\bigr)\\ & \geq& \bigvee_y (F(x,y)^2 * A(x))\\ & \geq& (\mu(F))^2 * A(x) \end{array} $$ for every $x\in X$, and hence 5 holds.
(6). To show property 6 assume that $B\in L^Y$ is extensional. Then $$ \begin{array}{lll} F^{\rightarrow}(F^{\leftarrow}(B))(y)& =& \bigvee_x\bigl(F(x,y) * F^{\gets}(B)(x)\bigr)\\ & =& \bigvee_x F(x,y) * \bigl(\bigvee_{y'} F(x,y') * B(y') \bigr)\\ & =& \bigvee_{x \in X,\, y'\in Y} \bigl(F(x,y) * F(x,y') * B(y')\bigr)\\ & \leq& \bigvee_{y'\in Y} E_Y(y,y')*B(y')\\ & \leq& B(y), \end{array} $$ and hence $F^{\to}\bigl(F^{\gets}(B)\bigr) \leq B$.
(7). The proof of property 7 is straightforward and therefore omitted. \end{proof}
\begin{comments} \mbox{} \begin{enumerate} \item Properties 1,2 and 4 were proved in \cite[Proposition 3.2]{HPS2}. Here we reproduce these proofs in order to make the article self-contained. \item The inequality in item 2 of the previous proposition obviously cannot be improved even in the crisp case. \item One can show that the condition of extensionality cannot be omitted in items 3 and 6. \item The idea of the proof of Property 3 was communicated to the author by U.~H\"ohle in Prague at TOPOSYM in August 2001. \item In \cite{HPS2} there was established the following version of Property 3 without the assumption of extensionality of $L$-sets $B_i$ in case $L$ is completely distributive: $$ {(\bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i))}^5 \leq F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \leq \bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i) \qquad \forall \{B_i: i \in \mathcal{I} \} \subset L^Y$$ and $$ \bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i) = F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \qquad \forall \{B_i: i \in \mathcal{I} \} \subset L^Y, \mbox{ in case } * = \wedge$$ \end{enumerate} \end{comments}
\subsection{Injectivity, surjectivity and bijectivity of fuzzy functions}
\begin{definition}\label{inj} A fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$ is called {\it injective}, if \begin{itemize} \item[(inj)] $F(x,y) * F(x',y) \leq E_X(x,x') \quad \forall x,x' \in X, \forall y \in Y$. \end{itemize} \end{definition}
\begin{definition}\label{sur} Given a fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$, we define its degree of surjectivity by the equality: $$\sigma(F) := \inf_y \sup_x F(x,y)$$ In particular, a fuzzy function $F$ is called $\alpha$-surjective, if $\sigma(F) \geq \alpha$.
In case $F$ is injective and $\alpha$-surjective, it is called {\it $\alpha$-bijective}. \end{definition}
\begin{remark} Let $(X,E_X)$, $(Y,E_Y)$ be $L$-valued sets and $(X',E_{X'})$, $(Y',E_{Y'})$ be their subspaces. Obviously, the restriction $F'\colon (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ of an injection $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ is an injection. The restriction $F'\colon (X,E_{X}) \rightarrowtail (Y',E_{Y'})$ of an $\alpha$-surjection $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ is an $\alpha$-surjection. On the other hand, generally the restriction $F'\colon (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ of an $\alpha$-surjection $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ may fail to be an $\alpha$-surjection. \end{remark}
A fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$ determines a fuzzy {\it relation} $F^{-1}: X\times Y \to L$ by setting $F^{-1}(y,x) = F(x,y)$\ $\forall x \in X$, $\forall y \in Y$.
\begin{proposition}[Basic properties of injections, $\alpha$-surjections and $\alpha$-bi\-jections] \label{in-sur} \mbox{} \begin{enumerate} \item $F^{-1} :(Y,E_Y) \rightarrowtail (X,E_X)$ is a fuzzy function iff $F$ is injective (actually $F^{-1}$ satisfies (3ff) iff $F$ satisfies (inj)). \item $F$ is $\alpha$-bijective iff $F^{-1}$ is $\alpha$-bijective. \item If $F$ is injective, and $L$-sets $A_i$ are extensional, then $$ (\bigwedge_i F^{\rightarrow}(A_i))*(\sigma(F))^2 \leq F^{\rightarrow}(\bigwedge_i A_i) \leq \bigwedge_i F^{\rightarrow}(A_i) \qquad \forall \{A_i : i \in {\mathcal{I}}\} \subset L^X. $$ In particular, if $F$ is $\top$-bijective, then $$ \bigwedge_i F^{\rightarrow}(A_i) = F^{\rightarrow}(\bigwedge_i A_i) \qquad \forall \{A_i : i \in {\mathcal{I}}\} \subset L^X.$$ \item $F^{\rightarrow}(F^{\leftarrow}(B)) \geq \sigma(F)^2 * B$. In particular, if $F$ is $\top$-surjective and $B$ is extensional, then $F^{\rightarrow}(F^{\leftarrow}(B)) = B$. \item $F^{\rightarrow}(c_X) \geq \sigma(F) * c$ where $c_X: X \to L$ is the constant function with value $c$. In particular, $F^{\rightarrow}(c_X) = c$ if $\sigma(F) = \top$. \end{enumerate} \end{proposition}
\begin{proof} Properties 1 and 2 follow directly from the definitions.
(3). The proof of Property 3 is analogous to the proof of item 3 of Proposition \ref{im-pr}:
First, reasoning as in the proof of $(\ref{basic1})$ we establish the following inequality \begin{equation}\label{basic3} \bigvee_{x\in X}\bigl(F(x,y)\bigr)^2 \geq \bigl(\bigvee_{x\in X} F(x,y)\bigr)^2. \end{equation} In particular, from here it follows that \begin{equation}\label{basic4} \forall y \in Y\ \bigvee_{x\in X} \bigl(F(x,y)^2\bigr) \geq \sigma(F)^2. \end{equation} Now, applying $(\ref{basic4})$ and taking into account extensionality of $L$-sets $A_i$, we proceed as follows: $$ \begin{array}{lll} \multicolumn{3}{l}{ \Bigl(\bigwedge_i F^{\to}(A_i)\Bigr)(y) * \bigl(\sigma(F)\bigr)^2 } \\ & \leq& \Bigl(\bigwedge_i \bigl(\bigvee_{x_i \in X} F(x_i,y)*A_i(x_i)\bigr)\Bigr) * \bigvee_{x\in X}\bigl(F(x,y)\bigr)^2 \\ & =& \bigvee_{x\in X} \Bigl(\bigl(F(x,y)\bigr)^2 * \bigwedge_i\bigl(\bigvee_{x_i \in X} F(x_i,y) * A_i(x_i)\bigr)\Bigr) \\ & =& \bigvee_{x\in X}\Bigl(F(x,y) * \bigwedge_i \bigl(F(x,y) * \bigl(\bigvee_{x_i \in X} F(x_i,y) * A_i(x_i)\bigr)\bigr)\Bigr) \\ & = & \bigvee_{x\in X}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{x_i\in X} \bigl(F(x,y) * F(x_i,y)\bigr) * A_i(x_i)\bigr)\Bigr) \\ & \leq& \bigvee_{x\in X}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{x_i \in X} E_X(x,x_i) * A_i(x_i)\bigr)\Bigr) \\ & \leq& \bigvee_{x\in X}\bigl(F(x,y) * \bigl(\bigwedge_i A_i(x)\bigr)\bigr) \\ & =& F^{\to}\bigl(\bigwedge_i A_i\bigr)(y), \end{array} $$ and hence $$\Bigl(\bigwedge_i F^{\to}(A_i)\Bigr)* \bigl(\sigma(F)\bigr)^2 \leq F^{\to}\bigl(\bigwedge_i A_i\bigr).$$ To complete the proof notice that the inequality $$F^{\rightarrow}(\bigwedge_{i \in \mathcal{I}} A_i) \leq \bigwedge_{i \in \mathcal{I}} F^{\rightarrow}(A_i)$$ is obvious.
(4). Let $B \in L^Y$, then $$ \begin{array}{lll} F^{\rightarrow}(F^{\leftarrow}(B))(y)& =& \bigvee_x \bigl(F(x,y)*F^{\gets}(B)(x)\bigr)\\ & =& \bigvee_x F(x,y) * \bigl(\bigvee_{y'} F(x,y') * B(y') \bigr)\\ & \geq& \bigvee_x F(x,y) * F(x,y) * B(y)\\ & \geq& \sigma (F)^2 * B(y), \end{array} $$ and hence the first inequality in item 4 is proved. From here and Proposition \ref{im-pr} (6) the second statement of item 4 follows.
(5) The proof of the last property is straightforward and therefore omitted. \end{proof}
\begin{question} We do not know whether inequalities in items 3, 4 and 5 can be improved. \end{question}
\begin{comments} \mbox{} \begin{enumerate} \item Properties 1 and 2 were first established in \cite{HPS2}. \item In \cite{HPS2} the following version of Property 3 was proved:\\ If $L$ is completely distributive and $F$ is injective, then $$ (\bigwedge_i F^{\rightarrow}(A_i))^5 \leq F^{\rightarrow}(\bigwedge_i A_i) \leq \bigwedge_i F^{\rightarrow}(A_i) \qquad \forall \{A_i : i \in {\mathcal{I}}\} \subset L^X $$ and $$F^{\rightarrow}(\bigwedge_i A_i) = \bigwedge_i F^{\rightarrow}(A_i) \quad \mbox{ in case } \wedge = *$$ No extensionality is assumed in these cases. \item In case of an ordinary function $f:X \to Y$ the equality $$f^{\to}\bigl(\bigwedge_{i\in \mathcal{I}} (A_i)\bigr) = \bigwedge_{i\in\mathcal{I}} f^{\to}(A_i)$$ holds just under assumption that $f$ is injective. On the other hand, in case of a fuzzy function $F$ to get a reasonable counterpart of this property we need to assume that $F$ is bijective. The reason for this, as we see it, is that in case of an ordinary function $f$, when proving the equality, we actually deal only with points belonging to the image $f(X)$, while the rest of $Y \setminus f(X)$ does not play any role. On the other hand, in case of a fuzzy function $F: X \rightarrowtail Y$ the whole $Y$ is ``an image of $X$ to a certain extent'', and therefore, when operating with images of $L$-sets, we need to take into account, to what extent a point $y$ is in the ``image'' of $X$. \end{enumerate} \end{comments}
\section{Further properties of fuzzy category {$\mathcal{F}SET(L)$}}
In this section we continue to study properties of the fuzzy category {$\mathcal{F}SET(L)$}. In contrast to the previous section, where our principal interest was in the ``set-theoretic'' aspect of fuzzy functions, here we shall be mainly interested in their properties of a ``categorical nature''.
First we shall specify the two (crisp) categories related to {$\mathcal{F}SET(L)$}: namely, its bottom frame {$\mathcal{F}SET(L)$}$^\bot$ (= {$\mathcal{F}SET(L)$}; this category was introduced already in Section 2) and its top frame {$\mathcal{F}SET(L)$}$^\top$. The last one will be of special importance for us. By definition its morphisms $F$ satisfy condition $\mu(F) = \top$, and as we have seen in the previous section, fuzzy functions satisfying this condition ``behave themselves much more like ordinary functions'' than general fuzzy functions. Accordingly, the results which we are able to establish about {$\mathcal{F}SET(L)$}$^\top$ and about the topological category {$\mathcal{F}TOP(L)^\top$}\ based on it are more complete and nicer than their more general counterparts.
Second, note that the ``classical'' category $SET(L)$ of $L$-valued sets can be naturally viewed as a subcategory of {$\mathcal{F}SET(L)$}$^\top$. In case $L=\{0,1\}$ obviously the two categories collapse into the category SET of sets. On the other hand, starting with the category SET (=SET$(\{0,1\})$) (i.e.\ $L=\{0,1\}$) of sets and enriching it with respective fuzzy functions, we obtain again the category SET as {$\mathcal{F}SET(L)$}$^\top$ and obtain the category of sets and partial functions as {$\mathcal{F}SET(L)$}$^\bot$.
\subsection{Preimages of $L$-valued equalities under fuzzy functions}
Let an $L$-valued set $(Y,E_Y)$, a set $X$ and a mapping $F: X \times Y \to L$ be given. We are interested to find the largest $L$-valued equality $E_X$ on $X$ for which $F: (X,E_X) \to (Y,E_Y)$ is a fuzzy function. This $L$-valued equality will be called {\it the preimage of $E_Y$ under $F$} and will be denoted $F^{\gets}(E_Y)$.
Note first that the axioms \begin{enumerate} \item[(1ff)] $F(x,y)*E_Y(y,y') \leq F(x,y')$, and \item[(3ff)] $F(x,y)*F(x,y') \leq E(y,y')$ \end{enumerate} do not depend on the $L$-valued equality on $X$ and hence we have to demand that the mapping $F$ originally satisfies them. To satisfy the last axiom \begin{enumerate} \item[(2ff)] $E_X(x,x') * F(x,y) \leq F(x',y)$ \end{enumerate} in an ``optimal way'' we define $$E_X(x,x') := \bigwedge_y \Bigl(\bigl( F(x,y) \longmapsto F(x',y)\bigr) \wedge \bigl(F(x',y) \longmapsto F(x,y) \bigr)\Bigr).$$ Then $E_X: X\times X \to L$ is an $L$-valued equality on $X$. Indeed, the validity of properties $E_X(x,x) = \top$ and $E_X(x,x') = E_X(x',x)$ is obvious. To establish the last property, i.e.\ $E_X(x,x') * E_X(x',x'') \leq E_X(x,x'')$, we proceed as follows: $$ \begin{array}{lll} \multicolumn{3}{l}{ E_X(x,x') * E_X(x',x'') } \\ & =& \bigwedge_y \Bigl( \bigl(F(x,y) \longmapsto F(x',y)\bigr) \wedge \bigl(F(x',y) \longmapsto F(x,y)\bigr) \Bigr) \\ & & * \bigwedge_y \Bigl( \bigl(F(x',y) \longmapsto F(x'',y)\bigr) \wedge \bigl(F(x'',y) \longmapsto F(x',y)\bigr) \Bigr) \\ & \leq& \bigwedge_y \Bigl( \bigl(F(x,y) \longmapsto F(x',y)\bigr) \wedge \bigl(F(x',y) \longmapsto F(x,y) \bigr) \\ & & \ * \bigl(F(x',y) \longmapsto F(x'',y)\bigr) \wedge \bigl(F(x'',y) \longmapsto F(x',y)\bigr) \Bigr) \\ & \leq& \bigwedge_y \Bigl( \bigl(F(x,y) \longmapsto F(x',y)\bigr) * \bigl(F(x',y) \longmapsto F(x,y) \bigr) \\ & & \ \wedge (\bigl(F(x',y) \longmapsto F(x'',y)\bigr) * \bigl(F(x'',y) \longmapsto F(x',y) \bigr) \Bigr) \\ & \leq& \bigwedge_y \Bigl( \bigl(F(x,y) \longmapsto F(x'',y)\bigr) \wedge \bigl(F(x'',y) \longmapsto F(x,y)\bigr) \Bigr) \\ & =& E_X(x,x''). 
\end{array} $$ Further, just from the definition of $E_X$ it is clear that $F$ satisfies the axiom {\it (2ff)} and hence it is indeed a fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y).$ Moreover, from the definition of $E_X$ it is easy to note that it is really the largest $L$-valued equality on $X$ for which $F$ satisfies axiom {\it (2ff)}.
Finally, note that the value $\mu(F)$ is an inner property of the mapping $F: X\times Y \to L$ and does not depend on $L$-valued equalities on these sets.
\begin{question} We do not know whether the preimage $F^{\gets}(E_Y)$ is the initial structure for the source $F: X \rightarrowtail (Y,E_Y)$ in {$\mathcal{F}SET(L)$}. Namely, given an $L$-valued set $(Z,E_Z)$ and a ``fuzzy quasi-function'' $G: (Z,E_Z) \rightarrowtail X$, is it true that the composition $F \circ G: (Z,E_Z) \rightarrowtail (Y,E_Y)$ is a fuzzy function if and only if $G: (Z,E_Z) \rightarrowtail (X,E_X)$ is a fuzzy function? By a fuzzy quasi-function we mean that $G$ satisfies property {\it (2ff)}, which is the only axiom not depending on the equality on $X$. \end{question}
\subsection{Images of $L$-valued equalities under fuzzy functions}
Let an $L$-valued set $(X,E_X)$, a set $Y$ and a mapping $F: X \times Y \to L$ be given. We are interested to find the smallest $L$-valued equality $E_Y$ on $Y$ for which $F: (X,E_X) \to (Y,E_Y)$ is a fuzzy function. This $L$-valued equality will be called {\it the image of $E_X$ under $F$} and will be denoted $F^{\to}(E_X)$.
Note first that the axiom \begin{enumerate} \item[(2ff)] $E_X(x,x') * F(x,y) \leq F(x',y)$ \end{enumerate} does not depend on the $L$-valued equality on $Y$ and hence we have to demand that the mapping $F$ originally satisfies it. Therefore we have to ensure that $F$ satisfies the remaining two axioms: \begin{enumerate} \item[(1ff)] $F(x,y)*E_Y(y,y') \leq F(x,y')$, and \item[(3ff)] $F(x,y)*F(x,y') \leq E_Y(y,y')$ \end{enumerate} These conditions can be rewritten in the form of the double inequality: $$ \begin{array}{lll} F(x,y)*F(x,y')& \leq& E_Y(y,y')\\ & \leq& \bigl(F(x,y')\longmapsto F(x,y)\bigr) \wedge \bigl(F(x,y) \longmapsto F(x,y')\bigr). \end{array} $$ Defining $E_Y$ by the equality $$E_Y(y,y') = \bigvee_x \bigl(F(x,y) * F(x,y')\bigr),$$ we shall obviously satisfy both of them. Moreover, it is clear that $E_Y$ satisfies property (3ff) and besides $E_Y$ cannot be diminished without losing this property. Hence we have to show only that $E_Y$ is indeed an $L$-valued equality. However, to prove this we need the assumption that $\sigma(F) = \top$, that is $F$ is $\top$-surjective. Note that $$E_Y(y,y) = \bigvee_x \bigl(F(x,y)*F(x,y)\bigr) \geq (\sigma(F))^2,$$ and hence the first axiom is justified in case $\sigma(F) = \top$.
The equality $E_Y(y,y') = E_Y(y',y)$ is obvious.
Finally, to establish the last property, we proceed as follows. Let $y,y',y'' \in Y$. Then $$ \begin{array}{lll} \multicolumn{3}{l}{ E_Y(y,y')*E_Y(y',y'') } \\ & =& \bigvee_x\bigl(F(x,y)*F(x,y')\bigr) \, * \, \bigvee_x\bigl(F(x,y')*F(x,y'')\bigr) \\ & =& \bigvee_{x,x'} \bigl(F(x,y)*F(x,y')* F(x',y')*F(x',y'')\bigr)\\ & \leq& \bigvee_{x,x'} \bigl(F(x,y)* E_X(x,x')*F(x',y'')\bigr)\\ & \leq& \bigvee_x \bigl(F(x,y) * F(x,y'')\bigr) \\ & =& E(y,y''). \end{array} $$
\begin{question} We do not know whether the image $F^{\to}(E_X)$ is the final structure for the sink $F: (X,E_X) \rightarrowtail Y$ in {$\mathcal{F}SET(L)$}\ in case $\sigma(F) = \top$. Namely, given an $L$-valued set $(Z,E_Z)$ and a ``fuzzy almost-function'' $G: Y \rightarrowtail (Z,E_Z)$, is it true that the composition $G \circ F: (X,E_X) \rightarrowtail (Z,E_Z)$ is a fuzzy function if and only if $G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ is a fuzzy function? By a fuzzy almost-function we mean that $G$ satisfies properties {\it (1ff)} and {\it (3ff)}, which do not depend on the equality on $Y$. \end{question}
\subsection{Products in {$\mathcal{F}SET(L)^\top$}}
Let $\mathcal{Y} = \{(Y_i, E_i): i \in \mathcal{I} \}$ be a family of $L$-valued sets and let $Y = \prod_i Y_i $ be the product of the corresponding sets. We introduce the $L$-valued equality $E: Y\times Y \to L$ on $Y$ by setting $E_Y(y,y') = \bigwedge_{i\in\mathcal{I}} E_i(y_i,{y'}_i)$ where $y=(y_i)_{i\in\mathcal{I}}$, $y'=({y'}_i)_{i\in\mathcal{I}}$. Further, let $p_i :Y \to Y_i$ be the projection. Then the pair $(Y,E)$ thus defined with the family of projections $p_i :Y \to Y_i$, $i \in \mathcal{I}$, is the product of the family $\mathcal{Y}$ in the category {$\mathcal{F}SET(L)^\top$}.
To show this notice first that, since the morphisms in this category are fuzzy functions, a projection $p_{i_0}: Y \to Y_{i_0}$ must be realized as the fuzzy function $p_{i_0}: Y \times Y_{i_0} \to L$ such that $p_{i_0}(y,y^0_{i_0}) = \top$ if and only if the $i_0$-coordinate of $y$ is $y^0_{i_0}$ and $p_{i_0}(y,y^0_{i_0}) = \bot$ otherwise.
Next, let $F_i: (X,E_X) \rightarrowtail (Y_i,{E_i})$, $i \in \mathcal{I}$ be a family of fuzzy functions. We define the fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ by the equality: $$F(x,y) = \bigwedge_{i\in\mathcal{I}} F_i(x,y_i).$$ It is obvious that $\mu(F) = \top$ and hence $F$ is in {$\mathcal{F}SET(L)^\top$}. Finally, notice that the composition $$(X,E_X) \ \stackrel{F}{\longrightarrow} \ (Y,E_Y) \ \stackrel{p_{i_0}}{\longrightarrow} \ (Y_{i_0},E_{i_0})$$ is the fuzzy function $$F_{i_0}: (X,E_X) \rightarrowtail (Y_{i_0},E_{i_0}).$$ Indeed, let $x^0 \in X$ and $y^0_{i_0} \in Y_{i_0}$. Then, taking into account that $\mu(F_i) = \top$ for all $i \in \mathcal{I}$, we get $$ \begin{array}{lll} (p_{i_0} \circ F)(x^0,y^0_{i_0})& =& \bigvee_{y\in Y} \bigl(p_{i_0}(y,y^0_{i_0}) \wedge F(x^0,y)\bigr)\\ & =& \bigvee_{y\in Y} \bigl(p_{i_0}(y,y^0_{i_0}) \wedge \bigwedge_{i\in\mathcal{I}} F_i(x^0,y_i)\bigr)\\ & =& F_{i_0}(x^0,y^0_{i_0}). \end{array}$$
\begin{question} We do not know whether products in {$\mathcal{F}SET(L)$}\ can be defined in a reasonable way. \end{question}
\subsection{Coproducts in {$\mathcal{F}SET(L)$}}
Let $\mathcal{X}$ = $\{(X_i,E_i): i \in \mathcal{I} \}$ be a family of $L$-valued sets, let $X = \bigcup X_i$ be the disjoint sum of sets $X_i$. Further, let $q_i: X_i \to X$ be the inclusion map. We introduce the $L$-equality on $X$ by setting $E(x,x') = E_i(x,x')$ if $(x,x') \in X_i \times X_i$ for some $i \in \mathcal{I}$ and $E(x,x') = \bot$ otherwise (cf.\ \cite{Ho92}). An easy verification shows that $(X,E)$ is the coproduct of $\mathcal{X}$ in {$\mathcal{F}SET(L)$}\ and hence, in particular, in {$\mathcal{F}SET(L)^\top$}.
Indeed, given a family of fuzzy functions $F_i: (X_i,E_i) \to (Y,E_Y)$, let the fuzzy function $$\oplus_{i\in\mathcal{I}} F_i: (X,E) \to (Y,E_Y)$$ be defined by $$\oplus_{i\in\mathcal{I}} F_i(x,y) = F_{i_0}(x,y) \mbox{ whenever } x \in X_{i_0}.$$ Then for $x=x_{i_0} \in X_{i_0}$ we have $$ \begin{array}{lll} \bigl(\oplus_{i\in\mathcal{I}} F_i \circ q_{i_0} \bigr)(x,y)& =& \bigvee_{x'\in X} \Bigl(q_{i_0}(x,x') \wedge \bigl(\oplus_{i\in\mathcal{I}} F_i (x',y)\bigr)\Bigr)\\ & =& F_{i_0}(x,y). \end{array} $$
\subsection{Subobjects in {$\mathcal{F}SET(L)$}}
Let $(X,E)$ be an $L$-valued set, let $Y \subset X$ and let $e: Y \to X$ be the natural embedding. Further, let $E_Y := e^{\gets}(E)$ be the preimage of the $L$-valued equality $E$. Explicitly, in this case this means that $E_Y(y,y') = E(y,y')$ for all $y,y' \in Y$. One can easily see that $(Y,E_Y)$ is a subobject of $(X,E)$ in the fuzzy category {$\mathcal{F}SET(L)$}.
\section{Fuzzy category {$\mathcal{F}TOP(L)$}}
\subsection{Basic concepts}
\begin{definition}[see \cite{So2000}, cf.\ also \cite{Ch}, \cite{Go73}, \cite{HoSo99}]\label{LFTop} A family $\tau_X \subset L^X$ of {\it extensional} $L$-sets\footnote{Since $L$-topology is defined on an {\it $L$-valued set} $X$ the condition of extensionality of elements of $L$-topology seems natural. Besides the assumption of extensionality is already implicitly included in the definition of a fuzzy function.} is called an $L$-topology on an $L$-valued set $(X,E_X)$ if it is closed under finite meets, arbitrary joins and contains $0_X$ and $1_X$. Corresponding triple $(X,E_X,\tau_X)$ will be called an $L$-valued $L$-topological space or just an $L$-topological space for short. A fuzzy function $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ is called {\it continuous} if $F^{\leftarrow}(V) \in \tau_X$ for all $V \in \tau_Y$. \end{definition}
$L$-topological spaces and continuous fuzzy mappings between them form the fuzzy category which will be denoted {$\mathcal{F}TOP(L)$}. Indeed, let $$F\colon (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)\ \mbox{and}\ G\colon (Y,E_Y,\tau_Y) \rightarrowtail (Z,E_Z,\tau_Z)$$ be continuous fuzzy functions and let $W \in \tau_Z$. Then $$ \begin{array}{lll} (G \circ F)^{\gets}(W)(x)& =& \bigvee_z\Bigl(\bigl(G \circ F)(x,z) * W(z)\Bigr)\\ & =& \bigvee_{z,y} \Bigl(F(x,y)*G(y,z)*W(z)\Bigr). \end{array} $$ On the other hand, $G^{\gets}(W)(y) = \bigvee_z G(y,z) * W(z)$ and $$ \begin{array}{lll} F^{\gets}\bigl(G^{\gets}(W)\bigr)(x)& =& \bigvee_y F(x,y) *\bigl(\bigvee_z (G(y,z) * W(z))\bigr)\\ & =& \bigvee_{z,y} \Bigl(F(x,y)*G(y,z)*W(z)\Bigr). \end{array} $$ Thus $(G \circ F)^{\gets}(W) = F^{\gets}\bigl(G^{\gets}(W)\bigr)$ for every $W$, and hence composition of continuous fuzzy functions is continuous. Besides, we have seen already before that $\mu(G \circ F) \geq \mu(G) * \mu(F)$. Finally, $E_X^{\leftarrow}(B) = B$ for every {\it extensional} $B \in L^X$ and hence the identity mapping $E_X: (X,E_X,\tau_X) \rightarrowtail (X,E_X,\tau_X)$ is continuous.
\begin{remark} In case when $L$-valued equality $E_X$ is crisp, i.e.\ when $X$ is an ordinary set, the above definition of an L-topology on $X$ reduces to the ``classical'' definition of an $L$-topology in the sense of Chang and Goguen, \cite{Ch}, \cite{Go73}. \end{remark}
\begin{remark} Some (ordinary) subcategories of the fuzzy category {$\mathcal{F}TOP(L)$}\ will be of special interest for us. Namely, let {$\mathcal{F}TOP(L)$}$^\bot$ =: FTOP(L) denote the bottom frame of {$\mathcal{F}TOP(L)$}, let {$\mathcal{F}TOP(L)^\top$}\ be the top frame of {$\mathcal{F}TOP(L)$}, and finally let L-TOP(L) denote the subcategory of {$\mathcal{F}TOP(L)$}\ whose morphisms are ordinary functions. Obviously the ``classical'' category L-TOP of Chang-Goguen $L$-topological spaces can be obtained as a full subcategory L-TOP(L) whose objects carry crisp equalities. Another way to obtain L-TOP is to consider fuzzy subcategory of {$\mathcal{F}TOP(L)$} whose objects carry crisp equalities and whose morphisms satisfy condition $\mu(F) > \bot.$ \end{remark}
In case when $L$ is an $MV$-algebra and involution $^c: L \to L$ on $L$ is defined in the standard way, i.e.\ $\alpha^c := \alpha \longmapsto \bot$ we can reasonably introduce the notion of a closed $L$-set in an $L$-topological space:
\begin{definition} An $L$-set $A$ in an $L$-topological space $(X,E_X,\tau_X)$ is called closed if $A^c \in \tau_X$ where $A^c \in L^X$ is defined by the equality $$A^c(x) := A(x) \longmapsto \bot \quad \forall x \in X.$$ \end{definition}
Let $\mathcal{C}_X$ denote the family of all closed $L$-sets in $(X,E_X,\tau_X)$. In case when $L$ is an $MV$-algebra the families of sets $\tau_X$ and $\mathcal{C}_X$ mutually determine each other: $$A \in \tau_X \Longleftrightarrow A^c \in \mathcal{C}_X.$$
\subsection{Analysis of continuity}
Since the operation of taking preimages $F^{\leftarrow}$ commutes with joins, and in case when $\mu(F)=\top$ also with meets (see Proposition \ref{im-pr}), one can easily verify the following
\begin{theorem}\label{cont} Let $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ be $L$-topological spaces, $\beta_Y$ be a base of $\tau_Y$, $\xi_Y$ its subbase and let $F: X \rightarrowtail Y$ be a fuzzy function. Then the following are equivalent: \begin{enumerate} \item[(1con)] $F$ is continuous; \item[(2con)] for every $V \in \beta_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$; \item[(3con)] $F^{\leftarrow}(Int_Y(B)) \leq Int_X (F^{\leftarrow}(B))$, for every $B \in L^Y$ where $Int_X$ and $Int_Y$ are the corresponding $L$-interior operators on $X$ and $Y$ respectively. \end{enumerate} In case when $\mu(F) = \top$ these conditions are equivalent also to the following \begin{enumerate} \item[(4con)] for every $V \in \xi_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$. \end{enumerate} \end{theorem}
In case when $L$ is an MV-algebra one can characterize continuity of a fuzzy function by means of closed $L$-sets and $L$-closure operators:
\begin{theorem}\label{cont-cl} Let $(L,\leq,\vee,\wedge,*)$ be an MV-algebra, $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ be $L$-topological spaces and $F: X \rightarrowtail Y$ be a fuzzy function. Further, let $\mathcal{C}_X$, $\mathcal{C}_Y$ denote the families of closed $L$-sets and $cl_X$, $cl_Y$ denote the closure operators in $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ respectively. Then the following two conditions are equivalent: \begin{enumerate} \item[(1con)] $F$ is continuous; \item[(5con)] For every $B \in \mathcal{C}_Y$ it follows $F^{\leftarrow}(B) \in \mathcal{C}_X$. \end{enumerate} In case when $\mu(F) = \top$, the previous conditions are equivalent to the following: \begin{enumerate} \item[(6con)] For every $A \in L^X$ it holds $F^{\rightarrow}(cl_X(A)) \leq cl_Y(F^{\rightarrow}(A)).$ \end{enumerate} \end{theorem}
\begin{proof} In case when $L$ is equipped with an order reversing involution, as it is in our situation, families of closed and open $L$-sets mutually determine each other. Therefore, to verify the equivalence of (1con) and (5con) it is sufficient to notice that for every $B \in L^Y$ and every $x \in X$ it holds $$ \begin{array}{lll} F^{\leftarrow}(B^c)(x)& =& \bigvee_y \bigl(F(x,y) * (B(y) \longmapsto \bot)\bigr)\\ & =& \bigvee_y \bigl(F(x,y) * B(y) \longmapsto \bot \bigr)\\ & =& \bigl(\bigvee_y (F(x,y) * B(y))\bigr)^c\\ & =& (F^{\leftarrow}(B))^c(x), \end{array} $$ and hence $$ F^{\leftarrow}(B^c) = ( F^{\leftarrow}(B))^c \quad \forall B \in L^Y, $$ i.e.\ operation of taking preimages preserves involution.
To show implication (5con) $\Longrightarrow$ (6con) under assumption $\mu(F)=\top$ let $A \in L^X$. Then, according to Proposition \ref{im-pr} (5), $$A \leq F^{\leftarrow}(F^{\rightarrow}(A)) \leq F^{\leftarrow}(cl_Y(F^{\rightarrow}(A))),$$ and hence, by (5con), also $$cl_X (A) \leq F^{\leftarrow}(cl_Y (F^{\rightarrow}(A))).$$ Now, by monotonicity of the image operator and by Proposition \ref{im-pr} (6) (taking into account that $cl_X A$ is extensional as a closed $L$-set), we get: $$ F^{\rightarrow}(cl_X (A)) \leq F^{\rightarrow}\bigl(F^{\leftarrow}(cl_Y (F^{\rightarrow}(A)))\bigr) \leq cl_Y (F^{\rightarrow}(A)).$$
Conversely, to show implication (6con) $\Longrightarrow$ (5con) let $B \in \mathcal{C}_Y$ and let $F^{\gets}(B) := A$. Then, by (6con), $$F^{\to}(cl_X (A)) \leq cl_Y (F^{\to} (A)) \leq cl_Y (B) = B.$$ In virtue of Proposition \ref{im-pr} (5) and taking into account that $\mu(F) = \top$, it follows from here that $cl_X (A) \leq F^{\gets}(B) = A$, and hence $cl_X (A) = A$. \end{proof}
\subsection{Fuzzy $\alpha$-homeomorphisms and fuzzy $\alpha$-homeomorphic spaces}
The following definition naturally stems from Definitions \ref{inj}, \ref{sur} and \ref{LFTop} and item 2 of Proposition \ref{im-pr}:
\begin{definition} Given $L$-topological spaces $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$, a fuzzy function $F: X \rightarrowtail Y$ is called a fuzzy $\alpha$-homeomorphism if $\mu(F) \geq \alpha$, $\sigma(F) \geq \alpha$, it is injective, continuous, and the inverse fuzzy function $F^{-1}: Y \rightarrowtail X$ is also continuous. Spaces $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ are called fuzzy $\alpha$-homeomorphic if there exists a fuzzy $\alpha$-homeomorphism $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$. \end{definition}
One can easily verify that composition of two fuzzy $\alpha$-homeomorphisms is a fuzzy $\alpha^2$-home\-omorphism; in particular, composition of fuzzy $\top$-homeo\-morphisms is a fuzzy $\top$-homeo\-morphism, and hence fuzzy $\top$-homeo\-morph\-isms determine the equivalence relation $\stackrel{\top}{\approx}$ on the class of all $L$-topological spaces. Besides, since every (usual) homeomorphism is obviously a fuzzy $\top$-homeo\-morphism, homeomorphic spaces are also fuzzy $\top$-homeo\-morphic: $$(X,E_X,\tau_X) \approx (Y,E_Y,\tau_Y) \Longrightarrow (X,E_X,\tau_X) \stackrel{\top}{\approx} (Y,E_Y,\tau_Y).$$ The converse generally does not hold:
\begin{example} Let $L$ be the unit interval $[0,1]$ viewed as an $MV$-algebra (i.e.\ $\alpha * \beta = \max\{\alpha+\beta-1, 0\}$), let $(X,\varrho)$ be an uncountable separable metric space such that $\varrho(x,x') \leq 1$ $\forall x,x' \in X$, and let $Y$ be its countable dense subset. Further, let the $L$-valued equality on $X$ be defined by $E_X(x,x') := 1 - \varrho(x,x')$ and let $E_Y$ be its restriction to $Y$. Let $\tau_X$ be any $L$-topology on an $L$-valued set $(X,E_X)$ (in particular, one can take $\tau_X := \{c_X \mid c \in [0,1] \}$). Finally, let a fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ be defined by $F(x,y) := 1 - \varrho(x,y)$. It is easy to see that $F$ is a fuzzy $\top$-homeomorphism and hence $(X,E_X,\tau_X) \stackrel{\top}{\approx} (Y,E_Y,\tau_Y)$. On the other hand $(X,E_X,\tau_X) \not\approx (Y,E_Y,\tau_Y)$ just for set-theoretical reasons. \end{example}
\section{Category {$\mathcal{F}TOP(L)^\top$}}
Let {$\mathcal{F}TOP(L)^\top$}\ be the top-frame of {$\mathcal{F}TOP(L)$}, i.e.\ {$\mathcal{F}TOP(L)^\top$}\ is a category whose objects are the same as in {$\mathcal{F}TOP(L)$}, that is L-topological spaces, and morphisms are continuous fuzzy functions $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ such that $\mu(F) = \top.$
Note, that as different from the fuzzy category {$\mathcal{F}TOP(L)$}, {$\mathcal{F}TOP(L)^\top$}\ is a usual category. Applying Theorem \ref{cont}, we come to the following result:
\begin{theorem}\label{cont-top} Let $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ be $L$-topological spaces, $\beta_Y$ be a base of $\tau_Y$, $\xi_Y$ its subbase and let $F: X \rightarrowtail Y$ be a fuzzy function. Then the following conditions are equivalent: \begin{enumerate} \item[(1con)] $F$ is continuous; \item[(2con)] for every $V \in \beta_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$; \item[(3con)] $F^{\leftarrow}(Int_Y(B)) \leq Int_X (F^{\leftarrow}(B))$, where $Int_X$ and $Int_Y$ are the corresponding $L$-interior operators on $X$ and $Y$ respectively; \item[(4con)] for every $V \in \xi_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$. \end{enumerate} \end{theorem}
In case when $L$ is an MV-algebra, we get the following from Theorem \ref{cont-cl}:
\begin{theorem}\label{cont-cltop} Let $(L,\leq,\vee,\wedge,*)$ be an MV-algebra, $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ be $L$-topological spaces and $F: X \rightarrowtail Y$ be a morphism in {$\mathcal{F}TOP(L)^\top$}. Further, let $\mathcal{C}_X$, $\mathcal{C}_Y$ denote the families of closed $L$-sets and $cl_X$, $cl_Y$ denote the closure operators on $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ respectively. Then the following conditions are equivalent: \begin{enumerate} \item[(1con)] $F$ is continuous; \item[(5con)] For every $B \in \mathcal{C}_Y$ it follows $F^{\leftarrow}(B) \in \mathcal{C}_X$; \item[(6con)] For every $A \in L^X$ it holds $F^{\rightarrow}(cl_X(A)) \leq cl_Y(F^{\rightarrow}(A))$. \end{enumerate} \end{theorem}
\begin{theorem}\label{topcat} $$\mbox{ {$\mathcal{F}TOP(L)^\top$} is a topological category over the category {$\mathcal{F}SET(L)^\top$}. }$$ \end{theorem}
\begin{proof} Since intersection of any family of L-topologies is an L-topology, {$\mathcal{F}TOP(L)^\top$}\ is fiber complete. Therefore we have to show only that any structured source in {$\mathcal{F}SET(L)^\top$}\ $F: (X,E_X) \rightarrowtail (Y,E_Y,\tau_Y)$ has a unique initial lift. Let $$\tau_X := F^{\gets}(\tau_Y):= \{F^{\gets}(V) \mid V \in \tau_Y \}.$$ Then from Proposition \ref{im-pr} it follows that $\tau_X$ is closed under taking finite meets and arbitrary joins. Furthermore, obviously $F^{\gets}(0_Y) = 0_X$ and taking into account condition $\mu(F)=\top$ one easily establishes that $F^{\gets}(1_Y) = 1_X$. Therefore, taking into account that preimages of extensional $L$-sets are extensional, (see Subsection \ref{impref}) we conclude that the family $\tau_X$ is an $L$-topology on $X$.
Further, just from the construction of $\tau_X$ it is clear that $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ is continuous and, moreover, $\tau_X$ is the weakest L-topology on $X$ with this property.
Let now $(Z,E_Z,\tau_Z)$ be an $L$-topological space and $H: (Z,E_Z) \rightarrowtail (X,E_X)$ a fuzzy function such that the composition $G := H \circ F: (Z,E_Z,\tau_Z) \rightarrowtail (Y,E_Y,\tau_Y)$ is continuous. To complete the proof we have to show that $H$ is continuous.
Indeed, let $U \in \tau_X$. Then there exists $V \in \tau_Y$ such that $U = F^{\gets}(V)$. Therefore $$H^{\gets}(U) = H^{\gets}\bigl(F^{\gets}(V)\bigr) = G^{\gets}(V) \in \tau_Z$$ and hence $H$ is continuous. \end{proof}
\subsection{Products in {$\mathcal{F}TOP(L)^\top$}}
Our next aim is to give an explicit description of the product in {$\mathcal{F}TOP(L)^\top$}.
Given a family $\mathcal{Y} = \{(Y_i, E_i, \tau_i): i \in \mathcal{I} \}$ of $L$-topological spaces, let $(Y,E)$ be the product of the corresponding $L$-valued sets $\{(Y_i, E_i): i \in \mathcal{I} \}$ in {$\mathcal{F}SET(L)^\top$} and let $p_{i}: Y \to Y_{i}$ be the projections.
Further, for each $U_i \in \tau_i$ let $\hat U_i := p_i^{-1}(U_i)$. Then the family $\xi := \{\hat U_i: U_i \in \tau_i, i \in \mathcal{I} \}$ is a subbase of an $L$-topology $\tau$ on the product $L$-valued set $(Y,E)$ which is known to be the product $L$-topology for L-topological spaces $\{(Y_i,\tau_i) \mid i \in \mathcal{I} \}$ in the category L-TOP. In its turn the triple $(Y,E,\tau)$ is the product of $L$-topological spaces $\{(Y_i, E_i, \tau_i): i \in \mathcal{I} \}$ in the category {$\mathcal{F}TOP(L)^\top$}. Indeed, let $(Z,E_Z,\tau_Z)$ be an $L$-topological space and $\{ F_i: Z \rightarrowtail Y_i \mid i \in \mathcal{I} \}$ be a family of continuous fuzzy mappings. Then, defining a mapping $F: Z\times Y \to L$ by $F(z,y) = \wedge_{i\in\mathcal{I}} F_i(z,y_i)$ we obtain a fuzzy function $F: Z \rightarrowtail Y$ such that $\mu(F) = \bigwedge_{i\in\mathcal{I}}\mu(F_i) = \top$ and besides it can be easily seen that for every $i_0 \in \mathcal{I}$, every $z \in Z$, and for every $U_{i_0} \in \tau_{i_0}$ it holds: $$\begin{array}{lll} F^{\leftarrow}(\hat U_{i_0})(z)& =& \bigvee_{y \in Y} \Bigl(\bigl(\bigwedge_{i\in\mathcal{I}}F_i(z,y_i)\bigr) * U_{i_0}(y_{i_0})\Bigr) \\ & =& \bigvee_{y_i \in Y_i} \Bigl(\bigwedge_{i\in\mathcal{I}} \bigl(F_i(z,y_i)\bigr) * U_{i_0}(y_{i_0})\Bigr)\\ & =& \bigwedge_{i\in \mathcal{I}} \Bigl(\bigvee_{y_i \in Y_i} F_i(z,y_i)\Bigr) \wedge \bigvee_{y_{i_0} \in Y_{i_0}} \bigl(F_{i_0} (z,y_{i_0}) * U_{i_0}(y_{i_0})\bigr) \\ & =& \top \wedge F^{\gets}_{i_0}(U_{i_0})(z)\\ & =& F^{\gets}_{i_0}(U_{i_0})(z). \end{array}$$ Hence continuity of all $F_i$ guarantees the continuity of $F: (Z,E_Z,\tau_Z) \rightarrowtail (Y,E,\tau)$. Thus $(Y,E,\tau)$ is indeed the product of $\mathcal{Y} = \{(Y_i, E_i, \tau_i): i \in \mathcal{I} \}$ in {$\mathcal{F}TOP(L)^\top$}.
\begin{question} We do not know whether products in the fuzzy category {$\mathcal{F}TOP(L)$}\ exist. \end{question}
\subsection{Subspaces in {$\mathcal{F}TOP(L)^\top$}}
Let $(X,E,\tau)$ be an $L$-valued $L$-topological space, $Y \subset X$ and let $(Y,E_Y)$ be the subobject of the L-valued set $(X,E)$ in the category {$\mathcal{F}SET(L)$}. Further, let $\tau_Y$ be the subspace L-topology, that is $(Y,\tau_Y)$ is a subspace of the $L$-topological space $(X,\tau)$ in the category L-TOP. Then it is clear that the triple $(Y,E_Y,\tau_Y)$ is the subobject of $(X,E, \tau)$ in the category {$\mathcal{F}TOP(L)^\top$} (and in the fuzzy category {$\mathcal{F}TOP(L)$}\ as well).
\subsection{Coproducts in {$\mathcal{F}TOP(L)^\top$}}
Given a family $\mathcal{X} := \{(X_i,E_i,\tau_i) \}$ of $L$-topological spaces let $(X,E)$ be the direct sum of the corresponding $L$-valued sets $(X_i,E_i)$ in {$\mathcal{F}SET(L)$}. Further, let $\tau$ be the $L$-topology on $X$ determined by the subbase $\bigcup_{i\in\mathcal{I}} \tau_i \subset L^X$. In other words $(X,\tau)$ is the coproduct of $L$-topological spaces $(X_i,\tau_i)$ in the category L-TOP. Then the triple $(X,E,\tau)$ is the coproduct of the family $\mathcal{X} := \{(X_i,E_i,\tau_i) \}$ in the category {$\mathcal{F}TOP(L)^\top$} (and in the fuzzy category {$\mathcal{F}TOP(L)$} as well). Indeed, let $q_i: (X_i,E_i,\tau_i) \to (X,E,\tau), i\in\mathcal{I}$ denote the canonical embeddings. Further, consider an $L$-topological space $(Y,E_Y,\tau_Y)$ and a family of continuous fuzzy functions $F_i: (X_i,E_i,\tau_i) \rightarrowtail (Y,E_Y,\tau_Y)$. Then, by setting $F(x,y) := F_i(x_i,y)$ whenever $x = x_i \in X_i$, we obtain a continuous fuzzy function $F: (X,E,\tau) \rightarrowtail (Y,E_Y,\tau_Y)$ (i.e.\ a mapping $F: X\times Y \to L$) such that $F_i = q_i \circ F$ for every $i \in \mathcal{I}$.
\subsection{Quotients in {$\mathcal{F}TOP(L)^\top$}}
Let $(X,E_X,\tau_X)$ be an $L$-topological space, let $q: X \to Y$ be a surjective mapping. Further, let $q^{\to}(E_X) =: E_Y$ be the image of the $L$-valued equality $E_X$ and let $\tau_Y = \{V \in L^Y \mid q^{-1}(V) \in \tau_X \}$, that is $\tau_Y$ is the quotient $L$-topology determined by the mapping $q: (X,\tau) \to Y$ in the category L-TOP. Then $(Y,E_Y,\tau_Y)$ is the quotient object in the category {$\mathcal{F}TOP(L)^\top$}. Indeed, consider a fuzzy function $F: (X,E_X,\tau_X) \rightarrowtail (Z,E_Z,\tau_Z)$ and let $G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ be a morphism in {$\mathcal{F}SET(L)$}\ such that $q \circ G = F$. Then an easy verification shows that the fuzzy function $G: (Y,E_Y,\tau_Y) \rightarrowtail (Z,E_Z,\tau_Z)$ is continuous (i.e.\ a morphism in {$\mathcal{F}TOP(L)^\top$}) if and only if $F: (X,E_X,\tau_X) \to (Z,E_Z,\tau_Z)$ is continuous (i.e.\ a morphism in {$\mathcal{F}TOP(L)^\top$}).
Our next aim is to consider the behaviour of some topological properties of $L$-valued $L$-topological spaces with respect to fuzzy functions. In this work we restrict our interest to the property of compactness. Some other topological properties, in particular, connectedness and separation properties, will be studied in a subsequent work.
\section{Compactness}
\subsection{Preservation of compactness by fuzzy functions}
One of the basic facts of general topology --- both classic and ``fuzzy'', is preservation of compactness type properties by continuous mappings. Here we present a counterpart of this fact in {$\mathcal{F}TOP(L)$}. However, since in literature on fuzzy topology different definitions of compactness can be found, first we must specify which one of compactness notions will be used.
\begin{definition}\label{comp} An $L$-topological space $(X,E,\tau)$ will be called $(\alpha, \beta)$-compact where $\alpha, \beta \in L$, if for every family $\mathcal{U} \subset \tau$ such that $\bigvee \mathcal{U} \geq \alpha$ there exists a finite subfamily $\mathcal{U}_0 \subset \mathcal{U}$ such that $\bigvee\mathcal{U}_0 \geq \beta$. An $(\alpha,\alpha)$-compact space will be called just $\alpha$-compact.\footnote{Note that Chang's definition of compactness \cite{Ch} for a $[0,1]$-topological space is equivalent to our $1$-compactness. A $[0,1]$-topological space is compact in Lowen's sense \cite{Lo76} if it is $(\alpha,\beta)$-compact for all $\alpha \in [0,1]$ and all $\beta < \alpha$.} \end{definition}
\begin{theorem}\label{pr-comp} Let $(X,E_X, \tau_X)$, $(Y, E_Y, \tau_Y)$ be $L$-topological spaces, $F:X \rightarrowtail Y $ be a continuous fuzzy function such that $\mu(F) \geq \beta$, and $\sigma(F) \geq \gamma$. If $X$ is $\alpha * \beta$-compact, then $Y$ is $(\alpha, \alpha*\beta*\gamma)$-compact. \end{theorem}
\begin{proof} Let $\mathcal{V} \subset \tau_Y$ be such that $\bigvee \mathcal{V} \geq \alpha$. Then, applying Proposition \ref{im-pr} (4), (7) and taking in view monotonicity of $F^{\gets}$, we get $$\bigvee_{V \in \mathcal{V}} F^{\leftarrow}(V) = F^{\leftarrow}(\bigvee_{V \in \mathcal{V}}V) \geq F^{\leftarrow}(\alpha) \geq \alpha * \beta.$$ Now, since $(X,E_X,\tau_X)$ is $\alpha * \beta$-compact, it follows that there exists a finite subfamily $\mathcal{V}_0 \subset \mathcal{V}$ such that $$\bigvee_{V\in \mathcal{V}_0} F^{\leftarrow}(V) \geq \alpha*\beta.$$ Applying Propositions \ref{im-pr} (6),(4) and \ref{in-sur} (5) we obtain: $$\bigvee_{V\in\mathcal{V}_0} V \geq F^{\rightarrow}\Bigl(F^{\leftarrow}\bigl(\bigvee_{V\in\mathcal{V}_0} V\bigr)\Bigr) = F^{\rightarrow}\Bigl(\bigvee_{V\in\mathcal{V}_0} \bigl(F^{\leftarrow}(V)\bigr)\Bigr) \geq F^{\rightarrow}\bigl(\alpha * \beta\bigr) \geq \alpha*\beta*\gamma.$$ \end{proof}
\begin{corollary} Let $(X,E_X, \tau_X)$, $(Y,E_Y,\tau_Y)$ be $L$-topological spaces, $F:X \rightarrowtail Y$ be a fuzzy function such that $\mu(F) = \top $ and $\sigma(F) = \top$. If $X$ is $\alpha$-compact, then $Y$ is also $\alpha$-compact. \end{corollary}
\subsection{Compactness in case of an $MV$-algebra}
In case $L$ is an MV-algebra one can characterize compactness by systems of closed $L$-sets:
\begin{proposition} Let $(X,E_X,\tau_X)$ be an $L$-topological space and let $\mathcal{C}_X$ be the family of its closed $L$-sets. Then the space $(X,E_X,\tau_X)$ is $(\alpha,\beta)$-compact if and only if for every $\mathcal{A} \subset \mathcal{C}_X$ the following implication follows: $$ \mbox{if } \bigwedge_{A\in \mathcal{A}_0} A \not\leq \beta^c \mbox{ for every finite family } \mathcal{A}_0 \subset \mathcal{A}, \mbox{ then } \bigwedge_{A\in \mathcal{A}} A \not\leq \alpha^c. $$ \end{proposition}
\begin{proof} One has just to take involutions ``$\longmapsto \bot$'' in the definition of $(\alpha,\beta)$-compactness and apply the De Morgan laws. \end{proof}
\subsection{Perfect mappings: case of an MV-algebra $L$}
In order to study preservation of compactness by preimages of fuzzy functions we introduce the property of $(\alpha,\beta)$-perfectness of a fuzzy function. Since we shall operate with closed $L$-sets, from the beginning it will be assumed that $L$ is an $MV$-algebra.
First we shall extend the notion of compactness for $L$-subsets of $L$-topolog\-ical spaces. We shall say that an $L$-set $S: X \to L$ is $(\alpha,\beta)$-compact if for every family $\mathcal{A}$ of closed $L$-sets of $X$ the following implication holds: $$ \mbox{ if } S\wedge\bigl(\bigwedge_{A\in \mathcal{A}_0} A\bigr) \not\leq \beta^c \mbox{ for every finite } \mathcal{A}_0 \subset \mathcal{A} \mbox{ then } S\wedge\bigl(\bigwedge_{A\in \mathcal{A}} A \bigr) \not\leq \alpha^c. $$
Further, since the preimage $F^{\gets}(y_0): X \to L$ of a point $y_0 \in Y$ under a fuzzy function $F: X \rightarrowtail Y$ is obviously determined by the equality $$F^{\gets}(y_0)(x) = \bigvee_{y\in Y} F(x,y)*y_0(y) = F(x,y_0),$$ the general definition of $(\alpha,\beta)$-compactness of an $L$-set in this case means the following:
The preimage $F^{\gets}(y_0)$ of a point $y_0$ under a fuzzy function $F$ is $(\alpha,\beta)$-compact if for every family $\mathcal{A}$ of closed sets of $X$ the following implication holds: $$ \bigvee_x \Bigl(F(x,y_0)\wedge(\bigwedge_{A\in \mathcal{A}_0} A(x))\Bigr) \not\leq \beta^c\ \
\forall \mathcal{A}_0 \subset \mathcal{A}, |\mathcal{A}_0| < \aleph_0 $$ implies $$\bigvee_x \bigl(F(x,y_0) \wedge (\bigwedge_{A\in \mathcal{A}} A(x))\bigr) \not\leq \alpha^c.$$
Now we can introduce the following
\begin{definition} A continuous fuzzy mapping $F\colon (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ is called $(\alpha,\beta)$-perfect if \begin{itemize} \item $F$ is closed, i.e.\ $F^{\to}(A) \in \mathcal{C}_Y$ for every $A \in \mathcal{C}_X;$ \item the preimage $F^{\gets}(y)$ of every point $y \in Y$ is $(\alpha,\beta)$-compact. \end{itemize} \end{definition}
\begin{theorem} Let $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ be an $(\alpha,\gamma)$-perfect fuzzy function such that $\mu(F) = \top$ and $\sigma(F) = \top$. If the space $(Y,E_Y,\tau_Y)$ is $(\gamma,\beta)$-compact, then the space $(X,E_X,\tau_X)$ is $(\alpha,\beta)$-compact. \end{theorem}
\begin{proof} Let $\mathcal{A}$ be a family of closed $L$-sets in $X$ such that $\bigwedge_{A\in\mathcal{A}_0} A \not \leq \beta^c$ for every finite subfamily $\mathcal{A}_0 \subset \mathcal{A}$. Without loss of generality we may assume that $\mathcal{A}$ is closed under taking finite meets. For each $A \in \mathcal{A}$ let $B_A := F^{\to}(A)$ and let $\mathcal{B} := \{B_A: A \in \mathcal{A} \}$. Then, since $\mu(F) = \top$, by Proposition \ref{im-pr} (7) it follows that $B \not \leq \beta^c\ \forall B \in \mathcal{B}$, and moreover, since $\mathcal{A}$ is assumed to be closed under finite meets, $$ \begin{array}{lll} B_{A_1}\wedge\ldots\wedge B_{A_n}& =& F^{\to}(A_1)\wedge\ldots\wedge F^{\to}(A_n)\\ & \geq& F^{\to}(A_1 \wedge\ldots\wedge A_n)\\ & =& F^{\to}(A), \end{array} $$ for some $A \in \mathcal{A}$, and hence $\bigwedge_{B\in\mathcal{B}_0}(B) \not \leq \beta^c$ for every finite subfamily $\mathcal{B}_0 \subset \mathcal{B}$. Hence, by $(\gamma,\beta)$-compactness of the space $(Y,E_Y,\tau_Y)$ we conclude that $\bigwedge_{B \in \mathcal{B}}(B) \not \leq \gamma^c$, and therefore there exists a point $y_0 \in Y$ such that $F^{\to}(A)(y_0) = B_A(y_0) \not \leq \gamma^c$ for all $A \in \mathcal{A}$. Now, applying $(\alpha,\gamma)$-compactness of the preimage $F^{\gets}(y_0)$ and recalling that $\mathcal{A}$ was assumed to be closed under taking finite meets, we conclude that $$\bigvee_x \bigl(F(x,y_0) \wedge (\bigwedge_{A\in \mathcal{A}} A(x))\bigr) \not\leq \alpha^c,$$ and hence, furthermore, $$\bigwedge_{A\in\mathcal{A}} A \not\leq \alpha^c.$$ \end{proof}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2}
\end{document}
\begin{document}
\title{On thermalization of two-level quantum systems}
\section{Introduction}
The study of evolution of open systems towards equilibrium has always been a challenging problem in Statistical Mechanics. The difficulty lies in prescribing a form of interaction between the system and the environment at the microscopic level that will give rise to equilibration. This difficulty has traditionally been sidestepped by proposing the so-called {\it H-theorem}, which states that a system attains equilibrium when the entropy function is maximized over the accessible states of the system.
Although this has proved to be a very efficient way to calculate and work with equilibrium states, the heart of the problem remains unsolved. We look at this thermodynamic problem from a quantum mechanical perspective. Quantum Thermodynamics has received a lot of attention in the recent past \cite{rt1,rt2}. The concepts and laws of thermodynamics are presumably valid only in the macroscopic regime. To see how the laws and definitions of thermodynamic quantities, viz.\ heat, work, etc., behave in the microscopic regime is one of the main objectives of Quantum Thermodynamics.
There has been a number of works \cite{pop1,pop2,short1,short2,gogolin} where the problem of equilibration is looked at from a quantum mechanical perspective. For example, Linden et al. \cite{linden} looked into the problem of smallest possible quantum refrigerator. In the process, they considered a two-qubit system as a refrigerator in which one qubit acts as the system to be cooled while the other works as the coil of the refrigerator by extracting heat from the body (to be cooled), and releasing it to the environment. The two-qubit refrigerator is derived from the equilibrium (steady) state solution of a three-qubit master equation which the authors provided phenomenologically. This motivated us to see if, instead of following this phenomenological approach, a microscopic description for the thermalization process (equilibration to a thermal state) is possible through a thermalizing Hamiltonian. Such a simulation of the thermalization process can serve at least two purposes: (i) simulating a natural thermalization process in lab, and (ii) comparing different time scales (e.g., time scales for thermalization versus interaction time scales of different constituents of the system) without assuming a priori their ordering.
To completely characterize the joint Hamiltonian of the system and environment that results in equilibration of the system, is a formidable task. So, instead we ask the following question: whether for a given thermalization process of a system, there exists an ancilla in a specific state and a joint Hamiltonian of system-ancilla that gives rise to the exact process of equilibration on the system. In this paper, we provide an affirmative answer to this question in the case of quantum-optical master equation.
We work out a {\it thermalizing Hamiltonian} $H_{th}$ for the quantum-optical master equation \cite{text1} which gives rise to thermal equilibration of a qubit. We find that a single-qubit ancilla initialized in a thermal state is sufficient for such a dynamics to be mimicked.
Our next aim is to look for such simulations of thermalization process which evolves under the action of non-Markovian dynamics. We analyse such situations further by considering a general form of thermalizing Hamiltonian of which the quantum-optical master equation dynamics is a special case. We work out the necessary and sufficient conditions for Markovianity of the system dynamics given a form of the simulating interaction Hamiltonian. Note that not every non-Markovian dynamics gives rise to equilibration of the system, and thereby, thermalization. Our approach here provides one possible way of generating a thermalizing non-Markovian dynamics through the prescription of a simulating Hamiltonian. It is worth mentioning here that, as there are a number of definitions of Markovianity in quantum mechanical scenario \cite{markov1,markov2,markov3,markov4} we stick to the definition of completely positive (CP) divisibility \cite{markov3,markov4} and use the characterization of Wolf et. al. \cite{cirac} for finding out the aforementioned conditions.
An interesting model of thermalization was proposed by V. Scarani et. al. \cite{scarani}. Another model of thermalization (for spin-$\frac{1}{2}$ systems) has been developed by Kleinbolting and Klesse \cite{klesse}. In these works, they used the swap operation between system and bath to give rise to thermalization. But a drawback of these methods is that the system is fully thermalized after a \textit{finite} time interval, which would imply that the thermalizing map is a function of only the temperature to which the system will thermalize and the time interval taken to reach it. This proposition seems to be unrealistic as this does not take into account the intricacies of the system, environment or the correlations shared between them, that might affect the process of thermalization.
In \cite{oliveira1,oliveira2,oliveira3}, M. J. de Oliveira has shown another novel approach to thermalization for systems in contact with an environment (typically, heat reservoirs). In \cite{oliveira1}, a quantum Fokker-Planck-Kramers (FPK) equation is derived via canonical quantization of the classical FPK equation to account for quantum dissipation of systems interacting with an environment. The dissipation term is chosen such that the system equilibrates to the Gibbs thermal state, i.e.\ the system thermalizes. In \cite{oliveira2,oliveira3}, the quantum FPK equation is further exploited to study heat transport properties in harmonic oscillator chains and bosonic systems. Although our approach to thermalization also begins with solving a master equation, it differs from de Oliveira's in that our aim is to derive simulating Hamiltonians for thermalization and thereby study generic features of thermalization in open quantum systems.
First, we describe the thermalizing process of a qubit as a pin map. We then look at the quantum optical master equation for a qubit to find out its time dependent solutions. The affine transformation relating the initial state and the time-evolved state is then described. This affine transformation is then parametrized to find out the thermalizing Hamiltonian $H_{th}$ with a single-qubit ancilla simulating the heat bath. In the following section, we consider the thermalizing Hamiltonian in a more general form and derive the conditions on the time dependence for thermalization to occur (in the infinite time limit). We also derive the necessary and sufficient conditions for such a Hamiltonian to lead to Markovian dynamics for the system evolution. Further, we derive the Lindblad type master equation for a system dynamics arising out of our general form of Hamiltonian. And finally, we draw our conclusions.
\section{Form of thermalizing Hamiltonian}
The starting point of our work is realising that thermalization can be achieved through several ways, one of which being Markovian master equations with a thermal bath. Therefore, we take a Markovian master equation, the quantum optical master equation, where a qubit (two levels of an atom) is in contact with a bath (a system of non-interacting radiation field). Given the fact that all Markovian master equations with thermal baths give rise to equilibration to {\it thermal states} $(\rho_{th} = e^{-H/k_B T}/Z)$, with $H$ being the system Hamiltonian and $Z = \mathrm{Tr}\,(e^{-H/k_B T})$ the partition function, we try to figure out a joint Hamiltonian between the system and an ancilla, which will do the same. This system-ancilla Hamiltonian, which will henceforth be called the thermalizing Hamiltonian $H_{th}$, will give rise to a unitary process where the system (two levels of the atom) will equilibrate to a (constant) thermal state.
To calculate the thermalizing Hamiltonian $H_{th}$, we find the affine transformation on the Bloch vector of the system qubit that will give rise to the same evolution as the quantum optical master equation. In doing so, we realize that the affine transformation is a special case of the generalized amplitude damping channel \cite{text2}. We then refer to a result by Narang and Arvind \cite{narang} where it is shown that it is enough for certain qubit channels to have a single-qubit mixed state ancilla to simulate the action of the channel as a sub-system dynamics of a system-ancilla unitary evolution. It may be noted here that Terhal et al.~\cite{smolin} have shown that certain single-qubit channels can only be simulated through qutrit mixed state environments. Incidentally, our affine transformation fits into the criterion for single-qubit ancilla as in Narang and Arvind \cite{narang}, and we find a two-qubit Hamiltonian that simulates the evolution of the system qubit via the quantum optical master equation.
Given below are the details of the aforesaid process. We will be working in the computational basis unless mentioned otherwise.
\subsection{Thermalizing maps for a qubit: Pin Map} Before we introduce the optical master equation for a two-level quantum system, we look for the most general way a qubit can lead to thermalization -- a qubit channel -- a completely positive trace preserving map $\mathcal{N}:\mathcal{L}(\mathbb{C}^2)\rightarrow \mathcal{L}(\mathbb{C}^{2})$ such that $\mathcal{N}(\rho)=\rho_{th}=\text{diag}(p,1-p)$ for all single-qubit states $\rho$ with $0\leq p\leq 1$. Such a map is called a {\it pin} map. Here, $\mathcal{L}(\mathbb{C}^2)$ is the set of all bounded linear operators $A: \mathbb{C}^2\rightarrow\mathbb{C}^2$. Thus, we have: \begin{equation} \label{N} \mathcal{N} = \begin{bmatrix} p & 0 & 0 & p\\ 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0\\ 1-p & 0 & 0 & 1-p \end{bmatrix}. \end{equation} The Kraus operators for $\mathcal{N}$ are: \begin{gather*} \label{k1} K_{00} = \begin{bmatrix} \sqrt{p} & 0\\ 0 & 0 \end{bmatrix}, ~~~ K_{01} = \begin{bmatrix} 0 & \sqrt{p} \\ 0 & 0 \end{bmatrix}, \end{gather*}
\begin{gather} \label{k2} K_{10} = \begin{bmatrix} 0 & 0\\ \sqrt{1-p} & 0 \end{bmatrix}, ~~~ K_{11} = \begin{bmatrix} 0 & 0 \\ 0 & \sqrt{1-p} \end{bmatrix}. \end{gather}
\subsection{Optical master equation} It would have been useful to have a dynamical version of the pin map, whose Kraus operators are given in equation (\ref{k2}). This would then give rise to a master equation corresponding to pin map, and thereby, for thermalization. In the absence of such a dynamical version in general, we now look at the optical master equation to come up with one possible dynamical version of the Kraus operators in equation (\ref{k2}).
We choose the following Markovian master equation (quantum optical master equation) which corresponds to a qubit interacting with a bosonic thermal bath under Markovian conditions.
\begin{equation} \label{lind} \begin{split} \frac{d\rho (t)}{dt} &= \gamma_0 (N +1) \Big(\sigma_-\rho(t)\sigma_+ - \frac{1}{2} \{ \sigma_+\sigma_-,\rho(t) \}\Big)\\ &~~ + \gamma_0 N \Big(\sigma_+\rho(t)\sigma_- - \frac{1}{2}\{\sigma_-\sigma_+,\rho(t)\}\Big) \end{split} \end{equation}
Here, $N=(\exp\frac{E(\omega)}{k_B T}-1)^{-1}$ is the Planck distribution, $k_B$ is the Boltzmann constant, $T$ is temperature of the heat bath and $E(\omega)=\hbar\omega$ is the energy of the system at frequency $\omega$. $\gamma_0$ is the spontaneous emission rate of the bath, and $\gamma = \gamma_0 (2N+1)$ is the total emission rate (including thermally induced emission and absorption processes). Here we have neglected the free evolution part. For more details, refer to \cite{text1}.
If the initial system qubit state is given by $\rho(0)=\frac{1}{2}(\mathbb{1}+\bar{r}(0).\bar{\sigma})$, where $\bar{r}(0) = (r_1(0),r_2(0),r_3(0))$, the master equation can be readily solved by choosing the time-evolved state to be $\rho(t)=\frac{1}{2}(\mathbb{1}+\bar{r}(t).\bar{\sigma})$ where $\bar{r}(t) = (r_1(t),r_2(t),r_3(t))$. We find, $r_1(t) = r_1(0) e^{-\gamma t/2}, r_2(t) = r_2(0) e^{-\gamma t/2}, r_3(t) = (r_3(0)+g) e^{-\gamma t} - g.$
Here $g =\frac{\gamma_0}{\gamma}=(2N+1)^{-1}$ and so, $g\in [0,1]$. $g$ gives us a measure of the temperature $T$. It can be easily seen that the higher the value of $g$, the lower the temperature, and vice versa. Specifically, $g=0$ for $T=\infty$ and $g=1$ for $T=0$. The steady state solution for the system is a thermal state as expected, and corresponds to the Bloch vector $(0,0,-g)$. Explicitly, \begin{equation} \label{gval} \rho_{th} = \text{diag}(\frac{1-g}{2},\frac{1+g}{2}). \end{equation}
\subsection{Affine Transformation} Any single-qubit channel can be written as an affine transformation of the form $r_i(t) = \sum_{j=1}^{3}M_{ij} r_j(0) + C_i$ \cite{text2,narang}. Thus, we can express the corresponding affine transformation for our solution (given in the previous subsection) as a $3\times3$ matrix $M$ and a column matrix $C$:
\begin{equation} \label{M1} M = \begin{bmatrix} e^{-\gamma t/2} & 0 & 0\\ 0 & e^{-\gamma t/2} & 0\\ 0 & 0 & e^{-\gamma t} \end{bmatrix}, C = \begin{bmatrix} 0\\ 0\\ g(e^{-\gamma t}-1) \end{bmatrix}. \end{equation}
Here, we notice that this affine transformation is a special kind of generalized amplitude damping channel. Amplitude damping channels describe the effect of energy dissipation to environment at finite temperature. The affine transformation for a generalized amplitude damping channel has two positive parameters $B$, $p \in [0,1]$. It is given by: \begin{equation} M_{GAD} = \begin{bmatrix} \sqrt{1-B} & 0 & 0\\ 0 & \sqrt{1-B} & 0\\ 0 & 0 & 1-B \end{bmatrix}, \end{equation} \begin{equation} C_{GAD} = \begin{bmatrix} 0\\ 0\\ B(2p-1) \end{bmatrix}. \end{equation}
We can see that our thermalization process is a generalized amplitude damping channel with the parameter $p<\frac{1}{2}$.
\subsection{Parametrizing the transformation}
In \cite{narang}, Narang and Arvind used a single-qubit mixed state ancilla to parametrize the affine transformation of a single-qubit channel. We follow their technique to simulate our dynamical process for thermalization. To do so, we consider a single-qubit mixed state ancilla of the form $\rho_e=(1-\lambda)\frac{\mathbb{1}}{2}+\lambda|\phi\rangle\langle\phi|$, where $\frac{\mathbb{1}}{2}$ is the maximally mixed state and $|\phi\rangle$ is a general pure state given by $|\phi\rangle= \cos\Big( \frac{\xi}{2}\Big)|0\rangle+ e^{-i\eta}\sin\Big(\frac{\xi}{2}\Big)|1\rangle.$
If $\rho_e$ plays the role of a bath state of a single-qubit system then evolution through the most general two-qubit unitary $U$ (upto a freedom of local unitary actions), given in equation (\ref{ud}) below, will result in the following affine transformation for the system qubit, as given in equations (\ref{M}) and (\ref{C}) below. Apart from $\eta,\xi,\lambda$, three more parameters $\alpha,\beta,\delta$ are needed to completely identify the channel. Thus, the class of single-qubit channels which can be simulated by a single-qubit mixed state ancilla is a six parameter family ($\alpha,\beta,\delta,\eta,\xi,\lambda$) of affine transformations:
\begin{equation} \label{ud} U = \begin{bmatrix} \cos\frac{\alpha+\delta}{2} & 0 & 0 & i\sin\frac{\alpha+\delta}{2}\\ 0 & e^{-i\beta}\cos\frac{\alpha-\delta}{2} & ie^{-i\beta}\sin\frac{\alpha-\delta}{2} & 0\\ 0 & ie^{-i\beta}\sin\frac{\alpha-\delta}{2} & e^{-i\beta}\cos\frac{\alpha-\delta}{2} & 0\\ i\sin\frac{\alpha+\delta}{2} & 0 & 0 & \cos\frac{\alpha+\delta}{2}\\ \end{bmatrix} \end{equation}
\begin{equation} \label{C}
C = \begin{bmatrix} -\lambda\sin\delta\sin\beta\sin\xi\cos\eta\\ -\lambda\sin\alpha\sin\beta\sin\xi\sin\eta\\ -\lambda\sin\alpha\sin\delta\cos\xi \end{bmatrix}. \end{equation} \begin{widetext} \begin{equation} \label{M} M = \begin{bmatrix} \cos\delta\cos\beta & \lambda\cos{\delta}\sin{\beta}\cos\xi & -\lambda\sin\delta\cos\beta\sin\eta\sin\xi\\ -\lambda\cos\alpha\sin\beta\cos\xi & \cos\alpha\cos\beta & \lambda\sin\alpha\cos\beta\cos\eta\sin\xi\\ -\lambda\cos\alpha\sin\delta\sin\eta\sin\xi & -\lambda\sin\alpha\cos\delta\sin\xi\cos\eta & \cos\alpha\cos\delta \end{bmatrix}. \end{equation} \end{widetext}
Refer to Appendix A for details of our calculation.
It is also important to note here that by using the ancilla qubit, we are only simulating the dynamics of the system qubit leading to the infinite time thermalization. More specifically, we do not have the ancilla state remaining static, as is the case for the bosonic bath. The ancilla state does in fact change.
Now, we compare the parametrized forms of C and M in equations (\ref{C}) and (\ref{M}) respectively with the affine transformation of quantum optical master equation in (\ref{M1}). Thus, we get a joint unitary giving rise to thermalization. One can check that the unitary does indeed lead to thermalization in the infinite time limit. Equivalently, this can also be seen by calculating the Kraus operators for the system qubit from the joint unitary operator and then applying infinite time limit, $\lim_{t\to\infty} \rho_s(t) = \rho_{th}.$
Now, the thermalizing Hamiltonian $(H_{th})$ is found and details of the derivation are given in Appendix B.
$H_{th}$ is of the following form, \begin{equation} \label{hth}
H_{th}(t) = f(t) \Big(|\phi^+\rangle\langle\phi^+ | - |\phi^-\rangle\langle\phi^- |\Big), \end{equation}
where, \begin{align} \label{ft} &f(t)= \frac{\gamma e^{-\gamma t/2}}{2\sqrt{1- e^{-\gamma t}}},\\ &\ket{\phi^\pm} = \frac{1}{\sqrt{2}}(\ket{00} \pm \ket{11}). \end{align}
The most general two-qubit time-dependent Hamiltonian which gives rise to the affine transformation (\ref{M1}), by acting on the tensor product of an arbitrary initial state of the system qubit and the initial state of the ancilla qubit being $\rho_{th}$ (given in equation (\ref{gval})), is of the form given in equation (\ref{hth}) above.
\section{On Markovianity of Dynamics for Thermalization}
Given a 2-qubit Hamiltonian of the form, \begin{equation} \label{Hft} H(t) = f(t) (\ket{\phi^+}\bra{\phi^+}-\ket{\phi^-}\bra{\phi^-}) \end{equation} where $\ket{\phi^\pm}=(\ket{00}\pm\ket{11})/\sqrt{2}$, we can ask what are the conditions on $f(t)$ such that the system will thermalize in the asymptotic time limit. Moreover, we can ask when the evolution of the system follows Markovian dynamics. The main reason behind the search for generic properties of $f(t)$ in the above equation is to look for a generic Hamiltonian (involving ancilla) method for thermalization which does not necessarily follow from the optical master equation --- in the latter case the system is known to thermalize in the infinite time limit. We can rewrite eq. (\ref{Hft}) in the Pauli basis as $\frac{f(t)}{2}(\sigma_x\otimes\sigma_x - \sigma_y\otimes\sigma_y)$. This represents a kind of spin exchange interaction similar to the double-quantum Hamiltonian used in NMR experiments \cite{dqh}. In particular, $f(t)$ can be interpreted as a time-dependent coupling strength between the spins. Such Hamiltonians can in principle be realized in the lab.
\subsection{Thermalization} Given an arbitrary initial state for the system (say, $\rho_s^i$) and an initial thermal state for the ancilla (say, $\rho_e^i=\frac{1}{2}\text{diag}(1+g,1-g)$), we can derive the condition on a generic $f(t)$ such that the system will thermalize in the infinite time limit i.e. by imposing the following constraint,
\begin{equation*} \lim_{t\to\infty}\text{Tr}_e \Big[U(t,0) (\rho_s^i\otimes\rho_e^i)U(t,0)^{\dagger}\Big]=\text{diag}(\frac{1-g}{2},\frac{1+g}{2}) \end{equation*} where $U(t,0)=\exp{\left(-i\int_0^t H(\tau)d\tau \right)}$ with $H(\tau)$ defined above in (\ref{Hft}) and the RHS is as we saw in (\ref{gval}).
This condition for thermalization is finally found to be,
\begin{equation} \lim_{t\to\infty} F(t) = (2n+1)\frac{\pi}{2} \end{equation} where, $F(t)=\int_0^t f(\tau)d\tau$ and $n$ is any integer. \subsection{Markovianity of System Evolution} Another interesting question we can raise is about the nature of the system evolution under such a Hamiltonian - will it be Markovian always? To answer this we refer to \cite{cirac} in which the authors have produced necessary and sufficient conditions for a given master equation $\dot{\rho}=L_t[\rho]$ to be Markovian (CP divisible) in nature. These conditions are:
\begin{itemize} \item $L_t$ must be hermiticity preserving. \item $L_t^*(\mathbb{1})=0$, \text{and} \item $\omega_c L_t^\Gamma\omega_c \geqslant 0$, \end{itemize} for all times $t$, where $L_t^*$ and $L_t^\Gamma$ are the adjoint map and Choi map of $L_t$ respectively. $\omega_c=\mathbb{I}-\ket{\omega}\bra{\omega}$ is the projector onto the orthogonal complement of the maximally entangled state $\ket{\omega}=\sum_i \frac{1}{\sqrt{2}}\ket{i,i}$.
It can be seen that the hermiticity preserving condition will always be satisfied for our particular case. Imposing the other conditions, we obtain the following necessary and sufficient constraints on the time dependence of the Hamiltonian for ensuring Markovianity of the dynamical map, \begin{align}
0\leqslant F(t) &\leqslant \frac{\pi}{2},~\forall t\\
\frac{d}{dt}F(t)&\geqslant 0,~\forall t \end{align}
Note that alternatively, we can have a monotonically decreasing $F(t)$ bounded between $[-\frac{\pi}{2},0]$ if we choose $-f(t)$ in our Hamiltonian (\ref{Hft}).
We may now think of a functional form of $f(t)$ which satisfies the thermalization condition but violates the markovianity conditions - namely that $F(t)$ be monotonic and bounded. A simple example for such a non-Markovian thermalizing form is,
\begin{equation} \label{eg} F(t)=\frac{\sin(20 t)}{1+10 t}+(1-e^{-t})\frac{\pi}{2} \end{equation}
FIG. 1 plots $F(t)=\int_0^t f(\tau)d\tau$ for $f(\tau)$ given by equation (\ref{ft}) and also $F(t)$ given by equation (\ref{eg}).
\begin{figure}
\caption{(colour online) Red solid line is the $F(t)$ corresponding to non-Markovian thermalizing Hamiltonian while the black corresponds to that of our Markovian thermalizing form. Note that both converge to $\frac{\pi}{2}$ asymptotically and hence signify thermalization.}
\end{figure}
In the preceding sections, we have derived a specific form of thermalizing Hamiltonian from the quantum optical master equation and then we generalized it by identifying conditions for the dynamics to be Markovian. We now derive the master equation that refers to the system dynamics for thermalization under our specific form of Hamiltonian given by equation (\ref{Hft}). It is found to be of the following form,
\begin{equation} \label{lindtype} \begin{split} \frac{d \rho (t)}{dt} &= \gamma_1(t) \Big(\sigma_-\rho(t)\sigma_+ - \frac{1}{2} \{ \sigma_+\sigma_-,\rho(t) \}\Big)\\ &~~ + \gamma_2(t) \Big(\sigma_+\rho(t)\sigma_- - \frac{1}{2}\{\sigma_-\sigma_+,\rho(t)\}\Big) \end{split} \end{equation}
where, \begin{align*} \gamma_1(t) &= (1+g) f(t) \tan [F(t)] \\ \gamma_2(t) &= (1-g) f(t) \tan [F(t)] \end{align*}
Here, $F(t)=\int_0^t f(\tau)d\tau$ and $g$ is the parameter referring to the bath temperature used in defining the initial ancilla state as $\sigma_e(0)=\frac{1}{2}(\mathbb{1}+g \sigma_3)$. For more details regarding the derivation of master equation, refer to Appendix C.
The above form of master equation is immediately reminiscent of the Lindblad (Markovian) form that we have used at the beginning in equation (\ref{lind}), hence we have a master equation that is of the Lindblad type, but with time-dependent coefficients $\gamma_1(t)$ and $\gamma_2(t)$. It has been shown that the negativity of decoherence rates represent non-Markovianity \cite{hall}. Simply put, if the decoherence rates remain non-negative for all time, then the master equation represents a Markovian evolution. On the other hand, if for some time interval, it becomes negative, the dynamics is necessarily non-Markovian.
Thus, we have derived a class of master equations that can describe both Markovian as well as non-Markovian thermalization depending on the choice of $f(t)$ in the Hamiltonian.
For example, consider the non-Markovian $F(t)$ we have defined in equation (\ref{eg}). The corresponding $f(t)$ is calculated by taking the derivative of $F(t)$ and
thus, we can derive the master equation governing such a dynamics. It can be checked that the coefficients $\gamma_1(t)$ and $\gamma_2(t)$ do not remain non-negative at all times, which signifies the non-Markovian nature of the dynamics.
It can also be seen that when we consider the $f(t)$ we originally derived given by equation (\ref{ft}), we recover the quantum optical master equation (\ref{lind}) with $\gamma_1(t)$ and $\gamma_2(t)$ reducing to the appropriate time-independent, positive coefficients.
\section{Conclusion} In this paper, we look at a Markovian master equation of a qubit that leads to thermalization and simulate it through a unitary process by replacing the thermal bath with a single-qubit mixed state ancilla. Thus, we derive a thermalizing Hamiltonian for a single qubit corresponding to the quantum optical master equation.
Although a Markovian model of thermalization has been used here, there exist non-Markovian models as well. Those models need not necessarily be simulatable through a single-qubit ancilla (mixed or pure). For example, we considered the case of post-Markovian master equation as in \cite{manis,lidar} and find that a single-qubit ancilla is not sufficient to simulate the thermalization process described therein.
We derive necessary and sufficient conditions for thermalization and Markovianity of the state evolution under a specific form (\ref{Hft}) of system-ancilla Hamiltonian. We find that it is indeed possible for us to have non-Markovian thermalization processes even for this specific kind of Hamiltonian we have described in this work.
We also derive a Lindblad type master equation for system dynamics arising out of the Hamiltonian described in our work. We see that it is possible to find signature of non-Markovian dynamics based on the negativity of decoherence rates in the master equation.
In principle, the method we have employed can be used for finding simulating Hamiltonians in higher (finite) dimensions as well. It is non-trivial because the parametrization of unitary operators in higher dimensions is not readily available, as was the case for 2-qubit unitaries. Nevertheless, in the case of infinite dimensional systems (e.g., quantum harmonic oscillators), covariance matrices can be employed to proceed in this direction. As a future project, it would be interesting to study simulating thermalizing Hamiltonians for a single-mode harmonic oscillator.
We expect that our result will stimulate further interest in finding out the fundamental dynamics that leads to thermalization (for example, studying adiabaticity in open quantum systems). As an extension to this work, we hope to look into more general thermalization models (including non-Markovian) which will require two-qubit ancillae. Also, finding similar thermalizing Hamiltonian models for leaking cavity modes of radiation fields is an intriguing future project.
\textbf{Acknowledgements:} We would like to thank Daniel Alonso and Ramandeep Johal for insightful comments. We would also like to acknowledge productive discussions with Sandip Goyal, Manik Banik, Arindam Mallick, and George Thomas.
\section{Appendix A}
Here, we discuss the explicit calculations involved in parametrizing $C$ and $M$ matrices (appearing in equations (\ref{C}) and (\ref{M})) of the single-qubit channels simulatable through a single-qubit mixed state ancilla.
The form of $U$, given in equation (\ref{ud}), can be re-written after a simple basis change in the following way, \begin{equation} \begin{split} U &= K_0(\mathbb{1}^{(s)}\otimes \mathbb{1}^{(e)}) + K_1(\sigma_1^{(s)} \otimes \sigma_1^{(e)})\\
&+ K_2(\sigma_2^{(s)} \otimes \sigma_2^{(e)})+ K_3(\sigma_3^{(s)} \otimes \sigma_3^{(e)}) \end{split}\tag{A.1} \end{equation} where, \begin{equation}
\begin{split}
&K_0 = \frac{1}{2}\left(\cos\frac{\alpha+\delta}{2} + e^{-i\beta}\cos\frac{\alpha-\delta}{2}\right),\\
&K_1 = \frac{i}{2}\left(\sin\frac{\alpha+\delta}{2} + e^{-i\beta}\sin\frac{\alpha-\delta}{2}\right),\\
&K_2 = \frac{-i}{2}\left(\sin\frac{\alpha+\delta}{2} - e^{-i\beta}\sin\frac{\alpha-\delta}{2}\right),\\
&K_3 = \frac{1}{2}\left(\cos\frac{\alpha+\delta}{2} - e^{-i\beta}\cos\frac{\alpha-\delta}{2}\right).
\end{split}\tag{A.2} \end{equation}
Now recalling the form of the mixed state ancilla $\rho_e$ and using an arbitrary initial state for the system qubit $\rho_s = \frac{1}{2}(\mathbb{1}+\bar{r}.\bar{\sigma})$, we can define the composite initial state, $\rho_{se}^{initial}=\rho_s\otimes\rho_e.$
The final time-evolved state of the system qubit can be found as, $\rho_s^{final}= \text{Tr}_e \Big[U \rho_{se}^{initial} (U)^{\dagger}\Big]$.
Now we can find out the components of $\rho_s^{final}$ in the basis $\{\sigma_1^{(s)},\sigma_2^{(s)},\sigma_3^{(s)}\}$ by computing Tr$[\sigma_i^{(s)}\rho_s^{final}]$. Thereby, we can read out the elements of $M$ and $C$. For example, consider $i=3$, we get: \begin{equation} \text{Tr}[\sigma_3^{(s)}\rho_s^{final}] = M_{31}n_1+M_{32}n_2+M_{33}n_3+C_3.\tag{A.3} \end{equation} Finally, we get the parametrized matrices $M$ and $C$ produced earlier.
\section{Appendix B}
To find the thermalizing Hamiltonian, we first need to find the values of the parameters that match with our particular case. For this, we compare the affine transformation for the quantum optical case in equations (\ref{M1}) with the parametrized matrices in equations (\ref{C}) and (\ref{M}). It can be easily seen that there exists a set of parameters as given below: \begin{equation} \label{set1} \lambda=g,\cos\alpha=\cos\delta= e^{\frac{-\gamma t}{2}},\cos\beta=\pm 1 =\cos\xi,\tag{B.1} \end{equation} where $\eta$ can be arbitrary. So finally, we get the mixed state ancilla as the following thermal state, \begin{equation*}
\rho_e = \frac{1+g}{2}|0\rangle\langle 0| + \frac{1-g}{2}|1\rangle\langle 1|. \end{equation*}
Putting the values from equation (\ref{set1}) in the form of unitary given in equation (\ref{ud}), we get the unitary for the thermalization process. Note that we now have a time dependent unitary, \begin{equation} U(t,0) = \begin{bmatrix} e^{\frac{-\gamma t}{2}} & 0 & 0 & i\sqrt{1- e^{-\gamma t}}\\ 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0\\ i\sqrt{1- e^{-\gamma t}} & 0 & 0 & e^{\frac{-\gamma t}{2}} \end{bmatrix}.\tag{B.2} \end{equation} From here, we can calculate $H_{th}$ as follows. We know, $$U(t_2,t_1) = \exp{\bigg(-i\int_{t_1}^{t_2} H(s) ds\bigg)}, \text{and}$$ \begin{equation*} \begin{split} U(t+\Delta t,t) &= \exp{\left(-i\int_{t}^{t+\Delta t} H(s) ds\right)}\\ &\approx \mathbb{1} -i\Delta t H(t) \end{split} \end{equation*} Using the semi-group property of $U(t)$ (which holds good for small time interval $\Delta t$ even if $H$ is time-dependent) we get, \begin{equation*} \begin{split} U(t+\Delta t,0) &= U(t+\Delta t,t)U(t,0)\\ \Rightarrow U(t+\Delta t,t) &= U(t+\Delta t,0)U^{\dagger} (t,0)\\ &= \Big(U(t,0) + \Delta t\frac{dU(t,0)}{dt}+\cdots\Big)U^{\dagger}(t,0)\\ &\approx \mathbb{1} + \Delta t \frac{dU(t,0)}{dt}U^{\dagger}(t,0) \end{split} \end{equation*} Comparing with the RHS of the previous equation, we get: \begin{equation*} H_{th}(t) = i\left(\frac{dU(t,0)}{dt}\right)U^{\dagger}(t,0) \end{equation*} Thus, we get: \begin{equation} \begin{split}
H_{th} &= \frac{\pm \gamma e^{\frac{-\gamma t}{2}}}{2\sqrt{1-e^{-\gamma t}}} \Big(|00\rangle\langle11| + |11\rangle\langle 00|\Big)\\
&= f(t) \big(|\phi^+\rangle\langle\phi^+| - |\phi^-\rangle\langle\phi^-|\big) \end{split} \tag{B.3} \end{equation} Without loss of generality, we choose the positive sign for the $f(t)$ in this paper.
\section{Appendix C}
We consider a Hamiltonian of the form (\ref{Hft}), with fixed initial state of ancilla qubit as $\sigma_e(0)=\frac{1}{2}(\mathbb{1}+g \sigma_3)$ (i.e. a thermal state with temperature defined through $g$ as previously explained) and an arbitrary initial state of system qubit $\rho_s(0)=\frac{1}{2}(\mathbb{1}+\bar{r}.\bar{\sigma})$ with $\bar{r}=(x,y,z)$. The time evolved state of the system under the action of such a Hamiltonian can be calculated as, \begin{equation} \rho_s(t)= \text{Tr}_e \Big[U(t,0) \rho_{s}(0)\otimes\sigma_e(0) (U(t,0))^{\dagger}\Big].\tag{C.1} \end{equation} where, $U(t,0)=\exp{\left(-i\int_0^t H(\tau)d\tau \right)}$.
Now, we use the mathematical prescription described in the Appendix of \cite{jordan} to derive the master equation for such a dynamics. First, we express $\rho_s(0)$ and $\rho_s(t)$ as vectors in the operator space of the system which has basis $\{\mathbb{1},\sigma_1,\sigma_2,\sigma_3\}$.The density matrix of the system can be represented by a $4\times1$ vector, and a superoperator on the system can be represented by a $4\times4$ matrix. In this representation, $\boldsymbol{v}_0=\frac{1}{2}[1,x,y,z]^{T}$ is the vector form of the initial arbitrary density matrix of the system qubit and the vector form of the system qubit at time $t$ is, \begin{equation} \boldsymbol{v}_t=\frac{1}{2}[1,C_t x,C_t y,C_t^2 z + g S_t^2]^{T}=Q_t \boldsymbol{v}_0\tag{C.2} \end{equation} where, $C_t\equiv \cos(F(t)),S_t\equiv \sin(F(t))$ and $Q_t$ is the matrix representation of the system qubit evolution from the initial time to the time $t$, \begin{equation} Q_t=\left(\begin{array}{cccc} 1 & 0 & 0 & 0\\ 0 & C_t & 0 & 0\\ 0 & 0 & C_t & 0\\ g S_t^2 & 0 & 0 & C_t^2 \end{array}\right).\tag{C.3} \end{equation}
It can be seen that $Q_t$ is invertible for finite $t$. Thus we can find that,$\partial_t\boldsymbol{v}_t=\dot{Q}_t \boldsymbol{v}_0=\dot{Q}_t Q^{-1}_t\boldsymbol{v}_t.$
Thus, $\dot{Q}_t Q^{-1}_t$ is the matrix representation of the linear transformation corresponding to the time derivative of the system density matrix, and \begin{equation} \dot{Q}_t Q^{-1}_t=\left(\begin{array}{cccc} 0 & 0 & 0 & 0\\ 0 & \alpha_t & 0 & 0\\ 0 & 0 & \alpha_t & 0\\ \beta_t & 0 & 0 & 2\alpha_t \end{array}\right).\tag{C.4} \end{equation} where, $\alpha_t =-f(t)\tan(F(t)),\beta_t = 2gf(t)\tan(F(t))$.
Now we can find the superoperator corresponding to $\dot{Q}_t Q^{-1}_t$. In order to do this, we need to know the matrix representations $s_{ij}$ for the basis of the superoperator $\sigma_i[\cdot]\sigma_j$. These representations are easy to find and are given in equation (S15) in \cite{hall}. Decomposing $\dot{Q}_t Q^{-1}_t$ into the matrix representation, we get, $\dot{Q}_t Q^{-1}_t=\sum_{i,j=0}^3 a_{ij} s_{ij}.$
In our particular case, the non-zero components $a_{ij}$ turn out to be $a_{00}=4\alpha_t,a_{03}=a_{30}=\beta_t,a_{11}=a_{22}=-2\alpha_t$ and $a_{21}=-a_{12}=i\beta_t$. Now by de-vectorizing, the master equation can be written as,
\begin{equation} \begin{split} \partial_t \rho(t) &= 4\alpha_t\rho -2\alpha_t(\sigma_1\rho\sigma_1+\sigma_2\rho\sigma_2)\\ &~~ +i\beta_t(\sigma_2\rho\sigma_1-\sigma_1\rho\sigma_2)+\beta_t\{\rho,\sigma_3\} \end{split}\tag{C.5} \end{equation}
Using the fact that $\sigma_\pm=\sigma_1\pm i \sigma_2$, the above equation can easily be recast into the Lindblad type master equation as given in equation (\ref{lindtype}).
\end{document} |
\begin{document}
\title{Bound States for Magic State Distillation in Fault-Tolerant Quantum Computation}
\author{Earl T. Campbell} \affiliation{Department of Physics and Astronomy, University College London, Gower Street, London, WC1E 6BT, UK.} \author{Dan E. Browne} \affiliation{Department of Physics and Astronomy, University College London, Gower Street, London, WC1E 6BT, UK.}
\begin{abstract} Magic state distillation is an important primitive in fault-tolerant quantum computation. The magic states are pure non-stabilizer states which can be distilled from certain mixed non-stabilizer states via Clifford group operations alone. Because of the Gottesman-Knill theorem, mixtures of Pauli eigenstates are not expected to be magic state distillable, but it has been an open question whether all mixed states outside this set may be distilled. In this Letter we show that, when resources are finitely limited, non-distillable states exist outside the stabilizer octahedron. In analogy with the bound entangled states, which arise in entanglement theory, we call such states bound states for magic state distillation. \pacs{03.67.Pp} \end{abstract}
\maketitle
The significant noise and decoherence in quantum systems means that harnessing these systems for computational tasks must be performed fault tolerantly~\cite{STEANE95,G01a}. In a wide variety of setups only a limited set of gates, known as the \textit{Clifford group}, are implemented in a manifestly fault tolerant manner. Examples include some anyonic topological quantum computers~\cite{Moore90,Lloyd02,Doucot02}, post-selected quantum computers~\cite{Knill05,Rei02a} and measurement based topological quantum computers~\cite{RHG01a}. This motivates the problem of when such devices, with practically error free Clifford gates, may be promoted to a full quantum computer. The celebrated Gottesman-Knill theorem shows that a Clifford circuit acting on stabilizer states --- simultaneous eigenstates of several Pauli operators --- can be efficiently simulated by a classical computer~\cite{G02a}. However, given a resource of pure non-stabilizer states, we can implement gates outside the Clifford group. For example, a qubit in an eigenstate of the Hadamard enables one to implement a $\pi/8$ phase gate that when supplementing the Clifford group gives a dense covering of all unitary operations~\cite{BraKit05}, and so enables universal quantum computation.
Preparation of non-stabilizer states would usually require a non-Clifford operation, so in this context, one would require that even noisy copies of these states enable high fidelity quantum computation. Bravyi and Kitaev~\cite{BraKit05} showed that this can be achieved. Coining the term \textit{magic state distillation}, they showed that most mixed non-stabilizer states can be \textit{distilled} via Clifford group circuits to fewer copies of a lower entropy state, reaching in the limit of infinite iterations a pure non-stabilizer \textit{magic state}. However, the protocols they presented do not succeed for all mixed non-stabilizer states. Bravyi and Kitaev were not satisfied by the ambiguous status of these states and concluded that ``\textit{The most exciting open problem is to understand the computational power of the model in [this] region of parameters.}''. Either all non-stabilizer states are efficiently distillable by an undiscovered protocol, or there exist non-stabilizer states that are impossible to distill. Such undistillable states we call \textit{bound states} for magic state distillation, in analogy with bound states in entanglement distillation~\cite{Horodecki99activate}. Here we make progress by showing that bound states exist for a very broad class of protocols. By showing that a single round of a finite sized protocol will not improve these states, it follows that repeating such a protocol, even with an infinite number of iterations, will also have no benefit. Hence, we explain why all known protocols fail to distill some states.
The single-qubit stabilizer states, for which the Gottesman-Knill theorem applies, are the six pure stabilizer states (the eigenstates of $\pm X, \pm Y$ and $\pm Z$) and \earl{any incoherent mixture of these}. In the Bloch sphere, this convex set with 6 vertices forms the \textit{stabilizer octahedron} partially shown in figure~\ref{fig:Outline}a. Single-qubit states have density matrices: \begin{equation} \label{eqn:NONedge}
\rho(f, \vec{a}) = \left( \mbox{\small 1} \!\! \mbox{1} + (2f-1)( a_{X} X + a_{Y} Y + a_{Z} Z ) \right) / 2, \end{equation}
\earl{where $\vec{a}=(a_{X}, a_{Y}, a_{Z})$ is a unit vector}, and $f$ is the fidelity w.r.t the pure state $|\psi_{\vec{a}}\rangle\langle\psi_\vec{a}|=(\mbox{\small 1} \!\! \mbox{1} + a_{X}X+a_{Y}Y+a_{Z}Z)/2$. Stabilizer states satisfy: \begin{equation} \label{eqn:OctSurface}
|2f-1|(|a_{X}|+|a_{Y}|+|a_{Z}|) \leq 1 \end{equation} where the equality holds for states on the surface of the octahedron, and we denote the fidelity of such surface states as ${f^S_{\vec{a}}}$, which is unique assuming $f \geq 1/2$.
\begin{figure}
\caption{One octant of the Bloch sphere with various regions and directions shown. (a) The blue region shows the stabilizer states in one octant. Each octant is identical, with all stabilizer states forming an octahedron. (b) The yellow plane is the distillation threshold for the 5 qubit code, with the direction of $\vec{a}_{T}$ shown. The yellow plane is parallel to the underlying blue face of the stabilizer octahedron, but is displaced by a small gap. (c) The three green planes are the thresholds for the Steane code, with each plane differing by local Clifford gates. These planes meet the stabilizer octahedron at its edges. The three vectors are axes of H-like gates, e.g. $\vec{a}_{H}$; (d) The combined region of states distilled by either the 5 qubit code or the Steane code. This region only touches the stabilizer octahedron at its edges, and no other known protocol is tight in any other direction.}
\label{fig:Outline}
\end{figure}
Prior protocols for magic state distillation~\cite{BraKit05,Rei01a,Rei02a,Rei03a} increase fidelity towards eigenstates of Clifford gates, such as the Hadamard $H$ and the $T$ gate\footnote{The T gate performs, $TXT^{\dagger}=Y$, $TYT^{\dagger}=Z$.}. These eigenstates have $\vec{a}_{H}=(1,0,1)/\sqrt{2}$ and $\vec{a}_{T}=(1,1,1)/\sqrt{3}$, with $f=1$ for ideal magic states. Given the ability to prepare a mixed non-stabilizer state, $\rho$, we can perform an operation called \textit{polarization}, or \textit{twirling}, that brings $\rho$ onto a symmetry axis of the octahedron. For example, by randomly applying $\mbox{\small 1} \!\! \mbox{1}$, $T$ or $T^{\dagger}$, we \earl{map} $\rho \rightarrow \rho(f, \vec{a}_{T})$.
Bravyi and Kitaev proposed the following protocol~\cite{BraKit05} for $\ket{T}$ state distillation: (1) Prepare 5 copies of $\rho(f,\vec{a}_{T})$; (2) Measure the 4 stabilizers of the five-qubit error correcting code; (3) If all measurements give $+1$, the protocol succeeds and the encoded state is decoded into a single qubit state, and otherwise restart. Upon a successful implementation of this protocol the output qubit has a fidelity $F(f)$ plotted in figure~\ref{fig:fidelities}b. Provided the initial fidelity is greater than some threshold, a successful implementation yields a higher fidelity. This protocol has a non-tight threshold, and exhibits a gap between the threshold and the set of stabilizer states. Because the initial state was twirled onto the T axis, the threshold forms a plane in the Bloch sphere (see figure~\ref{fig:Outline}). In contrast, Reichardt has proposed a protocol that does have a tight threshold for distillation of $\rho(f, \vec{a}_{H})$ states in a $H$-like direction~\cite{Rei01a}. His protocol is similar to above, but uses 7 qubits each attempt and measures the 6 stabilizers of the STEANE code~\cite{STEANE95}. In figure~\ref{fig:fidelities}a we show the performance of this protocol, where there is no threshold gap. When the initial mixture is not of the form $\rho(f, \vec{a}_{H})$, we twirl the initial mixture onto the $H$ axis. Hence, the threshold forms a plane for each $H$-like direction (see figure~\ref{fig:Outline}). Although the protocol is tight in directions crossing an octahedron edge, the protocol fails to distill some mixed states just above the octahedron faces, and so is not tight in all directions. Even the combined region of states distilled by all known protocols still leaves a set of states above the octahedron faces, whose distillability properties are unknown.
Here we show that for all size $n$ protocols there is a region of bound states above the octahedron faces. More formally, we consider all states $\rho(f, \vec{a}_{P})$ where $\vec{a}_{P}$ has all positive (non-zero) components. Having all components as non-zero excludes states above octahedron edges. Considering only states in the positive octant is completely general as Clifford gates enable movement between octants. Many copies of bound states cannot be used to improve on a single copy, and below we formalize the idea of \textit{not improved} and state our main result.
\begin{defin} We say $\rho'$ is not an improvement on $\rho(f, \vec{a}_{P})$, when $\rho'$ is a convex mixture of $C_{i}\rho(f, \vec{a}_{P})C_{i}^{\dagger}$ and stabilizer states, where $C_{i}$ are Clifford group gates. \end{defin}
\begin{theorem} \label{THMgeneralcase} Consider a device capable of ideal Clifford gates, preparation of stabilizer states, classical feedforward and Pauli measurements. For any protocol on this device that takes $\rho(f, \vec{a}_{P})^{\otimes n}$ and outputs a single qubit, $\rho'$, there exists an $\epsilon > 0$ such that $\rho'$ is not an improvement on $\rho(f, \vec{a}_{P}) $ for $f \leq {f^S_{\vec{a}_{P}}}+\epsilon$. \end{theorem}
Theorem~\ref{THMgeneralcase} covers \earl{a wide} class of protocols, \earl{which} attain a fidelity that is upper-bounded by a narrower class of protocols~\cite{Camp09c}, such that theorem~\ref{THMgeneralcase} follows from:
\begin{theorem} \label{THMstabilizercase} Consider all protocols that follow these steps: (i) prepare $\rho(f, \vec{a}_{P})^{\otimes n}$; (ii) measure the $n-1$ generators of an $n$ qubit stabilizer code $\mathcal{S}_{n-1}$ with one logical qubit; (iii) postselect on all ``+1" measurement outcomes; (iv) decode the stabilizer code and output the logical qubit as the single qubit state $\rho'$. For all such protocols there exists an $\epsilon > 0$ such that $\rho'$ is not an improvement on $\rho(f, \vec{a}_{P}) $ for $f \leq {f^S_{\vec{a}_{P}}}+\epsilon$. \end{theorem}
\begin{figure}\label{fig:fidelities}
\end{figure}
Prior protocols, such as those based on the STEANE code and 5 qubit code, are covered explicitly by theorem~\ref{THMstabilizercase}. Here we use the structure of stabilizer codes to prove theorem~\ref{THMstabilizercase}, with theorem~\ref{THMgeneralcase} following directly from the results of~\cite{Camp09c}, where such distillation protocols are shown to have equal efficacy with more general Clifford protocols. It is crucial to consider the implication of these theorems when an $n$-qubit protocol is iterated $m$ times. When a single round provides no improvement on the initial resource, the input into the second round will only differ by Clifford group operations, and hence our theorem applies to the second, and all subsequent, rounds. Hence, repeated iteration cannot be used to circumvent our theorem. Before proving these theorems, we derive a pair of powerful lemmas that identify bound states. \begin{lem} \label{LEM1} Consider n copies of an octahedron surface state $\rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})$ projected onto the codespace of $\mathcal{S}_{n-1}$ and then decoded. If the output qubit is in the octahedron interior, then there exists an $\epsilon >0$ such that for $f\leq {f^S_{\vec{a}_{P}}}+\epsilon$ the same projection on $\rho(f, \vec{a}_{P})^{\otimes n}$ also projects onto a mixed stabilizer state. \end{lem} This lemma follows directly from the dependence of the output on $f$, which for finite $n$ is always continuous. We can observe this lemma at work in figure~\ref{fig:fidelities}b. Our next lemma identifies when octahedron surface states are projected into the octahedron interior. Before stating this we must establish some notation. An initial state, $\rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})^{\otimes n}$, is an ensemble of pure stabilizer states: \begin{equation} \label{eqn:initial_ensemble}
\rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})^{\otimes n}=\sum_{\vec{g} \in \{X, Y, Z\}^{n}} q_{\vec{g}} \kb{\Psi_{\vec{g}}}{\Psi_{\vec{g}}}, \end{equation} where $\ket{\Psi_{\vec{g}}}$ is stabilized, $g\ket{\Psi_{\vec{g}}}=\ket{\Psi_{\vec{g}}}$, by the group $\mathcal{G}_{\vec{g}}$ generated by $\vec{g}=$($g_{1}$, $g_{2}$,... $g_{n}$). The operator $g_{i}$ is $X_{i}$, $Y_{i}$ or $Z_{i}$, with $i$ labeling the qubit on which it acts. Each contribution has a weighting $q_{\vec{g}} = \prod_{i} ( a_{g_{i}} /(a_{X}+a_{Y}+a_{Z} ) )$. Measuring the generators of $\mathcal{S}_{n-1}$ and post-selecting on ``+1'' outcomes, projects onto the codespace of $\mathcal{S}_{n-1}$ with projector $P=\sum_{s \in \mathcal{S}_{n-1}}s/2^{n-1} $, producing: \begin{equation} \label{eqn:final_state}
\frac{P \rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})^{\otimes n} P}{\ensuremath{\mathrm{tr}} [ P \rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})^{\otimes n} P ] } = \sum_{\vec{g} \in \{ X, Y, Z \}^{n} } q'_{\vec{g}} \kb{\Psi'_{\vec{g}}}{\Psi'_{\vec{g}}},
\end{equation} with projected terms, $\ket{\Psi'_{\vec{g}}}$, of new weighting $ q'_{\vec{g}}$. Each $\ket{\Psi'_{\vec{g}}}$ has its stabilizer generated by ($G_{\vec{g}}, s_{1}, s_{2},.... s_{n-1}$), where $G_{\vec{g}}$ is an independent generator that: (a) was present in the initial group $G_{\vec{g}} \in \mathcal{G}_{\vec{g}}$; and (b) commutes with the measurement stabilizers $G_{\vec{g}} \mathcal{S}_{n-1}= \mathcal{S}_{n-1}G_{\vec{g}}$. In other words, it must be equivalent to one of six logical Pauli operators of the codespace. We denote the set of logical operators as $\mathcal{L}$, and its elements $\pm X_{L}, \pm Y_{L}$ and $\pm Z_{L}$, and so $G_{\vec{g}} \in \mathcal{L} . \mathcal{S}_{n-1}$. This defines a decoding via the Clifford map, $X_{L}\rightarrow X_{1}$ and $Z_{L} \rightarrow Z_{1}$. Since there are only six distinct logical states, we can combine many terms in equation~\ref{eqn:final_state}: \begin{equation}
\frac{P \rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})^{\otimes n} P}{\ensuremath{\mathrm{tr}} [ P \rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})^{\otimes n} P ] } = \sum_{L \in \mathcal{L} } q_{L} \kb{\Psi_{L}}{\Psi_{L}} , \end{equation} where $\ket{\Psi_{L}}$ has stabilizer generators ($L, s_{1}, s_{2},.... s_{n-1} $). The new weighting is $q_ {L} = \sum q'_{\vec{g}}$ with the sum taken over all $\vec{g}$ that generate $\mathcal{G}_{\vec{g}}$ containing an element $G_{\vec{g}} \in L.\mathcal{S}_{n-1}$. We can now state the next lemma:
\begin{lem} \label{LEM2} Given $n$ copies of $\rho({f^S_{\vec{a}_{P}}}, \vec{a}_{P})$ projected into the codespace of $\mathcal{S}_{n-1}$ and decoded, the output qubit is in the octahedron interior if there exist any two pure states in the initial ensemble, $\ket{\Psi_\vec{g}}$ and $\ket{\Psi_\vec{g'}}$ (defined in equation~\ref{eqn:initial_ensemble}), such that both: \begin{enumerate}
\item[(i)] the projected pure states are orthogonal, so that $L \in \mathcal{G}_{\vec{g}}$ and $-s L \in \mathcal{G}_{\vec{g}'}$ where $L \in \mathcal{L}$ and $s \in \mathcal{S}_{n-1}$; \\
\textit{and}
\item[(ii)] upon projection $\ket{\Psi_\vec{g}}$ and $\ket{\Psi_\vec{g'}}$ do not vanish, so $q'_{\vec{g}} \neq 0$ and $q'_{\vec{g}'} \neq 0$. \end{enumerate} \end{lem} \noindent We prove this lemma by contradiction. From equation~\ref{eqn:OctSurface}, and $(2f-1)a_{L}=(q_{L}-q_{-L})$, surface states satisfy:
\begin{equation}
|q_{X_{L}}-q_{-X_{L}}|+ |q_{Y_{L}}-q_{-Y_{L}}|+ |q_{Z_{L}}-q_{-Z_{L}}|=1, \end{equation}
and we assume to the contrary that the projected state has this form. Since $q_{\pm L}$ are non-negative reals, we have $|q_{L}-q_{-L}|=q_{L}+q_{-L}-2\mathrm{Min}(q_{L}, q_{-L})$, where $\mathrm{Min}(q_{L}, q_{-L})$ is the minimum of $q_{L}$ and $q_{-L}$. Along with the normalization condition, $\sum_{L} q_{L}=1$, this entails: \begin{equation} \mathrm{Min}(q_{X_{L}}, q_{-X_{L}})+\mathrm{Min}(q_{Y_{L}}, q_{-Y_{L}})+\mathrm{Min}(q_{Z_{L}}, q_{-Z_{L}}) =0. \nonumber \end{equation} Since all terms are positive, no cancellations can occur and so every term must vanish, hence $\mathrm{Min}(q_{L}, q_{-L}) = 0, \forall L$. However, conditions (\textit{i}) and (\textit{ii}) of the lemma entail that there exists a non-vanishing $\mathrm{Min}(q_{L}, q_{-L})$, as $q_{L}\geq q'_{\vec{g}} \neq 0$ and $q_{-L}\geq q'_{\vec{g}'} \neq 0$. Having arrived at this contradiction, we conclude the falsity of the assumption that the projected state remains on the octahedron surface, and so must be in the octahedron interior. This proves lemma~\ref{LEM2}, and we now show that lemma~\ref{LEM2} applies to all stabilizer reductions that do not trivially take $\rho(f, \vec{a}_{P})^{\otimes n} \rightarrow C_{i} \rho(f, \vec{a}_{P}) C_{i}^{\dagger}$.
Our proof continues by finding canonical generators for the code $\mathcal{S}_{n-1}$. A related method has been used to prove that all stabilizer states are local Clifford equivalent to a graph state~\cite{VdNDDM01a}, and we review this first. All stabilizer states have a stabilizer $\mathcal{S}_{n}$ with $n$ generators. Each generator is a tensor product of $n$ single-qubit Pauli operators. This can be visualized as an $n$ by $n$ matrix with elements that are Pauli operators, each row a generator and each column a qubit. Different, yet equivalent, generators are produced by row multiplication, via which we can produce a canonical form. In this form column $i$ has a non-trivial Pauli operator $A_{i}$ that appears on the diagonal, and all other operators in that column are either the identity or another operator $B_{i}$. Note that $A_{i}$ and $B_{i}$ compose a third non-trivial Pauli $A_{i}B_{i}=i(-1)^{\gamma_{i}}C_{i}$ with $\gamma_{i}=0,1$. Hence, all stabilizer states differ from some graph state by only local Cliffords that map $(A_{i}, B_{i}) \rightarrow (X_{i}, Z_{i})$.
A code, $\mathcal{S}_{n-1}$, has one less generator than the number of qubits, and so more columns than rows. We can apply the diagonalisation procedure on an $n-1$ by $n-1$ submatrix, to bring this submatrix into canonical form. Hence, we can find generators of $\mathcal{S}_{n-1}$ such that: \begin{equation}
s_{j} = (-1)^{\alpha_{j}} A_{j} ( \prod_{k \neq j, n} B_{k}^{\beta_{k,j}} ) T_{j, n} , \end{equation} where the variables $\beta_{k,j}=0,1$ denote whether $B_{k}$ or $\mbox{\small 1} \!\! \mbox{1}_{k}$ is present, and $\alpha_{j}=0,1$ defines the phase. With the $n^{\mathrm{th}}$ column out of canonical form, this leaves the $n^{\mathrm{th}}$ qubit operator $T_{j,n}$ unspecified. However, if all these generators have $T_{j,n}=\mbox{\small 1} \!\! \mbox{1}_{n}$, then the protocol is trivial and projects $n-1$ qubits into a known stabilizer state and the last qubit untouched, and so no improvement is made for any $f$. Hence, herein we assume the non-trivial case; in particular we assume stabilizer $T_{n-1, n} \neq \mbox{\small 1} \!\! \mbox{1}_{n}$. Since, we can always relabel qubits this is completely general. Furthermore, we can define $T_{n-1, n}=A_{n}$. \earl{Now} we can define a logical operator in the codespace of $\mathcal{S}_{n-1}$: \begin{eqnarray}
Z_{L} & = & \left( \prod_{1 \leq j \leq n-2} B^{\zeta_{j}}_{j} \right) B_{n-1} B_{n}, \end{eqnarray} where the variables $\zeta_{j}=0,1$ are uniquely fixed by commutation relations $Z_{L} s_{j}=s_{j} Z_{L}$. Note that $Z_{L}$ has some inbuilt freedom as $B_{n}$ is not fixed other than that $B_{n}\neq A_{n}, \mbox{\small 1} \!\! \mbox{1}_{n}$, which is equivalent to free choice of $\gamma_{n}$ in the expression $A_{n}B_{n}=i (-1)^{\gamma_{n}}C_{n}$. Now we enquire whether the final state contains two terms stabilized by $Z_{L}$ and $-sZ_{L}$ respectively, hence satisfying the conditions for lemma~\ref{LEM2}. If we consider the product of $Z_{L}$ and $s_{n-1}$, and choose $\gamma_{n}=\alpha_{n-1} + \gamma_{n-1}$ mod 2, we have: \begin{equation} - s_{n-1} Z_{L} = \left( \prod_{1 \leq k \leq n-2} B_{k}^{\beta_{k, n-1}+\zeta_{k}} \right) C_{n-1} C_{n} . \end{equation} Our choice of $\gamma_{n}$ ensures a minus sign on the left hand side, which aids in finding $\ket{\Psi_{\vec{g}}}$ and $\ket{\Psi_{\vec{g}'}}$ that satisfy our lemma by being stabilized by $G_{\vec{g}} = Z_{L}$ and $G_{\vec{g}'} =-s_{n-1}Z_{L}$ respectively. This criterion is fulfilled when: \begin{eqnarray*} \vec{g} & = & ( B_{1}, B_{2}, ....., B_{n-2}, B_{n-1}, B_{n}) ,\\ \nonumber \vec{g'} & = & ( B_{1}, B_{2}, ....., B_{n-2}, C_{n-1}, C_{n}) . \end{eqnarray*} These states only vanish under projection, $q'_{\vec{g}}, q'_{\vec{g'}}=0$, if they are stabilized by the negative of some element of the code $\mathcal{S}_{n-1}$. To prove they don't vanish, we first observe that every element of $\mathcal{G}_{\vec{g}}$ and $\mathcal{G}_{\vec{g}'}$ has either $\mbox{\small 1} \!\! \mbox{1}_{j}$ or $B_{j}$ acting on qubit $j$, for all $j=1,2,...n-2$. The only elements of $\mathcal{S}_{n-1}$ for which this is true are $\mbox{\small 1} \!\! \mbox{1}$ and $s_{n-1}$, but $s_{n-1}$ has $A_{n-1}A_{n}$ acting on the last two qubits and neither $\mathcal{G}_{\vec{g}}$ or $\mathcal{G}_{\vec{g}'}$ contain any such element.
Using a canonical form of the generators of $\mathcal{S}_{n-1}$, we have shown that non-trivial codes always satisfy the conditions of lemma~\ref{LEM2}. That is, all non-trivial codespace projections take many surface states into the octahedron interior. From the continuity expressed by lemma~\ref{LEM1}, this entails the existence of a finite region of non-stabilizer states that are also projected into the octahedron. Hence, all $n$-copy protocols do not improve on a single copy for some region of bound states above the octahedron faces, completing the proof. This does not contradict known tight thresholds in edge directions, as these directions have $\vec{a}$ with one zero component.
Although our proof holds for protocols using fixed and finite $n$ copies of $\rho(f, \vec{a}_{P})$, we could conceive of a protocol that varies $n$. If this varying-$n$ protocol has an $n$-dependent threshold, $f^{T}_{\vec{a}_{P}}(n)$, and $f^{T}_{\vec{a}_{P}}(n) \rightarrow f^{S}_{\vec{a}_{P}}$ as $n \rightarrow \infty$, then its threshold would be arbitrarily suppressible. \earl{Repeated iterations of a protocol, or equivalently employing concatenation of a single-qubit code, will not change the threshold. However, one could consider a broader class of protocols consisting of iterates that act on $p$ qubits and output $q$ qubits (for $p > q > 1$) followed by a final round outputting a single qubit. Such protocols map $n$ qubits to $1$ qubit, with $n$ growing each iterate, but with only $p$ qubits involved in each iterate. This implies that multi-qubit output iterates may suppress the threshold effectively, and are worth further study. Currently, no such protocol is known.} As such, in the asymptotic regime, bound magic states may not exist. However, numerical evidence so far indicates that smaller codes tend to produce better thresholds than larger codes. Nevertheless, the theorem does not rule out infinite cases from attaining a tight threshold. In the regime of finite resources, bound states do exist, and it is interesting to ask what computational power Clifford circuits acting on such states possess. Can we find methods of efficiently classically simulating bound states; or can bound states be exploited in algorithms that offer a speedup over classical computation?
Furthermore, our proof assumes a protocol acting on identical copies, which invites study into whether our results extend to non-identical copies. In particular, following the analogy with entanglement distillation, we speculate that bound magic states may be distillable via ``catalysis'', where some non-consumed distillable resource activates the distillation~\cite{Horodecki99activate}. Finally we note that noisy Clifford gates can also enable quantum computation~\cite{Plenio08,vanDam09}, and we conjecture that a similar theorem will apply to a class of noisy Clifford gates analogous to states just above the octahedron faces.
The authors would like to thank Shashank Virmani, Matthew Hoban, Tobias Osborne, Ben Reichardt and Steve Flammia for interesting discussions. We acknowledge support from the Royal Commission for the Exhibition of 1851, the QIP IRC, QNET and the National Research Foundation and Ministry of Education, Singapore.
\end{document} |
\begin{document}
\title{Growth of solutions for QG and 2D Euler equations}
\author{Diego Cordoba \\ {\small Department of Mathematics} \\ {\small University of Chicago} \\ {\small 5734 University Av, Il 60637} \\ {\small Telephone: 773 702-9787, e-mail: [email protected]} \\ {\small and} \\ Charles Fefferman\thanks{Partially supported by NSF grant DMS 0070692.}\\ {\small Princeton University} \\ {\small Fine Hall, Washington Road, NJ 08544} \\ {\small Phone: 609-258 4205, e-mail: [email protected]} \\ }
\date{January 17 2001}
\maketitle
\markboth{QG and 2D Euler equations}{D.Cordoba and C.Fefferman}
\newtheorem {Thm}{Theorem} \newtheorem {Def}{Definition} \newtheorem {Lm}{Lemma} \newtheorem {prop}{Proposition} \newtheorem {Rem}{Remark} \newtheorem {Cor}{Corollary} \def\mathcal{\mathcal} \newtheorem {Ack*}{Acknowledgments}
\section{Abstract}
We study the rate of growth of sharp fronts of the Quasi-geostrophic equation and 2D incompressible Euler equations. The development of sharp fronts is due to a mechanism that piles up level sets very fast. Under a semi-uniform collapse, we obtain a lower bound on the minimum distance between the level sets.
\section{Introduction}
The work of Constantin-Majda-Tabak [1] developed an analogy between the Quasi-geostrophic and 3D Euler equations. Constantin, Majda and Tabak proposed a candidate for a singularity for the Quasi-geostrophic equation. Their numerics showed evidence of a blow-up for a particular initial data, where the level sets of the temperature contain a hyperbolic saddle. The arms of the saddle tend to close in finite time, producing a sharp front. Numerical studies done later by Ohikitani-Yamada [8] and Constantin-Nie-Schorgofer [2], with the same initial data, suggested that instead of a singularity the derivatives of the temperature were increasing as a double exponential in time.
Collapse on a curve was first studied in [1] for the Quasi-geostrophic equation where they considered a simplified ansatz for classical frontogenesis with trivial topology. At the time of collapse, the scalar $\theta$ is discontinuous across the curve $x_2 = f(x_1)$ with different limiting values for the temperature on each side of the front. They show that under this topology the directional field remains smooth up to the collapse, which contradicts the following theorem proven in [1]: \begin{eqnarray*}
\text{If locally the direction field remains smooth as t}\ \ \ \ \\ \text{ approaches $T_*$, then no finite singularity is possible}\ \ \ \ \\
\text{ as t approaches $T_*$.}\ \ \ \ \quad \quad \quad \quad \quad \quad \quad \quad \end{eqnarray*} The simplified ansatz with trivial topology studied in [1] does not describe a hyperbolic saddle.
Under the definition of a simple hyperbolic saddle, in [3], it was shown that the angle of the saddle can not decrease faster than a double exponential in time.
The criterion obtained in [5] for a sharp front formation for a general two dimensional incompressible flow is : \begin{eqnarray*} \text{A necessary condition to have a sharp front at time T is}\\
\int_{0}^{T}|u|_{L^{\infty}}(s) ds = \infty\ \quad \quad \quad \quad \ \ \ \ \quad \end{eqnarray*}
For the Quasi-geostrophic equation it is not known if the quantity $\int_{0}^{T}|u|_{L^{\infty}}(s) ds$ diverges or not. And the criterion does not say how fast the arms of a saddle can close.
In this paper we do not assume anything on the velocity field, and we show that under a semi-uniform collapse the distance between two level curves cannot decrease faster than a double exponential in time. The semi-uniform collapse assumption greatly weakens the assumptions made in [1] for an ansatz for classical frontogenesis, and the simple hyperbolic saddle in [3].
In the case of 2D incompressible Euler equation we are interested in the large time behavior of solutions.
The two equations we discuss in this paper have in common the property that a scalar function is convected by the flow, which implies that the level curves are transported by the flow. The possible singular scenario is due to level curves approaching each other very fast which will lead to a fast growth on the gradient of the scalar function. Below we study the semi-uniform collapse of two level sets on a curve. By semi-uniform collapse we mean that the distance of the two curves at any point is comparable.
The equations we study are as follows:
\bc {\underline{The Quasi-geostrophic (QG) Equation}} \ec
Here the unknowns are a scalar $\theta(x,t)$ and a velocity field $u(x,t) = (u_1(x,t), u_2(x,t)) \in R^2$, defined for $t\in[0,T^*)$ with $T^*\leq \infty$, and for $x \in \Omega$ where $\Omega = R^2$ or $R^2/Z^2$. The equations for $\theta$, u are as follows \begin{eqnarray} \left (\partial_t + u\cdot\nabla_x \right ) \theta = 0 \\ u = \nabla_{x}^{\perp}\psi\ \ and \ \ \psi = (-\triangle_x)^{-\frac{1}{2}}\theta, \nonumber \end{eqnarray} where $\nabla_{x}^{\perp} f = (-\frac{\partial f}{\partial x_2}, \frac{\partial f}{\partial x_1})$ for scalar functions f. The initial condition is $\theta(x,0) = \theta_0(x)$ for a smooth initial datum $\theta_0$.
\bc {\underline{The Two-Dimensional Euler Equation}} \ec
The unknown is an incompressible velocity field u(x,t) as above with vorticity denoted by $\omega$. The 2D Euler equation may be written in the form \begin{eqnarray} \left (\partial_t + u\cdot\nabla_x \right ) \omega = 0 \\
u = \nabla_{x}^{\perp}\psi\ \ and \ \ \psi = (-\triangle_x)^{-1}\omega, \nonumber \end{eqnarray} with u(x,0) equal to a given smooth divergence free $u_0(x)$.
\section{Results}
Assume that q = q(x,t) is a solution to (1) or (2), and that a level curve of q can be parameterized by
\begin{eqnarray} x_2=\phi_{\rho}(x_1,t)\ \ for\ \ x_1\in[a,b] \label{eq:1} \end{eqnarray} with $\phi_{\rho}\in C^1([a,b]\cap [0,T^*))$, in the sense that \begin{eqnarray} q(x_1,\phi_{\rho}(x_1,t), t) = G(\rho)\ \ for\ \ x_1\in[a,b],\label{eq:2} \end{eqnarray} and for certain $\rho$ to be specified below.
The stream function $\psi$ satisfies \begin{eqnarray} \nabla^{\perp}\psi=u. \end{eqnarray}
From (3) and (4), we have \begin{eqnarray} \frac{\partial q}{\partial x_1} + \frac{\partial q}{\partial x_2} \frac{\partial\phi_{\rho}}{\partial x_1} = 0 \label{eq:3} \end{eqnarray} \begin{eqnarray} \frac{\partial q}{\partial t} + \frac{\partial q}{\partial x_2} \frac{\partial\phi_{\rho}}{\partial t} = 0 \label{eq:4} \end{eqnarray}
By (1), (2), (5), (6) and (7) we obtain \begin{eqnarray*} \frac{\partial\phi_{\rho}}{\partial t} & = & -\frac{\frac{\partial q}{\partial t}}{\frac{\partial q}{\partial x_2}} = \frac{<-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <\frac{\partial q}{\partial x_1},\frac{\partial q}{\partial x_2} >}{\frac{\partial q}{\partial x_2}} \\ & = & <-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <\frac{\frac{\partial q}{\partial x_1}}{\frac{\partial q}{\partial x_2}}, 1> \\ & = & <-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <-\frac{\partial\phi_{\rho}}{\partial x_1}, 1> \end{eqnarray*} Next \begin{eqnarray*} \frac{\partial}{\partial x_1}\left (\psi(x_1,\phi_{\rho}(x_1,t), t)\right ) & = & \frac{\partial\psi}{\partial x_1} + \frac{\partial\psi}{\partial x_2} \frac{\partial\phi_{\rho}}{\partial x_1} \\ & = & <-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <-\frac{\partial\phi_{\rho}}{\partial x_1}, 1> \end{eqnarray*}
Therefore \begin{eqnarray} \frac{\partial\phi_{\rho}}{\partial t} = \frac{\partial}{\partial x_1}\left (\psi(x_1,\phi_{\rho}(x_1,t), t)\right ) \label{eq:5} \end{eqnarray}
With this formula we can write an explicit equation for the change in time of the area between two fixed points a, b and two level curves $(\phi_{\rho_1}, \phi_{\rho_2})$; \begin{eqnarray} \frac{d }{d t}\left ( \int_{a}^{b} [\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)] dx_1 \right ) \nonumber \\ = \psi(b,\phi_{\rho_2}(b,t), t) - \psi(a,\phi_{\rho_2}(a,t), t) \nonumber \\
+ \psi (a,\phi_{\rho_1}(a,t), t) - \psi(b,\phi_{\rho_1}(b,t), t) \label{eq:6} \end{eqnarray}
Assume that two level curves $\phi_{\rho_1}$ and $\phi_{\rho_2}$ collapse when t tends to $T^*$ uniformly in $a\leq x_1\leq b$ i.e. $$ \phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t) \sim \frac{1}{b - a} \int_{a}^{b} [\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)] dx_1 $$ In other words; the distance between two level sets are comparable for $a \leq x_1 \leq b$.
Let $$
\delta(x_1,t) = |\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)| $$ be the thickness of the front.
We define semi-uniform collapse on a curve if (3) and (4) holds and there exists a constant $c$, independent of t, such that $$ min \delta (x_1,t) \geq c\cdot max \delta (x_1,t) $$ for $a\leq x_1 \leq b$, and for all $t\in [0,T^*)$.
We call the length b-a of the interval [a,b] the length of the front.
Now we can state the following theorem
\begin{Thm} For a QG solution with a semi-uniform front, the thickness $\delta(t)$ satisfies \begin{eqnarray*} \delta(t) > e^{-e^{At + B}} \ \ for\ \ all\ \ t\in[0,T^*). \end{eqnarray*} Here, the constants A and B may be taken to depend only on the length of the front, the semi-uniformity constant, the initial thickness $\delta(0)$, and the norm of the initial datum $\theta_0(x)$ in $L^1\cap L^{\infty}$. \end{Thm}
Proof: From (9) we have
\begin{eqnarray}
|\frac{d}{d t} A(t)| < \frac{C}{b - a} \sup_{a\leq x_1\leq b} |\psi(x_1,\phi_{\rho_2}(x_1,t), t) - \psi(x_1,\phi_{\rho_1}(x_1,t), t)| \end{eqnarray} where \begin{eqnarray*} A(t) = \frac{1}{b - a} \int_{a}^{b} [\phi_{\rho_2}(x_1,t) & - &\phi_{\rho_1}(x_1,t)] dx_1, \end{eqnarray*} and C is determined by the semi-uniformity constant c.
The estimate of the difference of the value of the stream function at two different points that are close to each other is obtained by writing the stream function as follows; \begin{eqnarray*}
\psi(x,t) = - \int_{\Omega}\frac{\theta(x + y,t)}{|y|} dy, \end{eqnarray*} and this is because $\psi = (-\triangle_x)^{-\frac{1}{2}}\theta$.
Therefore \begin{eqnarray*}
\psi(z_1, t) - \psi(z_2,t) & = & \int_{\Omega}\theta(y)(\frac{1}{|y - z_1|} - \frac{1}{|y - z_2|}) dy \\ & = & \int_{|y - z_1| \leq 2\tau} + \int_{2\tau < |y - z_2| \leq k} + \int_{k < |y - z_1| } \\ & \equiv & I_{1} + I_{2} + I_{3}.\end{eqnarray*}
where $\tau = |z_1 - z_2| $.
Furthermore \begin{eqnarray*}
|I_{1}| & \leq & ||\theta||_{L^{\infty}} \cdot\int_{|y - z_1| \leq 2\tau}(\frac{1}{|y - z_1|} + \frac{1}{|y - z_2|}) dy \\ & \leq & C\tau \end{eqnarray*}
We define s to be a point in the line between $z_1$ and $z_2$, then $|y - z_1|
\leq 2|y - s|$ and $I_{2}$ can be estimated by \begin{eqnarray*}
|I_{2}| &\leq& C\tau \cdot \int_{2\tau < |y - z_1| \leq k}max_{s}|\nabla(\frac{1}{|y - s|})| dy \\ &\leq& C\tau \cdot
\int_{2\tau < |y - z_1| \leq k}max_{s}\frac{1}{|y - s|^{2}} dy \\ &\leq&
C\tau \cdot |\log \tau| \end{eqnarray*}
We use the conservation of energy to estimate $I_{3}$ by \begin{eqnarray*}
|I_{3}| \leq C \cdot \tau \end{eqnarray*}
Finally, by choosing $\tau = |z_1 - z_2|$ we obtain \begin{eqnarray}
|\psi(z_1, t) - \psi(z_2,t)| \leq M|z_1 - z_2||\log |z_1 - z_2|| \end{eqnarray} where M is a constant that depends on the initial data $\theta_0$. (See details in [3].)
Then we have \begin{eqnarray*}
|\frac{d}{d t} A(t)| & \leq & \frac{M}{b - a} \sup_{a\leq x_1\leq b}|\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)||\log |\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)|| \\
& \leq & \frac{C\cdot M}{b - a} |A(t)||\log A(t)| \end{eqnarray*} and therefore \begin{eqnarray*} A(t) \gg A(0)e^{-e^{\frac{C\cdot M}{b - a} t}} \end{eqnarray*}
\begin{Thm} For a 2D Euler solution with a semi-uniform front, the thickness $\delta(t)$ satisfies \begin{eqnarray*} \delta(t) > e^{-[At + B]} \ \ for\ \ all\ \ t\in[0,T^*). \end{eqnarray*} \end{Thm}
Here, the constants A and B may be taken to depend only on the length of the front, the semi-uniformity constant, the initial thickness $\delta(0)$, and the norm of the initial vorticity in $L^1\cap L^{\infty}$.
The proof of Theorem 2 is similar to that of Theorem 1, with the difference that instead of the estimate (11), we have \begin{eqnarray*}
|\psi(z_1, t) - \psi(z_2,t)|\leq M|z_1 - z_2| \end{eqnarray*} where M is a constant that depends on the initial data $u_0$. (See details in [3].)
Similar estimates can be obtained for the 2D ideal Magneto-hydrodynamics (MHD) equation, with the extra assumption that $\int_{0}^{T^*}|u|_{L^{\infty}}(s) ds$ is bounded up to the time of the blow-up. These estimates are a consequence of applying the mean value theorem in (10). Nevertheless in the case of MHD these estimates improve the results obtained in [6].
\begin{Ack*}
This work was initially supported by the American Institute of Mathematics. \end{Ack*}
\end{document} |
\begin{document}
\subject{Dissertation} \title{On Time-Reversal Equivariant\\ Hermitian Operator Valued Maps\\
from a 2D-Torus Phase Space} \author{Moritz Schulte} \date{\today} \maketitle
\cleardoublepage \tableofcontents
\addchap{Acknowledgements}
First of all, I would like to express my sincerest gratitude to my supervisor Professor Huckleberry. His help and guidance have been invaluable to me throughout the project. The mathematical discussions we shared were crucial to my progress, in particular when he helped with several of the proofs by suggesting key ideas. The work enabled me to grasp what mathematical research is like.
I would also like to thank my parents, my family and my friends for supporting me throughout my studies and research.
And finally, there is somebody whose role throughout the critical phase of my PhD project I would like to underline: Thank you, Christina, for your unwavering support, especially when some of the proofs had given me a hard time. Your part in the completion of this paper can hardly be overestimated.
This research was conducted as part of the Sonderforschungsbereich TR12 (SFB TR12) ``Symmetries and Universality in Mesoscopic Systems'' and was partially financed by the Deutsche Forschungsgemeinschaft (DFG) for which I am very thankful.
\chapter{Introduction}
In this paper we are primarily concerned with the equivariant homotopy classification of maps from a torus phase space $X$ to $\mathcal{H}_n^*$, the space of $n \times n$, non-singular hermitian operators. The equivariance is with respect to a time-reversal involution on $X$ and an involution on $\mathcal{H}_n^*$ defining a certain symmetry class. This is in contrast to related works in this area (e.\,g. \cite{Kennedy}), which deal with the classification of equivariant maps defined on \emph{momentum space}, where the time-reversal involution on momentum space is given by $k \mapsto -k$. Here we consider the model problem where the phase space is a $2$-dimensional symplectic torus $(X,\omega)$ and the time-reversal involution $T$ is antisymplectic, i.\,e. \begin{align*}
T^*(\omega) = -\omega. \end{align*} In this case it is known (see e.\,g. \cite{Seppala}, \cite{Tehrani}) that up to equivariant symplectic diffeomorphisms there are exactly three classes of antisymplectic involutions on $X$. Representatives for these three classes can be given by \begin{align*}
[z] \mapsto [\overline{z}] \;\;\text{,}\;\; [z] \mapsto [i\overline{z}] \;\;\text{and}\;\; [z] \mapsto [\overline{z} + \text{\textonehalf}], \end{align*} where $X$ is regarded as the square torus $\mathbb{C}/\Lambda$ defined by the lattice $\Lambda = \LatGen{1}{i}$. In this paper we only deal with the first two involutions, as these are the only ones, which have fixed points. In particular, in local coordinates they are of the form \begin{align*}
(q,p) \mapsto (q,-p). \end{align*} We call the first one \emph{type I} involution and the second one \emph{type II} involution. On $\mathcal{H}_n^*$ resp. $\mathcal{H}_n$ we define the involution, which we also denote by $T$, to be \begin{align*}
H \mapsto \overline{H} = \ltrans{H}. \end{align*} We believe, though, that the methods described in this paper, are also applicable, when $X$ is equipped with a time-reversal involution coming from the third class of (free) antisymplectic involutions on $X$ and when physically relevant image spaces other than $(\mathcal{H}_n,T)$ are considered.
Letting $G$ be the cyclic group $C_2$ of order two and $X$ and $\mathcal{H}_n$ be equipped as above with $G$-actions, our main results in chapter~\ref{ChapterClassification} give a complete description of the spaces \begin{align*}
[X,\mathcal{H}_n^*]_G \end{align*} of $G$-equivariant homotopy classes of maps $X \to \mathcal{H}_n^*$, where $X$ is equipped with the type I or the type II involution. These descriptions are in terms of numerical topological invariants. It turns out that the topological invariant given by the \emph{total
degree} together with certain \emph{fixed point degrees} defines a \emph{complete} topological invariant of equivariant homotopy. Recall that an invariant is called \emph{complete} if the equality of the invariants associated to two objects (equivariant maps in our case) implies the equivalence of the objects (the maps are equivariantly homotopic). It is important to emphasize that not all combinations of total degree and fixed point degrees are realizable; a certain elementary number theoretic condition is necessary and sufficient for the existence of a map with given invariants.
In chapter~\ref{ChapterJumps} we study the situation where a certain controlled degeneration is allowed. In formal terms this is a curve \begin{align*}
f\colon [-1,1] \times X \to \mathcal{H}_n \end{align*} where the image $\Im(f_t)$ is contained in $\mathcal{H}_n^*$ for $t \not= 0$ and where the degeneration $f_0(x)$ being singular is only allowed at isolated points in $X$. The interesting case is where $f_{-1}$ and $f_{+1}$ are in distinct $G$-homotopy classes, i.\,e. where we have a sort of \emph{jump curve}. In this case we show exactly which jumps are possible and produce maps with given jumps.
This research was carried out in the context of the interdisciplinary project SFB TR12. Thus we have attempted to present a work that is mostly self-contained; for the convenience of the reader, the appendix (Chapter~\ref{ChapterAppendix}) includes some well-known background material.
\section{Outline}
The equivariant homotopy classification of maps $X \to \mathcal{H}_n^*$ is contained in chapter~\ref{ChapterClassification}. We begin by identifying the connected components of $\mathcal{H}_n^*$. These are the subspaces $\mathcal{H}_{(p,q)}$ of the matrices with $p$ positive and $q$ negative eigenvalues ($p + q = n$). It follows that this \emph{eigenvalue signature} $(p,q)$ is a homotopy invariant of maps $X \to \mathcal{H}_n^*$ and the classification of maps to $\mathcal{H}_n^*$ reduces to the classification of maps to $\mathcal{H}_{(p,q)}$, where $(p,q)$ takes on all possible eigenvalue signatures. We then begin by examining the case where $X$ is equipped with the type I involution and $n=2$. An ad-hoc computation shows that $\mathcal{H}_{(2,0)}$ resp. $\mathcal{H}_{(0,2)}$ are equivariantly contractible to plus resp. minus the identity matrix, whereas $\mathcal{H}_{(1,1)}$ contains a $U(2)$-orbit diffeomorphic to $S^2$ as equivariant strong deformation retract. The induced $T$-action on the sphere is given by a reflection along the $x,y$-plane. This motivates the need to classify equivariant maps $X \to S^2$. Also note that the $2$-sphere can be equivariantly identified with the projective space $\mathbb{P}_1$, on which the involution is the standard real structure given by complex conjugation. Since $X$ and $S^2$ are both $2$-dimensional manifolds, we obtain the (Brouwer) degree, which we call \emph{total degree}, as a homotopy invariant of maps $X \to S^2$. The equivariance requires that fixed point sets are mapped to fixed point sets. The fixed point set in the torus -- for the type I involution -- consists of two disjoint circles -- $C_0$ and $C_1$ -- while the fixed point set in the sphere consists of only the equator $E$. This defines two additional invariants of equivariant homotopy: The \emph{fixed point degrees}, which are the degrees of the restrictions of the maps to the respective fixed point circles in $X$, regarded as maps to the equator. 
This gives rise to the \emph{degree triple map} $\mathcal{T}$, which sends an equivariant map $f\colon X \to S^2$ to the degree triple $\Triple{d_0}{d}{d_1}$, where $d$ is the total degree of $f$ and $d_0$, $d_1$ denote the fixed point degrees of $f$. A convenient property of the type I involution is the fact that a fundamental region for this involution is given by a \emph{cylinder}, which we call $Z$. The boundary circles of the cylinder are the fixed point circles in $X$. Maps $(Z,C_0 \cup C_1) \to (S^2,E)$ can be uniquely equivariantly extended to the full torus $X$. It follows that the equivariant homotopy classification of maps $X \to S^2$ reduces to the (non-equivariant) homotopy classification of maps $(Z, C_0 \cup C_1) \to (S^2,E)$. We then regard maps from the cylinder $Z = I \times S^1$ to $S^2$ as \emph{curves} in the free loop space $\mathcal{L}S^2$. After this translation of the problem and a certain normalization procedure on the boundary circles, we are dealing with the problem of computing homotopy classes of curves in $\mathcal{L}S^2$ (with fixed endpoints). The key point now is to define the \emph{degree map}, which associates to a curve in $\mathcal{L}S^2$ the degree of its equivariant extension to the full torus. The degree map factors through the space of homotopy classes of curves. Furthermore it satisfies a certain compatibility condition with respect to the concatenation of curves. In particular, the degree map becomes a group homomorphism, when it is restricted to a based fundamental group. This can be used for proving that the restriction of the degree map to based fundamental groups is injective. The injectivity of this map is then the basis for the proof of the statement that maps with the same degree triple are equivariantly homotopic. To summarize the above: The degree triple map $\mathcal{T}$ is well-defined on the set of equivariant homotopy classes of maps $X \to S^2$ and defines a bijection to a subset of $\mathbb{Z}^3$.
We call a degree triple, which is contained in the image of the degree triple map, \emph{realizable}. As a preparation for describing the image of the degree triple map we express the concatenation of curves in $\mathcal{L}S^2$ in terms of a binary operation (``concatenation'') on the space of degree triples: \begin{align*}
\Triple{d_0}{d}{d_1} \bullet \Triple{d_1}{d'}{d_2} = \Triple{d_0}{d+d'}{d_2}. \end{align*} This concatenation operation is only defined for degree triples which are compatible in the sense that the fixed point degrees in the middle coincide. Using this formalism we observe that the image of the degree triple map $\Im(\mathcal{T})$ is closed under the concatenation operation. Furthermore we show that certain basic triples, e.\,g. $\Triple{0}{1}{0}$, are not contained in the image of the degree triple map. These are the key tools for completely describing the image $\Im(\mathcal{T})$ using the formalism of degree triple concatenations. It turns out that the number theoretic condition \begin{align}
\label{DegreeTripleCondition}
d \equiv d_0 + d_1 \mod 2 \end{align} is sufficient and necessary for a degree triple $\Triple{d_0}{d}{d_1}$ to be in the image of the degree triple map. This completely solves the problem for the type I involution in the case $n=2$.
In the next step we reduce the classification problem for the type II involution to the type I classification. The key observation here is that maps $X \to S^2$, where $X$ is equipped with the type II involution, can be normalized on a subspace $A \subset X$ such that they push down to the quotient $X/A$, which happens to be equivariantly diffeomorphic to $S^2$. The action on the $2$-sphere is again given by a reflection along the $x,y$-plane. After another normalization procedure, concerning the images of the two poles of $S^2$, we prove a one-to-one correspondence between equivariant maps $S^2 \to S^2$ and equivariant maps from a torus equipped with the type I involution to $S^2$ having the one fixed point degree be zero. This allows us to use the results for the type I involution. In particular it implies that we are dealing with degree \emph{pairs} instead of degree triples. The degree pair map associates to an equivariant map $f\colon X \to S^2$ its degree pair $\Pair{d_C}{d}$ where $d_C$ is the fixed point degree and $d$ the total degree of $f$. Because of this correspondence between maps defined on a type II torus and maps defined on a type I torus, the condition (\ref{DegreeTripleCondition}) for degree triples to be in the image of the degree triple map induces a corresponding condition for a pair $\Pair{d_C}{d}$ to be in the image of the degree pair map: \begin{align}
\label{DegreePairCondition}
d \equiv d_C \mod 2. \end{align} This completes the case $n=2$ for both involutions.
For the general case $n > 2$ we begin with an examination of the connected components $\mathcal{H}_{(p,q)}$ of $\mathcal{H}_n^*$. As in the case $n=2$ we prove that the components $\mathcal{H}_{(n,0)}$ and $\mathcal{H}_{(0,n)}$ are equivariantly contractible to a point. Fundamental for the following considerations is the result that the components $\mathcal{H}_{(p,q)}$ with $0 < p,q < n$ have a $U(n)$-orbit as equivariant strong deformation retract, where $U(n)$ acts on $\mathcal{H}_{(p,q)}$ by conjugation. This orbit is equivariantly diffeomorphic to the complex Grassmann manifold $\text{Gr}_p(\mathbb{C}^n)$ on which the involution $T$ acts as \begin{align*}
V \mapsto \overline{V}. \end{align*} Thus, the problem of classifying equivariant maps $X \to \mathcal{H}_{(p,q)}$ is equivalent to classifying equivariant maps $X \to \text{Gr}_p(\mathbb{C}^n)$. Now we fix the standard flag in $\mathbb{C}^n$ and consider the -- with respect to this flag -- unique (complex) one-dimensional Schubert variety $\mathcal{S}$ in $\text{Gr}_p(\mathbb{C}^n)$. In this situation we prove, using basic Hausdorff dimension theoretical results, that it is possible to iteratively remove certain parts of the Grassmann manifold $\text{Gr}_p(\mathbb{C}^n)$ so that in the end we obtain an equivariant strong deformation retract to the Schubert variety $\mathcal{S}$. The latter is equivariantly biholomorphic to the complex projective space $\mathbb{P}_1$ equipped with the standard real structure. This allows us to use the results for the $n=2$ case. In terms of the invariants, the only difference between the case $n = 2$ and $n > 2$ is that in the former case the fixed point degrees are integers from the set $\mathbb{Z}$ while in the latter case they are only from the set $\{0,1\}$. This stems from the fact that in the case $n>2$ the fundamental group of the $T$-fixed points in the Grassmann manifold is not infinite cyclic anymore, but cyclic of order two. In this setup, the conditions for degree triples resp. pairs to be realizable are exactly (\ref{DegreeTripleCondition}) and (\ref{DegreePairCondition}). This completes the classification of equivariant maps $X \to \mathcal{H}_n^*$.
In chapter~\ref{ChapterJumps} we construct curves \begin{align*}
H\colon [-1,1] \times X \to \mathcal{H}_n \end{align*} of equivariant maps with the property that the image $\Im(H_t)$ is contained in $\mathcal{H}_n^*$ for $t\not= 0$ and $H_{-1}$ and $H_{+1}$ are not equivariantly homotopic as maps to $\mathcal{H}_n^*$. This is only possible if we allow a certain degeneration at $t=0$, which means that there exists a non-empty \emph{singular set} $S(H_0)$ consisting of the points $x \in X$ such that the matrix $H_0(x)$ is singular. To make the construction non-trivial we require the singular set $S(H_0)$ to be discrete. As in the previous chapter we first consider the type I involution in the special case $n=2$. As a first result we obtain that jump curves from $H_{-1}$ to $H_{+1}$ with discrete singular set can only exist if the eigenvalue signatures of $H_{-1}$ and $H_{+1}$ coincide. Hence we fix an eigenvalue signature $(p,q)$. The construction of \emph{jump
curves} is based on the decomposition of degree triples as a concatenation of simpler triples. First we show how to construct jumps for certain ``model maps'' for basic degree triples. Subsequently we extend this method such that we can construct jumps from any equivariant homotopy class to any other -- as long as the eigenvalue signature remains unchanged.
For the type II involution in the case $n=2$, we can employ the same correspondence that has been used during the homotopy classification to turn jump curves for the type I involution into corresponding jump curves for the type II involution. This completes the construction of jump curves in the case $n=2$.
For the general case $n > 2$ we construct jump curves using the embedding of the Schubert variety $\mathcal{S} \cong \mathbb{P}_1$ into $\text{Gr}_p(\mathbb{C}^n)$. In other words: The jump curves we construct in the higher dimensional situation really come from jump curves in the case $n=2$.
\chapter{Equivariant Homotopy Classification} \label{ChapterClassification}
As indicated in the introduction, we may regard the torus $X$ as being defined by the standard square lattice $\Lambda = \LatGen{1}{i}$. As a complex manifold it carries a canonical orientation. Furthermore, we regard $X$ as being equipped with the real structure $T([z]) = [\overline{z}]$ (type I) or with the real structure $T([z]) = [i\overline{z}]$ (type II). Recall that $G$ denotes the cyclic group $C_2$ of order two. The $G$-action on $\mathcal{H}_n$ is assumed to be given by matrix transposition or -- equivalently -- complex conjugation: \begin{align*}
T\colon \mathcal{H}_n &\to \mathcal{H}_n\\
H &\mapsto \ltrans{H} = \overline{H}. \end{align*} The goal in this chapter is to completely describe the sets $[X,\mathcal{H}_n^*]_G$ of $G$-\parbox{0pt}{}equivariant homotopy classes of maps $X \to \mathcal{H}_n^*$ ($n \geq 2$). Recall the basic definition of equivariant homotopy: \begin{definition}
Let $X$ and $Y$ be two $G$-spaces and $f_0, f_1\colon X \to Y$ two
$G$-maps. Note that $I \times X$ ($I$ is the unit interval $[0,1]$)
  can be regarded as a $G$-space by defining $T(t,x) = (t,T(x))$. Then
$f_0$ and $f_1$ are said to be \emph{$G$-homotopic} (or
\emph{equivariantly homotopic}\footnote{We use the terms
``$G$-homotopy'' and ``equivariant homotopy'' interchangeably.})
if there exists a $G$-map $f\colon I \times X \to Y$ such that
$f(0,\cdot) = f_0$ and $f(1,\cdot) = f_1$. \end{definition}
The first invariant of maps $X \to \mathcal{H}_n^*$ we consider is the \emph{eigenvalue signature}. We begin with the following \begin{definition}
Let $H$ be a non-singular $n \times n$ matrix and
\begin{align*}
    \lambda^+_1 \geq \ldots \geq \lambda^+_p > 0 > \lambda^-_1 \geq \ldots \geq \lambda^-_q
\end{align*}
the eigenvalues of $H$ (repetitions allowed). Then we call $(p,q)$
the \emph{eigenvalue signature} (or simply \emph{signature}) of $H$
and set $\text{sig}\, H = (p,q)$. \end{definition} The connected components of the space $\mathcal{H}_n^*$ are the $n+1$ open subspaces $\mathcal{H}_{(p,q)}$, where \begin{align*}
\mathcal{H}_{(p,q)} = \left\{H \in \mathcal{H}_n^*\colon \text{sig}\, H = (p,q)\right\}. \end{align*} Thus we can write \begin{align*}
[X,\mathcal{H}_n^*]_G = \dot{\bigcup_{p+q=n}}\;[X,\mathcal{H}_{(p,q)}]_G. \end{align*} In particular this means that the signature $\text{sig}\,(H)$ for maps $H\colon X \to \mathcal{H}_n^*$ is well-defined and a topological invariant. It turns out that the components $\mathcal{H}_{(n,0)}$ and $\mathcal{H}_{(0,n)}$ are equivariantly contractible but the components $\mathcal{H}_{(p,q)}$ with $0<p,q<n$ are topologically interesting as they have Grassmann manifolds as equivariant strong deformation retract. The classification of maps to a component $\mathcal{H}_{(p,q)}$ for a fixed (non-definite) eigenvalue signature $(p,q)$ is in this way reduced to the problem of classifying equivariant maps to complex Grassmann manifolds $\text{Gr}_p(\mathbb{C}^n)$ on which the $G$-action is given by standard complex conjugation.
As a preparation for proving a general statement about the set $[X,\mathcal{H}_n^*]_G$, we first consider the special case $n=2$ (theorem~\ref{HamiltonianClassificationRank2}, p.~\pageref{HamiltonianClassificationRank2}). As mentioned earlier, the only non-trivial cases occur for maps whose images are contained in the components $\mathcal{H}_{(p,q)}$ of mixed signature. In the case $n=2$ this boils down to $\mathcal{H}_{(1,1)}$. This space has the $2$-sphere, equipped with the involution given by a reflection along the $x,y$-plane, as equivariant strong deformation retract.\footnote{The 2-sphere is regarded as being embedded as the
unit sphere in $\mathbb{R}^3$} Thus, the problem of describing $[X,\mathcal{H}_{(1,1)}]_G$ is reduced to classifying equivariant maps $X \to S^2$. We do this for the type~I and the type~II involution seperately, but it turns out that the type II case can be reduced to the type I case (see theorem~\ref{Classification1} and theorem~\ref{Classification2}). The result is that for both involutions the complete invariants of maps $X \to S^2$ consist of two pieces of data: First the \emph{total degree} $d$ (which is also an invariant of non-equivariant homotopy) and second the so-called \emph{fixed point degrees} -- two integers (named $d_0, d_1$) for the type I involution and one integer (named $d_C$) for the type~II involution. Note that the notion of fixed point degree only makes sense for \emph{equivariant} maps, as they are required to map the fixed point set in $X$ into the fixed point set in $S^2$. Furthermore we obtain the statement that not all combinations of total degree and fixed point degrees are realizable by $G$-maps. As mentioned in the introduction, the conditions which are sufficient and necessary are $d \equiv d_0 + d_1 \mod 2$ for the type I involution and $d \equiv d_C \mod 2$ for the type II involution.
After having proven a classification statement for $n=2$, we observe that this case is really the fundamental case to which the general case ($n$ arbitrary) can be reduced by means of a retraction argument. In each Grassmannian $\text{Gr}_p(\mathbb{C}^n)$ there exists a Schubert variety $\mathcal{S} \cong \mathbb{P}_1$; after fixing a full flag in $\mathbb{C}^n$, this is the unique one-dimensional (over $\mathbb{C}$) Schubert variety which generates the second homology group $H_2(\text{Gr}_p(\mathbb{C}^n),\mathbb{Z})$. After iteratively cutting out certain parts of a general Grassmannian $\text{Gr}_p(\mathbb{C}^n)$ we obtain an equivariant retraction to this embedded Schubert variety. It can be equivariantly identified with $\mathbb{P}_1$ on which the involution is given by the standard real structure: \begin{align*}
[z_0:z_1] \mapsto \left[\overline{z_0}:\overline{z_1}\right] \end{align*} This space can be equivariantly identified with $S^2$. Hence, we can use the previous results for the case $n=2$.
\section{Maps to $\mathcal{H}_2$} \label{SectionN=2}
In this section we provide a complete description of the set $[X,\mathcal{H}_2^*]_G$, where $X$ is equipped with a real structure $T$, which is either the type I or the type II involution. This case is of particular importance, since all the other cases (i.\,e. $n > 2$) can be reduced to this one. Note that a general equivariant map $H\colon X \to \mathcal{H}_2$ is of the form \begin{align}
\label{GeneralRank2Map}
H =
\begin{pmatrix}
a_1 & b \\
\overline{b} & a_2
\end{pmatrix}, \end{align} where $a_1$, $a_2$ are functions $X \to \mathbb{R}$ and $b$ is a function $X \to \mathbb{C}$ satisfying \begin{align}
\label{N=2MatrixEquivarianceConditionsA}
&a_j \circ T = a_j \;\text{for $j=1,2$}\\
\label{N=2MatrixEquivarianceConditionsB}
&b \circ T = \overline{b} \end{align} A map into $\mathcal{H}_n^*$ has the additional property that
$\det H = a_1 a_2 - |b|^2$ is nowhere zero. As we have mentioned previously, the space $\mathcal{H}_2^*$ decomposes into the disjoint union \begin{align*}
\mathcal{H}_{(2,0)} \,\dot{\cup}\, \mathcal{H}_{(1,1)} \,\dot{\cup}\, \mathcal{H}_{(0,2)}, \end{align*} where $(2,0)$, $(1,1)$ and $(0,2)$ are the possible eigenvalue signatures for maps $X \to \mathcal{H}_n^*$. First we note: \begin{remark}
\label{Rank2DefiniteComponentsContractible}
The components $\mathcal{H}_{(2,0)}$ and $\mathcal{H}_{(0,2)}$ have
$\{\pm \I{2}\}$ as equivariant strong deformation
retract.\footnote{Here, $\I{k}$ is the $k \times k$ identity
matrix.} \end{remark} \begin{proof}
It suffices to consider the component $\mathcal{H}_{(2,0)}$, as the
proof for the other is exactly the same. We define the following
strong deformation retract:
\begin{align*}
\rho\colon I \times \mathcal{H}_{(2,0)} &\to \mathcal{H}_{(2,0)}\\
(t,H) &\mapsto (1-t)H + t\I{2}.
\end{align*}
We have to check that $\rho$ really maps into
$\mathcal{H}_{(2,0)}$. For this, let $H$ be any positive-definite
hermitian matrix. Then $(1-t)H$ is also positive-definite for $t \in
(0,1)$. If $\det((1-t)H + t\I{2}) = 0$, this means that the negative
number $-t$ is an eigenvalue of $(1-t)H$, which is a
contradiction. Equivariance of $\rho_t$ follows from the fact that
the equivariance conditions (\ref{N=2MatrixEquivarianceConditionsA})
and (\ref{N=2MatrixEquivarianceConditionsB}) are compatible with
scaling by real numbers. \end{proof}
At this point we begin using the space $i\mathfrak{su}_2$, which is the vector space of traceless hermitian operators: \begin{align*}
i\mathfrak{su}_2 = \left\{
\begin{pmatrix}
a & \phantom{-}b \\
\overline{b} & -a
\end{pmatrix}\colon a \in \mathbb{R} \;\text{and}\; b \in \mathbb{C}\right\}. \end{align*} It can be identified with $\mathbb{R}^3$ via the linear isomorphism \begin{align}
\label{EmbeddingU2OrbitAsUnitSphere}
\Psi\colon \begin{pmatrix}
a & b \\
\overline{b} & -a
\end{pmatrix} \mapsto
\begin{pmatrix}
a \\
\Re(b) \\
\Im(b)
\end{pmatrix}. \end{align} We regard $\mathbb{R}^3$ as being equipped with the involution, which reflects along the $x,y$-plane: \begin{align*}
\begin{pmatrix}
x \\
y \\
z
\end{pmatrix} \mapsto
\begin{pmatrix}
\phantom{-}x \\
\phantom{-}y \\
-z
\end{pmatrix}. \end{align*} With respect to this action, the diffeomorphism $i\mathfrak{su}_2 \xrightarrow{\;\sim\;} \mathbb{R}^3$ is equivariant. Regarding the topology of the component $\mathcal{H}_{(1,1)}$ we state: \begin{remark}
\label{Rank2TraceZeroReduction}
The component $\mathcal{H}_{(1,1)}$ has
$i\mathfrak{su}_2\smallsetminus\{0\}$ as equivariant, strong deformation
retract. \end{remark} \begin{proof}
A general matrix in $i\mathfrak{su}_2$ is of the form
\begin{align*}
\begin{pmatrix}
a & \phantom{-}b \\
\overline{b} & -a
\end{pmatrix}
\end{align*}
with $a$ real and $b$ complex. Its eigenvalues are
\begin{align*}
\pm\sqrt{a^2 + |b|^2}.
\end{align*}
This implies that $i\mathfrak{su}_2\smallsetminus\{0\}$ is contained
in $\mathcal{H}_{(1,1)}$. Now we prove that $\mathcal{H}_{(1,1)}$
has the space $i\mathfrak{su}_2\smallsetminus\{0\}$ as equivariant,
strong deformation retract. A computation shows that the two
eigenvalue functions $\lambda_1,\lambda_2$ of the general map $H$
(\ref{GeneralRank2Map}) are
\begin{align*}
\lambda_{1,2} = \frac{a_1+a_2}{2} \pm \sqrt{\left(\frac{a_1 + a_2}{2}\right)^2 + |b|^2 - a_1 a_2}.
\end{align*}
It is a straightforward computation to show that $\det H > 0$
implies that $H$ is positive or negative definite. From this we can
conclude that the eigenvalue signature being $(1,1)$ implies $\det H
< 0$, that is $|b|^2 - a_1 a_2 > 0$. Now define
\begin{align*}
c = \frac{a_1 + a_2}{2}
\end{align*}
and consider the strong deformation retract
\begin{align*}
\rho\colon I \times \mathcal{H}_{(1,1)} &\to \mathcal{H}_{(1,1)}\\
(t,H) &\mapsto (1-t)H - tc\I{2}.
\end{align*}
For this to be well-defined we need $\det(\rho(t,H)) <
0$ for all $t$ and $H$:
\begin{align*}
\det(\rho(t,H)) &= (a_1 - tc)(a_2 - tc) - |b|^2\\
&= a_1 a_2 - |b|^2 - tc(a_1 + a_2) + (tc)^2\\
&< - tc(a_1 + a_2) + (tc)^2\\
    &= -t\frac{(a_1 + a_2)^2}{2} + t^2\frac{(a_1 + a_2)^2}{4}\\
    &= \left(\frac{a_1 + a_2}{2}\right)^2 t(t-2) \leq 0.
\end{align*}
Furthermore, $\rho$ stabilizes $i\mathfrak{su}_2\smallsetminus\{0\}$ and
$\rho(1,\mathcal{H}_{(1,1)}) = i\mathfrak{su}_2\smallsetminus\{0\}$. As
in the previous remark, equivariance of $\rho_t$ follows since the
equivariance conditions (\ref{N=2MatrixEquivarianceConditionsA}) and
(\ref{N=2MatrixEquivarianceConditionsB}) are compatible with scaling
by real numbers. \end{proof}
Clearly, the isomorphism $\Psi$ maps the zero matrix to the origin. The space $\mathbb{R}^3\smallsetminus\{0\}$ has the unit $2$-sphere as equivariant strong deformation retract. Under $\Psi$ this corresponds to the $U(2)$ orbit of $\I{1}{1} = \text{Diag}\,(1,-1)$ in $i\mathfrak{su}_2$. This orbit consists of the matrices \begin{align*}
\left\{
\begin{pmatrix}
|a|^2 - |b|^2 & 2a\overline{c} \\
2\overline{a}c & |b|^2 - |a|^2
\end{pmatrix}\colon \;\text{where}\;
\begin{pmatrix}
a & b \\
c & d
\end{pmatrix}\;\text{is unitary.}\right\} \end{align*}
This proves the following \begin{proposition}
\label{Rank2MixedSignatureReduction}
The space $i\mathfrak{su}_2\smallsetminus\{0\}$ has the $U(2)$-orbit of
$\I{1}{1}$ as equivariant strong deformation retract. The above
  isomorphism $\Psi$ equivariantly identifies the $U(2)$-orbit with
the unit sphere in $\mathbb{R}^3$ on which the involution acts as a
reflection along the $x,y$-plane. \qed \end{proposition}
This reduces the classification problem to the classification of equivariant maps $X \to S^2$. In order to discuss mapping degrees of maps to $S^2$ we need to fix an orientation on the sphere and on its equator $E$. We choose the orientation on $S^2$ to be defined by an outer normal vector field on the sphere. Furthermore we define the orientation on the equator $E$ to be such that the loop \begin{align}
\label{DegreeOneMapOnEquator}
S^1 &\to E \subset S^2 \subset \mathbb{R}^3\\
z &\mapsto
\begin{pmatrix}
\cos(\arg z)\\
\sin(\arg z)\\
0
\end{pmatrix}\nonumber \end{align} has positive degree one\footnote{These choices are arbitrary. Choosing
the opposite orientation on the sphere or on the equator only
changes the signs appearing in concrete examples, but not the
general discussion.}. Later, when discussing the case $n > 2$, it will become important to equivariantly identify $S^2$ with the projective space $\mathbb{P}_1$. The action on $\mathbb{P}_1$ is given by standard complex conjugation \begin{align*}
[z_0:z_1] \mapsto \left[\overline{z_0}:\overline{z_1}\right]. \end{align*} \label{P1S2OrientationDiscussion}Note that $\mathbb{P}_1$ is canonically oriented as a complex manifold. Let $\psi$ be an equivariant, orientation-preserving diffeomorphism $S^2 \to \mathbb{P}_1$. This identifies the compactified real line $\smash{\widehat{\mathbb{R}}}$ with the equator $E$. We can assume that this identification of $S^2$ with $\mathbb{P}_1$ is such that a loop of degree $+1$ into the compactified real line $\smash{\widehat{\mathbb{R}}} \subset \mathbb{P}_1$ is given by \begin{align*}
S^1 &\to \mathbb{RP}_1\\
z &\mapsto
\begin{cases}
\infty & \;\text{if $\arg(z) \in 2\pi\mathbb{Z}$}\\
\tan\left(\frac{\arg z - \pi}{2}\right) & \;\text{else}\\
\end{cases}. \end{align*} That is, the loop starts at $\infty$, then traverses the negative real numbers until it reaches zero, then it traverses the positive real numbers until it reaches $\infty$ again.
As indicated in the introduction, the relevant topological invariants of maps $X \to S^2$ consist of two pieces of data: \begin{enumerate} \item The \emph{total degree}, which is the usual Brouwer degree, and \item The \emph{fixed point degrees}, which are the Brouwer degrees of
the restrictions of the maps to the components of the fixed point sets
of the respective involution on $X$. \end{enumerate} For the type I involution this fixed point set consists of two disjoint circles. Hence we obtain \emph{degree triples} of the form $\Triple{d_0}{d}{d_1}$ as invariants, where the $d_j$ are the fixed point degrees and $d$ is the total degree. For the type II involution the fixed point set is a single circle, which is why the invariants we work with are \emph{degree pairs} of the form $\Pair{d_C}{d}$ ($d$ being the total degree and $d_C$ the fixed point degree). We call a degree triple resp. a degree pair \emph{realizable} if it occurs as the invariant of a $G$-map. The exact definitions will be given in definition~\ref{DefinitionTriple} and definition~\ref{DefinitionDegreePair}.
The main result in this section is the following \begin{restatable*}{theorem}{HamiltonianClassificationRankTwo}
\label{HamiltonianClassificationRank2}
Let $X$ be a torus equipped with either the type I or the type II
involution. Then:
\begin{enumerate}[(i)]
\item The sets $[X,\mathcal{H}_{(2,0)}]_G$ and
$[X,\mathcal{H}_{(0,2)}]_G$ are trivial (i.\,e. one-point sets).
\item Two $G$-maps $X \to \mathcal{H}_{(1,1)}$ are $G$-homotopic iff
their degree triples (type I) resp. their degree pairs (type II)
agree.
\item The realizable degree triples $\Triple{d_0}{d}{d_1}$ (type I)
resp. degree pairs $\Pair{d_C}{d}$ (type II) are exactly those
which satisfy
\begin{align*}
d \equiv d_0 + d_1 \mod 2 \;\;\text{resp.}\;\;d \equiv d_C \mod 2.
\end{align*}
\end{enumerate} \end{restatable*} The proof will be given in section~\ref{SectionHamiltonianClassificationRank2} (p.~\pageref{HamiltonianClassificationRankTwoProof}).
\subsection{Maps to $S^2$} \label{SectionMapsTo2Sphere}
After the reduction described above, we can regard $G$-maps $X \to \mathcal{H}_{(1,1)}$ as having their image contained in the oriented $2$-sphere $S^2 \subset \mathbb{R}^3$. Throughout this section we use a fixed orientation-preserving identification of the equator $E$ with $S^1$. Furthermore, we let $p_0 \in E$ be the fixed base point in the equator, which corresponds -- under this identification -- to $1 \in S^1$. This point will be used later for bringing the maps into a convenient “normal form”. We will make frequent use of the following theorem: \begin{theorem}
\label{HopfTheorem}
Let $M$ be a closed, $n$-dimensional manifold. Then maps $M \to S^n$
are homotopic if and only if their degrees agree. \end{theorem} \begin{proof}
See e.\,g. \cite[p.~50]{Milnor}. \end{proof}
An important tool for the classification will be the study of homotopies on the fixed point sets. For this we start with a general definition: \begin{definition}
\label{DefinitionFixpointDegree} (Fixed point degree) Let $f\colon M
\to Y$ be a $G$-map between $G$-manifolds. Assume that the fixed point
set $Y^G$ is a closed, oriented manifold $K$ of dimension $m$ and
that the fixed point set $M^G$ is the disjoint union
\begin{align*}
M^G = \bigcup_{j=0,\ldots,k} K_j
\end{align*}
of closed, oriented (connected) manifolds $K_j$ each of dimension
$m$. Then we define the \emph{fixed point degrees} to be the
$k$-tuple $\left(d_0,\ldots,d_k\right)$ where $d_j$ is the degree of
the map $\restr{f}{K_j}\colon K_j \to K$. \end{definition}
\begin{remark}
\label{FixpointDegreeInvariant}
The fixed point degrees are invariants of $G$-homotopies. \end{remark} \begin{proof}
Let $M$ and $Y$ be two $G$-manifolds as in
definition~\ref{DefinitionFixpointDegree} and let $f,f'$ be two
$G$-homotopic maps $M \to Y$. Denote by $(d_0,\ldots,d_k)$
resp. $(d'_0,\ldots,d'_k)$ their fixed point degrees. Assume that
$d_j \neq d'_j$ for some $j$. By assumption, there exists a
$G$-homotopy $H\colon I \times X \to Y$ from $f$ to $f'$. This
homotopy can be restricted to $I \times K_j$, which can be regarded
as a homotopy $h\colon I \times K_j \to K$. The degree $d_j$ is the
degree of the map $h_0$ and $d'_j$ is the degree of the map
$h_1$. But the degree is a homotopy invariant, hence $d_j \neq d'_j$
yields a contradiction. \end{proof}
From now on we handle the involution types I and II separately.
\subsubsection{Type I} \label{SectionMapsToSphereClassI}
In this section the real structure $T$ on $X$ is the type I involution: \begin{align*}
T\colon X &\to X\\
[z] &\mapsto [\overline{z}]. \end{align*} Furthermore, we regard the torus $X$ as being equipped with the structure of a $G$-CW complex (see e.g. \cite[p.~16]{May} or \cite[p.~1]{ShahArticle}). Such a $G$-CW structure is depicted in figure~\ref{FigureClassICWDecomposition}. We continue with the \emph{cylinder reduction} method.
\paragraph{Cylinder Reduction}
In order to describe the cylinder reduction we need the notion of a fundamental region: \begin{definition}
\label{DefinitionFundamentalRegion}
A \emph{fundamental region} for the $G$-action on $X$ is a connected
subset $R \subset X$ such that each $G$-orbit in $X$ intersects $R$
in a single point. \end{definition}
\begin{figure}
\caption{Type I $G$-CW setup of the torus $X$.}
\label{FigureClassICWDecomposition}
\label{FigureClassIFundamentalRegion}
\label{3figs}
\end{figure}
The type I involution is particularly convenient to work with, since for this action there exists a sub-$G$-CW complex that is a fundamental region for the $T$-action (see figure~\ref{FigureClassIFundamentalRegion}). Geometrically this fundamental region is a closed cylinder (including its boundary circles), which we denote by $Z$. The fixed point set $X^G$ is the disjoint union of two circles $C_0$ and $C_1$. The cylinder $Z$ is bounded by these circles. We identify $Z$ with $I \times S^1$. In this setup, the boundary circles are $C_0 = \{0\} \times S^1$ and $C_1 = \{1\} \times S^1$. For convenience we also set $C = C_0 \cup C_1$. In this situation we note the following general lemma: \begin{lemma}
\label{Class1EquivariantExtension}
Let $Y$ be a $G$-space, $Y^G$ the $T$-fixed point set in $Y$ and
$$
f\colon (Z,C) \to (Y,Y^G)
$$
a map. Then there exists a unique equivariant extension of $f$ to a
$G$-map $X \to Y$ which we denote by $\smash{\hat{f}}$. The same applies
to homotopies of such maps. \end{lemma} \begin{proof}
We only prove the statement for a given homotopy
\begin{align*}
H\colon I \times (Z,C) \to (Y,Y^G).
\end{align*}
Denote the complementary cylinder by $Z'$ (note that $Z \cap Z' = C
= C_0 \cup C_1$). Then we can construct the equivariant extension
$\smash{\widehat{H}}$ of $H$ by defining
\begin{align*}
\widehat{H}\colon I \times X &\to Y\\
(t,x) &\mapsto
\begin{cases}
H(t,x) & \;\text{if $x \in Z$}\\
T(H(t,T(x))) & \;\text{if $x \in Z'$}\\
\end{cases}
\end{align*}
Since $H(t,\cdot)$ is a map $(Z,C) \to (Y,Y^G)$, this definition of
$\smash{\widehat{H}}$ is well-defined on the boundary circles $C_0$ and
$C_1$ and globally continuous. \end{proof}
For the concrete situation $(Y,Y^G) = (S^2,E)$ this implies: \begin{remark}
\label{CylinderReduction} (Cylinder reduction) Every $G$-map
$f\colon X \to S^2$ is completely determined by its restriction
$\restr{f}{Z}$ to the cylinder $Z$. Discussing $G$-homotopies of
maps $X \to S^2$ is equivalent to discussing homotopies of maps
$(Z, C) \to (S^2,E)$. \end{remark}
\label{CjS1Identification} For the following we need to fix an identification of $C_0$ and $C_1$ with $S^1$.\footnote{Here, $S^1$ is regarded as being equipped
with the standard orientation.} Let us assume that the identification of the circles $C_j$ with $S^1$ is such that the loops \begin{align*}
\gamma_j\colon I &\to X\\
t &\mapsto \left[t + \frac{j}{2}i\right] \;\text{ for $j=0,1$} \end{align*} both define loops of degree one. Recall that loops in $S^1$ are up to homotopy determined by their degree. Based on this observation we define a normal form for equivariant maps $X \to S^2$: \begin{definition}
\label{DefinitionBoundaryNormalization}
(Type I normalization) Let $f\colon X \to S^2$ be a $G$-map (resp. a
map $(Z,C) \to (S^2,E)$). Then $f$ is \emph{type I normalized} if
the restrictions
\begin{align*}
\Restr{f}{C_j}\colon C_j \to E
\end{align*}
are, using the fixed identifications of the circles $C_j$ and $E$
with $S^1$, of the form $z \mapsto z^{k_j}$ for some integers $k_j$. \end{definition}
\begin{remark}
\label{BoundaryNormalization}
Let $f\colon (Z,C) \to (S^2,E)$ be a map (or let $f$ be a $G$-map $X
\to S^2$). Via the homotopy extension property (resp. its
equivariant version, see corollary~\ref{G-HEP}) of the pair $(Z, C)$
(resp. $(X,C)$), there exists a $G$-homotopy from $f$ to a map $f'$,
which is type I normalized. This means that, using the respective
identifications of the $C_j$ and $E$ with $S^1$, the map
$\restr{f'}{C_j}$ is of the form $z \mapsto z^{k_j}$ where $k_j$
denotes the respective fixed point degree on the circle $C_j$. \qed \end{remark}
Given a $G$-map $f\colon X \to S^2$ we have already defined its fixed point degree $\Bideg{d_0}{d_1}$ (see definition~\ref{DefinitionFixpointDegree}). Given a map $f\colon (Z, C) \to (S^2,E)$ we define its fixed point degree to be the fixed point degree of its equivariant extension $X \to S^2$.
\paragraph{The Free Loop Space}
By the cylinder reduction (remark~\ref{CylinderReduction}) it suffices to classify maps $(Z,C) \to (S^2,E)$ up to homotopy. The cylinder $Z$ is just $S^1$ times some closed interval (e.\,g. the closed unit interval $I$). Recall that $\mathcal{L}S^2$, the free loop space of the space $S^2$, is the space of all maps $S^1 \to S^2$: \begin{align*}
\mathcal{L}S^2 = \mathcal{M}(S^1,S^2) = \{f\colon S^1 \to S^2\}. \end{align*} First we make a general remark about the topology on the free loop space: \begin{remark}
The space $S^1$ is Hausdorff and locally compact. Hence, in the
terminology of e.\,g. \cite{ArticleEscardoHeckmann}, it is
\emph{exponentiable}, which means that for any other spaces $Y$ and
$A$, the natural bijection
\begin{align*}
\mathcal{M}(A,\mathcal{M}(S^1,Y)) = \left(\Maps{S^1}{Y}\right)^A
\cong \Maps{A \times S^1}{Y} = \mathcal{M}(A \times S^1,Y)
\end{align*}
is compatible with the topology in the sense that it preserves
continuity. See \cite{ArticleEscardoHeckmann}. The mapping spaces
are regarded as being equipped with the compact-open topology. \qed \end{remark}
As an immediate consequence of the above we note: \begin{remark}
A map $f\colon (Z,C) \to (S^2,E)$ can be regarded as a curve in
$\mathcal{L}S^2$ starting and ending at loops whose image are
contained in $E$. After type I normalization the start and end curve
are distinguished and indexed by $\mathbb{Z}$. \qed \end{remark}
We do not make any distinction in the notation; we silently identify maps $(Z,C) \to (S^2,E)$ with curves in $\mathcal{L}S^2$.
\begin{definition}
Let $\alpha$ be a curve in $\mathcal{L}S^2$ from $p_1$ to $p_2$ and
$\beta$ a curve from $p_2$ to $p_3$. Then we denote by
$\beta \ast \alpha$ the concatenation of the curves $\alpha$ and
$\beta$, which is a curve from $p_1$ to $p_3$. Given a curve
$\alpha$, we denote by $\alpha^{-1}$ the curve with time reversed. \end{definition}
\begin{figure}
\caption{Concatenation of maps from the cylinder.}
\label{fig:ConcatCylinderMaps}
\end{figure}
Using the isomorphism \begin{align}
\label{FLSIso}
\mathcal{M}(I,\mathcal{L}S^2) \cong \mathcal{M}(Z,S^2), \end{align} concatenation of curves induces a corresponding operation on the space of (type I normalized) maps $(Z,C) \to (S^2,E)$. This is depicted in figure~\ref{fig:ConcatCylinderMaps}. We need to be careful to distinguish the inverse of a map from its inverse regarded as a curve. But this should always be clear from the context.
Of course, the homotopy classification of curves in $\mathcal{L}S^2$ depends on the topology of $\mathcal{L}S^2$. Our first remark towards understanding the topology of the latter is: \begin{remark}
The free loop space $\mathcal{L}S^2$ is path-connected. \end{remark} \begin{proof}
Let $\alpha, \beta \in \mathcal{L}S^2$ be two loops in $S^2$. Since
$\pi_1(S^2) = 0$, any loop can be deformed to the constant loop at
some chosen point in $S^2$. By transitivity, this implies that
$\alpha$ and $\beta$ can be deformed into each other. In other
words, there is a path from $\alpha$ to $\beta$, regarded as
elements of the free loop space. \end{proof}
Now we consider the set $\pi(\mathcal{L}S^2;\gamma_0,\gamma_1)$ of homotopy classes of curves in $\mathcal{L}S^2$ with fixed endpoints $\gamma_0$ and $\gamma_1$. Using lemma~\ref{LemmaCurvesNullHomotopic} we can conclude that two curves $f, g\colon I \to \mathcal{L}S^2$ from $\gamma_0$ to $\gamma_1$ are homotopic if and only if $g^{-1} \ast f$ is null-homotopic in $\pi(\mathcal{L}S^2,\gamma_0)$. This underlines that we need to understand the fundamental group $\pi_1(\mathcal{L}S^2)$. In the next step we compute the fundamental group $\pi_1(\mathcal{L}S^2)$. For this we make use of the following fibration (see e.\,g. \cite{AgaudeArticle}) \begin{align}
\label{LoopFibration}
\Omega S^2 \hookrightarrow \mathcal{L}S^2 \to S^2 \end{align} where $\Omega S^2$ denotes the space of \emph{based} loops in $S^2$ at some fixed basepoint, e.\,g. $p_0 \in E$. The map $\mathcal{L}S^2 \to S^2$ is the evaluation map, which takes a loop in $\mathcal{L}S^2$, i.\,e. a map $S^1 \to S^2$, and evaluates it at $1$. Now we can state: \begin{lemma}
\label{FundamentalGroupOfFLS}
The fundamental group $\pi_1(\mathcal{L}S^2)$ of the free loop space
of $S^2$ is infinite cyclic. \end{lemma} \begin{proof}
We consider the fibration (\ref{LoopFibration}) and develop it into
a long exact homotopy sequence. The relevant part of this sequence
is:
\begin{align}
\ldots \rightarrow \mathbb{Z} \cong \pi_2(S^2) \rightarrow \pi_1(\Omega S^2) \rightarrow \pi_1(\mathcal{L}S^2) \rightarrow \pi_1(S^2) \cong 0
\end{align}
It is known that
\begin{align*}
\pi_1(\Omega S^2) \cong \pi_2(S^2) \cong \mathbb{Z}.
\end{align*}
Therefore, by exactness of the above sequence, it follows that
$\pi_1(\mathcal{L}S^2)$ must be a quotient of $\mathbb{Z}$,
i.\,e. either the trivial group, $\mathbb{Z}$ or one of the finite
groups $\mathbb{Z}_n$. We prove that it is $\mathbb{Z}$ with the
help of a geometric argument: Let $\gamma_1$ be the curve in $S^2$
which parametrizes the equator $E$ with degree one and choose
$\gamma_1$ as the basepoint for the fundamental group and then
construct an infinite family of maps $X \to S^2$ with the same fixed
point degree $(1,1)$ but with different global degrees -- the latter
implies by Hopf's theorem that they cannot be homotopic. In
particular they cannot be $G$-homotopic and hence the curves
associated to their restrictions to $Z$ cannot be homotopic.
The construction goes as follows: We construct a map $\varphi$ from
the cylinder $Z = I \times S^1$ to $S^2$ such that
$\varphi_0 \equiv \varphi_1$ and $\varphi_0$ and $\varphi_1$ are
degree 1 maps $S^1 \to E$: Assume that for $t=0$ we have
an embedding of $S^1$ as the equator $E \subset S^2$. Then we fix
two antipodal points on the equator and begin rotating the equator
on the Riemann sphere while keeping the two antipodal points fixed
for all $t$. We can rotate this $S^1$-copy on the sphere as fast as
we want, we just need to make sure that at $t=1$ we again match up
with the equator. By making the correct number (i.\,e. an even
number) of rotations we obtain a map $\varphi$ from $Z$ to $S^2$
whose equivariant extension to the full torus $X$ has the desired
fixed point degree $(1,1)$. By increasing the number of rotations,
we can generate homotopically distinct $G$-maps from the torus into
the sphere. Thus we obtain an infinite family of maps with fixed
point degree $\Bideg{1}{1}$ and with distinct total
degrees. Therefore, the fundamental group cannot be finite, but must
be infinite cyclic. \qedhere \end{proof}
\paragraph{The Degree Map}
In the following we introduce the \emph{total degree} into the discussion. The fixed point degree together with this total degree will turn out to be a complete invariant of equivariant homotopy. The degree map will be the fundamental tool for the proof. We would like to associate to a curve in $\mathcal{L}S^2$ the degree of its equivariant extension. This only makes sense for a certain subset of all curves. Thus we define: \begin{definition}
For each $j \in \mathbb{Z}$ we let $\mathscr{D}_j \in
\mathcal{L}S^2$ be the normalized loop in
$E \subset S^2$ of degree $j$. We set
\begin{align*}
\mathcal{L}_{\text{std}} = \left\{\mathscr{D}_j\colon j \in \mathbb{Z}\right\} \subset \mathcal{L}S^2.
\end{align*}
Let $\mathscr{P}$ be the set of curves in $\mathcal{L}S^2$ starting
and ending at points in $\mathcal{L}_{\text{std}}$:
\begin{align*}
\mathscr{P} = \left\{\alpha\colon I \to \mathcal{L}S^2\mid \alpha(0), \alpha(1) \in \mathcal{L}_{\text{std}}\right\}.
\end{align*} \end{definition} The set $\mathscr{P}$ is a suitable domain of definition for the degree map, which we now define: \begin{definition}
\label{DefDegreeMap}
The \emph{degree map} on $\mathscr{P}$ is the map
\begin{align*}
\text{deg}\colon \mathscr{P} &\to \mathbb{Z}\\
\alpha &\mapsto \deg \widehat{\alpha},
\end{align*}
where $\smash{\widehat{\alpha}}$ denotes the equivariant extension
of $\alpha$. That is, $\alpha$, which is a curve in the free loop
space $\mathcal{L}S^2$, is to be regarded as a map
$(Z,C) \to (S^2,E)$ and $\smash{\widehat{\alpha}}\colon X \to S^2$
is its equivariant extension to the full torus (see
lemma~\ref{Class1EquivariantExtension}). \end{definition} An immediate property of the degree map is: \begin{remark}
\label{DegOfConstantCurveZero}
Let $\alpha \in \mathscr{P}$ be the constant curve at some $\gamma
\in \mathcal{L}_{\text{std}}$. Then $\deg \alpha = 0$. \end{remark} \begin{proof}
The equivariant extension $\smash{\widehat{\alpha}}$ has its image contained
in $E \subset S^2$. Hence it is not surjective as a map to $S^2$,
which implies $\deg \smash{\widehat{\alpha}} = 0$. \end{proof} For curves $\alpha$, $\beta$ we introduce the symbols $\alpha \simeq \beta$ to express that $\alpha$ and $\beta$ are homotopic as curves (with fixed endpoints). \begin{remark}
\label{DegreeMapInvariantUnderHomotopies}
When $\alpha, \beta \in \mathscr{P}$ are two curves with common
endpoints, then $\alpha \simeq \beta$ implies $\deg \alpha = \deg
\beta$. In other words: the degree map is also well-defined
on $\mathscr{P}/{\simeq}$, that is $\mathscr{P}$ modulo homotopy. \end{remark} \begin{proof}
Homotopies between two curves can be regarded as homotopies between
maps $Z \to S^2$ rel $C$. Such homotopies can also be equivariantly
extended, giving homotopies between the respective equivariant
extensions of the two maps $X \to S^2$. Therefore, their total
degree must agree. \end{proof} We have just seen that the degree map factors through $\mathscr{P}/{\simeq}$. The next goal is to prove an injectivity property of the degree map. Crucial for its proof is that the degree map is compatible with the aforementioned concatenation operation of loops: \begin{lemma}
\label{DegreeCompatibleWithConcatenation}
Let $\gamma_1, \gamma_2, \gamma_3$ be in $\mathcal{L}_{\text{std}}$,
furthermore let $\alpha \in \mathscr{P}$ be a path in
$\mathcal{L}S^2$ from $\gamma_1$ to $\gamma_2$ and $\beta$ be a path
in $\mathcal{L}S^2$ from $\gamma_2$ to $\gamma_3$. Then
$\beta \ast \alpha$ is a path from $\gamma_1$ to $\gamma_3$ and
\begin{align*}
\deg (\beta \ast \alpha) = \deg \beta + \deg \alpha.
\end{align*} \end{lemma}
\begin{proof}
Let $\alpha$ and $\beta$ be paths in $\mathcal{L}S^2$ with the
properties mentioned above. To prove the lemma, we have to show
that
\begin{align*}
\deg \widehat{\beta \ast \alpha} = \deg \widehat{\beta} + \deg
\widehat{\alpha}.
\end{align*}
The idea we employ here is that -- after smooth approximation of the
maps -- we can count preimage points of a regular fiber to compute
the total degrees. We regard the curves $\alpha$ and $\beta$ as maps
from the cylinder $Z = I \times S^1$ to $S^2$. We can assume that
$\alpha$ and $\beta$ are smooth on $Z$; the concatenation
$\beta \ast \alpha$ does not have to be smooth at the glueing
curves. But after a smooth approximation of $\beta \ast \alpha$ near
this glueing curve, we can assume that the equivariant extensions
$\smash{\widehat{\alpha}}$, $\smash{\widehat{\beta}}$ and
$\smash{\widehat{\beta \ast \alpha}}$ are smooth maps from the torus
into $S^2$.
Now let $q \in S^2$ be a regular point of
$\smash{\widehat{\beta \ast \alpha}}$, away from the neighborhood of
the boundary circles where we have smoothed the map
$\beta \ast \alpha$. We now consider its fiber over $q$. This
consists of all points $p_1,\ldots,p_k$ with
$\smash{\widehat{\alpha}}(p_j) = q$ and all points
$p'_1,\ldots,p'_\ell$ with $\smash{\widehat{\beta}}(p'_j) =
q$.
Summing the points $p_j$ (resp. $p'_j$) respecting the orientation
signs yields $\deg\smash{\widehat{\alpha}}$
(resp. $\deg\smash{\widehat{\beta}}$). Therefore, the points
$p_1,\ldots,p_k,p'_1,\ldots,p'_\ell$ with their respective
orientation signs attached add up to
$\deg\smash{\widehat{\alpha}} + \deg\smash{\widehat{\beta}}$. \end{proof}
Note that lemma~\ref{DegreeCompatibleWithConcatenation} also implies that \begin{align*}
\deg\left(\alpha^{-1}\right) = -\deg\left(\alpha\right), \end{align*} where $\alpha^{-1}$ is the curve $\alpha\colon I \to \mathcal{L}S^2$ with time reversed. The space $\mathscr{P}$ does not carry a group structure in the usual sense, but of course the based fundamental groups $\pi_1(\mathcal{L}S^2,\gamma)$ do carry a group structure. When $\gamma$ is in $\mathcal{L}_{\text{std}}$, then $\pi_1(\mathcal{L}S^2,\gamma) \subset \mathscr{P}/{\simeq}$ and we can note the following \begin{remark}
The degree map induces a group homomorphism on fundamental groups:
\begin{align*}
\text{deg}\colon \pi_1(\mathcal{L}S^2, \mathscr{D}_j) \to \mathbb{Z}
\end{align*}
for every $j \in \mathbb{Z}$. \end{remark} \begin{proof}
Let $\mathscr{D}_j$ be in $\mathcal{L}_{\text{std}}$. By
remark~\ref{DegreeMapInvariantUnderHomotopies} we know that the
degree map is well-defined as a map on the fundamental group
$\pi_1(\mathcal{L}S^2,\mathscr{D}_j)$. By
remark~\ref{DegOfConstantCurveZero} the neutral element in
$\pi_1(\mathcal{L}S^2,\mathscr{D}_j)$, that is the homotopy class of
the constant curve at $\mathscr{D}_j$, gets mapped to $0 \in
\mathbb{Z}$. The group structure in the fundamental group is given
by the concatenation of loops at the
basepoint. Lemma~\ref{DegreeCompatibleWithConcatenation} shows that
the degree map is compatible with the group structures. Hence, the
degree map is a group homomorphism on fundamental groups. \end{proof}
Now that we know that restrictions of the degree map to fundamental groups are group homomorphisms it is useful to compute its kernel. For this we need a lemma, which deals with the special case of a fixed point degree of the form $\Bideg{d_0}{d_0}$. To formulate the statement, we introduce an equivalence relation $\sim$ on the cylinder $Z$, which identifies the circles $C_0, C_1$: $(0,z) \sim (1,z)$. The resulting space $\smash{\widetilde{Z}} = Z/{\sim}$ is a 2-torus.\label{CylinderBoundaryCirclesIdentified} Now we can state:
\begin{lemma}
\label{MapExtensionDegreeEquality}
A map $f\colon (Z,C) \to (S^2,E)$ with $\restr{f}{C_0} \equiv
\restr{f}{C_1}$ has an equivariant extension of even degree. More concretely: The map $f$
induces a map $\smash{\tilde{f}}\colon \smash{\widetilde{Z}} \to S^2$ and
\begin{align*}
\deg \hat{f} = 2 \deg \tilde{f}.
\end{align*}
In particular, the equivariant extension $\smash{\hat{f}}$ has degree
zero iff the induced map $\smash{\tilde{f}}$ has degree zero. \end{lemma}
\begin{proof}
Note that on the LHS we have the degree of the equivariant extension
$\smash{\hat{f}}\colon X \to S^2$ of $f$ while on the RHS we have (two
times) the degree of the induced map $\smash{\tilde{f}}\colon
\smash{\widetilde{Z}} \to S^2$.
We prove the statement by counting preimage points. The torus
$\smash{\widetilde{Z}}$ carries a differentiable structure. The
induced map $\smash{\tilde{f}}\colon \smash{\widetilde{Z}} \to S^2$
does not have to be smooth near $C \subset
\smash{\widetilde{Z}}$.
But after a smooth approximation near $C$ we can assume that it is
globally smooth. By the same reason the equivariant extension
$\smash{\hat{f}}$ defined on $X$ is globally smooth.
Let $q$ be a regular value of the smoothed map $f$, away from the
circle $C$. This implies that $q$ is also a regular value of
$\smash{\tilde{f}}$ and $\smash{\hat{f}}$. Let $d$ be the degree of
$\smash{\tilde{f}}$. The fiber of $q$ under $\smash{\tilde{f}}$ can thus be
written as
\begin{align*}
\tilde{f}^{-1}(q) = \{p_1,\ldots,p_k\}.
\end{align*}
Denoting the respective orientation signs at each preimage point
$p_j$ with $\sigma_j$ we can express the degree $d$ as
\begin{align*}
d = \sum_{1\leq j \leq k} \sigma_j.
\end{align*}
To simplify the proof we assume that $q$ has been chosen such that
$T(q)$ is also a regular value of $\smash{\hat{f}}$ and that $q$
(and therefore also $T(q)$) has no preimage points on the boundary
circles.
Recall that when regarding the cylinder $Z$ as being embedded in $X$
with boundary circles $C_0$ and $C_1$, the equivariant extension
$\smash{\hat{f}}$ is the map
\begin{align*}
\hat{f}\colon X &\to S^2\\
x &\mapsto
\begin{cases}
f(x) & \;\text{if $x \in Z$}\\
T \circ f \circ T(x) & \;\text{if $x \in Z'$}\\
\end{cases}
\end{align*}
where $Z'$ denotes the complementary cylinder in $X$ whose
intersection with $Z$ consists of the circles $C_0 \cup C_1$. It
remains to compare the fibers $\smash{\hat{f}^{-1}}(q)$ and
$\smash{\tilde{f}^{-1}}(q)$. Since $q$ is assumed to have no preimage
points in the boundary circles, we can write the first fiber as
\begin{align*}
\hat{f}^{-1}(q) = \Restr{\hat{f}}{Z}^{-1}(q) \cup \Restr{\hat{f}}{Z'}^{-1}(q).
\end{align*}
By definition of $\smash{\hat{f}}$, the preimage points in the cylinder
$Z$ are exactly those of $\smash{\tilde{f}}$, with the same orientation
signs. We now prove
\begin{align*}
\Restr{\hat{f}}{Z'}^{-1}(q) = T\left(\Restr{\hat{f}}{Z}^{-1}(T(q))\right).
\end{align*}
For this, let $p$ be in $Z'$ with
$\restr{\smash{\hat{f}}}{Z'}(p) = q$. This implies
$T \circ \restr{\smash{\hat{f}}}{Z'}(p) = T(q)$, which in turn
implies by equivariance
\begin{align*}
\Restr{\hat{f}}{Z}(T(p)) = T(q).
\end{align*}
In other words, after defining $\smash{\tilde{p}} = T(p) \in Z$,
we have: $p = T(\smash{\tilde{p}})$ with $\smash{\tilde{p}} \in Z$
and
\begin{align*}
\Restr{\smash{\hat{f}}}{Z}(\smash{\tilde{p}}) =
\Restr{\smash{\hat{f}}}{Z}(T(p)) =
T \circ \Restr{\smash{\hat{f}}}{Z'}(p) = T(q).
\end{align*}
On the other hand, let $\smash{\tilde{p}}$ be in
$T\left(\restr{\smash{\hat{f}}}{Z}^{-1}(T(q))\right)$. This
means that $\smash{\tilde{p}} = T(p)$ with
$\restr{\smash{\hat{f}}}{Z}(p) = T(q)$,
i.\,e. $p = T(\smash{\tilde{p}})$. Then:
\begin{align*}
\Restr{\hat{f}}{Z'}(\tilde{p}) = \Restr{\hat{f}}{Z'}(T(p)) = T \circ \Restr{\hat{f}}{Z}(p) = T(T(q)) = q,
\end{align*}
which means $\smash{\tilde{p}} \in \restr{\smash{\hat{f}}}{Z'}^{-1}(q)$.
Since $T$ bijectively sends $Z$ to $Z'$ it follows that the number
of preimage points on $Z$ and $Z'$ coincide. Also, since
$\smash{\hat{f}}$ restricted to (the interior of) $Z'$ is the
composition of $f$ with two orientation-reversing diffeomorphisms,
the orientation sign at each preimage point $p$ in $Z$ is the same
as the orientation sign of the corresponding preimage point $T(p)$
in $Z'$. This means that summing (respecting the orientation) over
all the preimage points in the fiber $\smash{\hat{f}}^{-1}(q)$
yields exactly $2 \deg \smash{\tilde{f}}$. \end{proof}
Now we can prove the desired injectivity property of the degree map: \begin{proposition}
\label{DegreeMapInjective}
For any $\gamma \in \mathcal{L}_{\text{std}}$, the degree homomorphism
\begin{align*}
\deg\colon \pi_1(\mathcal{L}S^2,\gamma) \to \mathbb{Z}
\end{align*}
is injective. \end{proposition} \begin{proof}
We prove that the degree homomorphism has trivial kernel. By
path-connectedness of $\mathcal{L}S^2$ all the based fundamental
groups of $\mathcal{L}S^2$ are isomorphic. Hence it suffices to
prove the statement for one fixed loop $\gamma$. Therefore we reduce
the problem to a simpler case and take $\gamma$ to be the constant
loop at the base point $p_0$ in $E$.
Let $\alpha$ be a type I normalized map of the cylinder $Z$ into
$S^2$ with fixed point degree $\Bideg{0}{0}$ and $\deg \alpha = 0$. As before,
$\alpha$ can be regarded as curve in $\mathcal{L}S^2$ and $[\alpha]$
defines an element in the fundamental group
$\pi_1(\mathcal{L}S^2,\gamma)$. Let $\widetilde{Z} = Z/{\sim}$ be the
torus which results from the cylinder $Z$ by identifying the circles
$C_0$ and $C_1$ (see
p.\pageref{CylinderBoundaryCirclesIdentified}). Since, by
construction, $\restr{\alpha}{C_0} \equiv \restr{\alpha}{C_1} \equiv
\gamma$, $\alpha$ induces a map $\widetilde{Z} \to S^2$ which, by
lemma~\ref{MapExtensionDegreeEquality}, also has degree zero.
Denote the generator of the fundamental group of the torus
$\widetilde{Z}$ which corresponds to $C_0$ resp. $C_1$ by $C$.
Let $C'$ denote the other generator of the fundamental group such
that the intersection number of $C$ with $C'$ is $+1$. The
restriction of the induced map, now denoted $f\colon \widetilde{Z} \to S^2$, to $C$ is already
constant. Using the homotopy extension property together with the
simply-connectedness of $S^2$ we can assume that $f$ is also
constant along $C'$. Therefore we can collapse these two generators
and $f$ induces a map
$\widetilde{Z}/(C \cup C') \cong S^2 \to S^2$. By
remark~\ref{RemarkMapOnQuotientDegreeUnchanged} the map
$f\colon \widetilde{Z}/(C \cup C') \to S^2$ also has degree zero. By
Hopf's theorem, this map is null-homotopic (even as a map of pointed
spaces, as can be shown by another application of the homotopy
extension property). This corresponds to a null-homotopy of the
curve $\alpha$ with fixed basis curve $\gamma$, hence
$[\alpha] = 0$, which finishes the proof. \end{proof}
\paragraph{The Degree Triple}
In the following we combine the total degree and the fixed point degrees. For this we make the following \begin{definition}
\label{DefinitionTriple}
(Degree triple map) The \emph{degree triple map} is the map
\begin{align*}
\mathcal{T}\colon \mathcal{M}_G(X,S^2) &\to \mathbb{Z}^3\\
f &\mapsto \Triple{d_0}{d}{d_1},
\end{align*}
where $\Bideg{d_0}{d_1}$ is the fixed point degree of $f$ and $d$ is
the total degree of $f$. For a map $f\colon X \to S^2$ we define its
\emph{degree triple} (or simply \emph{triple}) to be
$\mathcal{T}(f)$. We call a given triple \emph{realizable} if it is
contained in the image $\Im(\mathcal{T})$. For a map $f\colon (Z,C)
\to (S^2,E)$ we define its \emph{degree triple} to be the degree
triple of the equivariant extension $\smash{\hat{f}}\colon X \to S^2$. \end{definition}
As an immediate consequence we obtain: \begin{remark}
\label{DegreeTripleInvariant}
The degree triple of a map is a $G$-homotopy invariant. \qed \end{remark} Thus, the degree triple map factors through $[X,S^2]_G$. In the following we analyze the image $\Im(\mathcal{T})$.
\begin{remark}
\label{RemarkTripleGlobalDegree}
Let $f$ be a $G$-map $X \to S^2$ with fixed point degrees
$\Bideg{d_0}{d_0}$. Then the total degree $\deg f$ must be even. In
other words, the degree triples $\Triple{d_0}{2k+1}{d_0}$ are not
contained in the image $\Im(\mathcal{T})$.\end{remark} \begin{proof}
Assume that the map $f$ is type I normalized. The restriction
$\restr{f}{Z}$ to the cylinder then satisfies the assumptions of
lemma~\ref{MapExtensionDegreeEquality}, hence we can conclude that
its equivariant extension $\smash{\hat{f}}$ has even degree. But the
equivariant extension of $\restr{f}{Z}$ is $f$ itself, hence $f$
must have even degree. \end{proof} In particular, the triple $\Triple{0}{1}{0}$ is not contained in $\Im(\mathcal{T})$. This gives rise to the $\mod 2$ condition \begin{align}
\label{DiscussionParityCondition}
d \equiv d_0 + d_1 \mod 2, \end{align} which was already mentioned in the outline and must hold for any realizable degree triple. The details will be explained in paragraph~\ref{ParagraphClassificationType1MainResult} (p.~\pageref{ParagraphClassificationType1MainResult}). In order to deduce the above ``parity condition'' (\ref{DiscussionParityCondition}) for general degree triples we introduce an algebraic structure on the set of realizable triples. This is the following binary operation for triples: \begin{definition}
Two triples $\Triple{d_0}{d}{d_1}$ and $\Triple{d_0'}{d'}{d_1'}$ are
called \emph{compatible} if $d_1 = d_0'$. Given two compatible
triples $\Triple{d_0}{d}{d_1}$ and $\Triple{d_1}{d'}{d_2}$, then we
define $\Triple{d_0}{d}{d_1} \bullet \Triple{d_1}{d'}{d_2}$ to be
the triple
\begin{align*}
\Triple{d_0}{d + d'}{d_2}.
\end{align*} \end{definition} We call this binary operation \emph{concatenation} of triples. Its usefulness is illustrated by the following remark: \begin{remark}
\label{ImageOfTripleMapClosedUnderConcatenation}
When the triples $\Triple{d_0}{d}{d_1}$ and $\Triple{d_1}{d'}{d_2}$
are in $\Im(\mathcal{T})$, then so is
\begin{align*}
\Triple{d_0}{d}{d_1} \bullet \Triple{d_1}{d'}{d_2}.
\end{align*} \end{remark} \begin{proof}
Assume that the triples $\Triple{d_0}{d}{d_1}$
resp. $\Triple{d_1}{d'}{d_2}$ are realized by the paths $\alpha$
resp. $\beta$ in $\mathscr{P}$ satisfying $\alpha(1) = \beta(0)$. By
definition,
\begin{align*}
\Triple{d_0}{d}{d_1} \bullet \Triple{d_1}{d'}{d_2} = \Triple{d_0}{d+d'}{d_2}\;.
\end{align*}
We need to show the existence of a path in the loop space
$\mathcal{L}S^2$ from $\alpha(0)$ to $\beta(1)$ with total degree
$d + d'$ of its associated equivariant extension. Recall that $d$
(resp. $d'$) is the degree of the equivariant extension
$\widehat{\alpha}$ (resp. $\widehat{\beta}$). The concatenation
  $\alpha \ast \beta$ is a path in loop space from $\alpha(0)$ to $\beta(1)$.
Its total degree is defined to be the degree of the equivariant
extension $\widehat{\alpha \ast \beta}$. By
lemma~\ref{DegreeCompatibleWithConcatenation} this is the same as
$\deg\widehat{\alpha} + \deg\widehat{\beta} = d + d'$. \end{proof}
In other words: the image $\Im(\mathcal{T})$ is closed under the concatenation operation of degree triples. In the following lemma we encapsulate several fundamental properties of the image $\Im(\mathcal{T})$. \begin{proposition}
\label{TripleBuildingBlocks}
Let $d_0, d_1$ be integers. Then:
\begin{enumerate}[(i)]
\item If the triple $\Triple{d_0}{d}{d_1}$ is in $\Im(\mathcal{T})$, then so is
$\Triple{d_0}{-d}{d_1}$.
\item For any fixed point degree $\Bideg{d_0}{d_1}$, the triple
$\Triple{d_0}{d_0 - d_1}{d_1}$ is in $\Im(\mathcal{T})$. In
particular, the triple $\Triple{1}{1}{0}$ is in
$\Im(\mathcal{T})$.
\item For any $k$, the triple $\Triple{0}{2k}{0}$ is in $\Im(\mathcal{T})$.
\item If $\Triple{d_0}{d}{d_1}$ is in $\Im(\mathcal{T})$, then so is
$\Triple{d_1}{d}{d_0}$.
\end{enumerate} \end{proposition} \begin{proof}
Regarding (i): Assume that $f\colon X \to S^2$ is a map with
$\mathcal{T}(f) = \Triple{d_0}{d}{d_1}$. Note that $T\colon S^2 \to
S^2$ is an orientation-reversing diffeomorphism of $S^2$ which keeps
the equator $E$ fixed. Therefore:
\begin{align*}
\mathcal{T}(T \circ f) = \Triple{d_0}{-d}{d_1}.
\end{align*}
In other words: the triple $\Triple{d_0}{-d}{d_1}$ is realizable.
Regarding (ii): We need to construct a map $X \to S^2$ with fixed
point degrees $\Bideg{d_0}{d_1}$ and total degree $d_0 - d_1$. For
this, let $D$ be the closed unit disk in the complex plane:
\begin{align*}
D = \{z \in \mathbb{C}\colon |z| \leq 1\}.
\end{align*}
  Let $\iota_D$ be the embedding of $D$ onto one of the two hemispheres:
  \begin{align}
    \label{IotaHemisphereEmbedding}
    \iota_D\colon D &\hookrightarrow S^2\\
r e^{i\varphi} &\mapsto
\begin{pmatrix}
r\cos \varphi \\
r\sin \varphi \\
\pm\sqrt{1-r^2}
\end{pmatrix}.
\end{align}
We pick the sign defining either the lower or the upper hemisphere
such that $\iota_D$ is orientation preserving. Now we define the
map
\begin{align*}
f\colon Z &\to D\\
(t, \varphi) &\mapsto \begin{cases}
(1-2t) e^{i d_0 \varphi} & \text{ for $0 \leq t \leq \frac{1}{2}$}\\
(2t-1) e^{i d_1 \varphi} & \text{ for $\frac{1}{2} \leq t \leq 1$}.
\end{cases}
\end{align*}
To ease the notation, set
\begin{align*}
Z_1 = \left(0,\frac{1}{2}\right) \times S^1 \;\text{ and }\; Z_2 &= \left(\frac{1}{2},1\right) \times S^1.
\end{align*}
Then we consider the composition map
\begin{align*}
F = \iota_D \circ f\colon Z \to S^2.
\end{align*}
The restriction $\restr{F}{C_j}$ to the boundary circles is of the
form:
\begin{align*}
\Restr{F}{C_j}\colon S^1 &\to E \subset S^2\\
\varphi &\mapsto
\begin{pmatrix}
\cos(d_j \varphi) \\
\sin(d_j \varphi) \\
0
\end{pmatrix}.
\end{align*}
Hence, by construction, $F$ has fixed point degrees
$\Bideg{d_0}{d_1}$ (compare with (\ref{DegreeOneMapOnEquator}),
p.~\pageref{DegreeOneMapOnEquator}). It remains to show that the
equivariant extension $\smash{\hat{F}}$ has total degree
$d_0 - d_1$. At least away from the boundary circles,
$\smash{\hat{F}}$ is a smooth map. Thus, let $q \in S^2$ be a
regular value of $\smash{\hat{F}}$, say, on the hemisphere
$\iota_D(D)$, but not one of the poles (they are not regular values
for $f$). The point $q$ corresponds to some point
$\smash{\tilde{q}} = \smash{\tilde{t}e^{i\tilde{\varphi}}}$ in the
closed unit disk $D$ with $\smash{\tilde{t}} \in (0,1)$. By
construction, the fiber $\smash{\hat{F}^{-1}}(q)$ will be the same as
the fiber $F^{-1}(\tilde{q})$, which contains exactly $d_0 + d_1$
points in the cylinder $Z$:
\begin{align*}
    q_{j,k} = \left(\frac{1+(-1)^{j+1}\tilde{t}}{2}, \frac{1}{d_j}\left(\tilde{\varphi} + 2k\pi\right)\right)\; \text{for $j=0,1$ and $k=0,1,\ldots,d_j-1$}.
  \end{align*}
  The points $q_{0,k}$, $k=0,\ldots,d_0-1$, are of the form
  \begin{align*}
    q_{0,k} = \left(\frac{1-\tilde{t}}{2}, \frac{1}{d_0}\left(\tilde{\varphi} + 2k\pi\right)\right)
  \end{align*}
  which means that they are contained in the cylinder half $Z_1$. The
  points $q_{1,k}$, $k=0,\ldots,d_1-1$, are of the form
  \begin{align*}
    q_{1,k} = \left(\frac{1+\tilde{t}}{2}, \frac{1}{d_1}\left(\tilde{\varphi} + 2k\pi\right)\right)
  \end{align*}
  and therefore they are contained in $Z_2$. A computation shows
  that the orientation signs at the points $q_{0,k} \in Z_1$ are the
  opposite of those at the points $q_{1,k} \in Z_2$. Therefore,
summing over the points in the generic fiber
$\smash{\hat{F}^{-1}}(q)$ yields $\pm (d_0 - d_1)$ as total degree
of $\smash{\hat{F}}$. Since $\iota_D$ has been chosen
orientation-preserving, the total degree is $d_0 - d_1$.
Regarding (iii): First note that by (i) and (ii) the triple
$\Triple{0}{1}{1}$ is in $\Im(\mathcal{T})$. But then the triple
$\Triple{0}{1}{1} \bullet \Triple{1}{1}{0} = \Triple{0}{2}{0}$ is
also in $\Im(\mathcal{T})$. Concatenation of this triple with itself
$k$ times yields the desired triple $\Triple{0}{2k}{0}$.
Regarding (iv): Let $\tau\colon X \to X$ be the translation inside
the torus, which swaps the boundary circles (e.\,g. $[z] \mapsto [z
+ \text{\textonehalf}i]$ in our model). Then, composing a map of type
$\Triple{d_0}{d}{d_1}$ with $\tau$ yields a map of type
$\Triple{d_1}{d}{d_0}$.
This finishes the proof. \end{proof}
\label{RemarkAboutNormalForms} The proof of proposition~\ref{TripleBuildingBlocks} (ii) is of particular importance -- it contains a constructive method for producing equivariant maps $X \to S^2$ with triple $\Triple{d_0}{d_0-d_1}{d_1}$. These triples can be regarded as basic building blocks for equivariant maps $X \to S^2$, since all other triples can be built from triples of this form by concatenation.
\begin{definition}
We call the maps constructed in proposition~\ref{TripleBuildingBlocks}
(ii) \emph{maps in normal form} for the triple
$\Triple{d_0}{d_0-d_1}{d_1}$. \end{definition}
Note that the normal form map for a triple $\Triple{d_0}{d_1-d_0}{d_1}$ is by definition the normal form map for $\Triple{d_0}{d_0-d_1}{d_1}$ composed with $T\colon S^2 \to S^2$.
\paragraph{Main Result} \label{ParagraphClassificationType1MainResult}
In this paragraph we state and prove the main classification result for the type I involution. The statement is: \begin{theorem}
\label{Classification1}
The $G$-homotopy class of a map $f \in \mathcal{M}_G(X,S^2)$ is
uniquely determined by its degree triple $\mathcal{T}(f)$. The image
$\Im(\mathcal{T})$ of the degree triple map
$\mathcal{T}\colon \mathcal{M}_G(X,S^2) \to \mathbb{Z}^3$ consists of those
triples $\Triple{d_0}{d}{d_1}$ satisfying
\begin{align*}
d \equiv d_0 + d_1 \mod 2.
\end{align*} \end{theorem}
We need one last lemma in order to prove this theorem: \begin{lemma}
\label{HomotopyOnCylinder}
Let $f$ and $g$ be two type I normalized maps $(Z, C) \to (S^2,E)$
with the same triple $\Triple{d_0}{d}{d_1}$. Then $f$ and $g$ are
homotopic rel $C$. \end{lemma} \begin{proof}
Both maps can be regarded as curves $f,g\colon I \to
\mathcal{L}S^2$. Since they are assumed to be type I normalized,
they start at the same curve $\gamma_0 \in \mathcal{L}_{\text{std}}$
and end at the same curve $\gamma_1 \in
\mathcal{L}_{\text{std}}$. We need to prove the existence of a
homotopy between the curves $f$ and $g$ in $\mathcal{L}S^2$.
By lemma~\ref{LemmaCurvesNullHomotopic} it suffices to show that the
loop $g^{-1} \ast f$ based at $\gamma_0$ is null-homotopic. To
prove this we use the degree map restricted to the fundamental group
based at $\gamma_0$:
\begin{align*}
\deg\colon \pi_1(\mathcal{L}S^2,\gamma_0) &\to \mathbb{Z}\\
[\gamma] &\mapsto \deg \hat{\gamma}.
\end{align*}
From lemma~\ref{DegreeCompatibleWithConcatenation} it follows that
\begin{align*}
    \deg (g^{-1} \ast f) = \deg \hat{f} - \deg \hat{g} = d - d = 0.
\end{align*}
Because of remark~\ref{DegreeMapInvariantUnderHomotopies} this can
also be stated on the level of homotopy classes:
\begin{align*}
\deg [g^{-1} \ast f] = \deg [f] - \deg [g] = 0.
\end{align*}
Therefore, using the injectivity of the degree map
(proposition~\ref{DegreeMapInjective}) we can conclude that $[g^{-1}
  \ast f] = 0$ in $\pi_1(\mathcal{L}S^2,\gamma_0)$. In other
words:
\begin{align*}
g^{-1} \ast f \simeq c_{\gamma_0},
\end{align*}
where $c_{\gamma_0}$ denotes the constant curve in $\mathcal{L}S^2$ at
$\gamma_0$. Now, lemma~\ref{LemmaCurvesNullHomotopic} implies that
$f$ and $g$ are homotopic by means of a homotopy $h\colon I
\times I \to \mathcal{L}S^2$. This homotopy induces a homotopy
\begin{align*}
H\colon I \times (Z,C) &\to (S^2,E)\\
(t,(s,z)) &\mapsto h(t,s,z)
\end{align*}
between the maps $f$ and $g$. \end{proof}
Finally, we can prove theorem~\ref{Classification1}: \begin{proof}
It is clear by remark~\ref{DegreeTripleInvariant} that the degree
triple $\Triple{d_0}{d}{d_1}$ is a $G$-homotopy invariant. Now we
show that it is a \emph{complete} invariant in the sense that
\begin{align*}
\mathcal{T}(f) = \mathcal{T}(g) \;\Rightarrow\; f \simeq_G g,
\end{align*}
where $f \simeq_G g$ means that the maps $f$ and $g$ are
$G$-homotopic. For this, let $f$ and $g$ be two $G$-maps with the
same triple $\Triple{d_0}{d}{d_1}$.
By remark~\ref{BoundaryNormalization} we can assume that $f$ and $g$
are type~I normalized. Now we use remark~\ref{CylinderReduction} and
reduce the construction of a $G$-homotopy from $f$ to $g$ to the
construction of a homotopy from $\restr{f}{Z}$ to $\restr{g}{Z}$ rel
$C$. The assumptions of lemma~\ref{HomotopyOnCylinder} are
satisfied, hence this lemma provides us with a homotopy $H\colon I
\times (Z,C) \to (S^2,E)$ rel $C$. Such a homotopy can be
equivariantly extended to all of $X$, establishing a $G$-homotopy
between $f$ and $g$ as maps $X \to S^2$.
It remains to compute the image $\Im(\mathcal{T})$. To simplify the
notation, set
\begin{align*}
p =
\begin{cases}
0 & \;\text{if $d_0 + d_1$ is even}\\
1 & \;\text{if $d_0 + d_1$ is odd}.
\end{cases}
\end{align*}
First we show that for any $d$ of the form $2k + p$ the triple
$\Triple{d_0}{d}{d_1}$ is contained in the image $\Im(\mathcal{T})$,
then we show that the assumption of the triple $\Triple{d_0}{2k + 1
+ p}{d_1}$ being contained in $\Im(\mathcal{T})$ leads to a
contradiction. To ease the notation, let $\sigma_j$ be the sign of
$d_j$, i.\,e. $d_j = \sigma_j|d_j|$, for $j=0,1$. By definition of
$p$ we know that $2k + p - (d_0 + d_1)$ is an even number. Hence the
triple
\begin{align*}
t = \Triple{0}{2k+p-(d_0+d_1)}{0}
\end{align*}
is contained in $\Im(\mathcal{T})$ by
  proposition~\ref{TripleBuildingBlocks} (iii). We define the following
triples:
\begin{align*}
t_1 =& \Triple{\sigma_0}{\sigma_0}{0}\\
t_2 =&\Triple{2\sigma_0}{\sigma_0}{\sigma_0}\\
& \ldots\\
t_{|d_0|} =&\Triple{|d_0|\sigma_0}{\sigma_0}{|d_0|\sigma_0 - \sigma_0}\\
u_1 =& \Triple{0}{\sigma_1}{\sigma_1}\\
u_2 =& \Triple{\sigma_1}{\sigma_1}{2\sigma_1}\\
& \ldots\\
u_{|d_1|} =& \Triple{|d_1|\sigma_1 - \sigma_1}{\sigma_1}{|d_1|\sigma_1},
\end{align*}
which are contained in $\Im(\mathcal{T})$ by
  proposition~\ref{TripleBuildingBlocks} (i) and (ii). From this we can form the
triple
\begin{align*}
\tilde{t} &= t_{|d_0|} \bullet \ldots \bullet t_2 \bullet t_1 \bullet t \bullet u_1 \bullet u_2 \bullet \ldots \bullet u_{|d_1|}\\
&= \Triple{d_0}{2k+p}{d_1}
\end{align*}
Therefore, the triple $\tilde{t}$ is contained in
$\Im(\mathcal{T})$. On the other hand, assume that the triple $t =
\Triple{d_0}{2k + 1 + p}{d_1}$ is contained in $\Im(\mathcal{T})$
for some $k$. As above, we define the following realizable triples
\begin{align*}
    t_1 =& \Triple{0}{-\sigma_0}{\sigma_0}\\
    t_2 =&\Triple{\sigma_0}{-\sigma_0}{2\sigma_0}\\
    & \ldots\\
    t_{|d_0|} =&\Triple{|d_0|\sigma_0 - \sigma_0}{-\sigma_0}{|d_0|\sigma_0}\\
    u_1 =& \Triple{\sigma_1}{-\sigma_1}{0}\\
    u_2 =& \Triple{2\sigma_1}{-\sigma_1}{\sigma_1}\\
    & \ldots\\
    u_{|d_1|} =& \Triple{|d_1|\sigma_1}{-\sigma_1}{|d_1|\sigma_1 - \sigma_1}.
\end{align*}
This allows us to form the following triple
\begin{align*}
\tilde{t} &= t_1 \bullet t_2 \bullet \ldots \bullet t_{|d_0|} \bullet t \bullet u_{|d_1|} \bullet \ldots \bullet u_2 \bullet u_1 \\
&= \Triple{0}{2k + 1 + p - (d_0 + d_1)}{0},
\end{align*}
which then also has to be in $\Im(\mathcal{T})$. But, by definition,
$p - (d_0 + d_1)$ is an even number, hence $2k + 1 + p - (d_0 +
d_1)$ is odd, contrary to
remark~\ref{RemarkTripleGlobalDegree}. Hence, the assumption that
$\Triple{d_0}{2k+1+p}{d_1}$ is in the image $\Im(\mathcal{T})$
cannot hold. \end{proof}
A consequence of the above is that we can identify equivariant homotopy classes with their associated degree triples. At this point we underline that the proof of theorem~\ref{Classification1} and lemma~\ref{HomotopyOnCylinder} shows that when two type I normalized $G$-maps $f, g\colon X \to S^2$ have the same degree triple, then they are not only equivariantly homotopic, but they are equivariantly homotopic rel $C = C_0 \cup C_1$. This will be of particular importance in the next section.
\subsubsection{Type II}
In this section, the torus $X$ -- still defined in terms of the standard square lattice -- is equipped with the type II involution \begin{align*}
T\colon X &\to X\\
[z] &\mapsto [i\overline{z}]. \end{align*} As before, we can regard the torus $X$ as a $G$-CW complex. The $G$-CW structure we use for the type II involution is depicted in figure~\ref{MSInvolutionFigureCW}. \label{ParagraphGeometryOfClass2} The first major difference when compared with type I is the fact that the fixed point set in the torus consists of a single circle $C$, not two. In the universal cover $\mathbb{C}$ this circle can be described as the diagonal line $x + ix$. Furthermore, the complement of the circle $C$ in the torus is still connected. The choice of ``fundamental region'' in this case is not completely obvious. Indeed, we need a new definition -- the notion of a fundamental region in the sense of definition~\ref{DefinitionFundamentalRegion} is not suitable for the type II involution: \begin{definition}
\label{PseudofundamentalRegion}
A connected subset $R \subset X$ is called a \emph{pseudofundamental
region} for the $G$-action on $X$ if there exists a subset $R'
\subset \partial R$ in the boundary of $R$ such that $R\smallsetminus R'$
is a fundamental region. \end{definition}
The idea behind the proof of the classification for type II is to identify a pseudofundamental region $R \subset X$, then bring the maps into a certain normal form such that we can collapse certain parts of $\partial R$ and the maps push down to this quotient. In the quotient, the geometry of the $T$-action is easier to understand and the image of $R$ in the quotient will be a fundamental region for the induced $T$-action on the quotient.
\begin{figure}
\caption{Type II $G$-CW Setup of the Torus $X$.}
\label{MSInvolutionFigureCW}
\label{MSInvolutionFigureCWDelta}
\end{figure}
The pseudofundamental region we use for the type II $T$-action on $X$ is \begin{align*}
  R = \left\{[x+iy]\colon 0 \leq x \leq 1 \text{ and } 0 \leq y \leq x\right\}. \end{align*}
See figure~\ref{MSInvolutionFigureCWDelta} for a depiction of this fundamental region. Note that $T$-equivariance of a map requires the values of this map along the circles $A_1$ and $A_2$ to be compatible, because $T$ maps $A_1$ to $A_2$ and vice versa. To make this precise (and to ease the notation) we make the following definition. \begin{definition}
\label{Class2CompatibilityCondition}
For $f\colon X \to S^2$ resp. $f\colon R \to S^2$ we let
$f_{A_j}\colon I \to S^2$ ($j=1,2$) be the map
\begin{align*}
f_{A_j}\colon t \mapsto
\begin{cases}
f\left([t]\right) & \;\text{for $j=1$}\\
f\left([1 + it]\right) & \;\text{for $j=2$}.
\end{cases}
\end{align*}
For convenience we also set $A = A_1 \cup A_2$. \end{definition}
Then we can reformulate what it means for a map $f\colon X \to S^2$ to be equivariant: \begin{remark}
\label{RemarkClassIIEquivariance}
Let $F\colon X \to S^2$ be a $G$-map. Then its restriction $f =
\restr{F}{R}$ to $R$ satisfies
\begin{enumerate}[(i)]
\item $f(C) \subset E$ and
\item $f_{A_1} = T \circ f_{A_2}$.
\end{enumerate} \end{remark}
\begin{lemma}
\label{Class2EquivariantExtension}
  Maps $R \to S^2$ resp. homotopies $I \times R \to S^2$ with the
above two properties extend uniquely to equivariant maps $X \to S^2$
resp. equivariant homotopies $I \times X \to S^2$. \end{lemma} \begin{proof}
This is basically the same proof as for
lemma~\ref{Class1EquivariantExtension}. We let $R'$ be the opposite
pseudofundamental region, which has the intersection $A_1 \cup A_2
\cup C$ with $R$. Then we define the extension of the map on $R'$ to
be the $T$-conjugate of the map on $R$. The compatibility condition
along $A_1 \cup A_2$ (from
definition~\ref{Class2CompatibilityCondition}) guarantees that the
resulting extension is globally well-defined on $X$. \end{proof}
\paragraph{Homotopy Invariant}
In the type I classification we have identified degree \emph{triples} as the desired (complete) homotopy invariant; in the type II case we need something slightly different. In order to obtain a well-defined notion of fixed point degree we must choose an orientation on the fixed point circle $C \subset X$ (compare with p.~\pageref{CjS1Identification}). \label{CircleOrientationRemark} Although we assume for this paragraph that an orientation has already been chosen, we will wait until paragraph~\ref{RedcutionToClassI} (p.~\pageref{RedcutionToClassI}) before we fix this orientation on $C$.
\begin{definition}
\label{DefinitionDegreePair} (Degree pair map)
The \emph{degree pair map} is the map
\begin{align*}
\mathcal{P}\colon \mathcal{M}_G(X,S^2) &\to \mathbb{Z}^2\\
f &\mapsto \Pair{d_C}{d},
\end{align*}
where $d$ is the total degree of the map $f$ and $d_C$ is the
fixed point degree of $f$. For a map $f\colon X \to S^2$ we define its
\emph{degree pair} (or simply \emph{pair}) to be
$\mathcal{P}(f)$. We call a given pair \emph{realizable} if it is
contained in the image $\Im(\mathcal{P})$. \end{definition} We need to be careful to distinguish the \emph{degree pair} $\Pair{d_C}{d}$ of an equivariant map $X \to S^2$ (for the type II involution) from the \emph{fixed point degrees} $\Bideg{d_0}{d_1}$ of an equivariant map $X \to S^2$ (for the type I involution), but this should always be clear from the context. After having defined the degree pair, we immediately obtain: \begin{remark}
\label{DegreePairInvariant}
The degree pair is a $G$-homotopy invariant. \qed \end{remark}
\paragraph{Reduction to Type I} \label{RedcutionToClassI}
Although, at first glance, the type II involution appears to be quite different from the type I involution, it is nevertheless possible to use the results from the type I classification (section~\ref{SectionMapsToSphereClassI}).
\begin{definition}
\label{DefinitionClass2Normalized} (Type II normalization)
Let $f\colon X \to S^2$ be a $G$-map. We say $f$ is \emph{type II
normalized} if
\begin{enumerate}[(i)]
\item $f_{A_1} = f_{A_2} = c_{p_0}$, where
    $c_{p_0}\colon I \to S^2$ denotes the constant map whose image is
$\{p_0\} \subset E$ and
\item $f$ is normalized on the circle $C$ in the sense that, using
the identifications of $C$ and $E$ with $S^1$, the map
$\restr{f}{C}$ corresponds to $z \mapsto z^k$ for some
$k$.
\end{enumerate} \end{definition}
\begin{proposition}
\label{Class2Normalization}
Any $G$-map $f\colon X \to S^2$ is $G$-homotopic to a map $f'$ which
is type II normalized. \end{proposition}
\begin{proof}
The statement is proved in several steps. Consider $X$ with the
$G$-CW-decomposition as in figure~\ref{MSInvolutionFigureCW}
consisting of the six cells $e^0$, $e^1_{1,2,3}$, and
$e^2_{1,2}$. Restrict $f$ to the pseudo-fundamental region $R$. Then
$A = A_1 \cup A_2$ is a subcomplex of $R$. We now successively apply
two homotopies to $f$. Observe that the maps $f_{A_j}$ ($j=1,2$) are
loops in $S^2$ at some point $p \in E$. Using the
simply-connectedness of $S^2$ there exists a null-homotopy
$\rho\colon I \times I \to S^2$ of $f_{A_1}$ to the constant curve
$c_p$. For the first homotopy, we define $H$ on $\{0\} \times R$ to
be the original map $f$ and on $I \times A_1$ resp. $I \times A_2$
we set
\begin{align*}
(t,s) \mapsto \rho(t,s) \;\;\text{resp.}\; (t,1+is) \mapsto T \circ \rho(t,s).
\end{align*}
By the homotopy extension property (HEP, see proposition~\ref{HEP})
this can be extended to a homotopy $H\colon I \times R \to S^2$ and
the resulting map $H(1,\cdot)$, which we denote by
$\smash{\tilde{f}}$, has the property that
$\smash{\tilde{f}}(A_j) = \{p\}$ ($j=1,2$).
Define the second homotopy as follows: On $\{0\} \times R$ we define
$H$ to be the map $\smash{\tilde{f}}$. On $I \times C$ we make a
homotopy $H_C\colon I \times C \to E$ to the normalized form $z
\mapsto z^k$. In particular, $e_0$ will then be mapped to $p_0 \in
E$. But when we change the map on $C$ we only need to adjust it
accordingly on $A$, hence we define the homotopy on $I \times A_1$
resp. $I \times A_2$ to be
\begin{align*}
(t,s) \mapsto H_C(t,0) \;\text{resp.}\; (t,1+is) \mapsto H_C(t,0).
\end{align*}
The map $H$ is then well-defined on $I \times e_0$ and extends by
the HEP to a homotopy $I \times R \to S^2$. By
lemma~\ref{Class2EquivariantExtension}, we can equivariantly extend
this homotopy to a homotopy $I \times X \to S^2$ and the resulting
map has the desired properties. \end{proof}
For the reduction to type I it will be very useful to replace the torus $X$ with the quotient $X/A$. This will be formalized next. With the above remark we can assume without loss of generality that any map in $\mathcal{M}_G(X,S^2)$ is type II normalized. Type II normalized maps $f\colon X \to S^2$ push down to the quotient $X/A \to S^2$. Let us denote the topological quotient $X \to X/A$ by $\pi_A$ and the image of $A$ under $\pi_A$ with $A/A$. In the next lemma we study the geometry of $X/A$. \begin{lemma}
\label{ClassIIQuotientSphereIdentification}
We state:
\begin{enumerate}[(i)]
\item The $T$-action on $X$ pushes down to the quotient.
\item The projection map $\pi_A\colon X \to X/A$ is equivariant.
\item The space $(X/A,T)$ can then be equivariantly identified with
$(S^2, T)$, where $T$ acts on $S^2$ as the standard reflection
along the equator.
\end{enumerate} \end{lemma}
\begin{proof}
Regarding (i): First we prove that the $T$-action on $X$ pushes down
to $X/A$: The $T$-action stabilizes $A \subset X$. Hence, $T$ is
compatible with the relation on $X$, which identifies the points in
$A$. By theorem~\ref{TopologyMapInducedOnQuotient} $T$ therefore
induces a map $X/A \to X/A$, which we, by abuse of notation, again
denote by $T$. Since $T\colon X \to X$ is an involutive
homeomorphism on $X$, the same applies to $T\colon X/A \to X/A$ (see
e.\,g. theorem~\ref{TopologyMapInducedOnQuotient}).
Regarding (ii): This is true by definition of the $T$-action on the
quotient: $T([x]) = [T(x)]$.
Regarding (iii): The space $X/A$ consists of two $2$-cells, glued
together along their boundary, which is stabilized by $T$. We can
define an equivariant homeomorphism to $S^2$ by mapping one of the
$2$-cells to e.\,g. the lower hemisphere of the sphere, mapping the
boundary of the $2$-cell to the equator. The map on the second cell
can then be defined by equivariant extension, thus mapping it to the
upper hemisphere. \end{proof}
Thus we can from now on identify $X/A$ with $S^2$ as topological manifolds and by this identification equip $X/A$ with the smooth structure from $S^2$. We denote the point in $S^2$ corresponding to $A/A \in X/A$ by $P_0$. The projection map $\pi_A$ induces an equivariant map \begin{align}
\label{TypeIIDiscussionS2}
\pi_{S^2}\colon X \to S^2, \end{align} whose restriction $X\smallsetminus A \to S^2\smallsetminus\{P_0\}$ is smooth. The degree of $\pi_{S^2}$ is either $+1$ or $-1$ but we can assume that it is $+1$ (if it were $-1$, we could compose with a reflection along the equator in $S^2$). We must be careful to not confuse the 2-sphere introduced in (\ref{TypeIIDiscussionS2}) with the 2-sphere which has been identified as a deformation retract of $\mathcal{H}_{(1,1)}$. In particular their equators must be considered as distinct objects. Thus we denote the equator of the 2-sphere which appears as a quotient of the torus $X$ by $E'$.
Now, let $(X',T)$ be a type I torus, that is, the torus $\mathbb{C}/\Lambda$ equipped with the type I involution $T$ and denote the north resp. the south pole of $S^2$ by $O_\pm$. Observe that also the sets $X'\smallsetminus C_0$ and $S^2\smallsetminus\{O_\pm\}$ are $G$-spaces. The former is an open cylinder and the latter is a doubly punctured 2-sphere. We note: \begin{remark}
\label{Class1TorusWithSphereIdentification}
There exists a smooth and orientation preserving $G$-diffeomorphism
\begin{align*}
\Psi\colon X'\smallsetminus C_0 \xrightarrow{\;\sim\;} S^2\smallsetminus \{O_\pm\}.
\end{align*} \end{remark}
\begin{proof}
Regard $S^2$ as being embedded as the unit-sphere in $\mathbb{R}^3$. Then we
define the map $\Psi$ as follows:
\begin{align*}
\Psi\colon X'\smallsetminus C_0 &\xrightarrow{\;\sim\;} S^2\smallsetminus \{O_\pm\}\\
[x+iy] &\mapsto
\begin{pmatrix}
\sqrt{1-(2y-1)^2} \cos(2\pi x) \\
\sqrt{1-(2y-1)^2} \sin(2\pi x) \\
2y-1
\end{pmatrix}
\end{align*}
This defines an equivariant and orientation preserving
diffeomorphism. \end{proof}
On p.~\pageref{CircleOrientationRemark} we mentioned that we still need to decide for an orientation on the circle $C \subset X$, which is equivalent to choosing an identification of $C$ with $S^1$ -- we will do this now. Observe that the restriction of the above map $\Psi$ to $C_1 \subset X'$ identifies the circle $C_1$ with the equator $E'$ in $S^2$, which the map $\pi_{S^2}$ identifies with $C \subset X$. We define the identifications of $C \subset X$ and $E' \subset S^2$ with $S^1$ in terms of this identification with $C_1 \subset X'$. Analogously to definition~\ref{DefinitionClass2Normalized} we make the following \begin{definition}
We call a $G$-map $f\colon S^2 \to S^2$ \emph{type II
normalized}\footnote{as a map defined on the 2-sphere, not on the
type II torus $X$}, if the north pole $O_+$ and the south pole
$O_-$ both are mapped to $p_0 \in E \subset S^2$ and the map is
normalized on the equator $E'$ according to the above identification
of $E'$ with $S^1$. \end{definition}
\begin{remark}
Any $G$-map $S^2 \to S^2$ is equivariantly homotopic to a type II
normalized $G$-map. \end{remark}
\begin{proof}
This follows with the equivariant homotopy extension
(corollary~\ref{G-HEP}): There exists a $G$-CW decomposition of
$S^2$ such that the set $B = \{O_\pm\} \cup E'$ is a $G$-CW
subcomplex of $S^2$. \end{proof}
To summarize the above discussion: Above we have constructed a map from the set of type II normalized $G$-maps $X \to S^2$ to the set of type II normalized $G$-maps $S^2 \to S^2$. We denote this map by $\psi$ and state: \begin{proposition}
\label{Class2CorrespondenceXS2}
The map $\psi$ is a bijection. Furthermore, the degree pairs of $f$
and $\psi(f)$ agree. \end{proposition}
\begin{proof}
Let us begin by reviewing how the map $\psi$ is defined: If $f\colon
X \to S^2$ is a type II normalized map, it is by definition constant
  along $A$. Hence it pushes down to a $G$-map $X/A \to
  S^2$. Now $X/A$ can be equivariantly identified with $S^2$, hence,
  by means of this identification, $f$ induces a $G$-map $f'\colon S^2 \to
S^2$. By construction these two maps are related by the following
diagram:
\[
\begin{xy}
\xymatrix{
X \ar[d]_{\pi_{S^2}} \ar[dr]^f \\
S^2 \ar[r]_{f'} & S^2
}
\end{xy}
\]
Injectivity of this map is clear: If $\psi(f) = \psi(g)$, then also
\begin{align*}
f = \psi(f) \circ \pi_{S^2} = \psi(g) \circ \pi_{S^2} = g.
\end{align*}
For the surjectivity, let $f'\colon S^2 \to S^2$ be a type II
normalized $G$-map, then $f = f' \circ \pi_{S^2}\colon X \to S^2$ is a
type II normalized $G$-map such that $\psi(f) = f'$.
Since $\pi_{S^2}$ has degree $+1$ and using the functorial property
of homology we see that the total degrees of $f$ and $f'$ must
agree. Regarding the fixed point degree: $\pi_{S^2}$ homeomorphically
maps the circle $C \subset X$ to the equator $E \subset S^2$. Now
the only possibility would be that the fixed point degrees differ by a
sign. But, by definition, the identifications of $C \subset X$ and
$E \subset S^2$ with $S^1$ differ only by $\pi_{S^2}$ and above the
orientation of $C \subset X$ has been chosen such that
$\restr{\pi_{S^2}}{C}$ preserves the orientation. \end{proof}
This correspondence between type II normalized $G$-maps $X \to S^2$ and type II normalized $G$-maps $S^2 \to S^2$ also implies a statement on the level of equivariant homotopy: \begin{remark}
\label{Class2HomotopiesLiftFromSphere}
Given two type II normalized $G$-maps $f,g\colon X \to S^2$ and an
equivariant homotopy $H\colon S^2 \to S^2$ between the induced maps
$f',g'\colon S^2 \to S^2$, then $f$ and $g$ are also equivariantly
homotopic. \end{remark}
\begin{proof}
Compose the homotopy with the map $\mathrm{id} \times \pi_{S^2}$. \end{proof}
We now describe a basic correspondence between type II normalized $G$-maps $S^2 \to S^2$ and type I normalized $G$-maps $X' \to S^2$. Assume $f$ is a type II normalized $G$-map $S^2 \to S^2$. This induces a map \begin{align*}
\phi(f)\colon X' &\to S^2\\
p &\mapsto
\begin{cases}
f \circ \Psi (p) & \;\text{if $p \not\in C_0$}\\
p_0 & \;\text{if $p \in C_0$}
\end{cases} \end{align*} Since $f$ is type II normalized, i.\,e. $f(O_\pm) = p_0$, the map $\phi(f)$ is continuous everywhere and type I normalized as a map $X' \to S^2$. By construction $f$ and $\phi(f)$ are related by \begin{align}
\label{PsiCorrespondence}
\Restr{f}{S^2\smallsetminus\{O_\pm\}} \circ \Psi = \Restr{\phi(f)}{X'\smallsetminus
C_0}, \end{align} where $\Psi$ is the orientation preserving diffeomorphism from remark~\ref{Class1TorusWithSphereIdentification}. The reduction to type I is now summarized in the following two statements: \begin{proposition}
\label{LemmaClass2ReductionToClass1}
  The above map $\phi$ defines a bijection between type II normalized
$G$-maps $S^2 \to S^2$ and type I normalized $G$-maps $X' \to S^2$ whose
fixed point degree along the circle $C_0$ is zero. For a type II
normalized $G$-map $f$, the degree pair of $f$ is $\Pair{d_C}{d}$
iff the degree triple of $\phi(f)$ is $\Triple{0}{d}{d_C}$. \end{proposition}
\begin{proof}
Regarding the injectivity: Let $f$ and $g$ be two type II normalized
$G$-maps $S^2 \to S^2$ such that $\phi(f) = \phi(g)$. By
(\ref{PsiCorrespondence}) it follows that
$\restr{f}{S^2\smallsetminus\{O_\pm\}} =
\restr{g}{S^2\smallsetminus\{O_\pm\}}$. Since $f$ and $g$ are assumed to
be type II normalized, hence in particular continuous, this implies
that $f = g$. Regarding the surjectivity: Let $f'$ be a type I
normalized $G$-map $X' \to S^2$ with fixed point degree
$\Bideg{0}{d_1}$. Then (\ref{PsiCorrespondence}) defines a map
$f\colon S^2\smallsetminus\{O_\pm\} \to S^2$ which, by defining $f(O_\pm) =
p_0$, uniquely extends to a type II normalized $G$-map $S^2 \to
S^2$. By construction we obtain $\phi(f) = f'$.
Regarding the correspondence between the degree invariants: Assume
we are given a $G$-map $f\colon S^2 \to S^2$ with degree pair
$\Pair{d_C}{d}$. By corollary~\ref{SmoothApproximation2}, $f$ can be
equivariantly smoothed. Note that the map $\Psi$ in
(\ref{PsiCorrespondence}) is an orientation preserving
diffeomorphism. This implies that $\phi(f)$ is smooth, at least away
from the boundary circle $C_0$. Using
theorem~\ref{SmoothApproximation1} we can smooth $\phi(f)$ near the
circle $C_0$ while keeping it unchanged away from $C_0$. Now, let
$q$ be a regular value of $f$ such that its preimage points
$p_1,\ldots,p_m$ are contained in $S^2\smallsetminus\{O_\pm\}$. Then $q$
    is also a regular value of $\phi(f)$ and its preimage points are
$\Psi^{-1}(p_j)$, $j=1,\ldots,m$. Since $\Psi$ preserves the
orientation, $\deg f = \deg \phi(f)$. By definition of the
identification of $E \subset S^2$ with $S^1$ (through $\Psi$), the
fixed point degree $d_C$ remains unchanged as well. Finally, by
construction, the fixed point degree of $\phi(f)$ along $C_0$ is zero. \end{proof}
Now we can show that equivariant homotopies for maps $X' \to S^2$ induce equivariant homotopies for maps $S^2 \to S^2$: \begin{proposition}
\label{LemmaClass2ReductionToClass1Homotopy}
Given two type II normalized $G$-maps $f,g\colon S^2 \to S^2$ with
the same degree pair, then they are $G$-homotopic rel $\{O_\pm\}$. \end{proposition}
\begin{proof}
Without loss of generality we can assume that $f$ and $g$ are both
type II normalized $G$-maps $S^2 \to S^2$ with the same degree pair.
Then, by proposition~\ref{LemmaClass2ReductionToClass1}, the induced
maps $\phi(f),\phi(g)\colon X' \to S^2$ share the same degree
triple. Hence there exists a $G$-homotopy $H'$ from $\phi(f)$ to
$\phi(g)$ which is in particular relative with respect to the
boundary circle $C_0$. In other words, throughout the homotopy, $H'$
maps the circle
    $C_0$ to $p_0 \in E \subset S^2$. This homotopy then induces an
equivariant homotopy $H$ between $f$ and $g$:
\begin{align*}
H\colon I \times S^2 &\to S^2\\
    (t,p) &\mapsto
\begin{cases}
H'(t,\Psi^{-1}(p)) & \;\text{if $p \not\in \{O_\pm\}$}\\
p_0 & \;\text{if $p \in \{O_\pm\}$}
\end{cases}
\end{align*}
Hence, $f$ and $g$ are equivariantly homotopic rel $\{O_\pm\}$. \end{proof}
\paragraph{Main Result}
The main result in this section is the following \begin{theorem}
\label{Classification2}
The $G$-homotopy class of a map $f \in \mathcal{M}_G(X,S^2)$ is
uniquely determined by its degree pair $\mathcal{P}(f)$. The image
$\Im(\mathcal{P})$ of the degree pair map
$\mathcal{P}\colon \mathcal{M}_G(X,S^2) \to \mathbb{Z}^2$ consists of those
pairs $\Pair{d_C}{d}$ satisfying
\begin{align*}
d \equiv d_C \mod 2.
\end{align*} \end{theorem}
\begin{proof}
First we prove that the degree pairs $\Pair{d_C}{d}$ are a complete
invariant of equivariant homotopy for maps $X \to S^2$. For this,
let $f$ and $g$ be two $G$-maps $X \to S^2$ with the same degree
pair $\Pair{d_C}{d}$. Without loss of generality we can assume that
$f$ and $g$ are type II normalized
(Proposition~\ref{Class2Normalization}). In this case, by
proposition~\ref{Class2CorrespondenceXS2}, $f$ and $g$ push down to
type II normalized maps $f',g'\colon S^2 \to S^2$ with the same
degree pair. With
proposition~\ref{LemmaClass2ReductionToClass1Homotopy} it now
follows that $f'$ and $g'$ are equivariantly homotopic. Then
remark~\ref{Class2HomotopiesLiftFromSphere} establishes the
existence of an equivariant homotopy from $f$ to $g$.
    Regarding the image $\Im(\mathcal{P})$: Let $d_C$ be an
integer. We now have to compute all possible integers $d$ such that
    the degree pair $\Pair{d_C}{d}$ is contained in the image
    $\Im(\mathcal{P})$. Let us first deduce a necessary condition for
the given degree pair to be in the image $\Im(\mathcal{P})$. For
this, let $f\colon X \to S^2$ be a $G$-map with the aforementioned
degree pair. By proposition~\ref{Class2CorrespondenceXS2} we obtain an
induced $G$-map $f'\colon S^2 \to S^2$ with the same degree pair. Now
proposition~\ref{LemmaClass2ReductionToClass1} implies the existence
of an induced map $\phi(f)\colon X' \to S^2$ with the degree triple
$\Triple{0}{d}{d_C}$. By theorem~\ref{Classification1}, we must have
$d \equiv d_C \mod 2$. This shows that the condition is necessary
for a pair to be contained in $\Im(\mathcal{P})$; what remains to
show is that this condition is also sufficient. Let $f''\colon X'
\to S^2$ be a $G$-map with degree triple $\Triple{0}{d}{d_C}$. By
proposition~\ref{LemmaClass2ReductionToClass1} there exists a
$G$-map $f'\colon S^2 \to S^2$ with the degree pair $\Pair{d_C}{d}$
and by proposition~\ref{Class2CorrespondenceXS2} this induces a
$G$-map $f\colon X \to S^2$ with the same degree pair. This proves the
statement. \end{proof}
\subsection{Classification of Maps to $\mathcal{H}_2^*$} \label{SectionHamiltonianClassificationRank2}
Note that so far the degree triple map (resp. the degree pair map) has only been defined on $\mathcal{M}_G(X,S^2)$. But since $S^2$ has been shown to be an equivariant strong deformation retract of $\mathcal{H}_{(1,1)}$, we can also define the degree triple map (resp. degree pair map) on $\mathcal{M}_G(X,\mathcal{H}_{(1,1)})$. In particular this allows us to speak of degree triples (resp. of degree pairs) of $G$-maps $X \to \mathcal{H}_{(1,1)}$. Now we state and prove the main result of this section:
\HamiltonianClassificationRankTwo \begin{proof}
\label{HamiltonianClassificationRankTwoProof}
The fact that the cases $\text{sig}\, = (2,0)$ and $\text{sig}\, = (0,2)$ constitute
$G$-homotopy classes on their own has already been shown at the
beginning of this chapter
(remark~\ref{Rank2DefiniteComponentsContractible}). Regarding the
case $\text{sig}\, = (1,1)$: By remark~\ref{Rank2MixedSignatureReduction},
$\mathcal{H}_{(1,1)}$ has $S^2$ as equivariant, strong deformation
retract. Therefore,
\begin{align*}
[X,\mathcal{H}_{(1,1)}]_G \cong [X,S^2]_G.
\end{align*}
Then theorem~\ref{Classification1} (for the type I involution) and
theorem~\ref{Classification2} (for the type II involution) complete
the proof. \end{proof}
\subsection{An Example from Complex Analysis}
The Weierstrass $\wp$-function is a meromorphic and doubly-periodic function on the complex plane associated to a lattice $\Lambda$. It can be defined as follows: \begin{align*}
\wp(z) = \frac{1}{z^2} + \sum_{\lambda \in \Lambda\smallsetminus\{0\}} \left(\frac{1}{(z-\lambda)^2} - \frac{1}{\lambda^2}\right). \end{align*} This function can be regarded as a holomorphic map to $\mathbb{P}_1$. Since the lattice $\Lambda$ is stable under complex conjugation, we have the identity \begin{align*}
    \wp(\overline{z}) = \overline{\wp(z)}. \end{align*} By the orientation-preserving identification $\mathbb{P}_1 \cong S^2$ (see p.~\pageref{P1S2OrientationDiscussion}) we can regard the map $\wp$ as an equivariant map $X \to S^2$. Since its total degree as a map to $\mathbb{P}_1$ is two, its degree as a map to $S^2$ is also two. Analogously, the derivative $\wp'$ defines an equivariant map $X \to \mathbb{P}_1 \cong S^2$ of degree three. Furthermore we state: \begin{remark}
\label{BidegreeOfWP}
The Weierstrass $\wp$-function (resp. $\wp'$), regarded as a map
\begin{align*}
X \to \mathbb{P}_1 \cong S^2 \hookrightarrow \mathcal{H}_{(1,1)}
\end{align*}
has fixed point degrees $\Bideg{0}{0}$ (resp. $\Bideg{1}{0}$). \end{remark} \begin{proof}
    Regarding $\wp$: The restriction $\restr{\wp}{C_0}$ to the boundary
circle $C_0$ is not surjective to the compactified real line
$\smash{\widehat{\mathbb{R}}}$, since in the square lattice case, the only zero of
$\wp(z)$ is at the point $z=\text{\textonehalf}(1+i)$ (see
e.\,g. \cite{EichlerZagier}). Similarly, the restriction of $\wp$
to the boundary circle is not surjective to $\mathbb{RP}_1$, because the
only pole of $\wp$ is at $z = 0$ (of order 2). This proves that the
fixed point degrees are both zero.
Regarding $\wp'$: Since $\wp'(z)$ has its only pole at $z=0$, its
restriction to the circle $C_1 = I + \text{\textonehalf}i$ cannot be
surjective to $\smash{\widehat{\mathbb{R}}}$, therefore its fixed point
degree $d_1$ must be zero. But its fixed point degree $d_0$ is
    $\pm 1$: It is known that the only pole of $\wp'(z)$ is at $z=0$
and its only zero in $C_0$ is at $z=\text{\textonehalf}$. A
computation shows that $\wp'$ is negative along
$(0,\text{\textonehalf})$. In other words, along the curve segment
$(0,\text{\textonehalf})$, the image under $\wp'$ moves from
$\infty$ to the negative real numbers and finally to $0$. By the
identity
\begin{align*}
\wp'(-z) = -\wp'(z),
\end{align*}
    it follows that on the curve segment $(\text{\textonehalf},1)$ the
image under $\wp'$ moves from $0$ to the positive real numbers until
it finally reaches $\infty$. By definition of the orientation on
$\smash{\widehat{\mathbb{R}}}$ (see the discussion on
p.~\ref{P1S2OrientationDiscussion}), this is a loop of degree $+1$. \end{proof}
For the type II involution we consider the scaled Weierstrass functions \begin{align*}
&F = i\wp\\
\text{and}\; &G = e^{i\frac{3\pi}{4}} \wp'. \end{align*} As above, they can be considered as equivariant maps $X \to S^2$. \begin{remark}
The fixed point degree of $F$ is $0$ and that of $G$ is $1$. \end{remark} \begin{proof}
Regarding $F$: The map $F(z)$ has its only pole at $z=0$ and its
only zero at $\text{\textonehalf}(1+i)$. Thus, when $z$ linearly
moves from the origin to the point $\text{\textonehalf}(1+i)$, then
its image under $F$ defines a curve from $\infty$ to $0$. Since $F$
is an even function it follows that on the second segment of the
diagonal circle, $F$ reverses the previous curve, going back from
$0$ to $\infty$. Thus the fixed point degree for $F$ is $0$.
Regarding $G$: Along the curve segment from $0$ to
$\text{\textonehalf}(1+i)$ it defines a curve from $\infty$ to
$0$. A computation shows that this curve is along the negative real
numbers. Since $G$ is not an odd map, its restriction to the segment
from $\text{\textonehalf}(1+i)$ to $1+i$ defines a curve from $0$ to
$\infty$, but with the opposite sign, i.\,e. along the positive real
numbers. This defines a degree $+1$ loop. \end{proof} To summarize the above: \begin{remark}
With respect to the identification $\mathbb{P}_1 \cong S^2$ described on
p.~\ref{P1S2OrientationDiscussion}, we have:
\begin{align*}
&\mathcal{T}(\wp) = \Triple{0}{2}{0}\\
&\mathcal{T}(\wp') = \Triple{1}{3}{0}\\
&\mathcal{P}(i\wp) = \Pair{0}{2}\\
&\mathcal{P}(e^{i\frac{3\pi}{4}} \wp') = \Pair{1}{3}.
\end{align*} \end{remark}
By choosing a different equivariant identification $\mathbb{P}_1 \cong S^2$, we might introduce signs for the total degrees or for the fixed point degrees or for both. For instance, we can always compose the $G$-diffeomorphism $\mathbb{P}_1 \to S^2$ with reflections on $S^2$. Thus, in some sense, the numbers in the above remark are only well-defined up to sign.
\section{Maps to $\mathcal{H}_n$}
In this chapter we generalize the results of the previous section to arbitrary $n > 2$. For this we begin by noting that the unitary group $U(n)$ acts on $\mathcal{H}_{(p,q)}$ by conjugation. Since every matrix in $\mathcal{H}_{(p,q)}$ can be diagonalized by unitary matrices, the set of $U(n)$-orbits in $\mathcal{H}_{(p,q)}$ is parametrized by the set of (unordered) real eigenvalues $\lambda_1^+, \ldots, \lambda_p^+, \lambda_1^-,\ldots, \lambda^-_q$. The involution $T$ stabilizes the components $\mathcal{H}_{(p,q)}$, since the matrices $H$ and $T(H)$ clearly have the same spectrum. The $T$-action is also compatible with the $U(n)$-orbit structure of $\mathcal{H}_{(p,q)}$, as shown in the next remark: \begin{remark}
\label{RemarkRealStructureRestrictsToEachOrbit}
The $T$-action on $\mathcal{H}_n$ stabilizes each $U(n)$-orbit in
each $\mathcal{H}_{(p,q)}$. \end{remark} \begin{proof}
Let $D = \text{Diag}\,(\lambda_1,\ldots,\lambda_n)$ be a diagonal matrix in
$\mathcal{H}_{(p,q)}$. In particular all $\lambda_j$ are non-zero
real numbers. Let $\mathcal{O}$ be the $U(n)$-orbit of $D$. We have
to show that $\mathcal{O}$ is $T$-stable. For this, let $M$ be a
matrix in $\mathcal{O}$. By definition it is of the form $M = UDU^*$
for some $U \in U(n)$. Then we have
\begin{align*}
T(UDU^*) = \overline{UDU^*} = \overline{U}D\overline{U}^*.
\end{align*}
But since $U \in U(n)$ implies $\overline{U} \in U(n)$, we can
conclude that $T(UDU^*)$ is again contained in the orbit
$\mathcal{O}$. \end{proof}
In each component $\mathcal{H}_{(p,q)}$ there is a $U(n)$-orbit which is particularly convenient to work with: \begin{remark}
\label{MinimalOrbitInComponent}
The $U(n)$-orbit of the block diagonal matrix
\begin{align*}
\I{p}{q} = \begin{pmatrix}
\I{p} & 0 \\
0 & -\I{q}
\end{pmatrix}
\end{align*}
is diffeomorphic to the Grassmannian $\text{Gr}_p(\mathbb{C}^n)$. \end{remark}
\begin{proof}
The $U(n)$-isotropy of $\I{p}{q}$ is $U(p) \times U(q)$. Hence, for
the $U(n)$-orbit we have the following smooth identification:
\begin{equation*}
U(n).\I{p}{q} \cong \frac{U(n)}{U(p) \times U(q)} \cong \text{Gr}_p(\mathbb{C}^n)\qedhere
\end{equation*} \end{proof}
In order to reduce the classification of maps to $\mathcal{H}_{(p,q)}$ to the classification of maps to this Grassmann manifold, it is important to understand the $T$-action on the latter. For this we make the following remark: \begin{remark}
\label{InducedActionOnGrassmannian}
The induced $T$-action on $\text{Gr}_p(\mathbb{C}^n)$ is given by
\begin{align*}
V \mapsto \overline{V},
\end{align*}
for each $p$-dimensional subvector space $V \subset \mathbb{C}^n$. \end{remark}
\begin{proof}
As a first step we compute the induced $T$-action under the
identification
\begin{align*}
\frac{U(n)}{U(p) \times U(q)} &\xrightarrow{\sim} U(\I{p}{q}) \subset \mathcal{H}_{(p,q)}\\
[U] &\mapsto U\I{p}{q}U^*.
\end{align*}
Now, let $[U]$ be a point in the quotient. The above isomorphism
sends $[U]$ to $U \I{p}{q} U^*$. The $T$-action on $\mathcal{H}_n$
maps this to
$\smash{\overline{U \I{p}{q} U^*} = \overline{U} \I{p}{q}
\overline{U}^*}$,
which under the above isomorphism, corresponds to the point
$\smash{\left[\overline{U}\right]}$ in the quotient. Thus, the
induced $T$-action on the quotient is via
$\smash{T\left([U]\right) = \left[\overline{U}\right]}$. For the
next step we need to use the isomorphism
\begin{align*}
\frac{U(n)}{U(p) \times U(q)} &\xrightarrow{\;\sim\;} \text{Gr}_p(\mathbb{C}^n)\\
[U] &\mapsto U(V_0),
\end{align*}
where $V_0$ denotes a fixed base point in the Grassmannian,
e.\,g. the subvector space spanned by $e_1,\ldots,e_p$. Now, let $V
\in \text{Gr}_p(\mathbb{C}^n)$ be a subvector space of $\mathbb{C}^n$
generated by the vectors $v_1,\ldots,v_p$. This subspace uniquely
corresponds to a coset $[U]$ such that $U(V_0) = V$. Recalling that
$V_0$ is by definition generated by the standard basis
    $e_1,\ldots,e_p$, we see that the first $p$ columns
$u_1,\ldots,u_p$ of $U$ constitute a unitary basis of the
subvector space $V$. The $T$-action on the quotient maps $[U]$ to
$\smash{\left[\overline{U}\right]}$, which then corresponds to the
subvector space $\smash{\overline{U}}(V_0)$. This subvector space can be
described in terms of the basis
\begin{align*}
\overline{U}(e_1),\ldots,\overline{U}(e_p),
\end{align*}
which is just $\overline{u_1},\ldots,\overline{u_p}$. Therefore,
the induced $T$-action on $\text{Gr}_p(\mathbb{C}^n)$ is
\begin{align*}
V \mapsto \overline{V},
\end{align*}
which finishes the proof. \end{proof}
As a special case we also note: \begin{remark}
\label{InducedActionOnProjectiveSpace}
The induced action on $\mathbb{P}_n = \text{Gr}_1(\mathbb{C}^{n+1})$ in terms
of homogeneous coordinates is given by
\begin{align*}
[z_0:\ldots:z_n] \mapsto \left[\overline{z_0}:\ldots:\overline{z_n}\right].
\end{align*} \end{remark}
\begin{proof}
Follows from remark~\ref{InducedActionOnGrassmannian} together with
the fact that the homogeneous coordinates for a given line $L =
\mathbb{C}(\ell_1,\ldots,\ell_{n+1})$ in $\mathbb{C}^{n+1}$ are
$[\ell_1:\ldots:\ell_{n+1}]$. \end{proof}
\begin{remark}
Regard the Grassmannian $\text{Gr}_k(\mathbb{C}^n)$ as being equipped with the
standard real structure given by complex conjugation. If $T$ is a
real structure on the $n$-dimensional vectorspace $W$, then there is
a biholomorphic $G$-map
\begin{align*}
\text{Gr}_k(W) \xrightarrow{\;\sim\;} \text{Gr}_k(\mathbb{C}^n).
\end{align*} \end{remark}
\begin{proof}
The real structure $T$ on $W$ induces a decomposition of $W$ as $W =
W_\mathbb{R} \oplus iW_\mathbb{R}$, where $W_\mathbb{R}$ is an $n$-dimensional, real
subvector space of $W$. The vectorspace $W$ can be identified with
$\mathbb{C}^n$ by sending an orthonormal basis $w_1,\ldots,w_n$ of $W_\mathbb{R}$ to
the standard basis $e_1,\ldots,e_n$. This is equivariant by
construction and induces an equivariant biholomorphism $\text{Gr}_k(W) \to
\text{Gr}_k(\mathbb{C}^n)$. \end{proof}
Let us now look at the topology of the two connected components $\mathcal{H}_{(n,0)}$ and $\mathcal{H}_{(0,n)}$ of $\mathcal{H}_n^*$: \begin{remark}
\label{RemarkDefiniteComponentsRetractable}
The component $\mathcal{H}_{(n,0)}$ (resp. $\mathcal{H}_{(0,n)}$)
has $\{\I{n}\}$ (resp. $\{-\I{n}\}$) as a strong
equivariant deformation retract. \end{remark}
\begin{proof}
We only prove the statement for the component $\mathcal{H}_{(n,0)}$.
For this we define:
\begin{align*}
\rho\colon I \times \mathcal{H}_{(n,0)} &\to \mathcal{H}_{(n,0)}\\
(t,A) &\mapsto (1-t)A + t\I{n}
\end{align*}
Clearly, for $t=0$ this is the identity on $\mathcal{H}_{(n,0)}$ and
for $t=1$ it is constant at the identity $\I{n}$. Hence, in
order to show that this is a well-defined deformation retract, we
have to prove that there exists no matrix $A \in
\mathcal{H}_{(n,0)}$ such that $\rho_t(A)$ is singular for some $t
\in (0,1)$.
Let $A$ be a positive definite matrix in $\mathcal{H}_n$. By
assumption we know that $\det A$, which coincides with the product
of all eigenvalues of $A$, is positive. Let us assume
that $\det \rho_t(A) = 0$. By definition this means
\begin{align}
\label{PositiveDefiniteRetraction}
\det ((1-t)A + t\I{n}) = 0.
\end{align}
This means that $-t$ is an eigenvalue of $A' := (1-t)A$. But $A'$ is
just $A$, scaled by a positive real number. Hence $A'$ is also
positive definite and therefore cannot have a negative eigenvalue
$-t$. Thus we obtain a contradiction, which proves that $\rho_t$
really has its image contained in $\mathcal{H}_{(n,0)}$. The proof
    for the negative definite case works completely analogously.
What is now left to show is equivariance of the above deformation
retract. But this follows from the fact that the involution on
$\mathcal{H}_n$ acts by conjugation and this is compatible with
scaling by real numbers as done in
(\ref{PositiveDefiniteRetraction}). \end{proof}
This shows that the components $\mathcal{H}_{(n,0)}$ and $\mathcal{H}_{(0,n)}$ are equivariantly contractible to a point, thus the spaces $[X,\mathcal{H}_{(n,0)}]_G$ and $[X,\mathcal{H}_{(0,n)}]_G$ are trivial. On the other hand, as we will see in the following, interesting topology will occur in the case of mixed signatures: For $0 < p,q < n$ the components $\mathcal{H}_{(p,q)}$ have the $U(n)$-orbit of $\I{p}{q}$ as equivariant strong deformation retract\footnote{Here $\I{p}{q}$ denotes the block diagonal matrix
with $\I{p}$ in the upper left corner and $-\I{q}$ in the lower
    right corner.}. Proving this requires some preparations; we begin by introducing the following surjection: \begin{align*}
\pi\colon \mathcal{H}_{(p,q)} &\to \text{Gr}_p(\mathbb{C}^n)\\
H &\mapsto E^+(H), \end{align*} where $E^+(H)$ denotes the direct sum of the positive eigenspaces of $H$. This is a $p$-dimensional subspace of $\mathbb{C}^n$. Fixing a basepoint $E_0 = \left<e_1,\ldots,e_p\right>$, this defines a fiber bundle with neutral fiber $F = \pi^{-1}(\{E_0\})$. The fiber consists of those matrices in $\mathcal{H}_{(p,q)}$ which have $E_0$ as the direct sum of their positive eigenspaces. Note that $K := U(n)$ acts transitively on $\text{Gr}_p(\mathbb{C}^n)$. Thus, denoting the stabilizer of $E_0$ in $K$ with $L$, we can identify $\text{Gr}_p(\mathbb{C}^n)$ with $K/L$. We equip the product $K \times F$ with a $K$-action given by multiplication on the first factor: \begin{align*}
    k(k',H) = (kk', H). \end{align*} The $L$-action on $K \times F$ is given by \begin{align*}
\ell(k,H) = (k\ell,\ell^*H\ell). \end{align*} It induces the quotient $K \times F \to K \times_L F$. The $K$-action on $K \times_L F$ is then given by \begin{align*}
k[(k',H)] = [k(k',H)] = [(kk',H)]. \end{align*} The quotient $K \times_L F \to K/L$ is $K$-equivariant by definition of the respective $K$-actions. We obtain the following diagram of $K$-spaces: \[ \xymatrix{
K \times F \ar[r] \ar[d] & K \times_L F \ar[d]\\
K \ar[r] & K/L } \]
After these remarks we state: \begin{proposition}
\label{GrassmannianStrongDeformationRetract}
The component $\mathcal{H}_{(p,q)}$ has the orbit $U(n).\I{p}{q}$ as
$T$-equivariant, strong deformation retract. \end{proposition} \begin{proof}
The idea of this proof is to use the above diagram and define a
strong deformation retract of the fiber $F$ which we then globalize
to a strong deformation retraction on $K \times_L F$. Using the
identifications $K \times_L F \cong \mathcal{H}_{(p,q)}$ and $K/L
\cong \text{Gr}_p(\mathbb{C}^n)$ this defines a $T$-equivariant strong deformation
retract from $\mathcal{H}_{(p,q)}$ to $U(n).\I{p}{q} \subset
\mathcal{H}_{(p,q)}$ (see figure~\ref{fig:UnOrbitsInHpq}).
Note that we have the following isomorphism
\begin{align}
\label{EquivariantFiberBundleIsoPsi}
\Psi\colon K \times_L F &\xrightarrow{\;\sim\;} \mathcal{H}_{(p,q)}\\
\left[(k,H)\right] &\mapsto kHk^*.\nonumber
\end{align}
Set $\Sigma = K.\I{p}{q} \subset \mathcal{H}_{(p,q)}$. This defines
(the image of) a global section of the bundle $K \times_L F \to
K/L$. Observe that a matrix $H \in F$ is, by definition,
positive-definite on $E_0 = \left<e_1,\ldots,e_p\right>$ and
therefore negative-definite on $E_0^\perp =
\smash{\left<e_{p+1},\ldots,e_{p+q}\right>}$\footnote{On $\mathbb{C}^n$ we use the
standard unitary structure.}. This implies that $H$ is of the form
\begin{align*}
H =
\begin{pmatrix}
H_p & 0 \\
0 & H_q
\end{pmatrix},
\end{align*}
where $H_p$ and $H_q$ are both hermitian, $H_p$ is positive-definite
on $E_0$ and $H_q$ is negative-definite on $E_0^\perp$. Now we can
define a retraction $\rho\colon I \times F \to F$ of the neutral
fiber:
\begin{align}
\label{EquationRetractRho}
\rho_t(H) =
\begin{pmatrix}
(1-t)H_p + t\I{p} & 0 \\
0 & (1-t)H_q - t\I{q}
\end{pmatrix}.
\end{align}
This is a well-defined map with image in the fiber $F$: The proof of
remark~\ref{RemarkDefiniteComponentsRetractable} shows that the
intermediate matrices during a homotopy of a definite matrix to
    $+\I{p}$ resp. $-\I{q}$ stay definite. This generalizes to the
situation at hand: Given a matrix $H$ of signature $(p,q)$, then
$\rho_t(H)$ will be of the same signature for every $t$. Therefore,
$\rho_t(F) \subset F$ for all $t$. Next, extend $\rho$ to a map
\begin{align*}
\hat{\rho}\colon I \times K \times F &\to K \times F\\
(t,(k,H)) &\mapsto (k,\rho_t(H))
\end{align*}
Now we prove that $\smash{\hat{\rho}}$ is $L$-equivariant and
therefore it pushes down to a map
\begin{align*}
I \times K \times_L F \to K \times_L F
\end{align*}
For this, let $\ell$ be in $L$. Then
\begin{align*}
\ell(\hat{\rho}(t,k,H)) = \ell(k,\rho_t(H)) = (k\ell,\ell^* \rho_t(H) \ell)
\end{align*}
On the other hand:
\begin{align*}
\hat{\rho}(\ell(t,k,H)) = \hat{\rho}(t,k\ell,\ell^* H \ell) = (k\ell, \rho_t(\ell^* H \ell))
\end{align*}
Now, $L$-equivariance follows from the fact that matrix conjugation
commutes with addition and scalar multiplication of matrices and
therefore
\begin{align*}
\ell^* \rho_t(H) \ell = \rho_t(\ell^* H \ell).
\end{align*}
By the isomorphism $K \times_L F \cong \mathcal{H}_{(p,q)}$,
$\smash{\hat{\rho}}$ induces a map
$\smash{\tilde{\rho}}\colon I \times \mathcal{H}_{(p,q)} \to
\mathcal{H}_{(p,q)}$.
Note that $\rho_0$ and therefore also $\smash{\hat{\rho_0}}$ as well as
the push-down to the quotient is the identity. On the other hand,
$\rho_1$ retracts the fiber $F$ to $\I{p}{q}$. Thus,
$\smash{\hat{\rho_1}}$ retracts $K \times F$ to $K \times
\{\I{p}{q}\}$.
Using the isomorphism $K \times_L F \cong \mathcal{H}_{(p,q)}$, it
    follows that $\smash{\tilde{\rho}}$ retracts $\mathcal{H}_{(p,q)}$ to
$\Sigma = K.\I{p}{q}$.
It remains to prove that $\smash{\tilde{\rho}}_t$ is $T$-equivariant for
each $t$:
\begin{align}
\label{EquationRetractionEquivariance}
\tilde{\rho}_t(\overline{H}) = \overline{\tilde{\rho}_t(H)}.
\end{align}
A short computation shows that the isomorphism $\Psi$
(\ref{EquivariantFiberBundleIsoPsi}) is $T$-equivariant with respect
to the $T$-action on $K \times_L F$ given by
\begin{align*}
T\left(\left[(k,H)\right]\right) = \left[\left(\overline{k},\overline{H}\right)\right].
\end{align*}
Therefore, in order to prove (\ref{EquationRetractionEquivariance})
we need to show that the induced map $I \times K \times_L F \to K
\times_L F$ is $T$-equivariant. By definition this boils down to
showing that
\begin{align*}
\left[\left(\overline{k},\rho_t\left(\overline{H}\right)\right)\right] = \left[\left(\overline{k},\overline{\rho_t(H)}\right)\right],
\end{align*}
which is a direct consequence of the definition of $\rho_t$ as in
(\ref{EquationRetractRho}). Summarizing the above we have seen that
there exists a strong deformation retract $\smash{\tilde{\rho}}$ from
$\mathcal{H}_{(p,q)}$ to the orbit $K.\I{p}{q}$ which is
    $T$-equivariant. \end{proof} \begin{figure}
\caption{The $U(n)$-orbits in $\mathcal{H}_{(p,q)}$.}
\label{fig:UnOrbitsInHpq}
\end{figure}
The $U(n)$-orbit of $\I{p}{q}$ can be equivariantly and diffeomorphically identified with the complex Grassmann manifold $\text{Gr}_p(\mathbb{C}^n)$. This reduces the problem of describing the sets $\smash{[X,\mathcal{H}_{(p,q)}]_G}$ to the problem of describing the sets $\smash{[X,\text{Gr}_p(\mathbb{C}^n)]_G}$. As a first step towards the study of equivariant maps to $\text{Gr}_p(\mathbb{C}^n)$ we describe $(\text{Gr}_p(\mathbb{C}^n))_{\mathbb{R}}$, the space of real points with respect to the real structure $T$ on $\text{Gr}_p(\mathbb{C}^n)$: \begin{remark}
The space $(\text{Gr}_p(\mathbb{C}^n))_{\mathbb{R}}$ of real points is
diffeomorphic to the real Grassmannian $\text{Gr}_p(\mathbb{R}^n)$. \end{remark}
\begin{proof}
Given any $T$-stable subspace $V$ in $\mathbb{C}^n$, then $T$ defines a
decomposition $V = V_\mathbb{R} \oplus i V_\mathbb{R}$ where $V_\mathbb{R} = \text{Fix}\, T$ is a
subspace of $\mathbb{R}^n$. Note that the group $O(n,\mathbb{R})$ acts transitively
on $\text{Gr}_p(\mathbb{R}^n)$. Therefore, for any two $T$-stable $p$-dimensional
subvector spaces $V, V' \subset \mathbb{C}^n$, there exists an orthogonal
    transformation $g$ mapping $V_\mathbb{R}$ to $V'_\mathbb{R}$. Such a
transformation extends uniquely to a unitary transformation of
$\mathbb{C}^n$ which sends $V$ to $V'$. Now, a computation shows that the
stabilizer of the base point
    $V_0 = \smash{\left<e_1,\ldots,e_p\right>}$ in $O(n,\mathbb{R})$ consists of
the matrices of the form
\begin{align*}
g =
\begin{pmatrix}
g_1 & 0 \\
0 & g_2
\end{pmatrix},
\end{align*}
where $g_1 \in O(p,\mathbb{R})$ and $g_2 \in O(q,\mathbb{R})$. It follows that
\begin{equation*}
(\text{Gr}_p(\mathbb{C}^n))_\mathbb{R} \cong \frac{O(n,\mathbb{R})}{O(p,\mathbb{R}) \times O(q,\mathbb{R})} \cong \text{Gr}_p(\mathbb{R}^n).\qedhere
\end{equation*} \end{proof}
We now proceed with the classification of equivariant maps to the space of non-singular $n \times n$ hermitian matrices: \begin{align*}
H\colon X \to \mathcal{H}^*_n. \end{align*} As shown earlier, such a map has a well-defined signature $(p,q)$ (with $p+q = n$) specifying that $H(x)$ has exactly $p$ positive eigenvalues and $q$ negative eigenvalues for every $x \in X$. The classification for the image space $\mathcal{H}_n$ will be achieved in the following way: \begin{enumerate} \item First we handle the classification of maps $X \to \mathcal{H}_3$
    separately. In this situation the relevant image space can be
reduced to the Grassmannian $\text{Gr}_1(\mathbb{C}^3) \cong \mathbb{P}_2$. The
classification with respect to this image space works by making a
reduction to the case of maps to $\mathbb{P}_1$. \item Then, for a general Grassmannian $\text{Gr}_p(\mathbb{C}^n)$ we apply
a reduction method iteratively until we have reduced the situation
to a copy of $\mathbb{P}_1$ embedded in $\text{Gr}_p(\mathbb{C}^n)$ as a Schubert
    variety. The separate discussion of $\mathbb{P}_2$ will act as a guiding
example and provide useful statements for the final step of the
iterative reduction. \end{enumerate}
For the following we need a notion of \emph{total degree}, as the standard notion of degree (i.\,e. the Brouwer degree) is not applicable here, since $X$ and a general Grassmann manifold $\text{Gr}_p(\mathbb{C}^n)$ have different dimensions. But because of the convenient geometrical properties of Grassmannians (see proposition~\ref{TopologyGrassmannians}) we in fact do not have to change much: The second homology group is still infinite cyclic, which allows us to generalize the notion of total degree. For this, let $f\colon X \to \text{Gr}_p(\mathbb{C}^n)$ be a $G$-map. The homology group $H_2(X,\mathbb{Z})$ is generated by the fundamental class $[X]$. The homology group $H_2(\text{Gr}_p(\mathbb{C}^n),\mathbb{Z})$ is generated by $[\mathcal{S}]$, where $\mathcal{S}$ denotes the Schubert variety \begin{align}
\label{DefOfSchubertVarietyC}
\mathcal{S} = \left\{E \in \text{Gr}_p(\mathbb{C}^n)\colon \mathbb{C}^{p-1} \subset E \subset \mathbb{C}^{p+1}\right\}. \end{align} Here, $\mathbb{C}^\ell$ is regarded as being embedded in $\mathbb{C}^n$, for $\ell < n$, via \begin{align*}
(z_1,\ldots,z_\ell) \mapsto (z_1,\ldots,z_\ell,\underbrace{0,\ldots,0}_{n-\ell}). \end{align*} In the terminology of e.\,g. \cite{GriffithsHarris}, $\mathcal{S}$ is the Schubert variety\label{SchubertVarietyDiscussion} \begin{align*}
\mathcal{S} = \left\{E \in \text{Gr}_p(\mathbb{C}^k)\colon \dim(E \cap V_{k-p+i-a_i}) \geq i \;\text{for all $i=1,\ldots,p$}\right\} \end{align*} defined by the sequence \begin{align*}
a_i =
\begin{cases}
n - p & \;\text{for $1 \leq i < p-1$} \\
n - p - 1 & \;\text{for $p - 1 \leq i \leq p$}.
\end{cases} \end{align*} After having fixed the standard flag $V_1 \subset V_2 \subset \ldots \subset V_n$, where $V_j$ is the vector space generated by the standard basis vectors $e_1,\ldots,e_j$, $\mathcal{S}$ is the unique one-dimensional Schubert variety in $\text{Gr}_p(\mathbb{C}^n)$. It is biholomorphic to $\mathbb{P}_1$ and comes equippd with its canonical orientation as a complex manifold. As it has been discussed on p.~\pageref{P1S2OrientationDiscussion}, this space can be equivariantly identified with the $2$-sphere. Now we can make the following \begin{definition}
\label{TotalDegreeGr}
Let $f\colon X \to \text{Gr}_p(\mathbb{C}^n)$ be a $G$-map. Then its induced map
on the second homology is of the form
\begin{align*}
f_*\colon H_2(X,\mathbb{Z}) &\to H_2(\text{Gr}_p(\mathbb{C}^n),\mathbb{Z})\\
[X] &\mapsto d[\mathcal{S}].
\end{align*}
We define the \emph{total degree} of $f$ to be this integer $d$ and
write $\deg f$ for the total degree of $f$. \end{definition} Clearly, the total degree defined this way is a homotopy invariant of maps $X \to \text{Gr}_p(\mathbb{C}^n)$. We also need a slight generalization of definition~\ref{DefinitionFixpointDegree} to account for the fundamental group of $(\text{Gr}_p(\mathbb{C}^n))_{\mathbb{R}}$ not being infinite cyclic unless $p = 1$ and $n=2$: \begin{definition}
\label{DefinitionFixpointSignature} (Fixed point signature) Let
$f\colon M \to Y$ be a $G$-map between $G$-manifolds. Assume that
the fixed point set $Y^G \subset Y$ is path-connected, $\pi_1(Y^G) =
C_2$ and that the fixed point set $M^G$ is the disjoint union
\begin{align*}
M^G = \bigcup_{j=0,\ldots,k} K_j
\end{align*}
of circles $K_j$. Then we define the \emph{fixed point signature} of
$f$ to be the $k$-tuple $(m_0,\ldots,m_k)$ where
\begin{align*}
m_j =
\begin{cases}
0 & \;\text{if the loop $\Restr{f}{K_j}\colon K_j \to Y^G$ is contractible in $Y^G$}\\
1 & \;\text{else}.
\end{cases}
\end{align*} \end{definition}
In the following let $n > 2$, i.\,e. we exclude the case $\text{Gr}_p(\mathbb{C}^n) = \mathbb{P}_1$, which has already been dealt with in section~\ref{SectionN=2}. For the type I involution on $X$ we make the following definition: \begin{definition} (Degree triple map)
For $n > 2$, we define the \emph{degree triple map} to be the map
\begin{align*}
\mathcal{T}\colon \mathcal{M}_G(X,\text{Gr}_p(\mathbb{C}^n)) &\to \{0,1\} \times \mathbb{Z} \times \{0,1\}\\
f &\mapsto \Triple{m_0}{d}{m_1},
\end{align*}
where $\Bideg{m_0}{m_1}$ is the fixed point signature of $f$ with
respect to the circles $C_0, C_1$ and $d$ is the total degree of
$f$. We call a given triple \emph{realizable} if it is contained in
the image $\Im(\mathcal{T})$. For a map
\begin{align*}
f\colon (Z,C) \to \left(\text{Gr}_p(\mathbb{C}^n),\text{Gr}_p(\mathbb{C}^n)_\mathbb{R}\right)
\end{align*}
we define its \emph{degree triple} to be the degree triple of the
equivariant extension $\smash{\hat{f}}\colon X \to \text{Gr}_p(\mathbb{C}^n)$. \end{definition}
For the type II involution on $X$ we define: \begin{definition} (Degree pair map)
For $n > 2$, we define the \emph{degree pair map} to be the map
\begin{align*}
\mathcal{P}\colon \mathcal{M}_G(X,\text{Gr}_p(\mathbb{C}^n)) &\to \mathbb{Z} \times \{0,1\}\\
f &\mapsto \Pair{m}{d},
\end{align*}
where $(m)$ is the fixed point signature of $f$ and $d$ is the total
degree of $f$. We call a given pair \emph{realizable} if it is
contained in the image $\Im(\mathcal{P})$. For a map
\begin{align*}
f\colon (Z,C) \to \left(\text{Gr}_p(\mathbb{C}^n),\text{Gr}_p(\mathbb{C}^n)_\mathbb{R}\right)
\end{align*}
we define its \emph{degree pair} to be the degree pair of the
equivariant extension $\smash{\hat{f}}\colon X \to \text{Gr}_p(\mathbb{C}^n)$. \end{definition}
The degree triple resp. the degree pair is clearly an invariant of equivariant homotopy.
\subsection{Maps to $\mathcal{H}_3$}
In this section we study the equivariant homotopy of maps $X \to \mathcal{H}_3^*$. The connected components of $\mathcal{H}_3^*$ are parameterized by the eigenvalue signatures $(3,0)$, $(2,1)$, $(1,2)$ and $(0,3)$. The group actions are exactly the same as in the first chapter: we can regard $X$ as being defined by the standard square torus, equipped with either the type I or the type II real structure and the involution on $\mathcal{H}_3$ is given by conjugation. It turns out that the classification of maps in the case where the image space is $\mathcal{H}_3^*$ is not fundamentally different from the classification where it is $\mathcal{H}_2^*$. As before we start with a reduction procedure.
Let $H\colon X \to \mathcal{H}_3^*$ be a map with the signature $(p,q)$. Hence, $H$ can be regarded as a map $X \to \mathcal{H}_{(p,q)}$. In the concrete case of maps to $\mathcal{H}_3$ with mixed signature we can now conclude with proposition~\ref{GrassmannianStrongDeformationRetract} that $\mathcal{H}_{(2,1)}$ (resp. $\mathcal{H}_{(1,2)}$) can be equivariantly retracted to the orbit $U(3).\I{2}{1}$ (resp. $U(3).\I{1}{2}$). These orbits are equivariantly and diffeomorphically identifiable with $\text{Gr}_2(\mathbb{C}^3)$ (resp. $\text{Gr}_1(\mathbb{C}^3)$). Since these two Grassmann manifolds are by remark~\ref{GrassmannianIdentificationEquivariant} equivalent as $G$-spaces, we can assume without loss of generality that we are dealing with $G$-maps of the form \begin{align*}
X \to \text{Gr}_1(\mathbb{C}^3) \cong \mathbb{P}_2, \end{align*} where the involution $T$ on $\mathbb{P}_2$ is given by standard complex conjugation: \begin{align*}
T\colon [z_0:z_1:z_2] \mapsto \left[\overline{z_0}:\overline{z_1}:\overline{z_2}\right]. \end{align*}
\subsubsection{Maps to $\mathbb{P}_2$} \label{SectionClassificationP2}
For the following, let $p_0$ be the base point $[0:0:1] \in \mathbb{P}_2$ and regard $\mathbb{P}_1$ as being embedded in $\mathbb{P}_2$ as \begin{align*}
[z_0:z_1] \mapsto [z_0:z_1:0]. \end{align*}
\begin{lemma}
\label{RetractFromP2toP1}
There exists an equivariant strong deformation retract $\rho$ from
$\mathbb{P}_2\smallsetminus\{p_0\}$ to $\mathbb{P}_1 \subset
\mathbb{P}_2$. \end{lemma}
\begin{proof}
Define $\rho$ as follows:
\begin{align*}
\rho\colon I \times \mathbb{P}_2\smallsetminus\{p_0\} &\to \mathbb{P}_2\smallsetminus\{p_0\}\\
\left(t,[z_0:z_1:z_2]\right) &\mapsto [z_0:z_1:(1-t) z_2].
\end{align*}
For $t=0$ this is the identity of $\mathbb{P}_2\smallsetminus\{p_0\}$,
for $t=1$, this is the map
\begin{align*}
[z_0:z_1:z_2] \mapsto [z_0:z_1:0].
\end{align*}
This is well-defined, since $\rho$ is only defined on the complement
of the point $p_0$. This construction is equivariant, because the
multiplication with real numbers commutes with complex conjugation. \end{proof}
\begin{figure}
\caption{$\mathbb{P}_2\smallsetminus\{p_0\}$ retracts to $\mathbb{P}_1 \subset \mathbb{P}_2$.}
\label{P2VectorBundleOverP1}
\end{figure}
The strong deformation retract $\rho$ (see figure~\ref{P2VectorBundleOverP1} for a depiction) constructed in the previous lemma also defines a strong deformation retract of the fixed point set $(\mathbb{P}_2)_\mathbb{R}$ to the fixed point set $(\mathbb{P}_1)_\mathbb{R}$. For understanding what happens to fixed point signatures of maps $X \to \mathbb{P}_2$ during such a deformation retract we make the following remark: \begin{remark}
\label{NonTrivialLoopInRPn}
Assume $n > 1$. Let $\gamma\colon I \to \mathbb{RP}_1$ be the loop which
wraps around $\mathbb{RP}_1$ once. Let $\iota_n$ be the following embedding:
\begin{align*}
\iota_n\colon \mathbb{RP}_1 &\to \mathbb{RP}_n\\
[x_0:x_1] &\mapsto [x_0:x_1:0:\ldots:0].
\end{align*}
Then $\iota_n \circ \gamma$ defines the (up to homotopy) unique
non-trivial loop in $\mathbb{RP}_n$. \end{remark}
\begin{proof}
To simplify the notation set $\iota = \iota_n$.
We have the following commutative diagram
\[
\begin{xy}
\xymatrix{
S^1 \ar@{^{(}->}[r]^{\hat{\iota}} \ar[d] & S^n \ar[d]\\
\mathbb{RP}_1 \ar@{^{(}->}[r]_\iota & \mathbb{RP}_n
}
\end{xy}
\]
Here, $\smash{\hat{\iota}}$ can be regarded as the restriction of
the embedding $\mathbb{R}^2 \hookrightarrow \mathbb{R}^{n+1}$ to
$S^1 \subset \mathbb{R}^2$. Denote the loop which wraps around
$\mathbb{RP}_1$ once by $\gamma\colon I \to \mathbb{RP}_1$. It can
be lifted to a path $\smash{\hat{\gamma}}\colon I \to S^1$, which
makes a one-half loop in $S^1$. Then, by commutativity of the
diagram, it follows that
$\smash{\widehat{\iota \circ \gamma}} := \smash{\hat{\iota}} \circ
\smash{\hat{\gamma}}$
is a lift of $\iota \circ \gamma$, the latter being a representative
of the homotopy class $\iota_*[\gamma] \in \pi_1(\mathbb{RP}_n)$. It is
well known (see e.\,g. \cite[p.~74]{Hatcher}) that $\pi_1(\mathbb{RP}_n)$
(for $n > 1$) is generated by the projection to $\mathbb{RP}_n$ of a
curve in the universal covering space $S^n$ which connects two
antipodal points. Thus it is enough to show that
$\widehat{\iota \circ \gamma}$ is not a closed loop in $S^n$. But
this is clear, since $\smash{\hat{\gamma}}$ is not a closed loop and
$S^1 \hookrightarrow S^n$ is just an embedding. \end{proof}
As a consequence of the previous remark we can formulate: \begin{remark}
\label{RPnFundamentalGroupIsomorphism}
Assume $n > 1$ and let $\iota$ be the embedding
\begin{align*}
\iota\colon \mathbb{RP}_n &\to \mathbb{RP}_{n+1}\\
[x_0:\ldots:x_n] &\mapsto [x_0:\ldots:x_n:0].
\end{align*}
Then the induced map $\iota_*\colon \pi_1(\mathbb{RP}_n) \to
\pi_1(\mathbb{RP}_{n+1})$ is an isomorphism. \end{remark}
\begin{proof}
Since both fundamental groups are cyclic of order 2 it suffices to
show that $\iota_*$ maps the non-trivial homotopy class $[\gamma]
\in \pi_1(\mathbb{RP}_n)$ to the non-trivial homotopy class in
$\pi_1(\mathbb{RP}_{n+1})$. Let $\alpha$ be the non-trivial loop in
$\mathbb{RP}_n$ which is defined in terms of the embedding of $\mathbb{RP}_1
\hookrightarrow \mathbb{RP}_n$ (see
remark~\ref{NonTrivialLoopInRPn}). Then we have $[\gamma] =
[\alpha]$. The homotopy class $\iota_*([\gamma])$ can thus be
regarded as the homotopy class of the loop $\iota \circ \alpha$. But
this is just the non-trivial loop constructed in the previous remark
for $\mathbb{RP}_{n+1}$. Therefore, $\iota \circ \alpha$ is non-trivial
and the statement is proven. \end{proof}
Note that there is a canonical embedding of $S^1 \cong \mathbb{RP}_1 \hookrightarrow \mathbb{RP}_2$ as the line at infinity: \begin{align*}
[x_1:x_2] \mapsto [x_1:x_2:0] \end{align*} Fix a point $p_0$ in $\mathbb{RP}_1$. We now define two loops: Let $\gamma_0$ be the loop which is constant at $p_0$ and let $\gamma_1$ be a fixed loop which wraps around $\mathbb{RP}_1 \subset \mathbb{RP}_2$ once. By remark~\ref{NonTrivialLoopInRPn} this defines the non-trivial element in $\pi_1(\mathbb{RP}_2)$.
\begin{definition}
\label{DefinitionFixpointNormalizationP2}
A $G$-map $f\colon X \to \mathbb{P}_2$ is \emph{fixed point normalized} if
$\restr{f}{C_j}$ is either $\gamma_0$ or $\gamma_1$ for all fixed point
circles $C_j$ in $X$. \end{definition}
\begin{remark}
\label{FixpointNormalizationP2}
Let $X$ be equipped with the type I or the type II involution. Then
every $G$-map $f\colon X \to \mathbb{P}_2$ can be fixed point normalized. \end{remark} \begin{proof}
Using the equivariant version of the homotopy extension property of
the pair $(X,C)$ (see e.\,g. corollary~\ref{G-HEP}) we can prescribe
a homotopy on each of the fixed point circles of the torus which
deforms the corresponding loop to either $\gamma_0$ or $\gamma_1$,
depending on the fixed point signature of the map. \end{proof}
Thus, after this normalization we can assume that maps with the same fixed point signature agree along the boundary circles.
\begin{lemma}
\label{ReductionP2toP1}
Let $f\colon X \to \mathbb{P}_2$ be a fixed point normalized $G$-map
for the type I or the type II involution. Then:
\begin{enumerate}[(i)]
\item The map $f$ is equivariantly homotopic to a map $f'$ which has
its image contained in $\mathbb{P}_1 \subset \mathbb{P}_2$.
\item The degree of $f$ (definition~\ref{TotalDegreeGr}) agrees with
$\deg f'$, where $f'$ is regarded as a map $X \to \mathbb{P}_1$.
\end{enumerate} \end{lemma}
\begin{proof}
Regarding (i): Let $\smash{\widehat{C}}$ be the union of the
(finitely many) fixed point circles. Because of dimension reasons
there exists a point $p \in \mathbb{RP}_2$ which is not contained in
the image of $\smash{\restr{f}{\widehat{C}}}$. Note that $\SOR{3}$ acts
transitively on $\mathbb{RP}_2$. Thus, let $g_1 \in \SOR{3}$ be the
transformation in $\SOR{3}$ which sends $p$ to $p_0 :=
[0:0:1]$.
This transformation $g_1$ can also be regarded as a transformation
of $\mathbb{P}_2$, since $\SOR{3} \subset \mathrm{SU}(3)$. Also, since
$\SOR{3}$ is path-connected we can find a path $g_t$ from the
identity in $\SOR{3}$ to $g_1$. The composition $g_t \circ f$ is
then a homotopy of $f$ to a map $\smash{\tilde{f}}$ such that
$p_0 \not\in \Im \smash{\tilde{f}}$.
This homotopy is also equivariant. Thus, without loss of generality
we can assume that, at least after a $G$-homotopy, a given $G$-map
$f\colon X \to \mathbb{P}_2$ does not have $p_0$ in its image. Let
us assume that the given map $f$ has this property. Then we can
compose $f$ with the equivariant strong deformation retract from
lemma~\ref{RetractFromP2toP1} and therefore obtain an equivariant
homotopy from $f$ to a map $f'$ such that $\Im f' \subset
\mathbb{P}_1 \subset \mathbb{P}_2$.
Regarding (ii): The degree $\deg f$ is the number $d$ such that
\begin{align*}
f_*\colon H_2(X,\mathbb{Z}) \to H_2(\mathbb{P}_2,\mathbb{Z})
\end{align*}
is the map
\begin{align*}
[X] \mapsto d[\mathbb{P}_1].
\end{align*}
while the degree $\deg f'$ is the number $d'$ such that
\begin{align*}
f'_*\colon H_2(X,\mathbb{Z}) \to H_2(\mathbb{P}_1,\mathbb{Z})
\end{align*}
is the map
\begin{align*}
[X] \mapsto d'[\mathbb{P}_1].
\end{align*}
The inclusion $\iota\colon \mathbb{P}_1 \hookrightarrow
\mathbb{P}_2$ induces an isomorphism in the second homology
group. Thus we can also regard $f_*$ as taking on values in
$H_2(\mathbb{P}_2,\mathbb{Z})$. But since $f$ and $\iota \circ f'$
are homotopic, their induced maps in homology must agree. This means
$d = d'$. \end{proof}
\paragraph{Type I}
In this paragraph we are assuming the involution on $X$ to be of type I.
\begin{remark}
\label{ReductionP2toP1Class1}
Let $f\colon X \to \mathbb{P}_2$ be a fixed point normalized
$G$-map. Then the map $f$ has fixed point signature $\Bideg{m_0}{m_1}$
iff $f'$ (from lemma~\ref{ReductionP2toP1}) has fixed point degree
$\Bideg{m_0}{m_1}$. \end{remark}
\begin{proof}
Let us first assume that $\restr{f}{C_j}\colon C_j \to
\mathbb{RP}_2$ is trivial, i.\,e. $m_j = 0$. Since $f$ is fixed point
normalized, we know that $\restr{f}{C_j}$ is the constant
map. Therefore, after making the deformation retract to $\mathbb{RP}_1
\subset \mathbb{P}_1$ it is still constant, hence has degree
zero. On the other hand, if a map $S^1 \to \mathbb{RP}_1 \subset
\mathbb{P}_1$ has degree zero, then it is already null-homotopic as
a map to $\mathbb{RP}_1$. Therefore it will also be null-homotopic as a
map to $\mathbb{RP}_2 \supset \mathbb{RP}_1$.
Let us now assume that $\restr{f}{C_j}\colon C_j \to \mathbb{RP}_2$
is non-trivial, i.\,e. $m_j = 1$. In this case $\restr{f}{C_j}$ is
the map which wraps around $\mathbb{RP}_1$ once. In particular, this
loop will be fixed when composing with the strong deformation
retract to $\mathbb{P}_1$. Therefore, after the retraction, the
resulting map will still have degree one along the boundary circle
$C_j$. The reverse direction follows from
remark~\ref{NonTrivialLoopInRPn}, where we discussed the loop which
wraps around $\mathbb{RP}_1$ once; using the canonical embedding
$\mathbb{RP}_1 \hookrightarrow \mathbb{RP}_2$, this loop defines a
non-trivial element in $\pi_1(\mathbb{RP}_2)$. \end{proof}
\begin{theorem}
\label{TheoremClassificationMapsToP2Class1}
The $G$-homotopy class of a map $f \in \mathcal{M}_G(X,\mathbb{P}_2)$ is
uniquely determined by its degree triple $\mathcal{T}(f)$. The image
$\Im(\mathcal{T})$ of the degree triple map consists of those
triples $\Triple{m_0}{d}{m_1}$ (with $m_0, m_1 \in \{0,1\}$)
satisfying
\begin{align*}
d \equiv m_0 + m_1 \mod 2.
\end{align*} \end{theorem}
\begin{proof}
First we show that two maps with the same degree triple are
equivariantly homotopic. Thus, let $f$ and $g$ be two $G$-maps $X
\to \mathbb{P}_2$, both having the triple $\Triple{m_0}{d}{m_1}$.
Using lemma~\ref{ReductionP2toP1} (i) we can assume that $f$ and $g$
have their images contained in $\mathbb{P}_1 \subset
\mathbb{P}_2$. Applying lemma~\ref{ReductionP2toP1} (ii) and
remark~\ref{ReductionP2toP1Class1} it then follows that the degree
triple of $f$ and $g$, regarded as a map $X \to \mathbb{P}_1$ is
again $\Triple{m_0}{d}{m_1}$. Hence, by
theorem~\ref{Classification1}, these maps are equivariantly
homotopic.
Regarding the image $\Im(\mathcal{T})$: Let us first assume that
$\Triple{m_0}{d}{m_1}$ is in the image. Then, by definition, there
exists a map $f\colon X \to \mathbb{P}_2$ with this
triple. Lemma~\ref{ReductionP2toP1} then implies the existence of a
map $f'\colon X \to \mathbb{P}_1$ with the same triple. Hence, by
theorem~\ref{Classification1}, $d \equiv m_0 + m_1 \mod 2$. On the
other hand, given a triple $\Triple{m_0}{d}{m_1}$ with $m_0,m_1 \in
\{0,1\}$ and $d \equiv m_0 + m_1 \mod 2$, there exists a map
$f\colon X \to S^2 \cong \mathbb{P}_1$ with this triple. Composing
with the embedding $\mathbb{P}_1 \hookrightarrow \mathbb{P}_2$ we
have produced a map $X \to \mathbb{P}_2$ which, again by
lemma~\ref{ReductionP2toP1}, has the same triple
$\Triple{m_0}{d}{m_1}$. \end{proof}
\paragraph{Type II}
In this paragraph we are assuming the involution on $X$ to be of type II.
\begin{remark}
\label{ReductionP2toP1Class2}
Let $f\colon X \to \mathbb{P}_2$ be a fixed point normalized
$G$-map. Then the map $f$ has the fixed point signature $m$ iff $f'$
(from lemma~\ref{ReductionP2toP1}) has the fixed point degree $m$. \end{remark}
\begin{proof}
The proof is exactly the same as in
remark~\ref{ReductionP2toP1Class1}. One only has to consider the single
circle $C$ in $X$ instead of two circles $C_0$ and $C_1$. \end{proof}
\begin{theorem}
\label{TheoremClassificationMapsToP2Class2}
The $G$-homotopy class of a map $f \in \mathcal{M}_G(X,\mathbb{P}_2)$ is
uniquely determined by its degree pair $\mathcal{P}(f)$. The image
$\Im(\mathcal{P})$ of the degree pair map consists of those pairs
$\Pair{m}{d}$ (with $m \in \{0,1\}$) satisfying
\begin{align*}
d \equiv m \mod 2.
\end{align*} \end{theorem}
\begin{proof}
The proof works completely analogously to the proof of
theorem~\ref{TheoremClassificationMapsToP2Class1}. For the first
statement it suffices to show that two maps with the same degree
pair are equivariantly homotopic. Thus, let $f$ and $g$ be two
$G$-maps $X \to \mathbb{P}_2$, both having the pair
$\Pair{m}{d}$. Using lemma~\ref{ReductionP2toP1} (i) we can assume
that $f$ and $g$ have their images contained in $\mathbb{P}_1 \subset
\mathbb{P}_2$. Applying lemma~\ref{ReductionP2toP1} (ii) and
remark~\ref{ReductionP2toP1Class2} it then follows that the degree
pair of $f$ and $g$, regarded as a map $X \to \mathbb{P}_1$ is again
$\Pair{m}{d}$. Hence, by theorem~\ref{Classification2}, these maps
are equivariantly homotopic, which proves the statement.
Regarding the image $\Im(\mathcal{P})$: Let us assume that
$\Pair{m}{d}$ is in the image. By definition there exists a map
$f\colon X \to \mathbb{P}_2$ with this
pair. Lemma~\ref{ReductionP2toP1} then implies the existence of a
map $f'\colon X \to \mathbb{P}_1$ with the same pair. Hence, by
theorem~\ref{Classification2}, $d \equiv m \mod 2$ is a necessary
condition for a pair to be contained in $\Im(\mathcal{P})$. On the
other hand, given a pair $\Pair{m}{d}$ with $m \in \{0,1\}$ and $d
\equiv m \mod 2$, there exists a map $f\colon X \to S^2 \cong
\mathbb{P}_1$ with this pair. Composing with the embedding
$\mathbb{P}_1 \hookrightarrow \mathbb{P}_2$ we have produced a map
$X \to \mathbb{P}_2$ which, again by lemma~\ref{ReductionP2toP1},
has the same pair $\Pair{m}{d}$. Therefore, $\Pair{m}{d}$ is
contained in the image $\Im(\mathcal{P})$. \end{proof}
\subsubsection{Classification of Maps to $\mathcal{H}_3^*$}
So far the degree triple map (resp. the degree pair map) has only been defined for $G$-maps $X \to \mathbb{P}_2$. But after fixing an identification of the orbit $U(3).\I{p}{q} \subset \mathcal{H}_{(p,q)}$, which has been shown to be an equivariant strong deformation retract, with $\mathbb{P}_2$, the degree triple map (resp. the degree pair map) is also defined on all mapping spaces $\mathcal{M}_G(X,\mathcal{H}_{(p,q)})$ with $p+q=3$. This remark then allows us to state and prove the main result of this section:
\begin{theorem}
\label{HamiltonianClassificationRank3}
Let $X$ be a torus equipped with either the type I or the type II
involution. Then:
\begin{enumerate}[(i)]
\item The sets $[X,\mathcal{H}_{(3,0)}]_G$ and
$[X,\mathcal{H}_{(0,3)}]_G$ are trivial.
\item Two $G$-maps $X \to \mathcal{H}_{(p,q)}$ (with $0<p,q<3$) are
$G$-homotopic iff their degree triples (type I) resp. their degree
pairs (type II) agree.
\item The realizable degree triples $\Triple{m_0}{d}{m_1}$ (type I)
resp. degree pairs $\Pair{m}{d}$ (type II) are exactly those
which satisfy
\begin{align*}
d \equiv m_0 + m_1 \mod 2 \;\text{resp.}\;d \equiv m \mod 2.
\end{align*}
\end{enumerate} \end{theorem}
\begin{proof}
The $\text{sig}\, = (3,0)$ resp. the $\text{sig}\, = (0,3)$ case is handled by
remark~\ref{RemarkDefiniteComponentsRetractable}, which proves that
maps $X \to \mathcal{H}_{(3,0)}$ (resp. $X \to \mathcal{H}_{(0,3)}$)
are equivariantly retractable to the map which is constant the
identity (resp. minus identity). In the case of the signature being
$(2,1)$ (resp. $(1,2)$) the image space has the projective space
$\mathbb{P}_2$ as an equivariant strong deformation retract. In this case
theorem~\ref{TheoremClassificationMapsToP2Class1} (for type I) and
theorem~\ref{TheoremClassificationMapsToP2Class2} (for type II)
complete the proof. \end{proof}
\begin{remark}
\label{HamiltonianMapRealizationRank3}
Clearly, the case $\text{sig}\, = (3,0)$ (resp. $\text{sig}\, = (0,3)$) is trivially
realized by the map, which is constant the identity (resp. minus
identity). Representatives for the homotopy classes for $\text{sig}\, = (2,1)$
and $\text{sig}\, = (1,2)$ can be constructed as follows: Let
$f\colon X \to S^2$ be a $G$-map. Using the orientation-preserving
diffeomorphism $S^2 \xrightarrow{\;\sim\;} \mathbb{P}_1$, we can regard
$f$ as a map to $\mathbb{P}_1$. Composing this map with the embedding
$\mathbb{P}_1 \hookrightarrow \mathbb{P}_2$ yields a $G$-map
$\smash{\tilde{f}}\colon X \to \mathbb{P}_2$. Finally, we can compose
$\smash{\tilde{f}}$ with one of the two embeddings
$\iota_{(2,1)},\iota_{(1,2)}\colon \mathbb{P}_2 \hookrightarrow
\mathcal{H}_3^*$,
which embed $\mathbb{P}_2$ in the respective component of
$\mathcal{H}_3^*$ associated to the signature $(2,1)$ or $(1,2)$. \end{remark}
\subsection{Iterative Retraction Method}
In this section we describe a reduction procedure which can be used iteratively until we arrive in a known situation, i.\,e. $\text{Gr}_1(\mathbb{C}^2) \cong \mathbb{P}_1$. We call this procedure \emph{iterative
retraction} of Grassmann manifolds. Recall section~\ref{SectionClassificationP2}, in particular lemma~\ref{ReductionP2toP1}: There we start with a $G$-map $f\colon X \to \mathbb{P}_2$ and remove a certain subset $S$ (in this case, $S$ is a point) from $\mathbb{P}_2$ which -- at least after a $G$-homotopy -- is not contained in the image $\Im(f)$. The resulting space $\mathbb{P}_2\smallsetminus S$ has the submanifold $\mathbb{P}_1 \subset \mathbb{P}_2$ as equivariant, strong deformation retract. In this section we show that this method can be generalized to general Grassmann manifolds $\text{Gr}_p(\mathbb{C}^n)$. Fundamental for this reduction is the fact that the Schubert variety $\mathcal{S} \subset \text{Gr}_p(\mathbb{C}^n)$ is equivariantly identifiable with $\mathbb{P}_1 \cong S^2$ (see the definition and discussion of $\mathcal{S}$ on p.~\pageref{DefOfSchubertVarietyC}).
The strategy in this chapter is to identify a higher-dimensional analog of the subset $S$ in $\text{Gr}_p(\mathbb{C}^\ell)$ such that $\text{Gr}_p(\mathbb{C}^\ell)\smallsetminus S$ has $\text{Gr}_p(\mathbb{C}^{\ell-1}) \subset \text{Gr}_p(\mathbb{C}^\ell)$ as equivariant strong deformation retract. This reduction step is the basic building block for reducing the classification of maps to $\text{Gr}_p(\mathbb{C}^n)$ to the classification of maps to the Schubert variety $\mathcal{S}$. It works as follows:
Starting with a $G$-map $f\colon X \to \text{Gr}_p(\mathbb{C}^n)$, we identify a subset $S$ with the aforementioned property and such that (after a $G$-homotopy) the image $\Im(f)$ does not intersect $S$. Then, using the equivariant deformation retract to $\text{Gr}_p(\mathbb{C}^{n-1})$, we can regard $f$ as having its image contained in $\text{Gr}_p(\mathbb{C}^{n-1})$. This step can be repeated until it can be assumed that the map $f$ has its image contained in $\text{Gr}_p(\mathbb{C}^{p+1})$. The latter can be equivariantly identified with $\text{Gr}_1(\mathbb{C}^{p+1})$. It follows that the reduction procedure just outlined can be repeated again until it can be assumed that the map $f$ has its image contained in $\text{Gr}_1(\mathbb{C}^2)$. This subspace $\text{Gr}_1(\mathbb{C}^2)$ identified using the procedure just described is exactly the Schubert variety $\mathcal{S}$. We begin with the following remark in this direction: \begin{remark}
\label{GrassmannianIdentificationEquivariant}
Using the standard unitary structure on $\mathbb{C}^n$, the canonical map
\begin{align*}
\Psi\colon \text{Gr}_k(\mathbb{C}^n) \to \text{Gr}_{n-k}(\mathbb{C}^n),
\end{align*}
where both Grassmannians are equipped with the real structure $T$
sending a space $V$ to $\overline{V}$, is equivariant with respect
to $T$. \end{remark}
\begin{proof}
It must be shown that, for $V \in \text{Gr}_k(\mathbb{C}^n)$, $T(V)^\perp =
T(V^\perp)$. This is equivalent to showing that $T(V)$ and
$T(V^\perp)$ are orthogonal with respect to the standard hermitian
form on $\mathbb{C}^n$. Thus, let $T(v)$ be in $T(V)$ (with $v \in
V$) and $T(w)$ be in $T(V^\perp)$ (with $w \in V^\perp$). We have to
show that $\left<T(v),T(w)\right> = 0$. Equivalently we can show
that $\smash{\overline{\left<T(v),T(w)\right>}} = 0$. A short computation
yields:
\begin{align*}
\overline{\left<T(v),T(w)\right>} = \overline{T(v)^*T(w)} = \overline{\overline{v}^*\overline{w}} = v^*w = 0.
\end{align*}
This proves that $T(V)$ is orthogonal to $T(V^\perp)$ and therefore
$T(V)^\perp = T(V^\perp)$. \end{proof}
For the technical arguments in this section we require that the smooth manifolds $X$ and $\text{Gr}_p(\mathbb{C}^n)$ are both smoothly embedded in some euclidean space (using e.\,g. the Whitney embedding theorem). By $\dim_H(\cdot)$ we denote the Hausdorff dimension of a topological space. See section~\ref{AppendixHausdorffDimension} for some introductory statements about Hausdorff dimensions. The set $S$, being the higher dimensional analog of a point in the $\mathbb{P}_2$ situation, will be denoted by $\mathcal{L}_L$, where $L$ is a line\footnote{By \emph{line} we mean a $1$-dimensional complex
subspace.} in $\mathbb{C}^n$, and is defined as follows: \begin{align*}
	\mathcal{L}_L = \left\{E \in \text{Gr}_p(\mathbb{C}^n)\colon L \subset E\right\} \end{align*}
\begin{remark}
\label{LLManifold}
The set $\mathcal{L}_L$ is a submanifold of $\text{Gr}_p(\mathbb{C}^n)$ of
complex dimension $(p-1)(n-p)$. \end{remark} \begin{proof}
Consider the map
\begin{align*}
f\colon \text{Gr}_{p-1}(L^\perp) &\to \text{Gr}_p(\mathbb{C}^n)\\
E &\mapsto \left<L,E\right>,
\end{align*}
where $\left<L,E\right>$ denotes the $p$-plane that is spanned by
the line $L$ together with the $p-1$-dimensional plane $E \subset
L^\perp$. Clearly, $\Im f = \mathcal{L}_L$. One can write down $f$
in local coordinates for the Grassmannians, which shows that $f$ is
a holomorphic immersion. Furthermore, it is injective. Together with
  the compactness of $\text{Gr}_{p-1}(L^\perp)$ this shows that $f$ is a
  holomorphic embedding (see
e.\,g. \cite[p.~214]{FritzscheGrauert}). In other words,
$\mathcal{L}_L$ is biholomorphically equivalent to
$\text{Gr}_{p-1}(\mathbb{C}^{n-1})$, a complex manifold of dimension $(p-1)(n-p)$. \end{proof}
Let $\mathbb{C}^n = L \oplus W$ be a decomposition of $\mathbb{C}^n$ as direct sum of a $T$-stable line $L$ and a 1-codi\-men\-sio\-nal, $T$-stable subvector space $W$. In this setup, we can regard the Grassmann manifold $\text{Gr}_p(W)$ as a submanifold of $\text{Gr}_p(\mathbb{C}^n)$. The usefulness of the submanifold $\mathcal{L}_L$ is illustrated by the following lemma: \begin{lemma}
\label{GrassmannianDeformationRetract}
The space $\text{Gr}_p(\mathbb{C}^n)\smallsetminus \mathcal{L}_L$ has $\text{Gr}_p(W)$ as
equivariant strong deformation retract. \end{lemma} \begin{proof}
To ease the notation set $\Omega = \text{Gr}_p(\mathbb{C}^n)\smallsetminus
\mathcal{L}_L$. The idea of this proof is that
\begin{align*}
\pi\colon \Omega \to \text{Gr}_p(W)
\end{align*}
can be regarded as a complex rank-$k$ vector bundle, where $\pi$ is
defined in terms of the projection $\mathbb{C}^n \to W$. Here we use the
fact that a $p$-dimensional plane $E$ in $\mathbb{C}^n$ which does not
contain the line $L$ projects down to a $p$-dimensional plane in
$W$.
The local trivializations of this bundle are defined using local
trivializations of the tautological bundle $\mathcal{T} \to
\text{Gr}_p(W)$: Let $U_\alpha$ be a trivializing neighborhood in $\text{Gr}_p(W)$ of
the tautological bundle and
\begin{align*}
\psi_\alpha\colon \Restr{\mathcal{T}}{U_\alpha} \to U_\alpha \times \mathbb{C}^k
\end{align*}
a local trivialization. In other words, the $k$ maps
\begin{align*}
f_{\alpha,j}\colon U_\alpha &\to \Restr{\mathcal{T}}{U_\alpha}\\
E &\mapsto \psi^{-1}_\alpha(E,e_j)\;\text{ for $j=1,\ldots,k$}
\end{align*}
define a local frame over $U_\alpha$. Let $\ell$ be non-zero vector
in the line $L$. Notice that the fiber of $\Omega \to \text{Gr}_p(W)$ over
a plane $E \in U_\alpha$ consists of those planes $\smash{\widehat{E}}$
which are spanned by bases of the form
\begin{align*}
\lambda_1 \ell + f_{\alpha,1}(E), \ldots, \lambda_k \ell + f_{\alpha,k}(E),
\end{align*}
for a unique vector $\lambda = (\lambda_1,\ldots,\lambda_k)$. The
  uniqueness follows from the fact that every vector in $\mathbb{C}^n = L \oplus
W$ uniquely decomposes into $v_L + v_W$. Now we can define a local
trivialization for $\Omega \to \text{Gr}_p(W)$ over $U_\alpha$:
\begin{align*}
\varphi_\alpha\colon U_\alpha &\to \mathbb{C}^k\\
\widehat{E} &\mapsto \left(\pi\left(\widehat{E}\right), \lambda\right).
\end{align*}
  This also defines the vector space structure on the fibers: For a given $k$-vector $\lambda$, define
\begin{align*}
\widehat{E}_\lambda = \left<\lambda_1 \ell + f_{\alpha,1}(E),\ldots,\lambda_k \ell + f_{\alpha,k}(E)\right>.
\end{align*}
Using this notation we obtain
\begin{align*}
    \widehat{E}_\lambda + \widehat{E}_\mu = \widehat{E}_{\lambda + \mu} \;\text{for all $\lambda,\mu \in \mathbb{C}^k$} \;\text{and}\; a \widehat{E}_{\lambda} = \widehat{E}_{a\lambda} \;\text{for all $a \in \mathbb{C}$}.
\end{align*}
Thus, $\pi\colon \Omega \to \text{Gr}_p(W)$ defines a rank-$k$ vector
bundle. Note that the zero section of this bundle can be naturally
identified with $\text{Gr}_p(W)$. Thus it remains to show that there is a
$G$-equivariant retraction of $\Omega$ to its zero section. We
define the retraction $\varphi_t\colon \Omega \to \Omega$ in local
trivializations for a covering $\{U_\alpha\}$ via
\begin{align*}
\varphi_{\alpha}\colon I \times \mathbb{C}^k &\to \mathbb{C}^k\\
\lambda &\mapsto t\lambda.
\end{align*}
We now show that this gives a globally well-defined map
$\varphi\colon I \times \Omega \to \Omega$: Let $E$ be a plane in
$U_\alpha \cap U_\beta$. Over $U_\alpha$, the map $\varphi_\alpha$
is defined using the frame $f_{\alpha,1},\ldots,f_{\alpha,k}$, while
$\varphi_\beta$ is defined using the frame
$f_{\beta,1},\ldots,f_{\beta,k}$. Let $\smash{\widehat{E}}$ be a
plane in $\restr{\Omega}{U_\alpha \cap U_\beta}$ such that
\begin{align*}
\widehat{E} = \left<\ell^\alpha_1 + w^\alpha_1,\ldots,\ell_k^\alpha + w_k^\alpha\right> = \left<\ell^\beta_1 + w^\beta_1,\ldots,\ell^\beta_k + w^\beta_k\right>,
\end{align*}
where
\begin{align*}
w^\alpha_j = f_{\alpha,j}\left(\pi\left(\widehat{E}\right)\right) \;\text{resp.}\; w^\beta_j = f_{\beta,j}\left(\pi\left(\widehat{E}\right)\right).
\end{align*}
Since these two bases generate the same plane, there exists a $k
\times k$ matrix $A = (a_{ij})$ such that
\begin{align*}
\ell^\alpha_j + w^\alpha_j = \sum a_{ij}\left(\ell^\beta_i + w^\beta_i\right).
\end{align*}
Using the fact that the intersection $L \cap W$ is trivial we obtain
\begin{align*}
\ell^\alpha_j = \sum a_{ij} \ell^\beta_i \;\text{ and }\; w^\alpha_j = \sum a_{ij} w^\beta_i.
\end{align*}
But then the bases
\begin{align*}
t\ell^\alpha_1 + w^\alpha_1,\ldots,t\ell_k^\alpha + w_k^\alpha \;\text{ and }\; t\ell^\beta_1 + w^\beta_1,\ldots,t\ell^\beta_k + w^\beta_k
\end{align*}
are related by the same matrix $A$, hence they define the same plane
$\smash{\widehat{E}}_t$. In other words, the deformation retract $\varphi_t$
is globally well-defined on $\Omega$. In particular the above shows
that for any plane $E = \left<\ell_1 + w_1, \ldots, \ell_k +
w_k\right>$, where the $W$-vectors are not necessarily the ones
defined by one of the trivializing frames of the tautological bundle
we have
\begin{align*}
\varphi_t(E) = \left<(1-t)\ell_1 + w_1, \ldots, (1-t)\ell_k + w_k\right>.
\end{align*}
Now, using the fact that $L$ and $W$ are both $T$-stable,
equivariance follows by a computation:
\begin{align*}
\varphi_t \circ T(E) &= \varphi_t\left(T\left(\left<\ell_1 + w_1, \ldots, \ell_k + w_k\right>\right)\right) \\
&= \varphi_t\left(\left<\overline{\ell_1 + w_1}, \ldots, \overline{\ell_k + w_k}\right>\right) \\
&= \left<(1-t)\overline{\ell_1} + \overline{w_1}, \ldots, (1-t)\overline{\ell_k} + \overline{w_k}\right> \\
&= \left<\overline{(1-t)\ell_1 + w_1}, \ldots, \overline{(1-t)\ell_k + w_k}\right> \\
&= T\left(\left<(1-t)\ell_1 + w_1, \ldots, (1-t)\ell_k + w_k\right>\right) \\
&= T\left(\varphi_t\left(\left<\ell_1 + w_1, \ldots, \ell_k + w_k\right>\right)\right)\\
&= T \circ \varphi_t(E).
\end{align*}
This proves the statement. \end{proof}
The next intermediate result will be the statement that, given a map $H$ from $X$ to $\text{Gr}_p(\mathbb{C}^n)$ where $n - 1 > p$, there always exists a line $L$ in $\mathbb{C}^n$ such that -- at least after a $G$-homotopy -- the image of $H$ does not intersect $\mathcal{L}_L$. Then, lemma~\ref{GrassmannianDeformationRetract} is applicable and we obtain an equivariant homotopy from $H$ to a map $H'$ having its image contained in the lower-dimensional Grassmannian embedded in $\text{Gr}_p(\mathbb{C}^n)$. We begin by outlining some technical preparations and introducing a convenient notation. For a $p$-plane $E$ in $\mathbb{C}^n$ we set $E_T = E \cap T(E)$. Note that $0 \leq \dim E_T \leq p$. For $k=0,\ldots,p$ define $\mathcal{E}_k = \{E \in \text{Gr}_p(\mathbb{C}^n)\colon \dim E_T = k\}$. This induces a stratification of $\text{Gr}_p(\mathbb{C}^n)$: \begin{align*}
\text{Gr}_p(\mathbb{C}^n) = \bigcup_{k=0,\ldots,p} \mathcal{E}_k. \end{align*} Note that in particular we have \begin{align*}
\mathcal{E}_p = \left\{E \in \text{Gr}_p(\mathbb{C}^n)\colon T(E) = E\right\} = (\text{Gr}_p(\mathbb{C}^n))_{\mathbb{R}} \end{align*} Define the following incidence set: \begin{align*}
\mathcal{I} = \left\{(L,E) \in \mathbb{P}(\mathbb{C}^n) \times \text{Gr}_p(\mathbb{C}^n)\colon L \subset E\right\}. \end{align*} The set $\mathcal{I}$ is a manifold, which can be seen by considering the diagonal action of $U(n)$ on the product manifold $\mathbb{P}(\mathbb{C}^n) \times \text{Gr}_p(\mathbb{C}^n)$: $U(n)$ acts transitively on $\mathcal{I}$. Furthermore we introduce \begin{align*}
\mathcal{I}_{\mathbb{R}} = \left\{(L,E) \in (\mathbb{P}(\mathbb{C}^n))_{\mathbb{R}} \times \text{Gr}_p(\mathbb{C}^n)\colon L \subset E\right\} = \left\{(L,E) \in \mathcal{I}\colon T(L) = L\right\}. \end{align*} We obtain two projections, namely \begin{align*}
& \pi_1\colon \mathcal{I} \to \mathbb{P}(\mathbb{C}^n)\\
\;\text{and}\; & \pi_2\colon \mathcal{I} \to \text{Gr}_p(\mathbb{C}^n), \end{align*} together with their ``real'' counterparts \begin{align*}
& \pi_{1,\mathbb{R}}\colon \mathcal{I}_{\mathbb{R}} \to (\mathbb{P}(\mathbb{C}^n))_\mathbb{R}\\
\;\text{and}\; & \pi_{2,\mathbb{R}}\colon \mathcal{I}_{\mathbb{R}} \to \text{Gr}_p(\mathbb{C}^n). \end{align*} We regard the incidence manifold $\mathcal{I}$ as being smoothly embedded in some $\mathbb{R}^N$ (e.\,g. with the Whitney embedding theorem).
Defining $M_k := \smash{\pi_{2,\mathbb{R}}^{-1}}(\mathcal{E}_k)$ we obtain -- for each $k$ -- a fiber bundle \begin{align*}
M_k \xrightarrow{\;\pi_{2,\mathbb{R}}\;} \mathcal{E}_k \end{align*} with the fiber, over a plane $E \in \mathcal{E}_k$, being \begin{align*}
F_k &= \left\{(L,E)\colon \text{$L \in \mathbb{P}(\mathbb{C}^n)$, $T(L) = L$ and $L \subset E$}\right\}\\
&= \left\{(L,E)\colon \text{$L \in \mathbb{P}(\mathbb{C}^n)$ and $L \subset E_T$}\right\}\\
&\cong \mathbb{P}(E_T)\\
&\cong \mathbb{P}_{k-1} \end{align*} for all $k=1,\ldots,p$. In particular we obtain the estimate \begin{align}
\label{IterativeRetractionDiscussionFirstEstimate}
\dim_{\mathbb{R}} F_k = \dim_{\mathbb{C}}(E_T) - 1 = k - 1 \leq p - 1. \end{align}
Let us now focus on the image $\Im H$ of a map $H\colon X \to \text{Gr}_p(\mathbb{C}^n)$. Without loss of generality we can assume that $H$ is equivariantly smoothed (theorem~\ref{SmoothApproximation1}). For each $k=0,\ldots,p$ we set $H(X)_k = H(X) \cap \mathcal{E}_k$ and $M = \smash{\pi_{2,\mathbb{R}}^{-1}}(H(X)) \subset \mathcal{I}_\mathbb{R}$. Note that \begin{align*}
M = \left\{(L,E) \in (\mathbb{P}(\mathbb{C}^n))_\mathbb{R} \times \text{Gr}_p(\mathbb{C}^n)\colon \text{$E \in \Im H$ and $L \subset E$}\right\}. \end{align*} Having the above formalism in place, we can now focus on the fundamental problem: Understanding the image of $M$ under the projection $\smash{\pi_{1,\mathbb{R}}}$. The following remark highlights what kind of dimension estimate we need in order to conclude the existence of a line $L$ such that the image $\Im H$ does not intersect $\mathcal{L}_L$. \begin{remark}
If the inequality
\begin{align*}
\text{dim}_H(\pi_{1,\mathbb{R}}(M)) < \text{dim}_H(\mathbb{P}(\mathbb{C}^n)_\mathbb{R}) = n-1
\end{align*}
is satisfied, then there exists a line $L$ in $\mathbb{C}^n$ such that
$\mathcal{L}_L \cap H(X) = \emptyset$. \end{remark}
\begin{proof}
The inequality implies that
$\pi_{1,\mathbb{R}}(M) \subsetneq \mathbb{P}(\mathbb{C}^n)_\mathbb{R}$, since otherwise both sets
would have the same Hausdorff dimension. Recall that
$M = \smash{\pi_{2,\mathbb{R}}^{-1}}(H(X))$. Thus, the non-surjectivity of
$\smash{\restr{\pi_{1,\mathbb{R}}}{M}}$ means that there exists a $T$-fixed
line $L$ in $\mathbb{C}^n$ which has no $\smash{\pi_{1,\mathbb{R}}}$-preimage in the
set
\begin{align*}
\pi_{2,\mathbb{R}}^{-1}(H(X)) = \left\{(L,E) \in \mathbb{P}(\mathbb{C}^n)_\mathbb{R} \times \text{Gr}_p(\mathbb{C}^n)\colon \text{$E \in H(X)$ and $L \subset E$}\right\}.
\end{align*}
If there were a plane $E \in H(X)$ with $L \subset E$, then $(L,E)$
would be in ${\pi_{2,\mathbb{R}}}^{-1}(H(X))$, which is a contradiction. Hence,
there cannot exist such a plane $E$ in the image of $H$. \end{proof}
By the above remark together with theorem~\ref{HDimProperties} (5) it suffices to prove that $\text{dim}_H(M)$ is smaller than $n - 1$ in order to conclude the existence of a $T$-fixed line $L$ such that $\Im H$ has empty intersection with $\mathcal{L}_L$. This is the next goal. The preimage $M$ can be written as a finite union \begin{equation*}
M = \bigcup_{k=0,\ldots,p} {\pi_{2,\mathbb{R}}}^{-1}(H(X)_k). \end{equation*} With corollary~\ref{HDimViaSetDecomposition} it follows that \begin{align}
\label{HDimOfMAsMax}
\text{dim}_H M = \max_k \left\{\text{dim}_H(\pi_{2,\mathbb{R}}^{-1}(H(X)_k))\right\}. \end{align}
But each set $\smash{\pi_{2,\mathbb{R}}^{-1}}(H(X)_k)$ is contained in the total space $M_k$ of the fiber bundle $M_k \to \mathcal{E}_k$. Applying corollary~\ref{HDimOfTotalSpace} yields \begin{align*}
\text{dim}_H\left(\pi_{2,\mathbb{R}}^{-1}(H(X)_k)\right) = \text{dim}_H(H(X)_k) + \dim(F_k). \end{align*} Substituting the dimension of the fiber computed in (\ref{IterativeRetractionDiscussionFirstEstimate}) yields \begin{align*}
\text{dim}_H\left(\pi_{2,\mathbb{R}}^{-1}(H(X)_k)\right) = \text{dim}_H(H(X)_k) + k - 1. \end{align*} Therefore, (\ref{HDimOfMAsMax}) implies: \begin{align}
\label{HDimOfMAsMaxConcrete}
\text{dim}_H(M) = \max_k \left\{\text{dim}_H(H(X)_k) + k - 1\right\}. \end{align} Thus, in order to control $\text{dim}_H(M)$ it suffices to control each $\text{dim}_H(H(X)_k)$. But since $H(X)$ cannot have Hausdorff dimension bigger than two ($H$ is assumed to be smooth) and under the assumption that $p \leq n - 2$, we directly obtain the estimate \begin{align*}
\text{dim}_H(M) &\leq \max\{p, \text{dim}_H(H(X)_p) + p - 1\}\\
&\leq \max\{n-2,\text{dim}_H(H(X)_p) + p - 1\}. \end{align*} Thus we only have to control the dimension of $H(X)_p$. In order to obtain the desired estimate $\text{dim}_H(M) < n-1$ it suffices to have $\text{dim}_H(H(X)_p) \leq 1$. In the following we show that -- up to $G$-homotopy -- this can be assumed. Although, for proving the main result of this section, it suffices to reduce a general Grassmannian $\text{Gr}_p(\mathbb{C}^n)$ until we arrive at $\text{Gr}_1(\mathbb{C}^3) \cong \mathbb{P}_2$, we state the following in its general form, i.\,e. including the statement for the reduction from $\mathbb{P}_2$ to $\mathbb{P}_1$: \begin{proposition}
\label{PropIterativeRetractionDecomposition}
Given an equivariant map $f\colon X \to \text{Gr}_p(\mathbb{C}^n)$ with
$n \geq 3$ and $1 \leq p \leq n-2$, there exists a decomposition of
$\mathbb{C}^n$ into the direct sum of a line
$L \subset \mathbb{C}^n$ and a 1-codimensional subvector space
$W \subset \mathbb{C}^n$, both $T$-stable, such that
$\mathcal{L}_L \cap \Im f = \emptyset$. \end{proposition}
\begin{proof}
We prove the statement in two parts. First we prove that the
statement is true for $0 < p \leq n - 3$ and then we separately deal
with the case $p = n - 2$. Both proofs work by analyzing the image
$\Im f$ and then using dimension estimates to deduce the existence
of a line $L \subset \mathbb{C}^n$ with $\mathcal{L}_L \cap \Im f$
being the empty set. Without loss of generality we can assume that
$f$ is a smooth $G$-map (see
theorem~\ref{SmoothApproximation1}). Recall that smooth functions
are, in particular, Lipschitz continuous. In what follows we write
$H$ for this smoothed representative of $f$, so that the notation
matches the preceding discussion.
First assume $p \leq n - 3$. Since $\text{dim}_H(X) = 2$, it follows that
$\text{dim}_H(H(X)) \leq 2$ by theorem~\ref{HDimProperties} (5). In
particular, $\text{dim}_H(H(X)_k) \leq 2$ for all $k=0,\ldots,p$. Thus,
using (\ref{HDimOfMAsMaxConcrete}) we obtain
\begin{align*}
\text{dim}_H(M) &= \max_{k=0,\ldots,p} \left\{\text{dim}_H(H(X)_k) + k - 1\right\} \\
&\leq p + 1 \\
&\leq n - 2 \\
&< n - 1.
\end{align*}
Hence, the non-surjectivity of $\smash{\restr{\pi_{1,\mathbb{R}}}{M}}$ is
established in the case $p \leq n - 3$. For the remaining case $p =
n - 2$ we can apply lemma~\ref{LemmaSmallIntersectionWithStrata}
(see below) which guarantees that, at least after a $G$-homotopy:
\begin{align*}
\text{dim}_H(H(X)_p) \leq 1.
\end{align*}
In this case we can write
\begin{align*}
\text{dim}_H(M) = \max_{k=0,\ldots,p} \{\text{dim}_H(H(X)_k) + k - 1\} \leq p = n - 2 < n - 1.
\end{align*}
Thus, in both cases we have constructed a homotopy from the map $H$
to a map $H'$ such that $\mathcal{L}_L \cap \Im(H') = \emptyset$ for
some $T$-fixed line $L \subset \mathbb{C}^n$. Take $W$ to be the orthogonal
complement $L^\perp \subset \mathbb{C}^n$. In particular $W$ is also
$T$-stable.
This finishes the proof. \end{proof}
The following lemma completes the proof of the previous proposition~\ref{PropIterativeRetractionDecomposition}: \begin{lemma}
\label{LemmaSmallIntersectionWithStrata}
Every $G$-map $H\colon X \to \text{Gr}_p(\mathbb{C}^n)$ is
equivariantly homotopic to a $G$-map $H'$ such that $\text{dim}_H(H'(X)_p)
\leq 1$. \end{lemma}
\begin{proof}
Recall that $H(X)_p = H(X) \cap \mathcal{E}_p$ and $\mathcal{E}_p =
(\text{Gr}_p(\mathbb{C}^n))_{\mathbb{R}}$. Thus in order to minimize the dimension of
$H(X)_p$ we need to modify $H$ by moving its image away from the
real points in $\text{Gr}_p(\mathbb{C}^n)$. To ease the notation we set $Y =
\text{Gr}_p(\mathbb{C}^n)$. Now we let
\begin{align*}
N \xrightarrow{\;\pi_{Y,\mathbb{R}}\;} Y_\mathbb{R}
\end{align*}
be the normal vector bundle of $Y_{\mathbb{R}}$ in $Y$.
The bundle $N$ comes equipped with a bundle norm $\|\cdot\|$.
Now we can use the standard method
of diffeomorphically identifying an open tubular neighborhood $U$ of
the 0-section of $N$ (which can be identified with $Y_\mathbb{R}$ itself)
with an open neighborhood $V$ of $Y_\mathbb{R}$ in $Y$. We denote this
diffeomorphism $U \to V$ by $\Psi$.
In the following we construct an equivariant homotopy $H_t$ such
that $H_0 = H$ and $H_1$ has the desired property of $\text{dim}_H(H_1(X)
\cap Y_\mathbb{R})$ being at most one.
For this, let $s$ be a generic section in $\Gamma(Y_\mathbb{R},N)$. In
particular this means that there are only finitely many points over
which $s$ vanishes. Furthermore, let $\chi$ be a smooth cut-off
function which is constantly one in a small neighborhood $U'
\subset\subset U$ of the zero section in $N$ and which vanishes
outside of $U$. Now define
\begin{align*}
g^s_t(v) = v - t\chi(v)s(\pi_{Y,\mathbb{R}}(v)).
\end{align*}
After scaling $s$ by a constant such that $\|s(\cdot)\|$ is
sufficiently small, $g^s_t$ defines a diffeomorphism $U \to U$.
Then, via the diffeomorphism $\Psi$, we obtain an induced
diffeomorphism $\smash{\tilde{g}^s_t}\colon V \to V$. Since the
cut-off function $\chi$ vanishes near the boundary of $U$,
$\smash{\tilde{g}^s_t}$ extends to a diffeomorphism $Y \to Y$ which
is the identity outside of $V$. Define
\begin{align*}
H^s_t := \tilde{g}^s_t \circ H\colon X \to Y.
\end{align*}
Note that $H^s_t$ is not necessarily equivariant anymore for
positive $t$. This will be corrected later in the proof by
restricting the maps $H^s_t$ to a (pseudo-)fundamental region $R$ of
$X$ and then equivariantly extending to all of $X$. As a first step
towards equivariance of $H^s_t$, we need to make sure that $H^s_t$
maps $\text{Fix}\, T \subset X$ into $Y_\mathbb{R}$ for all $t$. To guarantee this,
let $f$ be a $C^\infty$ function on $Y_\mathbb{R}$ such that
\begin{align}
\label{IterativeRetractionProofHelperFunction}
\{f = 0 \} = \Psi^{-1}(H(\text{Fix}\, T)) \subset U.
\end{align}
By multiplying the section $s$ with this function $f$ (and, by abuse
of notation, denoting the resulting section again $s$) it is
guaranteed that $\smash{\tilde{g}^s_t}$ is the identity along
$\text{Fix}\, T \subset X$, hence $H^s_t$ still maps the fixed point set
$X^T$ into $Y^T = Y_\mathbb{R}$ for all $t$. In the next step we need to
estimate the ``critical set''
\begin{align*}
C_s = \{x \in X\colon H^s_1(x) \in Y_\mathbb{R}\}.
\end{align*}
By construction, a plane $H^s_1(x)$ is contained in the real part
$Y_\mathbb{R}$ iff $\Psi^{-1} \circ H^s_1(x)$ is contained in the
zero-section in the normal bundle $N$. By definition this means
\begin{align*}
g^s_1 \circ \Psi^{-1} \circ H(x) = 0,
\end{align*}
which, by definition of $g^s_t$, is equivalent to saying that
\begin{align*}
\Psi^{-1}(H(x)) = s\left(\pi_{Y,\mathbb{R}}\left(\Psi^{-1}(H(x))\right)\right).
\end{align*}
Using the fact that $C_s$ must be contained in $H^{-1}(V)$ we can
conclude that
\begin{align*}
C_s = \left\{x \in H^{-1}(V)\colon \Psi^{-1} \circ H(x) = s \circ \pi_{Y,\mathbb{R}} \circ \Psi^{-1} \circ H(x)\right\}.
\end{align*}
We have to prove that, for some choice of $s$, $\text{dim}_H(C_s) \leq 1$.
If $H(x) \in Y_\mathbb{R}$ for some $x$, then $H^s_1(x) \in Y_\mathbb{R}$ is almost
never satisfied ($s$ is almost never zero). Hence, it suffices to
estimate the dimension of the set
\begin{align*}
C_s \smallsetminus H^{-1}(Y_\mathbb{R}) = \{x \in \Omega\colon \Psi^{-1} \circ H(x) = s \circ \pi_{Y,\mathbb{R}} \circ \Psi^{-1} \circ H(x)\},
\end{align*}
where
\begin{align*}
\Omega = H^{-1}(V)\smallsetminus H^{-1}(Y_\mathbb{R}).
\end{align*}
We show that by scaling the section $s$ appropriately, we obtain the
desired estimate $\text{dim}_H(C_s) \leq 1$. Observe that $\Omega$ is an
open set in $X$, hence in particular a real manifold of dimension
two, not necessarily connected. But it has at most countably many
connected components which we denote by $\{\Omega_j\}_{j \in J}$.
Define the following function
\begin{align*}
h_s\colon \Omega &\to \mathbb{R}\\
x &\mapsto \frac{\|s(\pi_{Y,\mathbb{R}}(\Psi^{-1}(H(x))))\|}{\|\Psi^{-1}(H(x))\|}.
\end{align*}
This quotient is well-defined on $\Omega$, since $\Omega$ does not
include the $H$-preimage of $Y_\mathbb{R}$, which corresponds to the
zero-section in $U$. Therefore $h_s$ defines a smooth function on
$\Omega$. It now follows that
\begin{align*}
C_s \smallsetminus H^{-1}(Y_\mathbb{R}) = h_s^{-1}(\{1\}).
\end{align*}
Now we introduce the scaling of the section $s$ such that
$h_s^{-1}(\{1\})$ is at most one-dimensional. Since the number of
connected components $\Omega_j$ is at most countable, there exists
a number $\varepsilon$ arbitrarily close to 1 such that there exists
no component $\Omega_j$ on which $h_s \equiv \varepsilon$ and
furthermore such that $\varepsilon$ is a regular value for $h_s$ on
all the components $\Omega_j$ on which $h_s$ is not constant. It
then follows that $h_s^{-1}(\{\varepsilon\})$ is one-dimensional and
\begin{align*}
h_s^{-1}(\{\varepsilon\}) = h_{\varepsilon s}^{-1}(\{1\}) = C_{\varepsilon s}\smallsetminus H^{-1}(Y_\mathbb{R}).
\end{align*}
Thus, for the section $\varepsilon s$ we have the desired dimension
estimate of the critical set.
Finally we correct the missing equivariance of $H_1$ as follows: In
the type I case we let $Z$ be the cylinder fundamental region and
restrict the homotopy $H_t$ just constructed to $Z$. By
remark~\ref{Class1EquivariantExtension}, the homotopy $\restr{H}{Z}$
extends uniquely to an equivariant homotopy
$\smash{\widetilde{H}}\colon I \times X \to Y$. Define
$H' = \widetilde{H}_1$.
It remains to check that the above estimate of $h_s^{-1}(\{1\})$
remains valid. But this follows from
corollary~\ref{HDimViaSetDecomposition}, which proves the statement
for the type I involution.
For the type II case we let $R$ be the pseudofundamental region
introduced in the beginning of section
(p.~\pageref{ParagraphGeometryOfClass2}). In this case we need to
make sure that we do not destroy the equivariance property on the set
$A = A_1 \cup A_2$ (see p.~\pageref{ParagraphGeometryOfClass2}). In
order to be able to use the method of restriction (to $R$) followed
by equivariant extension, we need to make sure that the homotopy
$\restr{H}{I \times R}$ behaves well on the boundary of $R$ (see
remark~\ref{RemarkClassIIEquivariance}). For this we make two small
adjustments to the above construction. First, using the homotopy
extension property together with the simply-connectedness of
$Y$
we make a homotopy to the original map $H$ such that it is constant
along $A_1, A_2 \subset \partial R$ (compare with the type II
normalization, in particular proposition~\ref{Class2Normalization},
p.~\pageref{Class2Normalization}). It follows that the images $H(A_1)$
and $H(A_2)$ are one-point sets contained in $Y_\mathbb{R}$. Second, we
modify the function $f$ introduced in
(\ref{IterativeRetractionProofHelperFunction}): Instead of letting
this helper function $f$ vanish exactly over $\Psi^{-1}(H(\text{Fix}\, T))$,
we let it vanish over the bigger set $\Psi^{-1}(H(\partial R))$
(notice that $\text{Fix}\, T \subset \partial R$). It then follows that the
homotopy $H_t$ does not change $H$ along the image $H(\partial R)
\subset V$. In particular, it preserves the compatibility condition
on $A \subset \partial R$ which is required for the
equivariance. Now we can construct the equivariant extension to all
of $X$ as in the type I case above. The desired dimension estimate
remains satisfied because of
corollary~\ref{HDimViaSetDecomposition}. \end{proof}
Having proposition~\ref{PropIterativeRetractionDecomposition} in place, we formulate the main result of this section: \begin{restatable*}{proposition}{PropositionReductionOfGrToCurve}
\label{LemmaReductionOfGrToP1}
Assume $n \geq 3$ and $1 \leq p \leq n-1$. Let $f\colon X \to
\text{Gr}_p(\mathbb{C}^n)$ be a $G$-map. Then $f$ is equivariantly homotopic to
the map $\iota \circ f'$ where $\Im(f')$ is contained in the
Schubert variety $\mathcal{S}$ and $\iota$ is the above embedding of
$\mathcal{S}$ into $\text{Gr}_p(\mathbb{C}^n)$. By identifying $\mathcal{S} \cong
\mathbb{P}_1$, the degree triples (resp. degree pairs) of $f\colon X \to
\text{Gr}_p(\mathbb{C}^n)$ and $f'\colon X \to \mathcal{S} \cong \mathbb{P}_1$ agree. \end{restatable*} Its proof will be given on p.~\pageref{LemmaReductionOfGrToP1}. By proposition~\ref{PropIterativeRetractionDecomposition} we know there exists a line $L$ such that $\mathcal{L}_L$ is not contained in the image of a given map $H$. For using this statement as the building block for the iterative retraction procedure it is convenient to be able to normalize this line $L$. This is made precise in the following remark: \begin{remark}
\label{SOnRNormalizingInGrassmannian}
Let $L$ be a $T$-stable line in $\mathbb{C}^n$. Then there exists a curve
$g(t)$ in $\SOR{n}$ such that $g(0) = \mathrm{Id}$ and $g(1)$ maps
$\mathcal{L}_L$ to $\mathcal{L}_{L_0}$ where $L_0$ is the $n$-th
standard line $L_0 = \mathbb{C}.e_n$. \end{remark}
\begin{proof}
By assumption the line $L$ is $T$-stable. This implies that $L$ is
generated by a vector $v_n$ of unit length such that $T(v_n) =
v_n$. In other words, $v_n$ is in $(\mathbb{C}^n)_\mathbb{R} = \mathbb{R}^n$. Furthermore,
the orthogonal complement $L^\perp$ of $L$ is also
$T$-invariant. Let $(v_1,\ldots,v_{n-1})$ be an orthonormal basis of
$(L^\perp)_\mathbb{R}$. Then, $(v_1,\ldots,v_n)$ is an orthonormal basis of
$\mathbb{C}^n$ consisting solely of real vectors. Define $g$ as the element
of $\SOR{n}$ which maps $v_j$ to $e_j$ for all
$j=1,\ldots,n$. Using the path-connectedness of $\SOR{n}$ we can
find a path $g(t)$ such that $g(0) = \mathrm{Id}$ and $g(1) = g$. Hence, by
construction, $g(1)$ maps the line $L$ to $L_0$. It remains to show
that
\begin{align*}
g(\mathcal{L}_L) = \mathcal{L}_{L_0}.
\end{align*}
For this, let $E$ be a $p$-plane in $\mathcal{L}_L$. Then, by
definition, $L \subset E$. But then also $g(L) = L_0 \subset
g(E)$. On the other hand, given a plane $E$ with $L_0 \subset E$,
define $E' = g^{-1}(E)$. It follows that $L \subset E'$, in
other words $E' \in \mathcal{L}_L$. Now we see that $E = g(E') \in
\mathcal{L}_{L_0}$. \end{proof}
Combining the above we can make a reduction from $\text{Gr}_p(\mathbb{C}^n)$ to the smaller manifold $\text{Gr}_p(\mathbb{C}^{n-1})$: \begin{proposition}
\label{PropositionIterativeRetraction}
Given an equivariant map $f\colon X \to \text{Gr}_p(\mathbb{C}^n)$ where
$3 \leq n$ and $1 \leq p \leq n - 2$, then there exists a
$G$-homotopy from $f$ to a map $f'$ whose image is contained in
$\text{Gr}_p(\mathbb{C}^{n-1})$, where $\mathbb{C}^{n-1}$ is regarded as being
embedded in $\mathbb{C}^n$ as $(z_1,\ldots,z_n) \mapsto (z_1,\ldots,z_n,0)$. \end{proposition}
\begin{proof}
Due to the conditions on $p$ and $n$,
proposition~\ref{PropIterativeRetractionDecomposition} is
applicable. Hence there exists a $T$-stable decomposition $\mathbb{C}^n = L
\oplus W$ such that $\mathcal{L}_L \cap \Im f = \emptyset$. Using
remark~\ref{SOnRNormalizingInGrassmannian} it follows that there
exists a curve $g(t)$ of $\SOR{n}$ transformations such that
$g(1)$ maps $\mathcal{L}_L$ to $\mathcal{L}_{L_0}$ where $L_0$ is
the standard line $\mathbb{C}.e_n$. Now define a homotopy $F_t =
g(t)f$. Then, by construction, $F_0 = f$ and $\Im F_1 \cap
\mathcal{L}_{L_0} = \emptyset$. In this situation
lemma~\ref{GrassmannianDeformationRetract} is applicable and we
obtain an equivariant homotopy from $f$ to a map $f'$ such that
$\Im(f')$ is contained in the lower dimensional Grassmannian
$\text{Gr}_p(\mathbb{C}^{n-1})$. \end{proof}
Taking the degree invariants (triples and pairs) into account we state the following addition to the previous proposition: \begin{lemma}
\label{DegreeInvariantsConstantDuringRetraction}
As before, assume $n \geq 4$ and $1 \leq p \leq n - 2$. Let $f$ be a
$G$-map $X \to \text{Gr}_p(\mathbb{C}^n)$ and denote the canonical embedding
$\text{Gr}_p(\mathbb{C}^{n-1}) \hookrightarrow \text{Gr}_p(\mathbb{C}^n)$ by $\iota$. Assume
there exists a $G$-map $f'\colon X \to \text{Gr}_p(\mathbb{C}^{n-1})$ such that
$f$ and $\iota \circ f'$ are equivariantly homotopic. Then,
depending on the involution type, the degree triples (type I)
resp. the degree pairs (type II) of $f$ and $f'$ agree for any two
such maps. \end{lemma}
\begin{proof}
The existence of the map $f'$ is the statement of
proposition~\ref{PropositionIterativeRetraction}: we obtain a map $f'$,
equivariantly homotopic to $f$, whose image is contained in
$\text{Gr}_p(\mathbb{C}^{n-1})$. Thus, we can regard $f'$ as a map to
$\text{Gr}_p(\mathbb{C}^{n-1})$ and clearly we then obtain $f = \iota \circ f'$.
Regarding the degree invariants: By
remark~\ref{GrassmannianEmbeddingIsoInHomology}, the embedding
$\iota$ induces an isomorphism
\begin{align*}
\iota_*\colon H_2(\text{Gr}_p(\mathbb{C}^{n-1}),\mathbb{Z}) \xrightarrow{\;\sim\;} H_2(\text{Gr}_p(\mathbb{C}^n),\mathbb{Z}).
\end{align*}
Note that $\iota_*$ maps a generating cycle $[\mathcal{C}]$ of
$H_2(\text{Gr}_p(\mathbb{C}^{k-1}),\mathbb{Z})$ to the generating cycle $\iota_*([\mathcal{C}]) =
[\iota(\mathcal{C})]$ of $H_2(\text{Gr}_p(\mathbb{C}^k),\mathbb{Z})$. We regard
$\mathcal{C}$ as being canonically oriented as a complex manifold.
Since $f_* = \iota_* \circ f'_*$, it follows that $f_*$ and $f'_*$
are defined in terms of the same multiplication factor; in other
words: The total degree of the map does not change when we regard it
as a map to the lower dimensional Grassmann manifold.
It remains to check that the fixed point signatures do not change
during the retraction. For this let $C \subset X$ be one of the
fixed point circles (any of the two circles in type I or the unique
circle in type II). The restrictions
\begin{align*}
\Restr{f}{C}\colon C \to \text{Gr}_p(\mathbb{R}^n) \;\text{ and }\; \Restr{\iota \circ f'}{C}\colon C \to \text{Gr}_p(\mathbb{R}^n)
\end{align*}
are homotopic, thus they define the same class $[\gamma]$ in the
fundamental group of $\text{Gr}_p(\mathbb{R}^n)$. By construction $f'$ has its
image contained in $\text{Gr}_p(\mathbb{R}^{n-1}) \subset \text{Gr}_p(\mathbb{R}^n)$, thus its
restriction to the circle $C$ can be regarded as a map $C \to
\text{Gr}_p(\mathbb{R}^{n-1})$, defining a homotopy class $[\gamma']$ in
$\pi_1(\text{Gr}_p(\mathbb{R}^{n-1}))$. By
remark~\ref{RealGrassmannianEmbeddingIsoInHomotopy}, the inclusion
$\text{Gr}_p(\mathbb{R}^{n-1}) \hookrightarrow \text{Gr}_p(\mathbb{R}^n)$ induces an isomorphism
of their fundamental groups if $n \geq 4$. This implies that $[\gamma]$
and $[\gamma']$ are either both trivial or both non-trivial, which
proves the statement. \end{proof} To complete the previous lemma we need the following two remarks: \begin{remark}
\label{GrassmannianEmbeddingIsoInHomology}
Assume $0 < p < k - 1$. The embedding
\begin{align*}
\iota\colon \text{Gr}_p(\mathbb{C}^{k-1}) \hookrightarrow \text{Gr}_p(\mathbb{C}^k)
\end{align*}
of Grassmann manifold induces an isomorphism on their second homology
groups. More precisely, let $\mathcal{S}$ be the Schubert variety
generating $H_2(\text{Gr}_p(\mathbb{C}^{k-1}),\mathbb{Z})$ (see
p.~\pageref{DefOfSchubertVarietyC}), then
\begin{align*}
\iota_*([\mathcal{S}]) = [\iota(\mathcal{S})].
\end{align*} \end{remark}
\begin{proof}
Note that the second homology groups of complex Grassmann manifolds
are infinite cyclic. Fix the standard flag in $\mathbb{C}^k$ and let
$\mathcal{C}$ be the Schubert variety with respect to this flag
which generates $H_2(\text{Gr}_p(\mathbb{C}^{k-1}),\mathbb{Z})$. By means of the embedding
$\iota$ it can also be regarded as being contained in the bigger
Grassmannian $\text{Gr}_p(\mathbb{C}^k)$, where it also generates
$H_2(\text{Gr}_p(\mathbb{C}^k),\mathbb{Z})$. In other words: the embedding $\iota$ maps
the generator of $H_2(\text{Gr}_p(\mathbb{C}^{k-1}),\mathbb{Z})$ to the generator of
$H_2(\text{Gr}_p(\mathbb{C}^k),\mathbb{Z})$. It follows that the induced map
\begin{align*}
\iota_*\colon H_2\left(\text{Gr}_p\left(\mathbb{C}^{k-1}\right),\mathbb{Z}\right) \to H_2\left(\text{Gr}_p\left(\mathbb{C}^k\right),\mathbb{Z}\right)
\end{align*}
is an isomorphism. \end{proof}
\begin{remark}
\label{RealGrassmannianEmbeddingIsoInHomotopy}
Let $k \geq 4$. The embedding
\begin{align*}
\iota\colon \text{Gr}_p(\mathbb{R}^{k-1}) \hookrightarrow \text{Gr}_p(\mathbb{R}^k)
\end{align*}
of real Grassmannians induces an isomorphism of their fundamental
groups. \end{remark}
\begin{proof}
We begin the proof with a general remark: For every Grassmannian
$\text{Gr}_p(\mathbb{R}^m)$ we have the following double cover
\begin{align}
\label{GrassmannianCovering}
\widetilde{\text{Gr}}_p(\mathbb{R}^m) \to \text{Gr}_p(\mathbb{R}^m),
\end{align}
where $\smash{\widetilde{\text{Gr}}_p}(\mathbb{R}^k)$ denotes the \emph{oriented}
Grassmannian. The covering map is given by forgetting the
orientation of each subspace. It is known that for $m > 2$ the
oriented Grassmannian $\smash{\widetilde{\text{Gr}}_p}(\mathbb{R}^m)$ is simply connected. Thus, by
assumption about $k$, the oriented Grassmannians
$\smash{\widetilde{\text{Gr}}_p}(\mathbb{R}^{k-1})$ and
$\smash{\widetilde{\text{Gr}}_p}(\mathbb{R}^k)$ are
simply-connected.
Thus, (\ref{GrassmannianCovering}) defines the universal cover of
$\text{Gr}_p(\mathbb{R}^m)$, and this implies that $\pi_1(\text{Gr}_p(\mathbb{R}^m))$ is
isomorphic to the Deck transformation group
$\text{Deck}\,(\smash{\widetilde{\text{Gr}}_p}(\mathbb{R}^m)/\text{Gr}_p(\mathbb{R}^m))$ (see
\cite[p.~71]{Hatcher}). The Deck transformation group in this case
consists of the single homeomorphism $\sigma$, which flips the
orientation on each subspace, thus it is $C_2$ and we obtain
$\pi_1(\text{Gr}_p(\mathbb{R}^k)) \cong C_2$.
To show that the induced map
\begin{align*}
\iota_*\colon \pi_1(\text{Gr}_p(\mathbb{R}^{k-1})) \to \pi_1(\text{Gr}_p(\mathbb{R}^k))
\end{align*}
is an isomorphism it suffices to show that a non-trivial loop
$\gamma$ in $\text{Gr}_p(\mathbb{R}^{k-1})$ will still be non-trivial when it is,
using the embedding $\iota$, regarded as a loop in $\text{Gr}_p(\mathbb{R}^k)$. We
have the following diagram:
\[
\xymatrix{
\widetilde{\text{Gr}}_p(\mathbb{R}^{k-1}) \ar[d] \ar@{^{(}->}[r]^{\hat{\iota}} & \widetilde{\text{Gr}}_p(\mathbb{R}^k) \ar[d]\\
\text{Gr}_p(\mathbb{R}^{k-1}) \ar@{^{(}->}[r]_{\iota} & \text{Gr}_p(\mathbb{R}^k)
}
\]
Let $\gamma$ be a non-trivial loop in $\text{Gr}_p(\mathbb{R}^{k-1})$, say
$\gamma(0) = \gamma(1) = E$. Its lift to the universal cover is a
non-closed curve $\smash{\hat{\gamma}}$ with
$\smash{\hat{\gamma}}(0) = E^+$ and $\smash{\hat{\gamma}}(1) = E^-$,
where $E^+$ and $E^-$ denote the same plane $E$ but equipped with
different orientations, i.\,e. $\sigma(E^+) = E^-$. It
follows that $\smash{\hat{\iota}} \circ \smash{\hat{\gamma}}$ is a lift of
$\iota \circ \gamma$. The curve
$\smash{\hat{\iota}} \circ \smash{\hat{\gamma}}$ is not closed, as it is
still a curve whose endpoints are related by the
orientation-flipping map $\sigma$. Under the isomorphism from the
Deck transformation group to the fundamental group of the base (see
e.\,g. on p.~34 the proof of theorem~5.6 in
\cite{ForsterRiemannSurfaces}), $\sigma$ corresponds to the curve
$\iota \circ \gamma$. Since $\sigma$ is non-trivial, so is
$\iota \circ \gamma$. This proves that the embedding $\iota$ induces
an isomorphism on the fundamental groups. \end{proof}
Now we can finally prove our main reduction statement: \PropositionReductionOfGrToCurve \begin{proof}
Note that in the case $n=3$, this is just the statement of
lemma~\ref{ReductionP2toP1} together with
remark~\ref{ReductionP2toP1Class1}
resp. remark~\ref{ReductionP2toP1Class2}.
If $n \geq 4$, apply proposition~\ref{PropositionIterativeRetraction}
iteratively until we arrive at $n = p + 1$, producing a $G$-homotopy
from $f$ to a map $f'$ whose image is contained in
$\text{Gr}_p(\mathbb{C}^{p+1})$. Note that by assumption $p + 1 > 3$. This space
can be equivariantly identified with $\text{Gr}_1(\mathbb{C}^{p+1})$ by
remark~\ref{GrassmannianIdentificationEquivariant}. Now
proposition~\ref{PropositionIterativeRetraction} can be applied again
iteratively to the map
\begin{align*}
\tilde{f}\colon X \to \text{Gr}_p(\mathbb{C}^{p+1}) \cong \text{Gr}_1(\mathbb{C}^{p+1})
\end{align*}
until we arrive at $p = 2$, yielding a map
\begin{align*}
f'\colon X \to \text{Gr}_1(\mathbb{C}^3) \cong \mathbb{P}_2.
\end{align*}
By lemma~\ref{DegreeInvariantsConstantDuringRetraction}, up to this
point, the degree triple (type I) resp. the degree pair (type II) of
the map is unchanged. For the last reduction step to $\text{Gr}_1(\mathbb{C}^2)$
we first apply a $G$-homotopy in order to make sure that both maps
are fixed point normalized (see
definition~\ref{DefinitionFixpointNormalizationP2}). Then
lemma~\ref{ReductionP2toP1} and remark~\ref{ReductionP2toP1Class1}
(type I) resp. remark~\ref{ReductionP2toP1Class2} (type II) together
with remark~\ref{GrassmannianEmbeddingIsoInHomology} imply that the
final reduction step to $\mathcal{S} \cong \mathbb{P}_1$ also keeps the
degree triple (resp. the degree pair) unchanged. Thus, in the end we
have an equivariant homotopy from $f$ to a map whose image is
contained in $\mathcal{S}$ and whose degree triples (resp. pairs) as
a map $X \to \mathcal{S} \cong \mathbb{P}_1$ are those of $f$. \end{proof}
We can now prove the main result for the equivariant homotopy classification of maps $X \to \text{Gr}_p(\mathbb{C}^n)$: \begin{theorem}
\label{ClassificationMapsToGrassmannians}
Let the torus $X$ be equipped with the type I involution (resp. the
type II involution). Assume $n > 3$ and $1 < p < n$. Then the
homotopy class of a map $f$ in $\mathcal{M}_G(X,\text{Gr}_p(\mathbb{C}^n))$ is
completely determined by its degree triple (type I) resp. its degree
pair (type II). Furthermore, the image $\Im(\mathcal{T})$
(resp. $\Im(\mathcal{P})$) consists of those degree triples
$\Triple{m_0}{d}{m_1}$ (resp. degree pairs $\Pair{m}{d}$) satisfying
\begin{align*}
d \equiv m_0 + m_1 \mod 2 \;\text{(resp. $d \equiv m \mod 2$)}.
\end{align*} \end{theorem}
\begin{proof}
We only prove the statement for the type I involution case, as the
other case works analogously. Let $f$ and $g$ be two $G$-maps with
the same degree triple. By proposition~\ref{LemmaReductionOfGrToP1}
these maps are equivariantly homotopic to maps $\iota \circ f'$ and
$\iota \circ g'$, where $f'$ and $g'$ are $G$-maps $X \to
\mathcal{S}$ and $\iota$ is the embedding of the Schubert variety
$\mathcal{S}$ into $\text{Gr}_p(\mathbb{C}^n)$. Furthermore, by the same
statement, the degree invariants remain unchanged.
By transitivity, this proves the first statement.
Regarding the image $\Im(\mathcal{T})$ (resp. $\Im(\mathcal{P})$):
As before, we can use the embedding
\begin{align*}
\mathcal{S} \hookrightarrow \text{Gr}_p(\mathbb{C}^n)
\end{align*}
together with the iterative retraction method to show that the
conditions
\begin{align*}
d \equiv m_0 + m_1 \mod 2\;\; \text{(type I)}\;\;\text{resp.}\;\; d \equiv m \mod 2\;\; \text{(type II)}
\end{align*}
are both sufficient and necessary for the degree triples
(resp. degree pairs) to be in the image of $\mathcal{T}$ (type I)
resp. $\mathcal{P}$ (type II). \end{proof}
\subsection{Classification of Maps to $\mathcal{H}_n^*$}
As in the previous cases we note that the degree triple map (resp. the degree pair map) is so far only defined on the mapping spaces $\mathcal{M}_G(X,\text{Gr}_p(\mathbb{C}^n))$. But after fixing an identification of each orbit $U(n).\I{p}{q} \subset \mathcal{H}_{(p,q)}$ ($0 < p,q < n$) with $\text{Gr}_p(\mathbb{C}^n)$, the degree triple map (resp. the degree pair map) is also defined on the mapping spaces $\mathcal{M}_G(X,\mathcal{H}_{(p,q)})$ for each non-definite signature $(p,q)$. This allows us to state and prove the main result of this section:
\begin{theorem}
\label{HamiltonianClassificationRankN}
Let $X$ be a torus equipped with either the type I or the type II
involution. Assume $n \geq 3$ and $0 < p,q < n$. Then:
\begin{enumerate}[(i)]
\item The sets $[X,\mathcal{H}_{(n,0)}]_G$ and
$[X,\mathcal{H}_{(0,n)}]_G$ are trivial.
\item Two $G$-maps $X \to \mathcal{H}_{(p,q)}$ are $G$-homotopic iff
their degree triples (type I) resp. their degree pairs (type II)
agree.
\item The realizable degree triples $\Triple{m_0}{d}{m_1}$ (type I)
resp. degree pairs $\Pair{m}{d}$ (type II) are exactly those
which satisfy
\begin{align*}
d \equiv m_0 + m_1 \mod 2 \;\text{ resp. }\;d \equiv m \mod 2.
\end{align*}
\end{enumerate} \end{theorem}
\begin{proof}
The case $n=3$ has already been dealt with in
theorem~\ref{HamiltonianClassificationRank3}. Therefore it suffices
to consider the case $n>3$. The topological triviality of the
definite signature cases is handled in
remark~\ref{RemarkDefiniteComponentsRetractable}. Let $f$ and $g$ be
two equivariant maps $X \to \smash{\mathcal{H}_{(p,q)}^*}$. By
proposition~\ref{GrassmannianStrongDeformationRetract} the image
space has $\text{Gr}_p(\mathbb{C}^n)$ as equivariant deformation retract. Thus,
$f$ and $g$ can be regarded as $G$-maps $X \to \text{Gr}_p(\mathbb{C}^n)$ with the
same degree triple resp. the same degree pair. Now
theorem~\ref{ClassificationMapsToGrassmannians} can be applied. Its
first part implies that $f$ and $g$ are $G$-homotopic, while its
second part contains the statement about realizable degree triples
resp. degree pairs. \end{proof}
Remark~\ref{HamiltonianMapRealizationRank3} explains how to concretely realize maps to $\mathcal{H}_3$ of a given degree invariant. This works equally well in the general situation. Let $(p,q)$ be a fixed signature ($p+q=n$) such that $0 < p,q < n$. If $n$ is smaller than $4$ we end up in one of the projective situations already handled (i.\,e. maps to $\mathbb{P}_1$ or maps to $\mathbb{P}_2$). Thus, assume $n \geq 4$. In section~\ref{SectionN=2} we have seen how to construct maps of a (realizable) degree triple resp. degree pair to $S^2 \cong \mathbb{P}_1$. We then identify $\mathbb{P}_1$ with the Schubert variety $\mathcal{S}$ and compose this map with the embedding $\mathcal{S} \hookrightarrow \text{Gr}_p(\mathbb{C}^n)$ to obtain a map into $\text{Gr}_p(\mathbb{C}^n)$. The latter Grassmannian needs to be embedded as the $U(n)$-orbit of $\I{p}{q}$ into $\mathcal{H}_{(p,q)}$ (see proposition~\ref{GrassmannianStrongDeformationRetract}).
\chapter{Topological Jumps} \label{ChapterJumps}
In this chapter we construct curves \begin{align*}
H\colon [-1,1] \times X \to \mathcal{H}_n \end{align*} of equivariant maps $X \to \mathcal{H}_n$ such that the maps $H_{-1}$ and $H_{+1}$, whose images are assumed to be contained in $\mathcal{H}_n^*$, represent distinct $G$-homotopy classes. In order to make this precise, we make the following definitions: \begin{definition}
Let $H\colon X \to \mathcal{H}_n$ be a $G$-map. Then we define its
\emph{singular set} to be the set
\begin{align*}
S(H) = \{x \in X\colon \det H(x) = 0\} \subset X.
\end{align*}
A $G$-map $X \to \mathcal{H}_n$ is called \emph{singular}
(resp. non-singular) if its singular set is non-empty (resp. empty). \end{definition} \begin{definition}
\label{DefinitionJumpCurve}
A \emph{jump curve} from $H_-$ to $H_+$ (both in
$\mathcal{M}_G(X,\mathcal{H}_n)$) is a $G$-map\footnote{$G$ is
assumed to act trivially on the interval.}
$H\colon [-1,1] \times X \to \mathcal{H}_n$ such that
\begin{enumerate}[(i)]
\item $H_{\pm 1} = H_\pm$ and
\item $H_t = H(t,\cdot)$ is non-singular for $t \neq 0$.
\end{enumerate} \end{definition} Given two $G$-maps $H_\pm\colon X \to \mathcal{H}_n$ belonging to distinct $G$-homotopy classes and a jump curve $H_t$ from $H_-$ to $H_+$, the map $H_0$ must be singular. Otherwise $H_t$ would induce a $G$-homotopy $I \times X \to \mathcal{H}_n^*$ from $H_-$ to $H_+$, which would imply that $H_-$ and $H_+$ are equivariantly homotopic.
Of course, given two $G$-maps $H_\pm\colon X \to \mathcal{H}_n$, we can always consider the affine curve \begin{align*}
(1-t)H_- + tH_+ \end{align*} of $G$-maps connecting $H_-$ and $H_+$ in the vector space $\mathcal{H}_n$. But in this case we have no control over the singular set; neither is it guaranteed that the degeneration only occurs at $t=0$, nor that the singularity set $S(H_0)$ is in some sense ``small''. In this chapter we construct jump curves obeying the restriction that the singular set $S(H_0)$ is \emph{discrete}. The main result of this chapter is the description of a procedure for constructing jump curves for $G$-maps $X \to \mathcal{H}_{(p,q)} \subset \mathcal{H}_n^*$ ($n \geq 2$) from any $G$-homotopy class to any other $G$-homotopy class with a finite singular set; the only requirement is that the signature $(p,q)$ remains unchanged. Note that jumps from one signature $(p,q)$ to a different signature $(p',q')$ are not possible with a finite singular set as shown in the following remark: \begin{remark}
If a curve $H_t\colon X \to \mathcal{H}^*_n$ of $G$-maps whose only
degeneration occurs at $t=0$ jumps from one signature $(p,q)$ to a
different signature $(p',q')$, then the singular set
$S(H_0)$ is the whole space $X$. \end{remark} \begin{proof}
Under the assumption that $(p,q) \not= (p',q')$, let $x$ be an
arbitrary point in $X$. We have to show that $H_0(x)$ is
singular. Assume the opposite, i.\,e. that $H_0(x)$ is
non-singular. Then $c(t) = H_t(x)$ is a continuous curve in
$\mathcal{H}_n^*$ with $c(-1) \in \mathcal{H}_{(p,q)}$ and $c(1) \in
\mathcal{H}_{(p',q')}$. But for $(p,q) \neq (p',q')$,
$\mathcal{H}_{(p,q)}$ and $\mathcal{H}_{(p',q')}$ denote two
distinct connected components of $\mathcal{H}^*_n$, so this yields a
contradiction. Therefore, $S(H_0) = X$. \end{proof} Note that in order to be able to construct jump curves, it does not suffice to consider maps of the type $X \to Y$ where $Y$ is a Grassmannian $\text{Gr}_p(\mathbb{C}^n)$, as these spaces are the deformation retracts of the components $\mathcal{H}_{(p,q)}$, which consist entirely of non-singular matrices. Instead we need to let the image space $Y$ be a subspace of the closure of $\mathcal{H}_{(p,q)}$ having non-empty intersection with its boundary: \begin{align*}
& Y \subset \text{cl}\,(\mathcal{H}_{(p,q)})\\
& Y \cap \partial\mathcal{H}_{(p,q)} \not= \emptyset. \end{align*} For the curve $H_t$ to be a jump curve it must satisfy \begin{align*}
\Im(H_0) \cap \partial\mathcal{H}_{(p,q)} \not= \emptyset, \end{align*} as otherwise it would not be singular at $t=0$ and the possibility of jumps would be excluded. See figure~\ref{fig:jumps} for a depiction of a jump curve.
As in the previous chapter, we handle the case $n=2$ first and then use the methods for $n=2$ for proving a general statement. For the $n=2$ case we will use as image space a certain subspace of $\mathrm{cl}(\mathcal{H}_{(1,1)})$, namely the vector space $i\mathfrak{su}_2 \subset \mathrm{cl}(\mathcal{H}_{(1,1)})$ consisting of the hermitian operators of trace zero in $\mathcal{H}_2$. The space $i\mathfrak{su}_2\smallsetminus\{0\}$ contains the $U(2)$-orbit of $\I{1}{1}$, which we have already identified as an equivariant strong deformation retract of $\mathcal{H}_{(1,1)}$ (see proposition~\ref{Rank2MixedSignatureReduction}), and the origin in $i\mathfrak{su}_2$, which is the unique singular matrix among the hermitian matrices of trace zero, is contained in the boundary $\partial\mathcal{H}_{(1,1)}$. For the general case we embed $i\mathfrak{su}_2$ into $\mathcal{H}_{(p,q)}$ such that it contains the -- with respect to the standard flag for $\mathbb{C}^n$ -- unique one-dimensional Schubert variety $\mathcal{S} \subset \text{Gr}_p(\mathbb{C}^n)$ (see the discussion on p.~\pageref{SchubertVarietyDiscussion}). This setup allows us to use the same methods as for the $n=2$ case.
\begin{figure}
\caption{A jump curve.}
\label{fig:jumps}
\end{figure}
\section{Maps into $\mathrm{cl}(\mathcal{H}_2)$}
Let us first understand how jumps can occur for maps into $\mathcal{H}_2$. Since the components $\mathcal{H}_{(2,0)}$ and $\mathcal{H}_{(0,2)}$ are topologically trivial, the only relevant component to consider is $\mathcal{H}_{(1,1)}$. Recall the definition of the vector space $i\mathfrak{su}_2$: \begin{align*}
i\mathfrak{su}_2 = \left\{
\begin{pmatrix}
a & \phantom{-}b \\
\overline{b} & -a
\end{pmatrix}\colon a \in \mathbb{R}, b \in \mathbb{C}\right\}. \end{align*} The space $i\mathfrak{su}_2\smallsetminus\{0\}$ is contained in $\mathcal{H}_{(1,1)}$. It is important that this space is not \emph{entirely} contained in $\mathcal{H}_{(1,1)}$, as we have \begin{align*}
i\mathfrak{su}_2 \cap \partial\mathcal{H}_{(1,1)} = \left\{
\begin{pmatrix}
0 & 0 \\
0 & 0
\end{pmatrix}\right\} \end{align*} As discussed earlier, $i\mathfrak{su}_2$ can be linearly identified with $\mathbb{R}^3$ via \begin{align}
\label{JumpDiscussionIsomToR3}
\begin{pmatrix}
a & \phantom{-}b \\
\overline{b} & -a
\end{pmatrix} \mapsto
\begin{pmatrix}
a \\
\Re(b) \\
\Im(b)
\end{pmatrix}. \end{align} Recall that the $U(2)$-orbit of the diagonal matrix $\I{1}{1}$ is contained in $i\mathfrak{su}_2\smallsetminus\{0\}$ and is identified by the isomorphism (\ref{JumpDiscussionIsomToR3}) with the unit sphere in $\mathbb{R}^3$.
In the following we construct jump curves as follows: Given two $G$-maps $H_\pm\colon X \to S^2$, we regard them as maps into $S^2 \subset \mathbb{R}^3$ and then construct a map of the form $H\colon [-1,1] \times X \to \mathbb{R}^3$ such that $H_{-1} = H_-$ and $H_{+1} = H_+$. First we introduce some new definitions, which are slightly more suitable for this concrete approach than definition~\ref{DefinitionJumpCurve}: \begin{definition}
Let $H\colon X \to \mathbb{R}^3$ be a $G$-map. We define the \emph{singular
set} $S(H)$ of $H$ to be the fiber $H^{-1}(\{0\})$. The singular
set of a map $(Z,C) \to (\mathbb{R}^3,\{z = 0\})$ is the singular set of its
equivariant extension. \end{definition}
\begin{definition}
\label{DefinitionJumpCurveR3}
A \emph{jump curve} from $H_-$ to $H_+$ (both in
$\mathcal{M}_G(X,\mathbb{R}^3\smallsetminus\{0\})$) is a $G$-map
$H\colon [-1,1] \times X \to \mathbb{R}^3$ such that
\begin{enumerate}[(i)]
\item $H_{\pm 1} = H_\pm$ and
\item $0 \not\in \Im(H_t)$ for $t \neq 0$.
\end{enumerate}
Furthermore, a \emph{jump curve} from
\begin{align*}
H_- \in \mathcal{M}\left((Z,C),\left(\mathbb{R}^3\smallsetminus\{0\},\left(\mathbb{R}^3\smallsetminus\{0\}\right)\cap\{z=0\}\right)\right)
\end{align*}
to
\begin{align*}
H_+ \in \mathcal{M}\left((Z,C),\left(\mathbb{R}^3\smallsetminus\{0\},\left(\mathbb{R}^3\smallsetminus\{0\}\right)\cap\{z=0\}\right)\right)
\end{align*}
is a $G$-map $H\colon [-1,1] \times (Z,C) \to (\mathbb{R}^3,\{z = 0\})$
such that the equivariant extension of $H$ to $[-1,1] \times X$ is a
jump curve from the equivariant extension of $H_-$ to the
equivariant extension of $H_+$. \end{definition} By means of the isomorphism (\ref{JumpDiscussionIsomToR3}), a jump curve between maps $X \to \mathbb{R}^3\smallsetminus\{0\}$ induces a jump curve between maps $X \to \mathcal{H}_n^*$. For the concrete construction of the curve $H_t$ we often require that the maps $H_\pm$ are in \emph{normal form} (see p.~\pageref{RemarkAboutNormalForms}) or in \emph{modified normal form} (see p.~\pageref{DefModifiedNormalForm}, definition~\ref{DefModifiedNormalForm}). We use the terms \emph{curves} and \emph{homotopies} (of $G$-maps) interchangeably.
\subsection{Type I}
In this section, $X$ always denotes the torus equipped with the type I involution. For illustrative purposes, we begin with a very naive jumping method, which does not satisfy our requirement of the singular set being small: \begin{lemma}
Let $X$ be the type I torus and let $d_0^\pm$ and
$d_1^\pm$ be integers. Denote by $H_\pm\colon X \to
\mathcal{H}_{(1,1)}$ the $G$-maps in normal form for the triples
\begin{align*}
\Triple{d_0^\pm}{d_0^\pm-d_1^\pm}{d_1^\pm}.
\end{align*}
Then there exists a jump curve
\begin{align*}
H\colon [-1,1] \times X \to \text{cl}\,(\mathcal{H}_{(1,1)})
\end{align*}
from $H_-$ to $H_+$ such that the singular set $S(H_0)$ consists of
the two circles $C_0 \cup C_1$. \end{lemma}
\begin{proof}
Let $H_\pm\colon X \to S^2 \subset \mathbb{R}^3$ be the maps in normal form
for the triples
$\smash{\Triple{d_0^\pm}{d_0^\pm - d_1^\pm}{d_1^\pm}}$ as it has
been constructed as part of the proof of
proposition~\ref{TripleBuildingBlocks} (ii). In the following we
construct a jump curve
\begin{align*}
H\colon [-1,1] \times X \to \mathbb{R}^3
\end{align*}
from $H_-$ to $H_+$. The maps $H_\pm$ are of the form
\begin{align*}
H_{\pm} =
\begin{pmatrix}
x_\pm \\
y_\pm \\
z_\pm
\end{pmatrix}.
\end{align*}
Since both maps are in normal form (see proof of
proposition~\ref{TripleBuildingBlocks}), they have the convenient
property that $z_- = z_+$. The functions $x_\pm$ and $y_\pm$ define
the rotation defined by the respective degree triples. Note that a
matrix $H_\pm(p)$ is singular iff $x_\pm(p) = y_\pm(p) = z(p) = 0$.
The functions $x_-$, $y_-$ and $z$ (resp. $x_+$, $y_+$ and $z$) do
not simultaneously vanish, because the maps $H_\pm$ are non-singular
by assumption. Now we define the jump curve as:
\begin{align*}
H_t = \begin{pmatrix}
x_t \\
y_t \\
z
\end{pmatrix}
\end{align*}
where
\begin{align*}
x_t =
\begin{cases}
|t| x_- & \;\text{for $t < 0$}\\
0 & \;\text{for $t = 0$}\\
|t| x_+ & \;\text{for $t > 0$}\\
\end{cases}
\;\;\text{and}\;\;
y_t =
\begin{cases}
|t| y_- & \;\text{for $t < 0$}\\
0 & \;\text{for $t = 0$}\\
|t| y_+ & \;\text{for $t > 0$}\\
\end{cases}
\end{align*}
Then $H_t$ defines a non-singular map for $t \neq 0$. The only
points of degeneracy which were introduced by the $t$-scaling are
those points in $X$ where together with $z$ also $t$ vanishes. Since
the maps $H_\pm$ are in normal form, this means
\begin{equation*}
S(H_0) = \{p \in X\colon z(p) = 0 \} = C_0 \cup C_1,
\end{equation*}
which finishes the proof. \end{proof}
For the construction of more interesting topological jumps we employ a new normal form for maps with triples $\Triple{d_0}{d}{d_1}$. This is described in the following definition: \begin{definition}
\label{DefModifiedNormalForm}
For triples $\Triple{d_0}{\pm(d_0 - d_1)}{d_1}$ we define $G$-maps
$F\colon X \to S^2 \subset \mathbb{R}^3$ in \emph{modified normal form} as
follows: Let $D$ be the closed two-disk, $D = \{z \in
\mathbb{C}\colon |z| \leq 1\}$ and let $\iota_D\colon (D,\partial D)
\hookrightarrow (S^2,E)$ be the embedding
(\ref{IotaHemisphereEmbedding}) from
p.~\pageref{IotaHemisphereEmbedding} of $D$ onto one of the
hemispheres of $S^2$. By $Z$ we denote the fundamental region
cylinder of the torus $X$. Now define the map
\begin{align*}
f\colon (Z,C) &\to (D,\partial D)\\
(t, \varphi) &\mapsto \begin{cases}
(1-2t) e^{i d_0 \varphi} + 2t & \text{ for $0 \leq t \leq \frac{1}{2}$}\\
(2t-1) e^{i d_1 \varphi} + 2(1-t) & \text{ for $\frac{1}{2} \leq t \leq 1$}
\end{cases}
\end{align*}
and let $F\colon X \to S^2$ be the equivariant extension of the
composition map $\iota_D \circ f$ (see
lemma~\ref{Class1EquivariantExtension}). The resulting map has the
triple $\Triple{d_0}{d_0-d_1}{d_1}$. \end{definition} The fact that the map $F$ constructed above has the triple $\Triple{d_0}{d_0-d_1}{d_1}$ can be shown as in the proof for proposition~\ref{TripleBuildingBlocks}~(ii). The crucial point of this modified normal form is that the fibers over the north-
resp. south pole of the sphere contain only a finite number of points, precisely $|d_0| + |d_1|$ for each of the poles. In the original normal form the fibers over the poles were copies of $S^1$ in $X$ (see figure~\ref{FigureModifiedNormalForm}).
\begin{figure}
\caption{Homotopy Visualization on the closed $2$-Disk.}
\label{FigureModifiedNormalForm}
\end{figure}
Now we construct a jump curve with only a discrete set of singular points. This jump curve ``flips'' the total degree of maps in modified normal form for basic triples. \begin{lemma}
\label{JumpsRank2Class1TotalDegree}
Let $d_0$ and $d_1$ be two integers and assume that the maps
\begin{align*}
H_\pm\colon (Z,C) \to \left(S^2,E\right) \subset \left(\mathbb{R}^3,\{z=0\}\right)
\end{align*}
have the triples $\Triple{d_0}{\pm(d_1-d_0)}{d_1}$. Then there
exists a jump curve
\begin{align*}
H\colon [-1,1] \times (Z,C) \to \left(\mathbb{R}^3,\{z=0\}\right)
\end{align*}
from $H_-$ to $H_+$ such that the singular set of the equivariant
extension of $H_0$ consists of $2(|d_0| + |d_1|)$ isolated points in
$X\smallsetminus (C_0 \cup C_1)$. If the maps are already normalized
along the boundary circles $C_0$ and $C_1$, then this homotopy $H_t$
can be chosen to be relative to $C_0 \cup C_1$. \end{lemma}
\begin{proof}
After a $G$-homotopy we can assume that the maps $H_\pm$ are in
modified normal form for the triples
$\Triple{d_0}{\pm(d_1-d_0)}{d_1}$. This homotopy can be chosen to be
relative to $C_0 \cup C_1$ if the original maps $H_\pm$ are already
type I normalized. It now suffices to prove the statement under the
assumption that $H_\pm$ are in modified normal form. We can regard
$H_\pm$ as maps
\begin{align*}
H_\pm\colon (Z,C) &\to (\mathbb{R}^3,\{z=0\})\\
p &\mapsto
\begin{pmatrix}
x_\pm(p) \\
y_\pm(p) \\
z_\pm(p)
\end{pmatrix}
\end{align*}
where the functions $x_\pm$, $y_\pm$ and $z_\pm$ satisfy:
\begin{align*}
& x_\pm \circ T = x_\pm\\
& y_\pm \circ T = y_\pm\\
& z_\pm \circ T = -z_\pm.
\end{align*}
Note that the functions $x_-$, $y_-$ and $z_-$ (resp. $x_+$, $y_+$
and $z_+$) do not simultaneously vanish. Also, since the maps in
modified normal form for triples of the form $\Triple{d_0}{\pm(d_1 -
d_0)}{d_1}$ only differ by a reflection along the $x,y$-plane, we
can conclude that $x_- = x_+$ , $y_- = y_+$ and $z_- = -z_+$. We set
$x = x_\pm$ and $y = y_\pm$ and define the jump curve $H_t$ as
follows:
\begin{align*}
H\colon [-1,1] \times (Z,C) &\to (\mathbb{R}^3,\{z=0\})\\
(t,p) &\mapsto
\begin{pmatrix}
x(p)\\
y(p)\\
z_t(p)
\end{pmatrix}
\end{align*}
where
\begin{align*}
z_t(p) =
\begin{cases}
|t| z_-(p) & \;\text{for $t < 0$} \\
0 & \;\text{for $t = 0$} \\
|t| z_+(p) & \;\text{for $t > 0$}
\end{cases}.
\end{align*}
As long as $t \neq 0$, the map $H_t$ is non-singular. For $t=0$, the
singular set $S(H_0)$ consists of those points $p \in X$ which are
mapped to the south resp. the north pole (this is the set $\{x = y =
0\}$). In the cylinder $Z \subset X$ we obtain $|d_0|$ singular
points during the shrinking process of the degree $d_0$ loop and
$|d_1|$ points during the enlarging process of the degree $d_1$
loop. By equivariant extension
(lemma~\ref{Class1EquivariantExtension}), the same applies to the
complementary fundamental cylinder $Z'$, yielding a total number of
$2(|d_0|+|d_1|)$ singular points in $X$.
Equivariance of the curve is guaranteed because the equivariance
conditions on the functions $x_\pm$, $y_\pm$ and $z$ are compatible
with scaling by real numbers. \end{proof}
Given integers $d_0$ and $d_1$, let \begin{align*}
H^{\Triple{d_0}{\pm(d_1-d_0)}{d_1}}\colon [-1,1] \times (Z,C) \to (\mathbb{R}^3,\{z=0\}) \end{align*} denote the jump curve from $H_-$ to $H_+$ (with $\mathcal{T}(H_\pm) = \Triple{d_0}{\pm(d_1-d_0)}{d_1}$) whose existence is guaranteed by lemma~\ref{JumpsRank2Class1TotalDegree}. In the following we consider jump curves, which modify the fixed point degrees. For the proof of the next lemma it is useful to introduce a new notation for maps $(Z,C) \to (S^2,E)$ having triples of the form $\Triple{d_0}{d_0-d_1}{d_1}$. Let us recall how the normal form maps $(Z,C) \to (S^2,E)$ for such triples were constructed in the proof of proposition~\ref{TripleBuildingBlocks} (ii). The cylinder is to be regarded as $I \times S^1$ where we use $s$ as the interval coordinate. Denote the closed unit disk in $\mathbb{C}$ by $D$. Let $\gamma_d\colon S^1 \to S^1$ be the normalized loop of degree $d$. Let $\iota_D$ be the embedding (\ref{IotaHemisphereEmbedding}) of the disk $D$ as one of the hemispheres in $S^2$ and let $f$ be the following map: \begin{align*}
f\colon [0,1] \times S^1 &\to D\\
(s,z) &\mapsto
\begin{cases}
(1-2s)\gamma_{d_0}(z) & \;\text{for $0 \leq s \leq \onehalf$}\\
(2s-1)\gamma_{d_1}(z) & \;\text{for $\onehalf \leq s \leq 1$}\\
\end{cases}. \end{align*} In particular, $f_0$ and $f_1$ both map $S^1$ to the boundary of $D$. Therefore, $\iota_D \circ f$ can be regarded as a map \begin{align*}
(Z,C) \to \left(S^2,E\right) \subset \left(\mathbb{R}^3,\{z=0\}\right). \end{align*} We can then define a map for the above triple as the equivariant extension of the composition $\iota_D \circ f$ (see lemma~\ref{Class1EquivariantExtension}): \begin{align*}
F = \widehat{\iota_D \circ f}. \end{align*} In other words, $F$ is the equivariant extension of the map \begin{align}
\label{TopJumpsDefBigF}
I \times S^1 &\to S^2\\
(s,z) &\mapsto
\begin{cases}
\begin{pmatrix}
(1-2s)\Re(\gamma_{d_0}(z))\\
(1-2s)\Im(\gamma_{d_0}(z))\\
-2\sqrt{s-s^2}
\end{pmatrix} & \;\text{for $s \in \left[0,\onehalf\right]$}\\
\begin{pmatrix}
(1-2s)\Re(\gamma_{d_1}(z))\\
(1-2s)\Im(\gamma_{d_1}(z))\\
-2\sqrt{s-s^2}
\end{pmatrix} & \;\text{for $s \in \left[\onehalf,1\right]$}
\end{cases}.\nonumber \end{align} What this shows is that normal form maps for a triple $\Triple{d_0}{\pm(d_0-d_1)}{d_1}$ can be defined in terms of two loops $S^1 \to S^1$ which are of degree $d_0$ resp. of degree $d_1$. This motivates the following \begin{definition}
By $F_{\alpha,\beta}$ we denote the map $(Z,C) \to
\left(S^2,E\right)$ which is defined in terms of the loops $\alpha$
and $\beta$ as described above. In particular, $F_{\alpha,\beta}$
has the triple $\Triple{\deg \alpha}{\deg \alpha - \deg \beta}{\deg
\beta}$. \end{definition}
Recall that for loops $\gamma_{d_0}, \ldots,\gamma_{d_n}$ we can form the concatenation loop \begin{align*}
\gamma_{d_n} \ast \ldots \ast \gamma_{d_0}\colon S^1 &\to S^1\\
z &\mapsto \gamma_{d_j}(z_{n+1}) \text{, where } \arg z \in\left[\frac{2\pi j}{n+1},\frac{2\pi(j+1)}{n+1}\right] \end{align*}
\begin{lemma}
\label{JumpsRank2Class1FixpointDegree}
Let $d^\pm$ be two integers and $H_\pm\colon (Z,C) \to
\left(S^2,E\right) \subset (\mathbb{R}^3,\{z=0\})$ be maps with the degree
triples
\begin{align*}
\Triple{d^\pm}{d^\pm}{0}\;\text{(resp. $\Triple{0}{d^\pm}{d^\pm}$)}.
\end{align*}
Then there exists a jump curve
$H\colon [-1,1] \times (Z,C) \to \left(\mathbb{R}^3,\{z=0\}\right)$ from
$H_-$ to $H_+$ such that the singular set $S(H_0)$ consists of
$|d^+ - d^-|$ isolated points in $C_0$ (resp. in $C_1$). If the
original maps $H_\pm$ are already normalized along the boundary
circle $C_1$ (resp. $C_0$), then $H_t$ can be chosen relative to
$C_1$ (resp. $C_0$). \end{lemma}
\begin{proof}
We prove the statement only for the triples
$\Triple{d^\pm}{d^\pm}{0}$; the other case works completely
analogously. After a $G$-homotopy we can assume that $H_-$ is
defined in terms of the loops $\gamma_k^{-1} \ast \gamma_k \ast
\gamma_{d^-}$ and $\gamma_0$:
\begin{align}
H_- = F_{\gamma_k^{-1} \ast \gamma_k \ast \gamma_{d^-},\gamma_0}.
\end{align}
After defining $k = d^+ - d^-$ and after another $G$-homotopy we can
assume that $H_+$ is defined in terms of the loops $\gamma_0 \ast
\gamma_k \ast \gamma_{d^-}$ and $\gamma_0$:
\begin{align}
H_+ = F_{\gamma_0 \ast \gamma_k \ast \gamma_{d^-},\gamma_0}.
\end{align}
In order to prove the statement it suffices to construct a jump
curve $H_t$ from $H_-$ to $H_+$. We define the curve $H_t$ as:
\begin{align*}
H_t = F_{\iota_t \ast \gamma_k \ast \gamma_{d^-},\gamma_0},
\end{align*}
where $\iota_t$ (``jump'') is the following null-homotopy of the
loop $\gamma_k^{-1}$:
\begin{align*}
\iota_t\colon [-1,1] \times S^1 &\to \mathbb{C}\\
(t,z) &\mapsto \frac{(1-t)z^{-k} + t + 1}{2}.
\end{align*}
By construction we have $H_{\pm 1} = H_\pm$. Let us compute the
singular set of $H_t$. From the definition of the map
$F_{\alpha,\beta}$ in (\ref{TopJumpsDefBigF}) we can conclude that
the singular set consists of the common zeros of
\begin{align*}
& (1-2s)\iota_t \ast \gamma_k \ast \gamma_{d^-}(z)\\
\text{and}\; & -2\sqrt{s-s^2}.
\end{align*}
Since the square root only vanishes for $s \in \{0,1\}$ and $(1-2s)$
does not vanish for these values of $s$, the only possibility for
both functions to vanish is
\begin{align*}
s = 0 \;\text{and}\;\iota_t \ast \gamma_k \ast \gamma_{d^-}(z) = 0.
\end{align*}
By construction, $\iota_t$ crosses the origin once and that happens
at $t=0$. Therefore, $H_t$ is only singular for $t = 0$ and the
singular set consists of $|k| = |d^+ - d^-|$ points in $C_0 \subset
Z$. \end{proof}
Note that this method also changes the total degree of the map. That is to be expected, since we have all the freedom possible for changing the fixed point degrees, but as we have seen earlier, not all combinations of fixed point degrees and total degree are allowed. The previous lemma also covers the special case $d^- = d^+$, in which there are no singular points introduced. For two integers $d^\pm$ we denote the jump curve from $H_-$ to $H_+$ (with $\mathcal{T}(H_\pm) = \Triple{d^\pm}{d^\pm}{0}$ resp. $\mathcal{T}(H_\pm) = \Triple{0}{d^\pm}{d^\pm}$) whose existence is guaranteed by lemma~\ref{JumpsRank2Class1FixpointDegree} by \begin{align*}
& H^{\Triple{d^\pm}{d^\pm}{0}}\colon [-1,1] \times (Z,C) \to \left(\mathbb{R}^3,\{z=0\}\right)\\
\text{resp. }\; & H^{\Triple{0}{d^\pm}{d^\pm}}\colon [-1,1] \times (Z,C) \to \left(\mathbb{R}^3,\{z=0\}\right). \end{align*}
Now we combine the results of lemma~\ref{JumpsRank2Class1TotalDegree} and~\ref{JumpsRank2Class1FixpointDegree} into one theorem: \begin{theorem}
\label{JumpsRank2Class1}
Let the torus $X$ be equipped with the type I involution and let
$H_\pm\colon X \to S^2 \subset \mathbb{R}^3$ be two $G$-maps with triples
$\Triple{d_0^\pm}{d^\pm}{d_1^\pm}$. Then there exists a jump curve
$H\colon [-1,1] \times X \to \mathbb{R}^3$ of $G$-maps from $H_-$ to $H_+$
such that the singular set $S(H_0) \subset X$ consists of
\begin{align*}
|d_0^+ - d_0^-| + |d_1^+ - d_1^-| + d^+ - (d_0^+ + d_1^+) - \left[d^- - (d_0^- + d_1^-)\right]
\end{align*}
isolated points.
If the original maps $H_\pm$ are type I normalized along the
boundary circles $C_0$ and $C_1$ and $d_0^- = d_0^+$ or $d_1^- =
d_1^+$, then the curve $H_t$ can be chosen such that it does not
modify the maps on the respective boundary circles where they agree. \end{theorem}
\begin{proof}
Assume we are given two maps $H_\pm\colon X \to S^2 \subset \mathbb{R}^3$
having the degree triples $\Triple{d_0^\pm}{d^\pm}{d_1^\pm}$. Define
\begin{align*}
k = \frac{d^+ - (d_0^+ + d_1^+) - \left[d^- - (d_0^- + d_1^-)\right]}{2}.
\end{align*}
Since $d^\pm - (d_0^\pm + d_1^\pm) \equiv 0 \mod 2$, $k$ is an integer.
Note that we then have the identity
\begin{align}
\label{JumpsRank2Class1ProofDiscussionD}
d^+ = d_0^+ + 2k + d^- - d_0^- - d_1^- + d_1^+.
\end{align}
The maps $H_\pm$ are equivariantly homotopic (as non-singular maps
$X \to S^2 \subset \mathbb{R}^3$) to maps $H'_\pm$ which are assumed to be
the concatenations of simpler maps, corresponding to the triple
decomposition
\begin{align}
\label{JumpCurveTripleDecomposition}
\Triple{d_0^\pm}{d_0^\pm}{0} &\bullet \Triple{0}{k}{k}\\
&\bullet \Triple{k}{\pm k}{0}\nonumber \\
&\bullet \Triple{0}{d^--(d_0^- + d_1^-)}{0}\nonumber\\
&\bullet \Triple{0}{d_1^\pm}{d_1^\pm}\nonumber.
\end{align}
If the original maps $H_\pm$ are already normalized along any of the
fixed point circles and if they have the same degree there, then the
homotopy to the maps described by the aforementioned triples can be
chosen relatively to those boundary circles. After this reduction to
the maps $H'_\pm$, it suffices to prove the existence of a jump
curve from $H'_-$ to $H'_+$. The restrictions of the maps for the
triple (\ref{JumpCurveTripleDecomposition}) to the cylinder $Z$ are
defined as the concatenation of five maps $H^j$, each defined on one
fifth $Z_j$ of the cylinder $Z$, $j=0,\ldots,4$. We can assume that
each map $H^j$ is normalized on the boundary circles of its
subcylinder $Z_j$. Note that $H'_-$ and $H'_+$ agree on
$Z_1 \cup Z_3$ and differ only on $Z_0 \cup Z_2 \cup Z_4$. This
allows us to define a curve
\begin{align*}
H'\colon [-1,1] \times (Z,C) &\to (\mathbb{R}^3,\{z=0\})\\
(t,p) &\mapsto
\begin{cases}
H^{\Triple{d_0^\pm}{d_0^\pm}{0}}(t,p) & \;\text{if $p \in Z_0$}\\
H'_-(t,p) & \;\text{if $p \in Z_1$}\\
H^{\Triple{k}{\pm k}{0}}(t,p) & \;\text{if $p \in Z_2$}\\
H'_-(t,p) & \;\text{if $p \in Z_3$}\\
H^{\Triple{0}{d_1^\pm}{d_1^\pm}}(t,p) & \;\text{if $p \in Z_4$}
\end{cases}
\end{align*}
Note that if $d_0^+ = d_0^-$ or $d_1^+ = d_1^-$, then this homotopy
$H_t$ is relative to the respective boundary circles on which the
fixed point degrees agree.
Equivariant extension of $H'$ yields a $G$-map
\begin{align*}
\widehat{H'}\colon [-1,1] \times X \to \mathbb{R}^3
\end{align*}
satisfying the desired properties (see
lemma~\ref{Class1EquivariantExtension}). By
lemma~\ref{JumpsRank2Class1FixpointDegree} we obtain $|d_0^+ -
d_0^-|$ singular points from $H^{\Triple{d_0^\pm}{d_0^\pm}{0}}$,
$|d_1^+ - d_1^-|$ singular points from
$H^{\Triple{0}{d_1^\pm}{d_1^\pm}}$. To this we have to add the
singular set added by $H^{\Triple{k}{\pm k}{0}}$, which (by
lemma~\ref{JumpsRank2Class1TotalDegree}) and by
(\ref{JumpsRank2Class1ProofDiscussionD}) consists of
\begin{align*}
2k = d^+ - (d_0^+ + d_1^+) - \left[d^- - (d_0^- + d_1^-)\right]
\end{align*}
points. In total these are
\begin{align*}
|d_0^+ - d_0^-| + |d_1^+ - d_1^-| + d^+ - (d_0^+ + d_1^+) -
\left[d^- - (d_0^- + d_1^-)\right]
\end{align*}
singular points on the torus. \end{proof}
The following corollary is a reformulation of theorem~\ref{JumpsRank2Class1} using the isomorphism $\mathbb{R}^3 \cong i\mathfrak{su}_2$: \begin{corollary}
\label{JumpsRank2Class1Corollary}
Let the torus $X$ be equipped with the type I involution and let
\begin{align*}
H_\pm\colon X \to i\mathfrak{su}_2\smallsetminus\{0\} \subset \mathcal{H}_{(1,1)}
\end{align*}
be two $G$-maps with the triples
$\Triple{d_0^\pm}{d^\pm}{d_1^\pm}$. Then there exists a jump curve
\begin{align*}
H\colon [-1,1] \times X \to i\mathfrak{su}_2 \subset \mathcal{H}_{(1,1)}
\end{align*}
from $H_-$ to $H_+$ such that the singular set $S(H_0) \subset X$
consists of
\begin{align*}
|d_0^+ - d_0^-| + |d_1^+ - d_1^-| + d^+ - (d_0^+ + d_1^+) - \left[d^- - (d_0^- + d_1^-)\right]
\end{align*}
isolated points. \end{corollary}
\subsection{Type II}
As in the classification chapter, it is possible to deduce results for the type II involution from the respective results of the type I involution. Let $(X,T)$ be the torus equipped with the type II involution and $(X',T')$ be the torus equipped with the type I involution. As an immediate corollary to theorem~\ref{JumpsRank2Class1} note: \begin{corollary}
\label{JumpsRank2Class2}
Let $H_\pm\colon X' \to S^2 \subset \mathbb{R}^3$ be two $G$-maps with
triples $\Triple{0}{d^\pm}{d_1^\pm}$. Then there exists a jump curve
$H\colon [-1,1] \times X' \to \mathbb{R}^3$ from $H_-$ to $H_+$ such that
the singular set $S(H_0) \subset X'$ consists of
\begin{align*}
|d_1^+ - d_1^-| + d^+ - d^- + d_1^- - d_1^+
\end{align*}
isolated points. In particular, if the maps $H_-$ and $H_+$ are
normalized along the boundary circle $C_0$ (meaning that
$H_\pm(C_0) = \{p_0\} \in E \subset S^2$), then the curve $H_t$ can
be chosen such that $H_t(C_0) = \{p_0\}$ for all $t$. \end{corollary}
\begin{proof}
This is just the special case $d_0^\pm = 0$ of
theorem~\ref{JumpsRank2Class1}. \end{proof}
For reducing jump curves for the type II involution to jump curves for the type I involution, recall the equivariant and orientation preserving diffeomorphism \begin{align*}
\Psi\colon X'\smallsetminus C_0 \xrightarrow{\;\sim\;} S^2\smallsetminus \{O_\pm\}. \end{align*} from remark~\ref{Class1TorusWithSphereIdentification} which we have used for identifying type I normalized $G$-maps $X' \to S^2$ with type II normalized $G$-maps $S^2 \to S^2$. We can use the same method for jump curves: \begin{proposition}
\label{JumpsRank2Class2CompleteSphere}
Let $H_\pm$ be two type II normalized $G$-maps
$S^2 \to S^2 \subset \mathbb{R}^3$ with degree pairs
$\smash{\Pair{d_E^\pm}{d^\pm}}$. Then there exists a jump curve
$H\colon [-1,1] \times S^2 \to \mathbb{R}^3$ from $H_-$ to $H_+$ such that
the singular set $S(H_0)$ consists of
\begin{align*}
|d_E^+ - d_E^-| + d^+ - d^- + d_E^- - d_E^+
\end{align*}
isolated points in $S^2$. \end{proposition}
\begin{proof}
By lemma~\ref{LemmaClass2ReductionToClass1}, the maps $H_\pm$ induce
type I normalized equivariant maps
$H'_\pm\colon X' \to S^2 \subset \mathbb{R}^3$ with the degree triples
$\Triple{0}{d^\pm}{d_E^\pm}$ such that
\begin{align*}
\Restr{H'_\pm}{X'\smallsetminus C_0} = \Restr{H_\pm}{S^2\smallsetminus\{O_\pm\}} \circ \Psi.
\end{align*}
Using corollary~\ref{JumpsRank2Class2} we obtain a jump curve
$H'\colon [-1,1] \times X' \to \mathbb{R}^3$ from $H'_-$ to $H'_+$ such that
the singular set $S(H'_0)$ consists of
\begin{align}
\label{SingularPointsJumpsRank2Class2CompleteSphere}
|d_E^+ - d_E^-| + d^+ - d^- + d_E^- - d_E^+
\end{align}
points. In particular this curve has the property that
$H'_t(C_0) = \{p_0\}$ for all times $t$. The last property allows us
to define the curve
\begin{align*}
H\colon [-1,1] \times S^2 &\to \mathbb{R}^3\\
(t,p) &\mapsto
\begin{cases}
H'\left(t,\Psi^{-1}(p)\right) & \;\text{if $p \not\in \{O_\pm\}$}\\
p_0 & \;\text{if $p \in \{O_\pm\}$}\\
\end{cases}
\end{align*}
Since $H'_t$ is non-singular for $t \not= 0$, the same applies to
$H$. By construction
$H_t$ is a jump curve from $H_-$ to $H_+$. The diffeomorphism $\Psi$
maps the singular set $S(H'_0)$ onto the singular set $S(H_0)$, thus
the number of singular points of $S(H_0)$ is also given by
(\ref{SingularPointsJumpsRank2Class2CompleteSphere}). This finishes
the proof. \end{proof}
Now we can deduce the following result for type II jump curves: \begin{theorem}
\label{JumpsRank2Class2Complete}
Let $X$ be the torus equipped with the type II involution and let
$H_\pm$ be two $G$-maps $X \to S^2 \subset \mathbb{R}^3$ with degree pairs
$\Pair{d_C^\pm}{d^\pm}$. Then there exists a jump curve
$H\colon [-1,1] \times X \to \mathbb{R}^3$ from $H_-$ to $H_+$ such that the
singular set $S(H_0)$ consists of
\begin{align*}
|d_C^+ - d_C^-| + d^+ - d^- + d_C^- - d_C^+
\end{align*}
isolated points in $X$. \end{theorem}
\begin{proof}
After a $G$-homotopy we can assume that both maps $H_\pm$ are type
II normalized. Therefore they push down to maps $H'_\pm$ on the
quotient $X/A$, which can be equivariantly identified with
$S^2$. Thus $H'_\pm$ can be regarded as type II normalized maps
$S^2 \to S^2$ and we have the identity
$H_\pm = H'_\pm \circ \smash{\pi_{S^2}}$. By
remark~\ref{Class2CorrespondenceXS2} the degree pairs of the maps
$H'_\pm$ are the same as those of the original maps $H_\pm$. Now
proposition~\ref{JumpsRank2Class2CompleteSphere} guarantees the
existence of a jump curve $H'\colon I \times S^2 \to \mathbb{R}^3$ from
$H'_-$ to $H'_+$ with
\begin{align*}
|d_C^+ - d_C^-| + d^+ - d^- + d_C^- - d_C^+
\end{align*}
singular points on $S^2$. Then the composition
$H'_t \circ \smash{\pi_{S^2}}$ defines a jump curve from $H_-$ to
$H_+$ with the same number of singular points. \end{proof}
As in the case of the type I involution we use the isomorphism $\mathbb{R}^3 \cong i\mathfrak{su}_2$ to note the following reformulation: \begin{corollary}
\label{JumpsRank2Class2CompleteCorollary}
Let $X$ be the torus equipped with the type II involution and let
$H_\pm$ be two $G$-maps $X \to i\mathfrak{su}_2 \subset
\mathcal{H}_{(1,1)}$ with degree pairs $\Pair{d_C^\pm}{d^\pm}$. Then
there exists a jump curve
\begin{align*}
H\colon [-1,1] \times X \to i\mathfrak{su}_2 \subset \mathcal{H}_{(1,1)}
\end{align*}
from $H_-$ to $H_+$ such that the singular set $S(H_0)$ consists of
\begin{align*}
|d_C^+ - d_C^-| + d^+ - d^- + d_C^- - d_C^+
\end{align*}
isolated points in $X$. \end{corollary}
\section{Maps into $\mathrm{cl}(\mathcal{H}_n)$}
In this section we assume $n > 2$. Recall that $\mathcal{H}_{(p,q)}$ contains the orbit $U(n).\I{p}{q}$ as an equivariant strong deformation retract. This orbit can be equivariantly identified with the Grassmann manifold $\text{Gr}_p(\mathbb{C}^n)$. In order to employ the method described in the previous section in the higher dimensional setting, we embed the space $i\mathfrak{su}_2$ into $\mathrm{cl}(\mathcal{H}_{(p,q)})$ such that $i\mathfrak{su}_2\smallsetminus\{0\} \subset U(n).\I{p}{q}$ and the origin in $i\mathfrak{su}_2$ defines a singular matrix in the boundary of this orbit. As we have seen earlier, the topology on the mapping space is only non-trivial in the case of a mixed-signature. Thus we can assume that $0 < p,q < n$. The space $i\mathfrak{su}_2$ can be equivariantly embedded into $\mathrm{cl}(\mathcal{H}_{(p,q)})$ as an affine subspace via \begin{align}
\label{isu2Embedding}
\iota\colon i\mathfrak{su}_2 &\hookrightarrow \mathrm{cl}(\mathcal{H}_{(p,q)})\\
\xi &\mapsto \begin{pmatrix}
\I{p-1} & 0 & 0\\
0 & \xi & 0 \\
0 & 0 & -\I{q-1}
\end{pmatrix}\nonumber \end{align} Similarly, the group $U(2)$ can be embedded into $U(n)$ via \begin{align*}
U(2) &\hookrightarrow U(n)\\
U &\mapsto \begin{pmatrix}
\I{p-1} & 0 & 0\\
0 & U & 0 \\
0 & 0 & -\I{q-1}
\end{pmatrix}. \end{align*} Since $U(n)$ acts on $\mathcal{H}_{(p,q)}$ via conjugation, so does $U(2)$, by regarding each element of $U(2)$ as being embedded in $U(n)$. The $U(2)$-orbit of $\I{p}{q}$ is the same as $\iota(U(2).\I{1}{1})$. It is contained as a subspace in the orbit $U(n).\I{p}{q}$ and corresponds -- under the isomorphism $\text{Gr}_p(\mathbb{C}^n) \cong U(n).\I{p}{q}$ -- to the Schubert variety $\mathcal{S} \cong \mathbb{P}_1$ (see the remarks about the Schubert variety $\mathcal{S}$ on p.~\pageref{SchubertVarietyDiscussion}). This follows from the fact that $U(2)$ acts transitively on \begin{align*}
\mathcal{S} = \left\{E \in \text{Gr}_p(\mathbb{C}^n)\colon \mathbb{C}^{p-1} \subset E \subset \mathbb{C}^{p+1}\right\}. \end{align*} Under the above embedding $i\mathfrak{su}_2 \hookrightarrow \mathrm{cl}(\mathcal{H}_{(p,q)})$, the origin of $i\mathfrak{su}_2$ corresponds to a singular matrix, which is contained in the boundary $\partial\mathcal{H}_{(p,q)}$.
Since $\mathcal{H}_{(p,q)}$ has $\text{Gr}_p(\mathbb{C}^n)$ as equivariant deformation retract, these two spaces and also their real parts have the same fundamental group: \begin{align*}
\pi_1\left(\left(\mathcal{H}_{(p,q)}\right)_\mathbb{R}\right) \cong \pi_1\left(\left(\text{Gr}_p\left(\mathbb{C}^n\right)\right)_\mathbb{R}\right) \cong C_2. \end{align*} We write $C_2$ additively as the group $\mathbb{Z}_2$ and identify elements of $\pi_1((\mathcal{H}_{(p,q)})_\mathbb{R})$ with $\mathbb{Z}_2$. Thus we can regard the restrictions of maps $X \to \mathcal{H}_{(p,q)}$ to the boundary circle(s) in $X$ as defining an element in $\pi_1((\mathcal{H}_{(p,q)})_\mathbb{R}) \cong \{0,1\}$.
\begin{theorem}
\label{JumpsRankN}
Assume $n = p + q > 2$. Let $X$ be the torus equipped with the type
I (resp. type II) involution and let $H_\pm$ be two $G$-maps $X \to
\mathcal{H}_{(p,q)}$ with the degree triples
$\smash{\Triple{m_0^\pm}{d^\pm}{m_1^\pm}}$ resp. the degree pairs
$\smash{\Pair{m_C^\pm}{d^\pm}}$. Then there exists a jump curve
\begin{align*}
H\colon [-1,1] \times X \to \mathrm{cl}(\mathcal{H}_{(p,q)})
\end{align*}
from $H_-$ to $H_+$ such that the singular set $S(H_0)$ consists of
\begin{align*}
& |m_0^+ - m_0^-| + |m_1^+ - m_1^-| + d^+ - (m_0^+ + m_1^+) - \left[d^- - (m_0^- + m_1^-)\right]\\
\text{resp.}\; & |m_C^+ - m_C^-| + d^+ - d^- + m_C^- - m_C^+
\end{align*}
isolated points. \end{theorem}
\begin{proof}
Let $H_\pm$ be $G$-maps $X \to \mathcal{H}_{(p,q)}$ with degree
triples $\Triple{m_0^\pm}{d^\pm}{m_1^\pm}$ resp. degree pairs
$\Pair{m_C^\pm}{d^\pm}$. By applying
proposition~\ref{LemmaReductionOfGrToP1} we can assume that the maps
$H_\pm$ have their images contained in the Schubert variety
$\mathcal{S} \subset \text{Gr}_p(\mathbb{C}^n)$. The Schubert variety is the
$U(2)$-orbit of $\I{p}{q}$, which is $\iota(U(2).\I{1}{1})$. That
is, it is contained in the image of the embedding
\begin{align}
\label{TopJumpsGrIotaEmbedding}
\iota\colon i\mathfrak{su}_2 \hookrightarrow \mathrm{cl}(\mathcal{H}_{(p,q)})
\end{align}
from (\ref{isu2Embedding}). This means that we can assume that the
maps $H_\pm$ are of the form
\begin{align*}
H_\pm = \iota \circ H'_\pm\colon X \to \mathrm{cl}(\mathcal{H}_{(p,q)})
\end{align*}
where the $H'_\pm$ are $G$-maps
\begin{align*}
H'_\pm\colon X \to U(2).\I{1}{1} \subset i\mathfrak{su}_2
\end{align*}
By construction, the degree triples resp. degree pairs of
$H'_\pm$ agree with those of $H_\pm$.
Now corollary~\ref{JumpsRank2Class1Corollary} (type I)
resp. corollary~\ref{JumpsRank2Class2CompleteCorollary} (type II)
implies the existence of a jump curve
\begin{align*}
H'\colon [-1,1] \times X \to i\mathfrak{su}_2
\end{align*}
from $H'_-$ to $H'_+$ such that the singular set $S(H'_0)$ consists
of
\begin{align}
\label{TopJumpsGrSingularPoints}
& |m_0^+ - m_0^-| + |m_1^+ - m_1^-| + d^+ - (m_0^+ + m_1^+) - \left[d^- - (m_0^- + m_1^-)\right]\\
\text{resp.}\; & |m_C^+ - m_C^-| + d^+ - d^- + m_C^- - m_C^+\nonumber
\end{align}
isolated points in $X$. Composing $H'_t$ with $\iota$ yields a jump
curve
\begin{align*}
H = \iota \circ H'\colon [-1,1] \times X \to \mathrm{cl}(\mathcal{H}_{(p,q)})
\end{align*}
from $H_-$ to $H_+$ whose number of isolated points is the same as
that of $H'_t$, namely that given in
(\ref{TopJumpsGrSingularPoints}). \end{proof}
\section{An Example from Physics}
In this section we present an example jump curve coming from physics (see e.\,g. \cite[p.~11]{PrimerTopIns}). For this we regard the torus $X$ as $\mathbb{R}^2/\Lambda$, where \begin{align*}
\Lambda = \LatGen{
\begin{pmatrix}
2\pi \\
0
\end{pmatrix}}{
\begin{pmatrix}
0 \\
2\pi
\end{pmatrix}}. \end{align*} The orientation on $X$ is the orientation inherited from $\mathbb{R}^2$. Employing physics notation for the coordinates on the torus phase space, we define the jump curve as \begin{align}
H_t(q,p) = \begin{pmatrix}
t - \cos q - \cos p & \sin q - i \sin p \\
\sin q + i \sin p & -(t - \cos q - \cos p)
\end{pmatrix}. \end{align} Each map $H_t$ is equivariant with respect to the type I involution. This curve differs from the jump curves we have constructed earlier in that it depends on a continuous parameter $t \in \mathbb{R}$ with multiple jumps taking place as $t$ varies. These occur precisely at $t=-2$, $t=0$ and at $t=2$. The associated curve of scaled maps into $S^2 \subset \mathbb{R}^3$ is \begin{align*}
\widetilde{H}_t(q,p) =
c_t(q,p)
\begin{pmatrix}
t - \cos q - \cos p\\
\phantom{-}\sin q\\
-\sin p
\end{pmatrix}, \end{align*} where \begin{align*}
c_t(q,p) = \frac{1}{\sqrt{(t - \cos q - \cos p)^2 + (\sin q)^2 + (\sin p)^2}}. \end{align*} In the following we compute the degree triples $\mathcal{T}(H_t)$. For this it is convenient to decompose $\smash{\widetilde{H}}_t$ as \begin{align*}
\widetilde{H}_t(q,p) =
c_t(q,p)\left[
\begin{pmatrix}
t - \cos p\\
0\\
-\sin p
\end{pmatrix} + \begin{pmatrix}
- \cos q\\
\sin q\\
0
\end{pmatrix}\right]. \end{align*} Now we can regard $\smash{\widetilde{H}}_t$ as a family of circles in the $x,y$-plane whose centers in $\mathbb{R}^3$ vary with $t$ and $p$. It follows that $\widetilde{H}_t$ is not surjective (as a map to $S^2$)
for $|t| \in (2,\infty)$, therefore we have $\smash{\deg \widetilde{H}_t} = 0$ for these $t$. Furthermore, when $t$ is in a neighborhood of the origin and transitions from a negative real number to a positive real number, then the total degree of $\smash{\widetilde{H}}_t$ flips its sign. Hence it suffices to compute the total degree of e.\,g. $\smash{\widetilde{H}}_1$. This can be done by counting preimage points, for example in the fiber $\smash{\widetilde{H}}_1^{-1}(y_0)$ where $y_0 = \ltrans{(0,1,0)}$. A computation shows that the $y_0$-fiber consists only of the point $(q_0,p_0) = (\text{\textonehalf}\pi,0)$. Using the orientation of $S^2$ induced by an outer normal vector field one can find that the orientation sign of the map $\smash{\widetilde{H}}_1$ at $(q_0,p_0)$ is positive. This then implies that $\deg \smash{\widetilde{H}}_t = 1$ for $t \in (0,2)$ and $\deg \smash{\widetilde{H}}_t = -1$ for $t \in (-2,0)$.
Regarding the fixed point degrees: For $p=0$ resp. $p=\pi$ the map $\smash{\widetilde{H}}_t(\cdot,p)$ can be written as \begin{align}
\label{ExampleFixpointMapP=0}
& q \mapsto
c_t(q,0)\left[
\begin{pmatrix}
t - 1\\
0 \\
0
\end{pmatrix} +
\begin{pmatrix}
-\cos q \\
\phantom{-}\sin q \\
0
\end{pmatrix}\right]\\ \text{resp.}\;\; & q \mapsto
c_t(q,\pi)\left[
\begin{pmatrix}
t + 1\\
0 \\
0
\end{pmatrix} +
\begin{pmatrix}
-\cos q \\
\phantom{-}\sin q \\
0
\end{pmatrix}\right]\nonumber \end{align} Using the orientation on the equator $E \subset S^2$ as defined on p.~\pageref{DegreeOneMapOnEquator} it follows that the fixed point degree for $p=0$ is $0$ for $t \in \mathbb{R}\smallsetminus[0,2]$, and $-1$ for $t \in (0,2)$. The fixed point degree for $p=\pi$ is $0$ for $t \in \mathbb{R}\smallsetminus[-2,0]$ and $-1$ for $t \in (-2,0)$. To summarize the above: \begin{remark}
The following table lists the degree triples for $H_t$ depending on
$t$, where $t$ is assumed to be in one of the $t$-intervals such
that $H_t$ is non-singular:
\begin{center}
\footnotesize
\begin{tabular}[h]{ll}
\toprule
\textbf{$t$-interval} & \textbf{degree triple $\mathcal{T}(H_t)$} \\
\midrule
$(-\infty,-2)$ & $\Triple{0}{0}{0}$ \\
$(-2,0)$ & $\Triple{0}{-1}{-1}$ \\
$(0,2)$ & $\Triple{-1}{1}{0}$ \\
$(2,\infty)$ & $\Triple{0}{0}{0}$ \\
\bottomrule
\end{tabular}
\end{center} \end{remark}
Now we consider a generalization of the above map $H_t$. We define \begin{align}
H_t^m(q,p) = c_t(q,mp) \begin{pmatrix}
t - \cos q - \cos(mp) & \sin q - i \sin(mp) \\
\sin q + i \sin (mp) & -(t - \cos q - \cos(mp))
\end{pmatrix}, \end{align} with $m$ a positive integer. The maps $H_t^m$ are still type I equivariant. Up to the positive normalization factor $c_t(q,mp)$, they can be expressed as the composition $(H_t \circ p_m)(q,p)$, where $p_m$ is the following $m:1$-cover of $X$: \begin{align*}
p_m\colon X &\to X\\
(q,p) &\mapsto (q,mp). \end{align*} In particular, $p_m$ has degree $m$, which implies that $\deg H_t^m = m\deg H_t$. Let us now compute the fixed point degrees of $H_t^m$. These depend on the parity of $m$. If $m$ is even, then the $2\pi$-periodicity of the trigonometric functions implies that the fixed point degrees for $p=0$ and $p=\pi$ must be the same. They are given by the map (\ref{ExampleFixpointMapP=0}). Therefore, the fixed point degrees for even $m$ must be $(0,0)$ for $t \in \mathbb{R}\smallsetminus[0,2]$ and $(-1,-1)$ otherwise. The periodicity of the trigonometric functions implies that fixed point degrees for odd $m$ are the same as those for $m=1$. We obtain the following result: \begin{remark}
The following table lists the degree triples for $H_t^m$ for even
$m$ varying with $t$ such that $H_t^m$ is non-singular:
\begin{center}
\footnotesize
\begin{tabular}[h]{ll}
\toprule
\textbf{$t$-interval} & \textbf{degree triple $\mathcal{T}(H^m_t)$} \\
\midrule
$(-\infty,-2)$ & $\Triple{0}{0}{0}$ \\
$(-2,0)$ & $\Triple{0}{-m}{0}$ \\
$(0,2)$ & $\Triple{-1}{m}{-1}$ \\
$(2,\infty)$ & $\Triple{0}{0}{0}$ \\
\bottomrule
\end{tabular}
\end{center} \end{remark} In particular, this example shows how the degree triples vary with $t$ in such a way that the condition $d \equiv d_0 + d_1 \mod 2$ is always satisfied.
\appendix \chapter{Appendix} \label{ChapterAppendix}
This chapter lists some standard material, which is included for the convenience of the reader.
\section{Topology}
Taken from \cite{Hatcher}: \begin{proposition}
\label{HEP} (Homotopy Extension Property) Let $X$ be a CW complex
and $A \subset X$ a CW subcomplex. Then the CW pair $(X,A)$ has the
homotopy extension property (HEP) for all spaces. \end{proposition} \begin{proof}
See \cite[p.\,15]{Hatcher}. \end{proof}
\begin{lemma}
\label{LemmaCurvesNullHomotopic}
Let $X$ be a topological space and $f,g\colon I \to X$ be two curves
in $X$ with $f(0) = g(0) = p$ and $f(1) = g(1) = q$. Then $g^{-1}
\ast f$ is null-homotopic if and only if $f$ and $g$ are homotopic
(with fixed endpoints). \end{lemma} \begin{proof}
The one direction is trivial: When $f$ and $g$ are homotopic, then
we can define a null-homotopy $H\colon I \times I \to X$ as follows:
During $0 \leq t \leq \text{\textonehalf}$ use a homotopy from $f$
to $g$ to build a homotopy from $g^{-1} \ast f$ to $g^{-1} \ast
g$. Then, during $\text{\textonehalf} \leq t \leq 1$ we shrink $g^{-1} \ast
g$ to the constant curve at $p$.
Now assume that $g^{-1} \ast f$ is null-homotopic; write
$\gamma_0 = f$ and $\gamma_1 = g$, and let $H\colon I \times I \to X$
be a homotopy from $\gamma_1^{-1} \ast \gamma_0$ to the constant
curve at $p$. On the left, top and right boundary of the square
$I \times I$ the map $H$ takes on the value $p$; on the bottom
boundary, i.\,e. $H_0$, it is the curve $\gamma^{-1}_1 \ast
\gamma_0$. We use this map to construct a new map
$\gamma\colon I \times I \to X$ which is a homotopy from $\gamma_0$
to $\gamma_1$ as follows:
We define $\gamma_t\colon I \to X$ to be the map $I
\xhookrightarrow{\iota_t} I \times I \xrightarrow{H} X$ where the
embedding $\iota_t$ is defined as follows:
\begin{align*}
\iota_t\colon I &\to I \times I\\
s &\mapsto
\begin{pmatrix}
\frac{1}{2}\\
0
\end{pmatrix}
+ \frac{s}{\max \{|\cos \pi(1-t)|,|\sin \pi(1-t)|\}}
\begin{pmatrix}
\frac{\cos \pi(1-t)}{2}\\
\sin \pi(1-t)
\end{pmatrix}
\end{align*}
With this definition, $\iota_0$ is the embedding $s \mapsto
\text{\textonehalf}(1-s)$ and $\iota_1$ is the embedding $s \mapsto
\text{\textonehalf}(1+s)$. Hence, $\gamma_t$ is indeed a homotopy
from $\gamma_0$ to $\gamma_1$. Furthermore, the construction makes
sure that for every $t$, $\iota_t(0) = \text{\textonehalf}$ and
$\iota_t(1)$ is always contained in the left, top or right boundary
of $I \times I$. This translates to the fact that $\gamma_t(0) = p$
and $\gamma_t(1) = q$ for all $t$. \end{proof}
\begin{theorem}
\label{WhitneyApproximationTheorem}
(Taken from John Lee, Whitney Approximation Theorem) Suppose $N$ is
a smooth manifold with or without boundary, $M$ is a smooth manifold
(without boundary), and $F\colon N \to M$ is a continuous map. Then
$F$ is homotopic to a smooth map. If $F$ is already smooth on a
closed subset $A \subseteq N$, then the homotopy can be taken to be
relative to $A$. \end{theorem} \begin{proof}
See \cite[p.\,141]{JohnLee}. \end{proof}
Here is a theorem by Whitney, taken from \cite{JohnLee}: \begin{theorem}
\label{SmoothApproximation0}
Let $N$ and $M$ be smooth manifolds and let $F\colon N \to M$ be a
map. Then $F$ is homotopic to a smooth map
$\smash{\widetilde{F}}\colon N \to M$. If $F$ is smooth on a closed
subset $A \subset N$, then the homotopy can be taken to be relative
to $A$. \end{theorem} \begin{proof}
See \cite[p.~142]{JohnLee} \end{proof}
The following two statements can be found in \cite{Bredon}: \begin{theorem}
\label{SmoothApproximation1}
Let $G$ be a compact Lie group acting smoothly on the manifolds $M$
and $N$. Let $\varphi\colon M \to N$ be an equivariant map. Then
$\varphi$ can be approximated by a smooth equivariant map
$\psi\colon M \to N$ which is equivariantly homotopic to $\varphi$
by a homotopy approximating the constant homotopy. Moreover, if
$\varphi$ is already smooth on the closed invariant set
$A \subset M$, then $\psi$ can be chosen to coincide with $\varphi$
on $A$, and the homotopy between $\varphi$ and $\psi$ to be constant
there. \end{theorem} \begin{proof}
See \cite[p.\,317]{Bredon}. \end{proof}
\begin{corollary}
\label{SmoothApproximation2}
Let $G, M, N$ be as in [the previous theorem]. Then any equivariant
map $M \to N$ is equivariantly homotopic to a smooth equivariant
map. Moreover, if two smooth equivariant maps $M \to N$ are
equivariantly homotopic, then they are so by a smooth equivariant
homotopy. \end{corollary} \begin{proof}
\cite[p.\,317]{Bredon}. \end{proof}
The following theorem is taken (and translated) from \cite{StoeckerZieschang}: \begin{theorem}
\label{TopologyMapInducedOnQuotient}
Let $f\colon X \to Y$ be a continuous map, which is compatible with
given equivalence relations $R$ resp. $S$ on $X$ resp. $Y$ (which
means: $x \sim_R x'$ implies $f(x) \sim_S f(x')$). Then
$f'([x]_R) = [f(x)]_S$\footnote{Here $[x]_R$ denotes the equivalence
class of $x$ under the relation $R$ (likewise for $y$ and $S$).}
defines a continuous map $f'\colon X/R \to Y/S$; it is called the
map induced by $f$. If $f$ is a homeomorphism and $f^{-1}$ is also
compatible with the relations $R$ resp. $S$, then the induced map
$f'$ is also a homeomorphism. \end{theorem} \begin{proof}
See \cite[p.~9]{StoeckerZieschang}. \end{proof}
The same applies to the following \begin{theorem}
\label{QuotientOfCWIsCW}
Let $(X,A)$ be a CW pair and $A \neq \emptyset$, then $X/A$ is a
CW complex with the zero cell $[A] \in X/A$ and the cells of the
form $p(e)$, where $p\colon X \to X/A$ denotes the identifying map
and $e$ is a cell in $X\smallsetminus A$. \end{theorem} \begin{proof}
See \cite[p.~93]{StoeckerZieschang}. \end{proof}
Taken from \cite{May}: \begin{theorem}
\label{G-HELP} (G-HELP) Let $A$ be a subcomplex of a $G$-CW
complex $X$ of dimension $\nu$ and let $e\colon Y \to Z$ be a
$\nu$-equivalence. Suppose given maps $g\colon A \to Y$, $h\colon A
\times I \to Z$, and $f\colon X \to Z$ such that $e \circ g = h
\circ i_1$ and $f \circ i = h \circ i_0$ in the following diagram:
\[
\label{HELP-Diagram}
\begin{tikzcd}
A \arrow{rr}{i_0} \arrow{dd}{i} & & A \times I \arrow{dd} \arrow{dl}{h} & & A \arrow{ll}{i_1} \arrow{dl}{g} \arrow{dd}{i} \\
& Z & & Y \arrow[crossing over,near start]{ll}{e} \\
X \arrow{ru}{f} \arrow{rr}{i_0} & & X \times I \arrow[dashed]{ul}{\tilde{h}} & & X \arrow{ll}{i_1} \arrow[dashed]{lu}{\tilde{g}} \\
\end{tikzcd}
\]
Then there exist maps $\smash{\tilde{g}}$ and $\smash{\tilde{h}}$
that make the diagram commute. \end{theorem} \begin{proof}
See \cite[p.~17]{May}. \end{proof}
From this we can deduce a statement about equivariant homotopy extensions for $G$-CW complexes: \begin{corollary}
\label{G-HEP}
(Equivariant HEP) Let $G$ be a topological group. Let $X$ and $Y$
be $G$-CW complexes and $A$ a $G$-CW subcomplex of $X$. Then the
$G$-CW pair $(X,A)$ has the equivariant homotopy extension
property. That is, given a $G$-map $f\colon X \to Y$ and a
$G$-homotopy $h\colon I \times A \to Y$. Then $h$ extends to a
homotopy $H\colon I \times X \to Y$ such that $H_0 \equiv f$ and
$\restr{H}{I \times A} \equiv h$. \end{corollary} \begin{proof}
Set $Z = Y$ and let $e\colon Y \to Z$ be the identity. In particular
this makes $e$ a $\nu$-equivalence where we set $\nu(H) =
\dim(X)$ for all subgroups $H \subset G$. Then, the equivariant
homotopy extension property follows from the LHS square of the
diagram (\ref{HELP-Diagram}). \end{proof}
\begin{remark}
\label{RemarkQuotientDegreeOne}
Let $M$ be an $n$-dimensional, connected, closed, oriented
CW manifold and $A \subset M$ such that
\begin{enumerate}[(i)]
\item $A$ is contained in the $n-1$ skeleton $M^{n-1}$ and
\item $M/A$ is topologically an $n$-dimensional, connected, closed,
orientable manifold.
\end{enumerate}
Then the quotient $M/A$ can be equipped with an orientation such
that the projection map $M \to M/A$ has degree $+1$. \end{remark}
\begin{proof}
The manifold $M$ is assumed to be oriented, which corresponds to a
choice of fundamental class $[M]$ in $H_n(M,\mathbb{Z}) \cong \mathbb{Z}$. Denote
the quotient map with
\begin{align*}
\pi\colon M \to M/A.
\end{align*}
Since $A$ is contained in $M^{n-1}$, $\pi$ maps the $n$-cells in $M$
homeomorphically to the $n$-cells in the quotient $M/A$. It follows
that there exists an $n$-ball $B$ in one of the $n$-cells in $M/A$ such
that $\pi^{-1}(B)$ is a single $n$-ball in $M$ which gets mapped
homeomorphically to $B$. Fix a fundamental class $[M/A]$ of
$H_n(M/A,\mathbb{Z})$. Using exercise~8 from \cite[p.~258]{Hatcher} we can
conclude that the degree of $\pi$ is $\pm 1$, depending on whether it
is orientation preserving or orientation reversing. In case it is
orientation reversing we can choose the opposite orientation on $M/A$
such that $\deg \pi = +1$. \end{proof}
A direct consequence of the previous remark is: \begin{remark}
\label{RemarkMapOnQuotientDegreeUnchanged}
Let $M$ and $N$ be $n$-dimensional, connected, closed, oriented
manifolds and $f\colon M \to N$ a map of degree $d$. Furthermore,
assume that $f$ is constant on $A \subset M$ such that it induces a
map $f'\colon M/A \to N$. If the pair $(M,A)$ satisfies the
assumptions of remark~\ref{RemarkQuotientDegreeOne}, then $f'$ also
has degree $d$. \end{remark}
\begin{proof}
Denote the quotient map with $\pi\colon M \to M/A$. We then have the
following commutative diagram:
\[
\xymatrix{
M \ar[d]_\pi \ar[dr]^f\\
M/A \ar[r]_{f'} & N
}
\]
In homology we obtain:
\[
\xymatrix{
H_n(M,\mathbb{Z}) \ar[d]_{\pi_*} \ar[dr]^{f_*}\\
H_n(M/A,\mathbb{Z}) \ar[r]_{f'_*} & H_n(N,\mathbb{Z})
}
\]
By remark~\ref{RemarkQuotientDegreeOne} the quotient $M/A$ can be
equipped with an orientation such that, after identifying all
homology groups with $\mathbb{Z}$, $\pi_*$ is multiplication by $+1$ and
$f'_*$ is multiplication by $d'$. Thus we have:
\[
\xymatrix{
\mathbb{Z} \ar[d]_{\cdot +1} \ar[dr]^{\cdot d}\\
\mathbb{Z} \ar[r]_{\cdot d'} & \mathbb{Z}
}
\]
By commutativity of the diagram it follows that the degree of
$f'$ is $d' = d$. \end{proof}
\begin{proposition}
\label{TopologyGrassmannians}
Assume $0 < p < n$. Then:
\begin{enumerate}[(i)]
\item The second homology group $H_2(\text{Gr}_p(\mathbb{C}^n),\mathbb{Z})$ is infinite
cyclic for all $p$ and $n$. Fixing a full flag of $\mathbb{C}^n$ induces a
decomposition of $\text{Gr}_p(\mathbb{C}^n)$ into Schubert cells. With respect
to a fixed flag there exists a unique (complex) 1-dimensional
Schubert variety, which can be regarded as the generator of
$H_2(\text{Gr}_p(\mathbb{C}^n),\mathbb{Z})$.
\item The fundamental group $\pi_1(\text{Gr}_p(\mathbb{R}^n))$ is cyclic of order
two unless $p=1$ and $n=2$, in which case it is infinite cyclic.
\end{enumerate} \qed \end{proposition}
\section{Hausdorff Dimension} \label{AppendixHausdorffDimension}
For completeness we quote a theorem taken from \cite[p.~515]{SchleicherArticle}: \begin{theorem}
\label{HDimProperties}
Hausdorff dimension has the following properties:
\begin{enumerate}[(1)]
\item if $X \subset Y$, then $\text{dim}_H X \leq \text{dim}_H Y$;
\item if $X_i$ is a countable collection of sets with $\text{dim}_H X_i
\leq d$, then
\begin{align*}
\text{dim}_H \bigcup_i X_i \leq d
\end{align*}
\item if $X$ is countable, then $\text{dim}_H X = 0$;
\item if $X \subset \mathbb{R}^d$, then $\text{dim}_H X \leq d$;
\item if $f\colon X \to f(X)$ is a Lipschitz map, then $\text{dim}_H(f(X))
\leq \text{dim}_H(X)$.
\item if $\text{dim}_H X = d$ and $\text{dim}_H Y = d'$, then $\text{dim}_H(X \times Y) \geq d + d'$;
\item if $X$ is connected and contains more than one point, then
$\text{dim}_H X \geq 1$; more generally, the Hausdorff dimension of any
set is no smaller than its topological dimension;
\item if a subset $X$ of $\mathbb{R}^n$ has finite positive $d$-dimensional
Lebesgue measure, then
\begin{align*}
\text{dim}_H X = d.
\end{align*}
\end{enumerate} \end{theorem}
Although the Hausdorff dimension is not invariant under homeomorphisms, it is invariant under diffeomorphisms: \begin{corollary}
\label{HdimDiffeomorphismInvariant}
If $f\colon X \to Y$ is a diffeomorphism (between metric spaces),
then $\text{dim}_H X = \text{dim}_H Y$. \end{corollary} \begin{proof}
Let $f\colon X \to Y$ be a diffeomorphism. In particular, $f$ and
$f^{-1}$ are both Lipschitz continuous. Thus, by
theorem~\ref{HDimProperties} (5) we obtain
\begin{align*}
& \text{dim}_H(Y) = \text{dim}_H(f(X)) \leq \text{dim}_H(X)\\
\;\text{and}\; & \text{dim}_H(X) = \text{dim}_H(f^{-1}(Y)) \leq \text{dim}_H Y
\end{align*}
and the statement follows. \end{proof}
\begin{corollary}
\label{HDimViaSetDecomposition}
If $X$ is the finite union of sets $X_i$, then
\begin{align*}
\text{dim}_H X = \max_i \text{dim}_H X_i.
\end{align*} \end{corollary}
\begin{proof}
Each $X_i$ is contained in $X$, hence by
theorem~\ref{HDimProperties} (1) we obtain
\begin{align*}
\text{dim}_H X_i \leq \text{dim}_H X \;\text{for all $i$}.
\end{align*}
In other words:
\begin{align*}
\max_i \text{dim}_H X_i \leq \text{dim}_H X
\end{align*}
On the other hand, noting the trivial fact
\begin{align*}
\text{dim}_H X_i \leq \max_i \text{dim}_H X_i
\end{align*}
and using (2) of the same theorem we can conclude
\begin{align*}
\text{dim}_H X = \text{dim}_H\left(\bigcup_i X_i\right) \leq \max_i(\text{dim}_H X_i).
\end{align*}
Thus we get the desired equation:
\begin{align*}
\text{dim}_H X = \max_i(\text{dim}_H X_i).
\end{align*} \end{proof}
\begin{proposition}
\label{PropositionHausdorffPackingDimension}
For sets $A, B \subset \mathbb{R}^n$ we have
\begin{align*}
\text{dim}_H(A) + \text{dim}_H(B) \leq \text{dim}_H(A \times B) \leq \text{dim}_H(A) + \text{dim}_P(B)
\end{align*} \end{proposition}
\begin{proof}
See \cite[p.~1]{XiaoArticle}. \end{proof}
For a submanifold $B \subset \mathbb{R}^n$ the packing dimension $\text{dim}_P(B)$, the Hausdorff dimension $\text{dim}_H(B)$ and the usual manifold dimension $\dim(B)$ agree\footnote{See e.\,g. \cite[p.~48]{Falconer}}. Thus we obtain for submanifolds $A,B \subset \mathbb{R}^n$: \begin{align}
\label{HDimOfProduct}
\text{dim}_H(A \times B) = \text{dim}_H(A) + \dim(B). \end{align} This allows us to prove the following: \begin{corollary}
\label{HDimOfTotalSpace}
Let $E \xrightarrow{\;\pi\;} X$ be a fiber bundle over the smooth
manifold $X$ where $E$ is a smooth submanifold of $\mathbb{R}^N$ such that
the fiber $F$ is also a smooth manifold.
Let $A$ be a subset in $X$. Then $\text{dim}_H(\pi^{-1}(A)) = \text{dim}_H(A) +
\dim(F)$. \end{corollary}
\begin{proof}
The statement follows by using a trivializing covering of $X$
together with corollary~\ref{HdimDiffeomorphismInvariant},
corollary~\ref{HDimViaSetDecomposition} and (\ref{HDimOfProduct}). \end{proof}
\addchap{Notation}
{\footnotesize \begin{longtable}[ht]{lp{11.6cm}}
$\emptyset$ & The empty set\\
$I$ & The closed unit interval $[0,1]$\\
$\widehat{\mathbb{R}}$ & Compactified real line, $\widehat{\mathbb{R}} = \mathbb{R} \cup \{\infty\}$\\
$\LatGen{\omega_1}{\omega_2}$ & The lattice in $\mathbb{C}$ generated by $\omega_1$ and $\omega_2$\\
$C_n$ & The cyclic group of order $n$\\
$\mathcal{M}(X,Y)$ & The space of maps $X \to Y$\\
$\mathcal{M}((X,A),(Y,B))$ & The space of maps $(X,A) \to (Y,B)$\\
$\mathcal{M}_G(X,Y)$ & The space of continuous $G$-maps $X \to Y$ (both $G$-spaces)\\
$f \simeq g$ & The maps $f$ and $g$ are homotopic\\
$f \simeq_G g$ & The maps $f$ and $g$ are $G$-equivariantly homotopic\\
$[X,Y]_G$ & The $G$-homotopy classes of maps $X \to Y$\\
$\Triple{d_0}{d}{d_1}$ & Degree triple for equivariant homotopy classes (see p.~\pageref{DefinitionTriple})\\
$\mathcal{L}X$ & The free loop space of $X$\\
$\Omega X$ & The space of based loops in $X$\\
$\Omega (X,x_0)$ & The space of based loops in $X$ with basepoint $x_0$\\
$\mathbb{H}^+$ & The upper half plane in $\mathbb{C}$\\
$\pi(X,p)$ & The fundamental group of $X$ with basepoint $p$\\
$\pi(X;p,q)$ & The homotopy classes of curves in $X$ with fixed endpoints $p$ and $q$\\
$\mathcal{H}_n$ & The set of complex, hermitian $n \times n$ matrices\\
$\mathcal{H}^*_n$ & The set of complex, hermitian, non-singular $n \times n$ matrices\\
$\mathcal{H}_{(p,q)}$ & The subset of $\mathcal{H}_{p+q}^*$ consisting of matrices with eigenvalue signature $(p,q)$\\
$c_p\colon X \to Y$ & The constant map, which sends every $x \in X$ to $p \in Y$\\
iff & if and only if\\
LHS, RHS & Left-hand side, right-hand side\\
$X^G$ & For a $G$-space $X$, $X^G = \{x \in X\colon G(x) = \{x\}\}$\\
$\ltrans{M}$ & Matrix transpose of the matrix $M$\\
$M^*$ & For a matrix $M$, $M^*$ denotes its conjugate-transpose, i.\,e. $M^* = \ltrans{\overline{M}}$\\
$e^n$ & An open $n$-cell\\
$\gamma_2 \ast \gamma_1$ & Concatenation of curves $\gamma_1$ and $\gamma_2$ where $\gamma_1(1) = \gamma_2(0)$\\
$\gamma^{-1}$ & For a curve $\gamma$, $\gamma^{-1}$ denotes the same curve with reversed time\\
$\mathbb{P}_n$ & The $n$-dimensional complex projective space\\
$\mathbb{RP}_n$ & The $n$-dimensional real projective space\\
$\I_n$ & The $n \times n$ identity matrix\\
$\I{p}{q}$ & The block diagonal matrix $\text{Diag}\,(\I{p},-\I{q})$\\
$\text{dim}_H X$ & The Hausdorff dimension of the topological space $X$\\
$\text{dim}_P X$ & The packing dimension of the topological space $X$\\
$\text{Gr}_k(\mathbb{C}^n)$ & Grassmannian of $k$-dimensional complex subvectorspaces in $\mathbb{C}^n$\\
$\text{Gr}_k(\mathbb{R}^n)$ & Grassmannian of $k$-dimensional real subvectorspaces in $\mathbb{R}^n$\\
$\mathrm{cl}(X)$ & Closure of the topological space $X$\\
$\partial X$ & Boundary of the topological space $X$ \end{longtable}}
All maps are assumed to be continuous, unless otherwise stated. When discussing the dimension of complex geometric objects we refer to their \emph{complex} dimension unless we explicitly use the term \emph{real
dimension}.
\listoffigures
\end{document} |
\begin{document}
\title{Randomized Block Coordinate Descent \\
for Online and Stochastic Optimization}
\begin{abstract} Two types of low cost-per-iteration gradient descent methods have been extensively studied in parallel. One is online or stochastic gradient descent (OGD/SGD), and the other is randomized block coordinate descent (RBCD). In this paper, we combine the two types of methods together and propose online randomized block coordinate descent (ORBCD). At each iteration, ORBCD only computes the partial gradient of one block coordinate of one mini-batch of samples. ORBCD is well suited for the composite minimization problem where one function is the average of the losses of a large number of samples and the other is a simple regularizer defined on high dimensional variables. We show that the iteration complexity of ORBCD has the same order as OGD or SGD. For strongly convex functions, by reducing the variance of stochastic gradients, we show that ORBCD can converge at a geometric rate in expectation, matching the convergence rate of SGD with variance reduction and RBCD. \end{abstract}
\section{Introduction}
In recent years, considerable efforts in machine learning have been devoted to solving the following composite objective minimization problem: \begin{align}\label{eq:compositeobj} \min_{\mathbf{x}}~f(\mathbf{x}) + g(\mathbf{x}) = \frac{1}{I}\sum_{i=1}^{I}f_i(\mathbf{x}) + \sum_{j=1}^{J}g_j(\mathbf{x}_j)~, \end{align} where $\mathbf{x}\in\R^{n\times 1}$ and $\mathbf{x}_j$ is a block coordinate of $\mathbf{x}$. $f(\mathbf{x})$ is the average of some smooth functions, and $g(\mathbf{x})$ is a \emph{simple} function which may be non-smooth. In particular, $g(\mathbf{x})$ is block separable and blocks are non-overlapping. A variety of machine learning and statistics problems can be cast into the problem~\myref{eq:compositeobj}. In regularized risk minimization problems~\cite{hastie09:statlearn}, $f$ is the average of losses of a large number of samples and $g$ is a simple regularizer on high dimensional features to induce structural sparsity~\cite{bach11:sparse}. While $f$ is separable among samples, $g$ is separable among features.
For example, in lasso~\cite{tibs96:lasso}, $f_i$ is a square loss or logistic loss function and $g(\mathbf{x}) = \lambda \| \mathbf{x} \|_1$ where $\lambda$ is the tuning parameter. In group lasso~\cite{yuan07:glasso}, $g_j(\mathbf{x}_j) = \lambda\| \mathbf{x}_j \|_2$, which enforces group sparsity among variables. To induce both group sparsity and sparsity, sparse group lasso~\cite{friedman:sglasso} uses composite regularizers $g_j(\mathbf{x}_j) = \lambda_1\| \mathbf{x}_j \|_2 + \lambda_2 \|\mathbf{x}_j\|_1$ where $\lambda_1$ and $\lambda_2$ are the tuning parameters.
Due to the simplicity, gradient descent (GD) type methods have been widely used to solve problem~\myref{eq:compositeobj}. If $g_j$ is nonsmooth but simple enough for \emph{proximal mapping}, it is better to just use the gradient of $f_i$ but keep $g_j$ untouched in GD. This variant of GD is often called proximal splitting~\cite{comb09:prox} or proximal gradient descent (PGD)~\cite{tseng08:apgm,beck09:pgm} or forward/backward splitting method (FOBOS)~\cite{duchi09}. Without loss of generality, we simply use GD to represent GD and its variants in the rest of this paper. Let $m$ be the number of samples and $n$ be dimension of features. $m$ samples are divided into $I$ blocks (mini-batch), and $n$ features are divided into $J$ non-overlapping blocks. If both $m$ and $n$ are large, solving~\myref{eq:compositeobj} using batch methods like gradient descent (GD) type methods is computationally expensive. To address the computational bottleneck, two types of low cost-per-iteration methods, online/stochastic gradient descent (OGD/SGD)~\cite{Robi51:SP,Judi09:SP,celu06,Zinkevich03,haak06:logregret,Duchi10_comid,duchi09,xiao10} and randomized block coordinate descent (RBCD)~\cite{nesterov10:rbcd,bkbg11:pbcd,rita13:pbcd,rita12:rbcd}, have been rigorously studied in both theory and applications.
Instead of computing gradients of all samples in GD at each iteration, OGD/SGD only computes the gradient of one block samples, and thus the cost-per-iteration is just one $I$-th of GD. For large scale problems, it has been shown that OGD/SGD is faster than GD~\cite{tari13:pdsvm,shsisr07:pegasos,shte09:sgd}. OGD and SGD have been generalized to handle composite objective functions~\cite{nest07:composite,comb09:prox,tseng08:apgm,beck09:pgm,Duchi10_comid,duchi09,xiao10}. OGD and SGD use a decreasing step size and converge at a slower rate than GD. In stochastic optimization, the slow convergence speed is caused by the variance of stochastic gradients due to random samples, and considerable efforts have thus been devoted to reducing the variance to accelerate SGD~\cite{bach12:sgdlinear,bach13:sgdaverage,xiao14:psgdvd,zhang13:sgdvd,jin13:sgdmix,jin13:sgdlinear}. Stochastic average gradient (SVG)~\cite{bach12:sgdlinear} is the first SGD algorithm achieving the linear convergence rate for strongly convex functions, catching up with the convergence speed of GD~\cite{nesterov04:convex}. However, SVG needs to store all gradients, which becomes an issue for large scale datasets. It is also difficult to understand the intuition behind the proof of SVG. To address the issue of storage and better explain the faster convergence,~\cite{zhang13:sgdvd} proposed an explicit variance reduction scheme into SGD. The two-scheme SGD is referred to as stochastic variance reduction gradient (SVRG). SVRG computes the full gradient periodically and progressively mitigates the variance of stochastic gradient by removing the difference between the full gradient and stochastic gradient. For smooth and strongly convex functions, SVRG converges at a geometric rate in expectation. Compared to SVG, SVRG is free from the storage of full gradients and has a much simpler proof. A similar idea was also proposed independently by~\cite{jin13:sgdmix}. 
The results of SVRG are then improved in~\cite{kori13:ssgd}. In~\cite{xiao14:psgdvd}, SVRG is generalized to solve the composite minimization problem by incorporating the variance reduction technique into the proximal gradient method.
On the other hand, RBCD~\cite{nesterov10:rbcd,rita12:rbcd,luxiao13:rbcd,shte09:sgd,chang08:bcdsvm,hsieh08:dcdsvm,osher09:cdcs} has become increasingly popular due to high dimensional problems with structural regularizers. RBCD randomly chooses a block coordinate to update at each iteration. The iteration complexity of RBCD was established in~\cite{nesterov10:rbcd}, improved and generalized to composite minimization problem by~\cite{rita12:rbcd,luxiao13:rbcd}. RBCD can choose a constant step size and converge at the same rate as GD, although the constant is usually $J$ times worse~\cite{nesterov10:rbcd,rita12:rbcd,luxiao13:rbcd}. Compared to GD, the cost-per-iteration of RBCD is much cheaper. Block coordinate descent (BCD) methods have also been studied under a deterministic cyclic order~\cite{sate13:cbcd,tseng01:ds,luo02:cbcd}. Although the convergence of cyclic BCD has been established~\cite{tseng01:ds,luo02:cbcd}, the iteration complexity is still unknown except for special cases~\cite{sate13:cbcd}.
While OGD/SGD is well suited for problems with a large number of samples, RBCD is suitable for high dimension problems with non-overlapping composite regularizers. For large scale high dimensional problems with non-overlapping composite regularizers, it is not economical to use only one of them. Either method alone may not be suitable for problems when data is distributed across space and time or partially available at the moment~\cite{nesterov10:rbcd}. In addition, SVRG is not suitable for problems when the computation of full gradient at one time is expensive. In this paper, we propose a new method named online randomized block coordinate descent (ORBCD) which combines the well-known OGD/SGD and RBCD together. ORBCD first randomly picks up one block of samples and one block of coordinates, then performs the block coordinate gradient descent on the randomly chosen samples at each iteration. Essentially, ORBCD performs RBCD in the online and stochastic setting. If $f_i$ is a linear function, the cost-per-iteration of ORBCD is $O(1)$ and thus is far smaller than $O(n)$ in OGD/SGD and $O(m)$ in RBCD. We show that the iteration complexity for ORBCD has the same order as OGD/SGD. In the stochastic setting, ORBCD still suffers from the variance of stochastic gradient. To accelerate the convergence speed of ORBCD, we adopt the variance reduction technique~\cite{zhang13:sgdvd} to alleviate the effect of randomness. As expected, the linear convergence rate for ORBCD with variance reduction (ORBCDVD) is established for strongly convex functions for stochastic optimization. Moreover, ORBCDVD does not necessarily require to compute the full gradient at once which is necessary in SVRG and prox-SVRG. Instead, a block coordinate of full gradient is computed at each iteration and then stored for the next retrieval in ORBCDVD.
The rest of the paper is organized as follows. In Section~\ref{sec:relate}, we review the SGD and RBCD. ORBCD and ORBCD with variance reduction are proposed in Section~\ref{sec:orbcd}. The convergence results are given in Section~\ref{sec:theory}. The paper is concluded in Section~\ref{sec:conclusion}.
\section{Related Work}\label{sec:relate} In this section, we briefly review the two types of low cost-per-iteration gradient descent (GD) methods, i.e., OGD/SGD and RBCD. Applying GD on~\myref{eq:compositeobj}, we have the following iterate: \begin{align}\label{eq:fobos}
\mathbf{x}^{t+1} = \argmin_{\mathbf{x}}~\langle \nabla f(\mathbf{x}^t), \mathbf{x} \rangle + g(\mathbf{x}) + \frac{\eta_t}{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2~. \end{align} In some cases, e.g. $g(\mathbf{x})$ is $\ell_1$ norm,~\myref{eq:fobos} can have a closed-form solution. \subsection{Online and Stochastic Gradient Descent} In~\myref{eq:fobos}, it requires to compute the full gradient of $m$ samples at each iteration, which could be computationally expensive if $m$ is too large. Instead, OGD/SGD simply computes the gradient of one block samples.
In the online setting, at time $t+1$, OGD first presents a solution $\mathbf{x}^{t+1}$ by solving \begin{align}
\mathbf{x}^{t+1} = \argmin_{\mathbf{x}}~\langle \nabla f_t(\mathbf{x}^t), \mathbf{x} \rangle + g(\mathbf{x}) + \frac{\eta_t}{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2~. \end{align} where $f_t$ is given and assumed to be convex. Then a function $f_{t+1}$ is revealed which incurs the loss $f_{t+1}(\mathbf{x}^{t+1})$. The performance of OGD is measured by the regret bound, which is the discrepancy between the cumulative loss over $T$ rounds and the best decision in hindsight, \begin{align} R(T) = \sum_{t=1}^{T} { [f_t(\mathbf{x}^t) + g(\mathbf{x}^t)] - [f_t(\mathbf{x}^*)+g(\mathbf{x}^*)]}~, \end{align} where $\mathbf{x}^*$ is the best result in hindsight. The regret bound of OGD is $O(\sqrt{T})$ when using decreasing step size $\eta_t = O(\frac{1}{\sqrt{t}})$. For strongly convex functions, the regret bound of OGD is $O(\log T)$ when using the step size $\eta_t = O(\frac{1}{t})$. Since $f_t$ can be any convex function, OGD considers the worst case and thus the mentioned regret bounds are optimal.
In the stochastic setting, SGD first randomly picks up $i_t$-th block samples and then computes the gradient of the selected samples as follows: \begin{align}\label{eq:sgd}
\mathbf{x}^{t+1} = \argmin_{\mathbf{x}}~\langle \nabla f_{i_t}(\mathbf{x}^t), \mathbf{x} \rangle + g(\mathbf{x}) + \frac{\eta_t}{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2~. \end{align} $\mathbf{x}^t$ depends on the observed realization of the random variable $\xi = \{ i_1, \cdots, i_{t-1}\}$ or generally $\{ \mathbf{x}^1, \cdots, \mathbf{x}^{t-1} \}$. Due to the effect of variance of stochastic gradient, SGD has to choose decreasing step size, i.e., $\eta_t = O(\frac{1}{\sqrt{t}})$, leading to slow convergence speed. For general convex functions, SGD converges at a rate of $O(\frac{1}{\sqrt{t}})$. For strongly convex functions, SGD converges at a rate of $O(\frac{1}{t})$. In contrast, GD converges linearly if functions are strongly convex.
To accelerate the SGD by reducing the variance of stochastic gradient, stochastic variance reduced gradient (SVRG) was proposed by~\cite{zhang13:sgdvd}.~\cite{xiao14:psgdvd} extends SVRG to composite functions~\myref{eq:compositeobj}, called prox-SVRG. SVRGs have two stages, i.e., outer stage and inner stage. The outer stage maintains an estimate $\tilde{\mathbf{x}}$ of the optimal point $\mathbf{x}^*$ and computes the full gradient of $\tilde{\mathbf{x}}$ \begin{align} \tilde{\mu} &= \frac{1}{n} \sum_{i=1}^{n} \nabla f_i(\tilde{\mathbf{x}}) = \nabla f(\tilde{\mathbf{x}})~. \end{align} After the inner stage is completed, the outer stage updates $\tilde{\mathbf{x}}$. At the inner stage, SVRG first randomly picks the $i_t$-th sample, then modifies the stochastic gradient by subtracting the difference between the full gradient and stochastic gradient at $\tilde{\mathbf{x}}$, \begin{align} \mathbf{v}_{t} &= \nabla f_{i_t}(\mathbf{x}^t) - \nabla f_{i_t}(\tilde{\mathbf{x}}) + \tilde{\mu}~. \end{align} It can be shown that the expectation of $\mathbf{v}_{t}$ given $\mathbf{x}^{t-1}$ is the full gradient at $\mathbf{x}^t$, i.e., $\mathbb{E}\mathbf{v}_{t} = \nabla f(\mathbf{x}^t)$. Although $\mathbf{v}_t$ is also a stochastic gradient, the variance of stochastic gradient progressively decreases. Replacing $\nabla f_{i_t}(\mathbf{x}^t)$ by $\mathbf{v}_t$ in SGD step~\myref{eq:sgd}, \begin{align}
\mathbf{x}^{t+1} & = \argmin_{\mathbf{x}}~\langle \mathbf{v}_{t}, \mathbf{x} \rangle + g(\mathbf{x}) + \frac{\eta}{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2~. \end{align} By reducing the variance of stochastic gradient, $\mathbf{x}^t$ can converge to $\mathbf{x}^*$ at the same rate as GD, which has been proved in~\cite{zhang13:sgdvd,xiao14:psgdvd}.
For strongly convex functions, prox-SVRG~\cite{xiao14:psgdvd} can converge linearly in expectation if $\eta > 4L$ and $m$ satisfy the following condition: \begin{align}\label{eq:svrg_rho} \rho = \frac{\eta^2}{\gamma(\eta-4L)m} + \frac{4L(m+1)}{(\eta-4L)m} < 1~. \end{align} where $L$ is the constant of Lipschitz continuous gradient. Note the step size is $1/\eta$ here.
\subsection{Randomized Block Coordinate Descent} Assume $\mathbf{x}_{j} (1\leq j \leq J)$ are non-overlapping blocks. At iteration $t$, RBCD~\cite{nesterov10:rbcd,rita12:rbcd,luxiao13:rbcd} randomly picks $j_t$-th coordinate and solves the following problem: \begin{align}\label{eq:rbcd}
\mathbf{x}_{j_t}^{t+1} = \argmin_{\mathbf{x}_{j_t}}~\langle \nabla_{j_t} f(\mathbf{x}^t), \mathbf{x}_{j_t} \rangle + g_{j_t}(\mathbf{x}_{j_t}) + \frac{\eta_t}{2} \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2~. \end{align} Therefore, $\mathbf{x}^{t+1} = (\mathbf{x}_{j_t}^{t+1}, \mathbf{x}_{k\neq j_t}^t)$. $\mathbf{x}^t$ depends on the observed realization of the random variable \begin{align} \xi = \{ j_1, \cdots, j_{t-1}\}~. \end{align} Setting the step size $\eta_t = L_{j_t}$ where $L_{j_t}$ is the Lipschitz constant of the $j_t$-th coordinate of the gradient $\nabla f(\mathbf{x}^t)$, the iteration complexity of RBCD is
$O(\frac{1}{t})$. For strongly convex functions, RBCD has a linear convergence rate. Therefore, RBCD converges at the same rate as GD, although the constant is $J$ times larger~\cite{nesterov10:rbcd,rita12:rbcd,luxiao13:rbcd}.
\iffalse Here we briefly review and simplify the proof of the iteration complexity of RBCD in~\cite{nesterov10:rbcd,rita12:rbcd,luxiao13:rbcd}, which paves the way for the proof of ORBCD.
\begin{thm} RBCD has the following iteration complexity: \begin{align}
\mathbb{E}_{\xi_{T-1}}f(\mathbf{x}^T) - f(\mathbf{x}) & \leq \frac{J \left[ \mathbb{E} [f(\mathbf{x}^1)] - f(\mathbf{x}^*) + \frac{L}{2} \mathbb{E}\| \mathbf{x} - \mathbf{x}^1 \|_2^2 \right ]}{T} ~. \end{align} \end{thm} Denoting $g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) \in \partial g_{j_t}(\mathbf{x}_{j_t}^{t+1})$, the optimality condition of~\myref{eq:rbcd} is \begin{align} \langle \nabla_{j_t} f(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) + \eta_t (\mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle \leq 0~. \end{align} Rearranging the terms yields \begin{align} & \langle \nabla_{j_t} f(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle \leq - \eta_t \langle \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2 - \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^{t+1} \|_2^2 - \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 ) \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2 - \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 ) ~. \end{align} Using the smoothness of $f$ and $\mathbf{x}^{t+1} = (\mathbf{x}_{j_t}^{t+1}, \mathbf{x}_{k\neq j_t}^t)$, we have \begin{align} & f(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1})- [ f(\mathbf{x}^t) + g(\mathbf{x}^t) ] \nonumber \\
& \leq \langle \nabla_{j_t} f(\mathbf{x}^t), \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \rangle + \frac{L_{j_t}}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 + g_{j_t}(\mathbf{x}_{j_t}^{t+1}) - g_{j_t}(\mathbf{x}_{j_t}) - [g_{j_t}(\mathbf{x}_{j_t}^{t}) - g_{j_t}(\mathbf{x}_{j_t}) ] \nonumber \\
& = \langle \nabla_{j_t} f(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle + \frac{L_{j_t}}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 - \langle \nabla_{j_t} f(\mathbf{x}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle - [g_{j_t}(\mathbf{x}_{j_t}^{t}) - g_{j_t}(\mathbf{x}_{j_t}) ]\nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{L_{j_t} - \eta_t}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 - \langle \nabla_{j_t} f(\mathbf{x}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle - [g_{j_t}(\mathbf{x}_{j_t}^{t}) - g_{j_t}(\mathbf{x}_{j_t}) ] \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) - \langle \nabla_{j_t} f(\mathbf{x}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle - [g_{j_t}(\mathbf{x}_{j_t}^{t}) - g_{j_t}(\mathbf{x}_{j_t}) ]~. \end{align} where the last inequality is obtained by setting $\eta_t = L_{j_t}$. Conditioned on $\mathbf{x}^t$, take expectation over $j_t$ gives \begin{align}\label{eq1}
& \mathbb{E}_{j_t}[f(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1})|\mathbf{x}^t] - [f(\mathbf{x}^t) +g(\mathbf{x}^t)] \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}_{j_t}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) - \frac{1}{J} \sum_{j=1}^J \langle \nabla_{j_t} f(\mathbf{x}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle - \frac{1}{J}[g(\mathbf{x}^t) - g(\mathbf{x}) ]\nonumber \\
& = \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}_{j_t}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) - \frac{1}{J} \langle \nabla f(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle - \frac{1}{J}[g(\mathbf{x}^t) - g(\mathbf{x}) ] \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}_{j_t}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) - \frac{1}{J} [ f(\mathbf{x}^t) + g(\mathbf{x}^t) - (f(\mathbf{x}) + g(\mathbf{x})) ]~. \end{align} Taking expectation over $\xi_t$, we have \begin{align} & \mathbb{E}_{\xi_t}[f(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1})] -\mathbb{E}_{\xi_{t-1}} [ f(\mathbf{x}^t) + g(\mathbf{x}^t)] \nonumber \\
& \leq \frac{\eta_t}{2} ( \mathbb{E}_{\xi_{t-1}}\| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}_{\xi_t}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) - \frac{1}{J} \{ \mathbb{E}_{\xi_{t-1}} [ f(\mathbf{x}^t) +g(\mathbf{x}^t) ] - \mathbb{E}_{\xi_{t-1}} [ f(\mathbf{x}) + g(\mathbf{x})] \} ~. \end{align} Setting $\mathbf{x} = \mathbf{x}^t$ gives \begin{align}
& \mathbb{E}_{\xi_{t}}[f(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1})] - \mathbb{E}_{\xi_{t-1}} [ f(\mathbf{x}^t) + g(\mathbf{x}^{t})] \leq - \frac{\eta_t}{2} \mathbb{E}_{\xi_t}[\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2]~. \end{align} Thus, $\mathbb{E}_{\xi_{t-1}}[f(\mathbf{x}^t) + g(\mathbf{x}^{t})]$ decreases monotonically.
Let $\mathbf{x} = \mathbf{x}^*$, which is an optimal solution. Rearranging the temrs of~\myref{eq1} yields \begin{align} &\mathbb{E}_{\xi_{t}}[f(\mathbf{x}^t) + g(\mathbf{x}^t)] - [ f(\mathbf{x}^*) + g(\mathbf{x}^*) ] \nonumber \\
&\leq J \left[ \mathbb{E}_{\xi_{t-1}} [f(\mathbf{x}^t) + g(\mathbf{x}^{t}) ] - \mathbb{E}_{\xi_{t}}[f(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1})] + \frac{\eta_t}{2} ( \mathbb{E}_{\xi_{t-1}}\| \mathbf{x}^* - \mathbf{x}^t \|_2^2 - \mathbb{E}_{\xi_{t}}[\| \mathbf{x}^* - \mathbf{x}^{t+1} \|_2^2]) \right]~. \end{align} Let $\eta_t = L = \max_j {L_{j}} $. Summing over $t$, we have \begin{align} & \sum_{t=1}^T\mathbb{E}_{\xi_{t-1}}[f(\mathbf{x}^t) + g(\mathbf{x}^{t})] - [ f(\mathbf{x}^*) + g(\mathbf{x}^{*})] \nonumber \\
& \leq J \left\{ f(\mathbf{x}^1) + g(\mathbf{x}^{1}) -\mathbb{E}_{\xi_{T}} [ f(\mathbf{x}^{T+1})+ g(\mathbf{x}^{T+1})] + \frac{L}{2}\| \mathbf{x}^* - \mathbf{x}^1 \|_2^2 \right \} \nonumber \\
& \leq J \left\{ [f(\mathbf{x}^1) + g(\mathbf{x}^{1})] - [ f(\mathbf{x}^*) +g(\mathbf{x}^*)] + \frac{L}{2} \| \mathbf{x}^* - \mathbf{x}^1 \|_2^2 \right \} ~. \end{align} Using the monotonicity of $\mathbb{E}_{\xi_{t-1}}f(\mathbf{x}^t)$ and dividing $T$ on both sides complete the proof. \qed \fi
\section{Online Randomized Block Coordinate Descent}\label{sec:orbcd} In this section, our goal is to combine OGD/SGD and RBCD together to solve problem~\myref{eq:compositeobj}. We call the algorithm online randomized block coordinate descent (ORBCD), which computes one block coordinate of the gradient of one block of samples at each iteration. ORBCD essentially performs RBCD in online and stochastic setting.
Let $\{ \mathbf{x}_1, \cdots, \mathbf{x}_J \}, \mathbf{x}_j\in \R^{n_j\times 1}$ be $J$ non-overlapping blocks of $\mathbf{x}$. Let $U_j \in \R^{n\times n_j}$ be $n_j$ columns of an $n\times n$ permutation matrix $\mathbf{U}$, corresponding to the $j$-th block of coordinates in $\mathbf{x}$. For any partition of $\mathbf{x}$ and $\mathbf{U}$, \begin{align} \mathbf{x} = \sum_{j=1}^{J}U_j\mathbf{x}_j~, \mathbf{x}_j = U_j^T\mathbf{x}~. \end{align} The $j$-th coordinate of the gradient of $f$ can be denoted as \begin{align} \nabla_j f(\mathbf{x}) = U_j^T \nabla f(\mathbf{x})~. \end{align} Throughout the paper, we assume that the minimum of problem~\myref{eq:compositeobj} is attained. In addition, ORBCD needs the following assumption:
\begin{asm}\label{asm:orbcd1} $f_t$ or $f_i$ has block-wise Lipschitz continuous gradient with constant $L_j$, e.g., \begin{align}
\| \nabla_j f_t(\mathbf{x} + U_j h_j ) - \nabla_j f_t(\mathbf{x}) \|_2 \leq L_j \| h_j \|_2 \leq L \| h_j \|_2~, \end{align} where $L = \max_j L_j$. \end{asm} \begin{asm}\label{asm:orbcd2}
1. $\| \nabla f_t (\mathbf{x}^t) \|_2 \leq R_f $, or $\| \nabla f (\mathbf{x}^t) \|_2 \leq R_f $;
2. $\mathbf{x}^t$ is assumed in a bounded set ${\cal X}$, i.e., $\sup_{\mathbf{x},\mathbf{y} \in {\cal X}} \| \mathbf{x} - \mathbf{y} \|_2 = D$. \end{asm} While the Assumption~\ref{asm:orbcd1} is used in RBCD, the Assumption~\ref{asm:orbcd2} is used in OGD/SGD. We may assume the sum of two functions is strongly convex. \begin{asm}\label{asm:orbcd3} $f_t(\mathbf{x}) + g(\mathbf{x})$ or $f(\mathbf{x}) + g(\mathbf{x})$ is $\gamma$-strongly convex, e.g., we have \begin{align}\label{eq:stronggcov}
f_t(\mathbf{x}) + g(\mathbf{x}) \geq f_t(\mathbf{y}) + g(\mathbf{y}) + \langle \nabla f_t(\mathbf{y}) + g'(\mathbf{y}), \mathbf{x} - \mathbf{y} \rangle + \frac{\gamma}{2} \| \mathbf{x} - \mathbf{y} \|_2^2~. \end{align} where $\gamma > 0$ and $g'(\mathbf{y})$ denotes the subgradient of $g$ at $\mathbf{y}$. \end{asm}
\subsection{ORBCD for Online Learning} In online setting, ORBCD considers the worst case and runs at rounds. At time $t$, given any function $f_t$ which may be agnostic, ORBCD randomly chooses $j_t$-th block coordinate and presents the solution by solving the following problem: \begin{align}\label{eq:orbcdo}
\mathbf{x}_{j_t}^{t+1} &= \argmin_{\mathbf{x}_{j_t}}~\langle \nabla_{j_t} f_t(\mathbf{x}^t), \mathbf{x}_{j_t} \rangle + g_{j_t}(\mathbf{x}_{j_t}) + \frac{\eta_t}{2} \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2 \nonumber \\ & = \text{Prox}_{g_{j_t}}(\mathbf{x}_{j_t}^t -\frac{1}{\eta_t}\nabla_{j_t} f_t(\mathbf{x}^t) )~, \end{align} where $\text{Prox}$ denotes the proximal mapping. If $f_t$ is a linear function, e.g., $f_t(\mathbf{x}) = l_t^T\mathbf{x}$, then $\nabla_{j_t} f_t(\mathbf{x}^t) = l_{j_t}$, so solving~\myref{eq:orbcdo} is $J$ times cheaper than OGD. Thus, $\mathbf{x}^{t+1} = ( \mathbf{x}_{j_t}^{t+1}, \mathbf{x}_{k\neq j_t}^t)$, or \begin{align} \mathbf{x}^{t+1} = \mathbf{x}^t + U_{j_t}(\mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t)~. \end{align} Then, ORBCD receives a loss function $f_{t+1}(\mathbf{x})$ which incurs the loss $f_{t+1}(\mathbf{x}^{t+1})$. The algorithm is summarized in Algorithm~\ref{alg:orbcd_online}.
$\mathbf{x}^t$ is independent of $j_t$ but depends on the sequence of observed realization of the random variable \begin{align} \xi = \{ j_1, \cdots, j_{t-1} \}. \end{align} Let $\mathbf{x}^*$ be the best solution in hindsight. The regret bound of ORBCD is defined as \begin{align} R(T) = \sum_{t=1}^T\left \{ \mathbb{E}_{\xi}[ f_t(\mathbf{x}^t) + g(\mathbf{x}^t) ] - [f_t(\mathbf{x}^*) +g(\mathbf{x}^*)] \right \}~. \end{align} By setting $\eta_t = \sqrt{t} + L$ where $L=\max_jL_j$, the regret bound of ORBCD is $O(\sqrt{T})$. For strongly convex functions, the regret bound of ORBCD is $O(\log T)$ by setting $\eta_t = \frac{\gamma t}{J} + L$.
\begin{algorithm*}[tb] \caption{Online Randomized Block Coordinate Descent for Online Learning} \label{alg:orbcd_online} \begin{algorithmic}[1]
\STATE {\bfseries Initialization:} $\mathbf{x}^1 = \mathbf{0}$ \FOR{$t=1 \text{ to } T$} \STATE randomly pick up $j_t$ block coordinates
\STATE $\mathbf{x}_{j_t}^{t+1} = \argmin_{\mathbf{x}_{j_t} \in {\cal X}_j}~\langle \nabla_{j_t} f_t(\mathbf{x}^t), \mathbf{x}_{j_t} \rangle + g_{j_t}(\mathbf{x}_{j_t}) + \frac{\eta_t}{2} \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2$~ \STATE $\mathbf{x}^{t+1} = \mathbf{x}^t + U_{j_t}(\mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t)$ \STATE receives the function $f_{t+1}(\mathbf{x}) + g(\mathbf{x})$ and incurs the loss $f_{t+1}(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1})$ \ENDFOR \end{algorithmic} \end{algorithm*}
\subsection{ORBCD for Stochastic Optimization} In the stochastic setting, ORBCD first randomly picks up $i_t$-th block sample and then randomly chooses $j_t$-th block coordinate. The algorithm has the following iterate: \begin{align}\label{eq:orbcds}
\mathbf{x}_{j_t}^{t+1} & = \argmin_{\mathbf{x}_{j_t}}~\langle \nabla_{j_t} f_{i_t}(\mathbf{x}^t), \mathbf{x}_{j_t} \rangle + g_{j_t}(\mathbf{x}_{j_t}) + \frac{\eta_t}{2} \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2 \nonumber \\ & = \text{Prox}_{g_{j_t}}(\mathbf{x}_{j_t}^t -\frac{1}{\eta_t}\nabla_{j_t} f_{i_t}(\mathbf{x}^t) )~. \end{align} For high dimensional problems with non-overlapping composite regularizers, solving~\myref{eq:orbcds} is computationally cheaper than solving~\myref{eq:sgd} in SGD. The algorithm of ORBCD in both settings is summarized in Algorithm~\ref{alg:orbcd_stochastic}.
$\mathbf{x}^{t+1}$ depends on $(i_t, j_t)$, but $j_{t}$ and $i_{t}$ are independent. $\mathbf{x}^t$ is independent of $(i_t, j_t)$ but depends on the observed realization of the random variables \begin{align} \xi = \{ ( i_1, j_1), \cdots, (i_{t-1}, j_{t-1}) \}~. \end{align} The online-stochastic conversion rule~\cite{Duchi10_comid,duchi09,xiao10} still holds here. The iteration complexity of ORBCD can be obtained by dividing the regret bounds in the online setting by $T$. Setting $\eta_t = \sqrt{t} + L$ where $L=\max_jL_j$, the iteration complexity of ORBCD is \begin{align} \mathbb{E}_{\xi} [ f(\bar{\mathbf{x}}^t) + g(\bar{\mathbf{x}}^t) ] - [f(\mathbf{x}) +g(\mathbf{x})] \leq O(\frac{1}{\sqrt{T}})~. \end{align} For strongly convex functions, setting $\eta_t = \frac{\gamma t}{J} + L$, \begin{align} \mathbb{E}_{\xi} [ f(\bar{\mathbf{x}}^t) + g(\bar{\mathbf{x}}^t) ] - [f(\mathbf{x}) +g(\mathbf{x})] \leq O(\frac{\log T}{T})~. \end{align} The iteration complexity of ORBCD matches that of SGD. Similar to SGD, the convergence speed of ORBCD is also slowed down by the variance of stochastic gradient.
\begin{algorithm*}[tb] \caption{Online Randomized Block Coordinate Descent for Stochastic Optimization} \label{alg:orbcd_stochastic} \begin{algorithmic}[1]
\STATE {\bfseries Initialization:} $\mathbf{x}^1 = \mathbf{0}$ \FOR{$t=1 \text{ to } T$} \STATE randomly pick up $i_t$ block samples and $j_t$ block coordinates
\STATE $\mathbf{x}_{j_t}^{t+1} = \argmin_{\mathbf{x}_{j_t} \in {\cal X}_j}~\langle \nabla_{j_t} f_{i_t}(\mathbf{x}^t), \mathbf{x}_{j_t} \rangle + g_{j_t}(\mathbf{x}_{j_t}) + \frac{\eta_t}{2} \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2$~ \STATE $\mathbf{x}^{t+1} = \mathbf{x}^t + U_{j_t}(\mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t)$ \ENDFOR \end{algorithmic} \end{algorithm*}
\begin{algorithm*}[tb] \caption{Online Randomized Block Coordinate Descent with Variance Reduction} \label{alg:orbcdvd} \begin{algorithmic}[1]
\STATE {\bfseries Initialization:} $\mathbf{x}^1 = \mathbf{0}$ \FOR{$t=2 \text{ to } T$} \STATE $\mathbf{x}_0 = \tilde{\mathbf{x}} = \mathbf{x}^t$. \FOR{$k = 0\textbf{ to } m-1$} \STATE randomly pick up $i_k$ block samples \STATE randomly pick up $j_k$ block coordinates \STATE $\mathbf{v}_{j_k}^{i_k} = \nabla_{j_k} f_{i_k}(\mathbf{x}^{k}) - \nabla_{j_k} f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu}_{j_k}$ where $\tilde{\mu}_{j_k} = \nabla_{j_k} f(\tilde{\mathbf{x}})$
\STATE $\mathbf{x}_{j_k}^{k+1} = \argmin_{\mathbf{x}_{j_k} }~\langle \mathbf{v}_{j_k}^{i_k}, \mathbf{x}_{j_k} \rangle + g_{j_k}(\mathbf{x}_{j_k}) + \frac{\eta_k}{2} \| \mathbf{x}_{j_k} - \mathbf{x}_{j_k}^{k}\|_2^2$~ \STATE $\mathbf{x}^{k+1} = \mathbf{x}^k + U_{j_k}(\mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k)$ \ENDFOR \STATE $\mathbf{x}^{t+1} = \mathbf{x}^m$ or $\frac{1}{m}\sum_{k=1}^{m}\mathbf{x}^k$ \ENDFOR \end{algorithmic} \end{algorithm*} \subsection{ORBCD with variance reduction} In the stochastic setting, we apply the variance reduction technique~\cite{xiao14:psgdvd,zhang13:sgdvd} to accelerate the rate of convergence of ORBCD, abbreviated as ORBCDVD. As SVRG and prox-SVRG, ORBCDVD consists of two stages. At time $t+1$, the outer stage maintains an estimate $\tilde{\mathbf{x}} = \mathbf{x}^t$ of the optimal $\mathbf{x}^*$ and updates $\tilde{\mathbf{x}}$ every $m+1$ iterations. The inner stage takes $m$ iterations which are indexed by $k = 0,\cdots, m-1$. At the $k$-th iteration, ORBCDVD randomly picks the $i_k$-th sample and the $j_k$-th coordinate and computes \begin{align} \mathbf{v}_{j_k}^{i_k} &= \nabla_{j_k} f_{i_k}(\mathbf{x}^k) - \nabla_{j_k} f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu}_{j_k}~, \label{eq:orbcdvd_vij} \end{align} where \begin{align}\label{eq:orbcdvd_mu} \tilde{\mu}_{j_k} = \frac{1}{I} \sum_{i=1}^{I} \nabla_{j_k} f_i(\tilde{\mathbf{x}}) = \nabla_{j_k} f(\tilde{\mathbf{x}})~. \end{align} $\mathbf{v}_{j_k}^{i_k}$ depends on $(i_k, j_k)$, and $i_k$ and $j_k$ are independent. Conditioned on $\mathbf{x}^k$, taking expectation over $i_k, j_k$ gives
\begin{align} \mathbb{E}\mathbf{v}_{j_k}^{i_k} &= \mathbb{E}_{i_k} \mathbb{E}_{j_k}[\nabla_{j_k} f_{i_k}(\mathbf{x}^k) - \nabla_{j_k} f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu}_{j_k}] \nonumber \\ &= \frac{1}{J}\mathbb{E}_{i_k} [\nabla f_{i_k}(\mathbf{x}^k) - \nabla f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu} ] \nonumber \\ & = \frac{1}{J}\nabla f(\mathbf{x}^k)~. \end{align}
Although $\mathbf{v}_{j_k}^{i_k}$ is a stochastic gradient, the variance $\mathbb{E} \| \mathbf{v}_{j_k}^{i_k} - \nabla_{j_k} f(\mathbf{x}^k) \|_2^2$ decreases progressively and is smaller than $\mathbb{E} \| \nabla f_{i_t}(\mathbf{x}^t) - \nabla f(\mathbf{x}^t) \|_2^2$. Using the variance-reduced gradient $\mathbf{v}_{j_k}^{i_k}$, ORBCD then performs RBCD as follows: \begin{align}
\mathbf{x}_{j_k}^{k+1} &= \argmin_{\mathbf{x}_{j_k}}~ \langle \mathbf{v}_{j_k}^{i_k}, \mathbf{x}_{j_k} \rangle + g_{j_k}(\mathbf{x}_{j_k}) + \frac{\eta}{2} \| \mathbf{x}_{j_k} - \mathbf{x}_{j_k}^k \|_2^2 \label{eq:orbcdvd_xj}~. \end{align} After $m$ iterations, the outer stage updates $\mathbf{x}^{t+1}$ which is either $\mathbf{x}^m$ or $\frac{1}{m}\sum_{k=1}^{m}\mathbf{x}^k$. The algorithm is summarized in Algorithm~\ref{alg:orbcdvd}. At the outer stage, ORBCDVD does not necessarily require to compute the full gradient at once. If the computation of full gradient requires substantial computational efforts, SVRG has to stop and complete the full gradient step before making progress. In contrast, $\tilde{\mu}$ can be partially computed at each iteration and then stored for the next retrieval in ORBCDVD.
Assume $\eta > 2L$ and that $m$ satisfies the following condition: \begin{align}\label{eq:orbcdvd_rho} \rho = \frac{L(m+1)}{(\eta-2L)m} + \frac{(\eta-L)J}{(\eta-2L)m} - \frac{1}{m}+ \frac{\eta (\eta-L)J}{(\eta-2L)m\gamma} < 1~. \end{align} Then ORBCDVD converges linearly in expectation, i.e., \begin{align}
\mathbb{E}_{\xi} [f(\mathbf{x}^t) + g(\mathbf{x}^t) - (f(\mathbf{x}^*) + g(\mathbf{x}^*) ] \leq O(\rho^t)~. \end{align}
Setting $\eta = 4L$ in~\myref{eq:orbcdvd_rho} yields \begin{align} \rho = \frac{m+1}{2m} + \frac{3J}{2m} - \frac{1}{m}+ \frac{6JL}{m\gamma} \leq \frac{1}{2} + \frac{3 J}{2m}(1+\frac{4 L}{\gamma})~. \end{align} Setting $m = 18JL/\gamma$ then gives \begin{align} \rho \leq \frac{1}{2} + \frac{1}{12}(\frac{\gamma}{L}+4) \approx \frac{11}{12}~, \end{align} where we assume $\gamma/L \approx 1$ for simplicity.
\section{The Rate of Convergence}\label{sec:theory} The following lemma is a key building block of the proof of the convergence of ORBCD in both the online and stochastic settings. \begin{lem} Let Assumptions~\ref{asm:orbcd1} and \ref{asm:orbcd2} hold. Let $\mathbf{x}^t$ be the sequence generated by ORBCD. $j_t$ is sampled randomly and uniformly from $\{1,\cdots, J \}$. We have \begin{align}\label{eq:orbcd_key_lem}
& \langle \nabla_{j_t} f_t(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{R_f^2}{2(\eta_t - L)} + g(\mathbf{x}^t) - g(\mathbf{x}^{t+1}) ~. \end{align} where $L = \max_j L_j$. \end{lem} \noindent{\itshape Proof:}\hspace*{1em} The optimality condition is \begin{align} \langle \nabla_{j_t} f_t(\mathbf{x}^t) + \eta_t (\mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t+1}), \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle \leq 0~. \end{align}
Rearranging the terms yields \begin{align} & \langle \nabla_{j_t} f_t(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle \leq - \eta_t \langle \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^t \|_2^2 - \| \mathbf{x}_{j_t} - \mathbf{x}_{j_t}^{t+1} \|_2^2 - \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 ) \nonumber \\
& = \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2 - \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 ) ~, \end{align} where the last equality uses $\mathbf{x}^{t+1} = (\mathbf{x}_{j_t}^{t+1}, \mathbf{x}_{k\neq {j_t}}^t)$. By the smoothness of $f_t$, we have \begin{align}
f_t(\mathbf{x}^{t+1}) \leq f_t(\mathbf{x}^t) + \langle \nabla_j f_t(\mathbf{x}^t), \mathbf{x}_j^{t+1} - \mathbf{x}_j^t \rangle + \frac{L_j}{2} \| \mathbf{x}_j^{t+1} - \mathbf{x}_j^t \|_2^2~. \end{align} Since $\mathbf{x}^{t+1} - \mathbf{x}^t = U_{j_t}(\mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^{t})$, \iffalse the convexity of $g$ gives \begin{align} & g(\mathbf{x}^{t+1}) - g(\mathbf{x}^t) \leq \langle g'(\mathbf{x}^{t+1}), \mathbf{x}^{t+1} - \mathbf{x}^t \rangle \leq \langle g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) + \sum_{\mathbb{I}_{j_t} \cap \mathbb{I}_k \neq \emptyset} g'_{k}(\mathbf{x}_{k}^{t+1}), \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \rangle \nonumber \\
& \leq \langle g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \rangle + \frac{1}{2\alpha}\| \sum_{\mathbb{I}_{j_t} \cap \mathbb{I}_k \neq \emptyset} g'_{k}(\mathbf{x}_{k}^{t+1}) \|_2^2 + \frac{\alpha}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 \nonumber \\
& \leq \langle g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \rangle + \frac{(J-1)R_g^2}{2\alpha} + \frac{\alpha}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2~. \end{align} \begin{align} g(\mathbf{x}^{t+1}) - g(\mathbf{x}^t) \leq \langle g'_{j_t}(\mathbf{x}_{j_t}^{t+1}) , \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \rangle~. \end{align} \begin{align}
\| \mathbf{x}_{k}^{t+1} - \mathbf{x}_{k}^t \|_2^2 = \| \mathbf{x}_{\mathbb{I}_{j_t} \cap \mathbb{I}_k \neq \emptyset}^{t+1} - \mathbf{x}_{\mathbb{I}_{j_t} \cap \mathbb{I}_k \neq \emptyset}^t \|_2^2 \leq \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2~. \end{align} \begin{align}
\langle \sum_{\mathbb{I}_{j_t} \cap \mathbb{I}_k \neq \emptyset} g'_{k}(\mathbf{x}_{k}^{t+1}), \mathbf{x}_{k}^{t+1} - \mathbf{x}_{k}^t \rangle \leq \frac{(J-1)R_g^2}{2\alpha} + \frac{\alpha}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2~. \end{align} \fi we have \begin{align} & f_t(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1}) - [f_t(\mathbf{x}^t) + g(\mathbf{x}^t)] \nonumber \\
& \leq \langle \nabla_{j_t} f_t(\mathbf{x}^t), \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \rangle + \frac{L_{j_t}}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 + g_{j_t}(\mathbf{x}_{j_t}^{t+1}) - g_{j_t}(\mathbf{x}_{j_t}) + g_{j_t}(\mathbf{x}_{j_t}^{t}) - g_{j_t}(\mathbf{x}_{j_t}) \nonumber \\
& \leq \langle \nabla_{j_t} f_t(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t+1}), \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t} \rangle + \frac{L_{j_t}}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 - \langle \nabla_{j_t} f_t(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t}), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{L_{j_t} - \eta_t}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 - \langle \nabla_{j_t} f_t(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^{t}), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle ~. \end{align} Rearranging the terms yields \begin{align}\label{eq:lem1}
\langle \nabla_{j_t} f_t(\mathbf{x}^t) + g_{j_t}'(\mathbf{x}^t) , \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle &\leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{L_{j_t} - \eta_t}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 \nonumber \\ &+ f_t(\mathbf{x}^t) + g(\mathbf{x}^t)- [ f_t(\mathbf{x}^{t+1}) + g(\mathbf{x}^{t+1}) ]~. \end{align} The convexity of $f_t$ gives \begin{align}
f_t(\mathbf{x}^t) - f_t(\mathbf{x}^{t+1}) \leq \langle \nabla f_t(\mathbf{x}^t) , \mathbf{x}^t - \mathbf{x}^{t+1} \rangle = \langle \nabla_{j_t} f_t(\mathbf{x}^t) , \mathbf{x}_{j_t}^t - \mathbf{x}_{j_t}^{t+1} \rangle \leq \frac{1}{2\alpha} \| \nabla_{j_t} f_t(\mathbf{x}^t) \|_2^2 + \frac{\alpha}{2} \| \mathbf{x}_{j_t}^t - \mathbf{x}_{j_t}^{t+1} \|_2^2~. \end{align} where the equality uses $\mathbf{x}^{t+1} = (\mathbf{x}_{j_t}^{t+1}, \mathbf{x}_{k\neq {j_t}}^t)$. Plugging into~\myref{eq:lem1}, we have \begin{align} & \langle \nabla_{j_t} f_t(\mathbf{x}^t) + g'_{j_t}(\mathbf{x}_{j_t}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{L_{j_t} - \eta_t}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 + \langle \nabla_{j_t} f_t(\mathbf{x}^t) , \mathbf{x}_{j_t}^t - \mathbf{x}_{j_t}^{t+1} \rangle + g(\mathbf{x}^t) - g(\mathbf{x}^{t+1}) \nonumber \\
& \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{L_{j_t} - \eta_t}{2} \| \mathbf{x}_{j_t}^{t+1} - \mathbf{x}_{j_t}^t \|_2^2 + \frac{\alpha}{2}\| \mathbf{x}_{j_t}^t - \mathbf{x}_{j_t}^{t+1} \|_2^2 + \frac{1}{2\alpha} \| \nabla_{j_t} f_t(\mathbf{x}^t) \|_2^2 ~. \end{align} Let $ L = \max_{j} L_j$. Setting $\alpha = \eta_t - L$ where $\eta_t > L$ completes the proof. \qed
This lemma is also a key building block in the proof of iteration complexity of GD, OGD/SGD and RBCD. In GD, by setting $\eta_t = L$, the iteration complexity of GD can be established. In RBCD, by simply setting $\eta_t = L_{j_t}$, the iteration complexity of RBCD can be established.
\subsection{Online Optimization} Note $\mathbf{x}^t$ depends on the sequence of observed realization of the random variable $\xi = \{ j_1, \cdots, j_{t-1} \}$. The following theorem establishes the regret bound of ORBCD. \begin{thm} Let $\eta_t = \sqrt{t} + L$ in the ORBCD and the Assumption~\ref{asm:orbcd1} and \ref{asm:orbcd2} hold. $j_t$ is sampled randomly and uniformly from $\{1,\cdots, J \}$. The regret bound $R(T)$ of ORBCD is \begin{align} R(T) \leq J ( \frac{\sqrt{T} + L}{2}D^2 + \sqrt{T} R^2 + g(\mathbf{x}^1) - g(\mathbf{x}^*) )~. \end{align} \end{thm} \noindent{\itshape Proof:}\hspace*{1em} In~\myref{eq:orbcd_key_lem}, conditioned on $\mathbf{x}^t$, take expectation over $j_t$, we have \begin{align}\label{eq:a}
\frac{1}{J} \langle \nabla f_t(\mathbf{x}^t) + g'(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle &\leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^t) - \mathbb{E}g(\mathbf{x}^{t+1}) ~. \end{align} Using the convexity, we have \begin{align} f_t(\mathbf{x}^t) + g(\mathbf{x}^t) - [f_t(\mathbf{x}) + g(\mathbf{x})] \leq \langle \nabla f_t(\mathbf{x}^t) + g'(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle~. \end{align} Together with~\myref{eq:a}, we have \begin{align}
f_t(\mathbf{x}^t) + g(\mathbf{x}^t) - [f_t(\mathbf{x}) + g(\mathbf{x}) ] &\leq J \left \{ \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^t) - \mathbb{E}g(\mathbf{x}^{t+1}) \right \}~. \end{align} Taking expectation over $\xi$ on both sides, we have \begin{align}
\mathbb{E}_{\xi} \left [ f_t(\mathbf{x}^t) + g(\mathbf{x}^t) - [f_t(\mathbf{x}) + g(\mathbf{x}) ] \right ] &\leq J \left \{ \frac{\eta_t}{2} ( \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) \right .\nonumber \\ & + \left. \frac{R^2}{2(\eta_t - L)} + \mathbb{E}_{\xi} g(\mathbf{x}^t) - \mathbb{E}_{\xi}g(\mathbf{x}^{t+1}) \right \}~. \end{align} Summing over $t$ and setting $\eta_t = \sqrt{t} + L$, we obtain the regret bound \begin{align}\label{eq:orbcd_rgt0} & R(T) = \sum_{t=1}^T\left \{ \mathbb{E}_{\xi} [ f_t(\mathbf{x}^t) + g(\mathbf{x}^t) ] - [f_t(\mathbf{x}) +g(\mathbf{x})] \right \} \nonumber \\
&\leq J \left \{ - \frac{\eta_{T}}{2} \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^{T+1} \|_2^2 + \sum_{t=1}^{T}(\eta_{t} - \eta_{t-1}) \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^{t} \|_2^2 + \sum_{t=1}^{T}\frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^1) - \mathbb{E}_{\xi}g(\mathbf{x}^{T+1}) \right \} \nonumber \\ & \leq J \left \{ \frac{\eta_T}{2} D^2 + \sum_{t=1}^{T}\frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^1) - g(\mathbf{x}^*) \right \} \nonumber \\ & \leq J \left \{ \frac{\sqrt{T} + L}{2} D^2 + \sum_{t=1}^{T}\frac{R^2}{2\sqrt{t} } + g(\mathbf{x}^1) - g(\mathbf{x}^*) \right \} \nonumber \\ & \leq J ( \frac{\sqrt{T} + L}{2} D^2+ \sqrt{T} R^2 + g(\mathbf{x}^1) - g(\mathbf{x}^*) )~, \end{align} which completes the proof. \qed
If one of the functions is strongly convex, ORBCD can achieve a $\log(T)$ regret bound, which is established in the following theorem. \begin{thm}\label{thm:orbcd_rgt_strong} Let the Assumption~\ref{asm:orbcd1}-\ref{asm:orbcd3} hold and $\eta_t = \frac{\gamma t}{J} + L$ in ORBCD. $j_t$ is sampled randomly and uniformly from $\{1,\cdots, J \}$. The regret bound $R(T)$ of ORBCD is \begin{align} R(T) \leq J^2R^2 \log(T) + J(g(\mathbf{x}^1) - g(\mathbf{x}^*) ) ~. \end{align} \end{thm} \noindent{\itshape Proof:}\hspace*{1em} Using the strong convexity of $f_t + g$ in~\myref{eq:stronggcov}, we have \begin{align}
f_t(\mathbf{x}^t) + g(\mathbf{x}^t) - [f_t(\mathbf{x}) + g(\mathbf{x})] \leq \langle \nabla f_t(\mathbf{x}^t) + g'(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle - \frac{\gamma}{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2~. \end{align} Together with~\myref{eq:a}, we have \begin{align}
f_t(\mathbf{x}^t) + g(\mathbf{x}^t) - [f_t(\mathbf{x}) + g(\mathbf{x}) ] &\leq \frac{J\eta_t - \gamma }{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \frac{J\eta_t}{2} \mathbb{E}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) \nonumber \\ & + \frac{JR^2}{2(\eta_t - L)} + J [ g(\mathbf{x}^t) - \mathbb{E}g(\mathbf{x}^{t+1}) ] ~. \end{align} Taking expectation over $\xi$ on both sides, we have \begin{align}
\mathbb{E}_{\xi} \left [ f_t(\mathbf{x}^t) + g(\mathbf{x}^t) - [f_t(\mathbf{x}) + g(\mathbf{x}) ] \right ] &\leq \frac{J\eta_t - \gamma}{2} \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^t \|_2^2 - \frac{J\eta_t}{2}\mathbb{E}_{\xi}[\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2]) \nonumber \\ & + \frac{JR^2}{2(\eta_t - L)} + J [ \mathbb{E}_{\xi} g(\mathbf{x}^t) - \mathbb{E}_{\xi}g(\mathbf{x}^{t+1}) ]~. \end{align} Summing over $t$ and setting $\eta_t = \frac{\gamma t}{J} + L$, we obtain the regret bound \begin{align} & R(T) = \sum_{t=1}^T\left \{ \mathbb{E}_{\xi} [ f_t(\mathbf{x}^t) + g(\mathbf{x}^t) ] - [f_t(\mathbf{x}) +g(\mathbf{x})] \right \} \nonumber \\
&\leq - \frac{J\eta_{T}}{2} \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^{T+1} \|_2^2 + \sum_{t=1}^{T}\frac{J\eta_{t} -\gamma - J\eta_{t-1}}{2}\mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^{t} \|_2^2 + \sum_{t=1}^{T}\frac{JR^2}{2(\eta_t - L)} + J ( g(\mathbf{x}^1) - \mathbb{E}_{\xi}g(\mathbf{x}^{T+1}) ) \nonumber \\ & \leq \sum_{t=1}^{T}\frac{J^2R^2}{2\gamma t } + J(g(\mathbf{x}^1) - g(\mathbf{x}^*) ) \nonumber \\ & \leq J^2R^2 \log(T) + J(g(\mathbf{x}^1) - g(\mathbf{x}^*) ) ~, \end{align} which completes the proof. \qed
In general, ORBCD can achieve the same order of regret bound as OGD and other first-order online optimization methods, although the constant could be $J$ times larger.
\iffalse If setting $f_t = f$, ORBCD turns to batch optimization or randomized overlapping block coordinate descent (ROLBCD). By dividing the regret bound by $T$ and denoting $\bar{\mathbf{x}}^T = \frac{1}{T}\sum_{t=1}^{T} \mathbf{x}^t$, we obtain the iteration complexity of ROLBCD, i.e., \begin{align} \mathbb{E}_{\xi_{T-1}^j}[ f(\bar{\mathbf{x}}^T) + g(\bar{\mathbf{x}}^T) ] - [f(\mathbf{x}) +g(\mathbf{x})] = \frac{R(T)}{T} ~. \end{align} The iteration complexity of ROLBCD is $O(\frac{1}{\sqrt{T}})$, which is worse than RBCD. \fi \subsection{Stochastic Optimization}
In the stochastic setting, ORBCD first randomly chooses the $i_t$-th block sample and the $j_t$-th block coordinate. $j_{t}$ and $i_{t}$ are independent. $\mathbf{x}^t$ depends on the observed realization of the random variables $\xi = \{ ( i_1, j_1), \cdots, (i_{t-1}, j_{t-1}) \}$. The following theorem establishes the iteration complexity of ORBCD for general convex functions. \begin{thm}\label{thm:orbcd_stc_ic} Let $\eta_t = \sqrt{t} + L$ and $\bar{\mathbf{x}}^T = \frac{1}{T} \sum_{t=1}^{T}\mathbf{x}^t $ in the ORBCD. $i_t, j_t$ are sampled randomly and uniformly from $\{1,\cdots, I \}$ and $\{1,\cdots, J \}$ respectively. The iteration complexity of ORBCD is \begin{align} \mathbb{E}_{\xi} [ f(\bar{\mathbf{x}}^t) + g(\bar{\mathbf{x}}^t) ] - [f(\mathbf{x}) +g(\mathbf{x})] \leq \frac{J ( \frac{\sqrt{T} + L}{2} D^2+ \sqrt{T} R^2 + g(\mathbf{x}^1) - g(\mathbf{x}^*) )}{T}~. \end{align} \end{thm} \noindent{\itshape Proof:}\hspace*{1em} In the stochastic setting, let $f_t$ be $f_{i_t}$ in~\myref{eq:orbcd_key_lem}, we have \begin{align}\label{eq:orbcd_key_stoc}
\langle \nabla_{j_t} f_{i_t}(\mathbf{x}^t) + g_{j_t}'(\mathbf{x}^t) , \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle \leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^t) - g(\mathbf{x}^{t+1}) ~. \end{align} Note $i_t, j_t$ are independent of $\mathbf{x}^t$. Conditioned on $\mathbf{x}^t$, taking expectation over $i_t$ and $j_t$, the RHS is \begin{align} & \mathbb{E}\langle \nabla_{j_t} f_{i_t}(\mathbf{x}^t) + g_{j_t}'(\mathbf{x}^t) , \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle = \mathbb{E}_{i_t} [ \mathbb{E}_{j_t} [ \langle \nabla_{j_t} f_{i_t}(\mathbf{x}^t) + g_{j_t}'(\mathbf{x}^t), \mathbf{x}_{j_t}^{t} - \mathbf{x}_{j_t} \rangle ] ] \nonumber \\ & = \frac{1}{J} \mathbb{E}_{i_t} [ \langle \nabla f_{i_t}(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle + \langle g'(\mathbf{x}^t) , \mathbf{x}^{t} - \mathbf{x} \rangle ] \nonumber \\ & = \frac{1}{J}\langle \nabla f(\mathbf{x}^t) + g'(\mathbf{x}^t) , \mathbf{x}^{t} - \mathbf{x} \rangle ~. \end{align} Plugging back into~\myref{eq:orbcd_key_stoc}, we have \begin{align}\label{eq:orbcd_stc_0} & \frac{1}{J}\langle \nabla f(\mathbf{x}^t) + g'(\mathbf{x}^t) , \mathbf{x}^{t} - \mathbf{x} \rangle \nonumber \\
&\leq \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^t) - \mathbb{E} g(\mathbf{x}^{t+1}) ~. \end{align} Using the convexity of $f + g$, we have \begin{align} f(\mathbf{x}^t) + g(\mathbf{x}^t) - [f(\mathbf{x}) + g(\mathbf{x})] \leq \langle \nabla f(\mathbf{x}^t) + g'(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle~. \end{align} Together with~\myref{eq:orbcd_stc_0}, we have \begin{align}
f(\mathbf{x}^t) + g(\mathbf{x}^t) - [f(\mathbf{x}) + g(\mathbf{x}) ] &\leq J \left \{ \frac{\eta_t}{2} ( \| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2) + \frac{R^2}{2(\eta_t - L)} + g(\mathbf{x}^t) - \mathbb{E}g(\mathbf{x}^{t+1}) \right \}~. \end{align} Taking expectation over $\xi$ on both sides, we have \begin{align}
\mathbb{E}_{\xi} \left [ f(\mathbf{x}^t) + g(\mathbf{x}^t) \right ] - [f(\mathbf{x}) + g(\mathbf{x}) ] &\leq J \left \{ \frac{\eta_t}{2} ( \mathbb{E}_{\xi}\| \mathbf{x} - \mathbf{x}^t \|_2^2 - \mathbb{E}_{\xi}[\| \mathbf{x} - \mathbf{x}^{t+1} \|_2^2]) \right .\nonumber \\ & + \left. \frac{R^2}{2(\eta_t - L)} + \mathbb{E}_{\xi} g(\mathbf{x}^t) - \mathbb{E}_{\xi}g(\mathbf{x}^{t+1}) \right \}~. \end{align} Summing over $t$ and setting $\eta_t = \sqrt{t} + L$, following similar derivation in~\myref{eq:orbcd_rgt0}, we have \begin{align} \sum_{t=1}^T\left \{ \mathbb{E}_{\xi} [ f(\mathbf{x}^t) + g(\mathbf{x}^t) ] - [f(\mathbf{x}) +g(\mathbf{x})] \right \} \leq J ( \frac{\sqrt{T} + L}{2} D^2+ \sqrt{T} R^2 + g(\mathbf{x}^1) - g(\mathbf{x}^*) )~. \end{align} Dividing both sides by $T$, using the Jensen's inequality and denoting $\bar{\mathbf{x}}^T = \frac{1}{T}\sum_{t=1}^{T}\mathbf{x}^t$ complete the proof. \qed
For strongly convex functions, we have the following results. \begin{thm} For strongly convex function, setting $\eta_t = \frac{\gamma t}{J} + L$ in the ORBCD. $i_t, j_t$ are sampled randomly and uniformly from $\{1,\cdots, I \}$ and $\{1,\cdots, J \}$ respectively. Let $\bar{\mathbf{x}}^T = \frac{1}{T} \sum_{t=1}^{T}\mathbf{x}^t $. The iteration complexity of ORBCD is \begin{align} \mathbb{E}_{\xi} [ f(\bar{\mathbf{x}}^T) + g(\bar{\mathbf{x}}^T) ] - [f(\mathbf{x}) +g(\mathbf{x})] \leq \frac{J^2R^2 \log(T) + J(g(\mathbf{x}^1) - g(\mathbf{x}^*) ) }{T}~. \end{align} \end{thm} \noindent{\itshape Proof:}\hspace*{1em} If $f+g$ is strongly convex, we have \begin{align}
f(\mathbf{x}^t) + g(\mathbf{x}^t) - [f(\mathbf{x}) + g(\mathbf{x})] \leq \langle \nabla f(\mathbf{x}^t) + g'(\mathbf{x}^t), \mathbf{x}^{t} - \mathbf{x} \rangle - \frac{\gamma}{2} \| \mathbf{x} - \mathbf{x}^t \|_2^2~. \end{align} Plugging back into~\myref{eq:orbcd_stc_0}, following similar derivation in Theorem~\ref{thm:orbcd_rgt_strong} and Theorem~\ref{thm:orbcd_stc_ic} complete the proof. \qed
\subsection{ORBCD with Variance Reduction} According to the Theorem 2.1.5 in~\cite{nesterov04:convex}, the block-wise Lipschitz gradient in Assumption~\ref{asm:orbcd1} can also be rewritten as follows: \begin{align}
& f_i(\mathbf{x}) \leq f_i(\mathbf{y}) + \langle \nabla_j f_i(\mathbf{y}), \mathbf{x}_j - \mathbf{y}_j\rangle + \frac{L}{2} \| \mathbf{x}_j - \mathbf{y}_j\|_2^2~, \label{eq:blk_lip1} \\
&\| \nabla_j f_i(\mathbf{x}) - \nabla_j f_i(\mathbf{y}) \|_2^2 \leq L \langle \nabla_j f_i(\mathbf{x}) - \nabla_j f_i(\mathbf{y}), \mathbf{x}_j - \mathbf{y}_j\rangle~.\label{eq:blk_lip2} \end{align}
Let $\mathbf{x}^*$ be an optimal solution. Define an upper bound of $f(\mathbf{x}) + g(\mathbf{x}) -(f(\mathbf{x}^*)+g(\mathbf{x}^*))$ as \begin{align}\label{eq:def_h} h(\mathbf{x},\mathbf{x}^*)= \langle \nabla f(\mathbf{x}), \mathbf{x} - \mathbf{x}^*\rangle + g(\mathbf{x}) - g(\mathbf{x}^*)~. \end{align}
If $f(\mathbf{x}) + g(\mathbf{x})$ is strongly convex, we have \begin{align}\label{eq:orbcd_strong_h}
h(\mathbf{x},\mathbf{x}^*) \geq f(\mathbf{x}) - f(\mathbf{x}^*) + g(\mathbf{x}) - g(\mathbf{x}^*) \geq \frac{\gamma}{2} \| \mathbf{x} - \mathbf{x}^* \|_2^2 ~. \end{align}
\begin{lem}\label{lem:orbcdvd_lem1} Let $\mathbf{x}^*$ be an optimal solution and let Assumption~\ref{asm:orbcd1} hold. Then we have \begin{align}
\frac{1}{I} \sum_{i=1}^{I} \| \nabla f_i(\mathbf{x}) - \nabla f_i(\mathbf{x}^*) \|_2^2 \leq L h(\mathbf{x},\mathbf{x}^*)~, \end{align} where $h$ is defined in~\myref{eq:def_h}. \end{lem} \noindent{\itshape Proof:}\hspace*{1em} Since Assumption~\ref{asm:orbcd1} holds, we have \begin{align}\label{eq:orbcd_bd_h}
&\frac{1}{I} \sum_{i=1}^{I} \| \nabla f_i(\mathbf{x}) - \nabla f_i(\mathbf{x}^*) \|_2^2 = \frac{1}{I} \sum_{i=1}^{I} \sum_{j=1}^J \| \nabla_j f_i(\mathbf{x}) - \nabla_j f_i(\mathbf{x}^*) \|_2^2 \nonumber \\ &\leq \frac{1}{I} \sum_{i=1}^{I} \sum_{j=1}^J L \langle \nabla_j f_i(\mathbf{x}) - \nabla_j f_i(\mathbf{x}^*), \mathbf{x}_j - \mathbf{x}_j^*\rangle \nonumber \\ & = L [ \langle \nabla f(\mathbf{x}), \mathbf{x} - \mathbf{x}^*\rangle + \langle \nabla f(\mathbf{x}^*), \mathbf{x}^* - \mathbf{x} \rangle]~, \end{align} where the inequality uses~\myref{eq:blk_lip2}. For an optimal solution $\mathbf{x}^*$, $g'(\mathbf{x}^*) + \nabla f(\mathbf{x}^*) = 0$ where $g'(\mathbf{x}^*)$ is the subgradient of $g$ at $\mathbf{x}^*$. The second term in~\myref{eq:orbcd_bd_h} can be rewritten as \begin{align} & \langle \nabla f(\mathbf{x}^*), \mathbf{x}^* - \mathbf{x} \rangle = - \langle g'(\mathbf{x}^*), \mathbf{x}^* - \mathbf{x} \rangle = g(\mathbf{x}) - g(\mathbf{x}^*) ~. \end{align} Plugging into~\myref{eq:orbcd_bd_h} and using~\myref{eq:def_h} complete the proof. \qed
\begin{lem}\label{lem:orbcdvd_lem2} Let $\mathbf{v}_{j_k}^{i_k} $ and $\mathbf{x}_{j_k}^{k+1}$ be generated by~\myref{eq:orbcdvd_vij}-\myref{eq:orbcdvd_xj}. Conditioned on $\mathbf{x}^k$, we have \begin{align}
\mathbb{E} \| \mathbf{v}_{j_k}^{i_k} - \nabla_{j_k} f(\mathbf{x}^k) \|_2^2 \leq \frac{2L}{J} [h(\mathbf{x}^{k},\mathbf{x}^*) + h(\tilde{\mathbf{x}},\mathbf{x}^*)]~. \end{align} \end{lem} \noindent{\itshape Proof:}\hspace*{1em} Conditioned on $\mathbf{x}^k$, we have \begin{align} \mathbb{E}_{i_k}[ \nabla f_{i_k}(\mathbf{x}^k) - \nabla f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu} ] = \frac{1}{I} \sum_{i=1}^{I} [ \nabla f_{i}(\mathbf{x}^k) - \nabla f_{i}(\tilde{\mathbf{x}}) + \tilde{\mu} ] = \nabla f(\mathbf{x}^k)~. \end{align} Note $\mathbf{x}^k$ is independent of $i_k, j_k$. $i_k$ and $j_k$ are independent. Conditioned on $\mathbf{x}^k$, taking expectation over $i_k, j_k$ and using~\myref{eq:orbcdvd_vij} give \begin{align}
&\mathbb{E}\| \mathbf{v}_{j_k}^{i_k} - \nabla_{j_k} f(\mathbf{x}^k) \|_2^2 = \mathbb{E}_{i_k} [ \mathbb{E}_{j_k}\| \mathbf{v}_{j_k}^{i_k} - \nabla_{j_k} f(\mathbf{x}^k) \|_2^2] \nonumber \\
&= \mathbb{E}_{i_k}[ \mathbb{E}_{j_k}\| \nabla_{j_k} f_{i_k}(\mathbf{x}^k) - \nabla_{j_k} f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu}_{j_k} - \nabla_{j_k} f(\mathbf{x}^k) \|_2^2] \nonumber \\
&= \frac{1}{J}\mathbb{E}_{i_k}\| \nabla f_{i_k}(\mathbf{x}^k) - \nabla f_{i_k}(\tilde{\mathbf{x}}) + \tilde{\mu} - \nabla f(\mathbf{x}^k) \|_2^2 \nonumber \\
& \leq \frac{1}{J} \mathbb{E}_{i_k}\| \nabla f_{i_k}(\mathbf{x}^k) - \nabla f_{i_k}(\tilde{\mathbf{x}}) \|_2^2 \nonumber \\
& \leq \frac{2}{J} \mathbb{E}_{i_k}\| \nabla f_{i_k}(\mathbf{x}^k) - \nabla f_{i_k}(\mathbf{x}^*) \|_2^2 + \frac{2}{J} \mathbb{E}_{i_k}\| \nabla f_{i_k}(\tilde{\mathbf{x}}) - \nabla f_{i_k}(\mathbf{x}^*) \|_2^2 \nonumber \\
& = \frac{2}{IJ} \sum_{i=1}^{I}\| \nabla f_i(\mathbf{x}^k) - \nabla f_i(\mathbf{x}^*) \|_2^2 + \frac{2}{IJ}\sum_{i=1}^{I} \| \nabla f_i(\tilde{\mathbf{x}}) - \nabla f_i(\mathbf{x}^*) \|_2^2 \nonumber \\ & \leq \frac{2L}{J} [ h(\mathbf{x}^{k}, \mathbf{x}^*) + h(\tilde{\mathbf{x}}, \mathbf{x}^*)]~. \end{align}
The first inequality uses the fact $\mathbb{E} \| \zeta - \mathbb{E}\zeta \|_2^2 \leq \mathbb{E} \| \zeta \|_2^2$ given a random variable $\zeta$, the second inequality uses $\| \mathbf{a} + \mathbf{b} \|_2^2 \leq 2 \| \mathbf{a} \|_2^2 + 2\|\mathbf{b}\|_2^2$, and the last inequality uses Lemma~\ref{lem:orbcdvd_lem1}. \qed
\begin{lem}\label{lem:orbcdvd_lem3} Under Assumption~\ref{asm:orbcd1}, $f(\mathbf{x}) = \frac{1}{I} \sum_{i=1}^{I}f_i(\mathbf{x})$ has block-wise Lipschitz continuous gradient with constant $L$, i.e., \begin{align}
\| \nabla_j f(\mathbf{x} + U_j h_j ) - \nabla_j f(\mathbf{x}) \|_2 \leq L \| h_j \|_2~. \end{align} \end{lem} \noindent{\itshape Proof:}\hspace*{1em} Using the fact that $f(\mathbf{x}) = \frac{1}{I} \sum_{i=1}^{I}f_i(\mathbf{x})$, we have \begin{align}
&\| \nabla_j f(\mathbf{x} + U_j h_j ) - \nabla_j f(\mathbf{x}) \|_2 = \| \frac{1}{I} \sum_{i=1}^{I} [\nabla_j f_i(\mathbf{x} + U_j h_j ) - \nabla_j f_i(\mathbf{x}) ] \|_2 \nonumber \\
& \leq \frac{1}{I} \sum_{i=1}^{I} \| \nabla_j f_i(\mathbf{x} + U_j h_j ) - \nabla_j f_i(\mathbf{x}) \|_2 \nonumber \\
& \leq L \| h_j \|_2~, \end{align} where the first inequality uses the Jensen's inequality and the second inequality uses the Assumption~\ref{asm:orbcd1}. \qed
Now, we are ready to establish the linear convergence rate of ORBCD with variance reduction for strongly convex functions. \begin{thm}\label{thm:orbcdvd} Let $\mathbf{x}^t$ be generated by ORBCD with variance reduction~\myref{eq:orbcdvd_mu}-\myref{eq:orbcdvd_xj}. $j_k$ is sampled randomly and uniformly from $\{1,\cdots, J \}$. Assume $\eta > 2L$ and $m$ satisfy the following condition: \begin{align} \rho = \frac{L(m+1)}{(\eta-2L)m} + \frac{(\eta-L)J}{(\eta-2L)m} - \frac{1}{m}+ \frac{\eta (\eta-L)J}{(\eta-2L)m\gamma} < 1~, \end{align} Then ORBCDVD converges linearly in expectation, i.e., \begin{align}
\mathbb{E}_{\xi} [ f(\mathbf{x}^t) + g(\mathbf{x}^t) - (f(\mathbf{x}^*)+g(\mathbf{x}^*) ] \leq \rho^t [ \mathbb{E}_{\xi} h(\mathbf{x}^1, \mathbf{x}^*)]~. \end{align} where $h$ is defined in~\myref{eq:def_h}. \end{thm} \noindent{\itshape Proof:}\hspace*{1em} The optimality condition of~\myref{eq:orbcdvd_xj} is \begin{align} \langle \mathbf{v}_{j_k}^{i_k} + \eta (\mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k) + g'_{j_k}(\mathbf{x}_{j_k}^{k+1}), \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k} \rangle \leq 0~. \end{align}
Rearranging the terms yields \begin{align} & \langle \mathbf{v}_{j_k}^{i_k} + g'_{j_k}(\mathbf{x}_{j_k}^{k+1}) , \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k} \rangle \leq - \eta \langle \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k , \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k} \rangle \nonumber \\
& \leq \frac{\eta}{2} ( \| \mathbf{x}_{j_k} - \mathbf{x}_{j_k}^k \|_2^2 - \| \mathbf{x}_{j_k} - \mathbf{x}_{j_k}^{k+1} \|_2^2 - \| \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k \|_2^2 ) \nonumber \\
& = \frac{\eta}{2} ( \| \mathbf{x} - \mathbf{x}^k \|_2^2 - \| \mathbf{x} - \mathbf{x}^{k+1} \|_2^2 - \| \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k \|_2^2 ) ~, \end{align} where the last equality uses $\mathbf{x}^{k+1} = (\mathbf{x}_{j_k}^{k+1}, \mathbf{x}_{l\neq {j_k}}^k)$. Using the convexity of $g_j$ and the fact that $g(\mathbf{x}^k) - g(\mathbf{x}^{k+1}) = g_{j_k}(\mathbf{x}^k) - g_{j_k}(\mathbf{x}^{k+1})$, we have \begin{align} & \langle \mathbf{v}_{j_k}^{i_k} , \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k} \rangle + g_{j_k}(\mathbf{x}^k) - g_{j_k}(\mathbf{x}) \leq \langle \mathbf{v}_{j_k}^{i_k} , \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^{k+1} \rangle + g(\mathbf{x}^k) - g(\mathbf{x}^{k+1}) \nonumber \\
& + \frac{\eta}{2} ( \| \mathbf{x} - \mathbf{x}^k \|_2^2 - \| \mathbf{x} - \mathbf{x}^{k+1} \|_2^2 - \| \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k \|_2^2 ) ~. \end{align} According to Lemma~\ref{lem:orbcdvd_lem3} and using~\myref{eq:blk_lip1}, we have \begin{align}
\langle \nabla_{j_k} f(\mathbf{x}^k), \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^{k+1} \rangle \leq f(\mathbf{x}^k) - f(\mathbf{x}^{k+1}) + \frac{L}{2} \| \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^{k+1} \|_2^2~. \end{align} Letting $\mathbf{x} = \mathbf{x}^*$ and using the smoothness of $f$, we have \begin{align} & \langle \mathbf{v}_{j_k}^{i_k} , \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k} \rangle + g_{j_k}(\mathbf{x}^k) - g_{j_k}(\mathbf{x}^*) \leq \langle \mathbf{v}_{j_k}^{i_k} - \nabla_{j_k} f(\mathbf{x}^k), \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^{k+1} \rangle + f(\mathbf{x}^k) + g(\mathbf{x}^k) - [f(\mathbf{x}^{k+1})+g(\mathbf{x}^{k+1})] \nonumber \\
& + \frac{\eta}{2} ( \| \mathbf{x}^* - \mathbf{x}^k \|_2^2 - \| \mathbf{x}^* - \mathbf{x}^{k+1} \|_2^2 - \| \mathbf{x}_{j_k}^{k+1} - \mathbf{x}_{j_k}^k \|_2^2 ) + \frac{L}{2} \| \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^{k+1} \|_2^2\nonumber \\
& \leq \frac{1}{2(\eta-L)} \| \mathbf{v}_{j_k}^{i_k} - \nabla_{j_k} f(\mathbf{x}^k) \|_2^2 + f(\mathbf{x}^k) + g(\mathbf{x}^k) - [f(\mathbf{x}^{k+1})+g(\mathbf{x}^{k+1})] + \frac{\eta}{2} ( \| \mathbf{x}^* - \mathbf{x}^k \|_2^2 - \| \mathbf{x}^* - \mathbf{x}^{k+1} \|_2^2 ) ~. \end{align} Taking expectation over $i_k, j_k$ on both sides and using Lemma~\ref{lem:orbcdvd_lem2}, we have \begin{align}\label{eq:orbcdvd_expbd} & \mathbb{E} [ \langle \mathbf{v}_{j_k}^{i_k} , \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^* \rangle + g_{j_k}(\mathbf{x}^k) - g_{j_k}(\mathbf{x}^*)] \nonumber \\ &\leq \frac{L}{J(\eta-L)} [h(\mathbf{x}^k,\mathbf{x}^*) + h(\tilde{\mathbf{x}},\mathbf{x}^*)] + f(\mathbf{x}^k) + g(\mathbf{x}^k) - \mathbb{E}[f(\mathbf{x}^{k+1})+g(\mathbf{x}^{k+1})] \nonumber \\
& + \frac{\eta}{2} ( \| \mathbf{x}^* - \mathbf{x}^k \|_2^2 - \mathbb{E}\| \mathbf{x}^* - \mathbf{x}^{k+1} \|_2^2 )~. \end{align} The left hand side can be rewritten as \begin{align} & \mathbb{E} [\langle \mathbf{v}_{j_k}^{i_k} , \mathbf{x}_{j_k}^{k} - \mathbf{x}_{j_k}^* \rangle + g_{j_k}(\mathbf{x}^k) - g_{j_k}(\mathbf{x}^*)] = \frac{1}{J} [ \mathbb{E}_{i_k}\langle \mathbf{v}^{i_k} , \mathbf{x}^k - \mathbf{x}^* \rangle + g(\mathbf{x}^k) -g(\mathbf{x}^*) ] \nonumber \\ & = \frac{1}{J} [ \langle \nabla f(\mathbf{x}^k) , \mathbf{x}^k - \mathbf{x}^* \rangle + g(\mathbf{x}^k) -g(\mathbf{x}^*) ] = \frac{1}{J} h(\mathbf{x}^k,\mathbf{x}^*)~. \end{align} Plugging into~\myref{eq:orbcdvd_expbd} gives \begin{align} \frac{1}{J} [ h(\mathbf{x}^k,\mathbf{x}^*) ] & \leq \frac{L}{J(\eta-L)} [h(\mathbf{x}^k,\mathbf{x}^*) + h(\tilde{\mathbf{x}},\mathbf{x}^*)] + f(\mathbf{x}^k) + g(\mathbf{x}^k) - \mathbb{E}[f(\mathbf{x}^{k+1})+g(\mathbf{x}^{k+1})] \nonumber \\
&+ \frac{\eta}{2} ( \| \mathbf{x}^* - \mathbf{x}^k \|_2^2 - \mathbb{E}\| \mathbf{x}^* - \mathbf{x}^{k+1} \|_2^2 ) \nonumber \\ & \leq \frac{L}{J(\eta-L)} [h(\mathbf{x}^k,\mathbf{x}^*) + h(\tilde{\mathbf{x}},\mathbf{x}^*)] + f(\mathbf{x}^k) + g(\mathbf{x}^k) - \mathbb{E}[f(\mathbf{x}^{k+1})+g(\mathbf{x}^{k+1})] \nonumber \\
&+ \frac{\eta}{2} ( \| \mathbf{x}^* - \mathbf{x}^k \|_2^2 - \mathbb{E}\| \mathbf{x}^* - \mathbf{x}^{k+1} \|_2^2 ) ~, \end{align} Rearranging the terms yields \begin{align} \frac{\eta - 2L}{J(\eta-L)} h(\mathbf{x}^k,\mathbf{x}^*) &\leq \frac{L}{J(\eta-L)}[ h(\tilde{\mathbf{x}},\mathbf{x}^*) ] + f(\mathbf{x}^k) + g(\mathbf{x}^k) - \mathbb{E}[f(\mathbf{x}^{k+1})+g(\mathbf{x}^{k+1})] \nonumber \\
& + \frac{\eta}{2} ( \| \mathbf{x}^* - \mathbf{x}^k \|_2^2 - \mathbb{E}\| \mathbf{x}^* - \mathbf{x}^{k+1} \|_2^2 )~. \end{align} At time $t+1$, we have $\mathbf{x}_0 = \tilde{\mathbf{x}} = \mathbf{x}^t$. Summing over $k = 0,\cdots, m$ and taking expectation with respect to the history of random variable $\xi$, we have \begin{align} \frac{\eta - 2L}{J(\eta-L)} \sum_{k=0}^{m} \mathbb{E}_{\xi}h(\mathbf{x}_k,\mathbf{x}^*) &\leq \frac{L(m+1)}{J(\eta-L)} \mathbb{E}_{\xi}h(\tilde{\mathbf{x}},\mathbf{x}^*) + \mathbb{E}_{\xi}[ f(\mathbf{x}_0) + g(\mathbf{x}_0) ] - \mathbb{E}_{\xi} [ f(\mathbf{x}_{m+1}) + g(\mathbf{x}_{m+1})] \nonumber \\
&+ \frac{\eta}{2} ( \mathbb{E}_{\xi}\| \mathbf{x}^* - \mathbf{x}_0 \|_2^2 - \mathbb{E}_{\xi}\| \mathbf{x}^* - \mathbf{x}_{m+1} \|_2^2 ) \nonumber \\
&\leq \frac{Lm}{J(\eta-L)} \mathbb{E}_{\xi}h(\tilde{\mathbf{x}},\mathbf{x}^*) + \mathbb{E}_{\xi}h(\mathbf{x}_0,\mathbf{x}^*) + \frac{\eta}{2} \mathbb{E}_{\xi}\| \mathbf{x}^* - \mathbf{x}_0 \|_2^2 \nonumber ~, \end{align} where the last inequality uses \begin{align} f(\mathbf{x}_0) + g(\mathbf{x}_0) - [ f(\mathbf{x}_{m+1}) + g(\mathbf{x}_{m+1})] & \leq f(\mathbf{x}_0) + g(\mathbf{x}_0) - [ f(\mathbf{x}^*) + g(\mathbf{x}^*)] \nonumber \\ & \leq \langle\nabla f(\mathbf{x}_0), \mathbf{x}_0 - \mathbf{x}^* \rangle + g(\mathbf{x}_0) - g(\mathbf{x}^*) \nonumber \\ & = h(\mathbf{x}_0,\mathbf{x}^*)~. \end{align} Rearranging the terms gives \begin{align}
\frac{\eta -2L}{J(\eta-L)} \sum_{k=1}^{m} \mathbb{E}_{\xi}h(\mathbf{x}^k,\mathbf{x}^*) \leq \frac{L(m+1)}{J(\eta-L)} \mathbb{E}_{\xi}h(\tilde{\mathbf{x}},\mathbf{x}^*) + (1- \frac{\eta -2 L}{J(\eta-L)} ) \mathbb{E}_{\xi}h(\mathbf{x}_0,\mathbf{x}^*) + \frac{\eta}{2} \mathbb{E}_{\xi}\| \mathbf{x}^* - \mathbf{x}_0 \|_2^2~. \end{align} Picking $\mathbf{x}^{t+1}$ so that $h(\mathbf{x}^{t+1},\mathbf{x}^*) \leq h(\mathbf{x}_k,\mathbf{x}^*)$ for $1\leq k \leq m$, we have \begin{align}\label{eq:orbcdvd_lineareq0}
\frac{\eta - 2L}{J(\eta-L)} m \mathbb{E}_{\xi} h(\mathbf{x}^{t+1},\mathbf{x}^*) \leq [ \frac{L(m+1)}{J(\eta-L)} + 1- \frac{\eta -2 L}{J(\eta-L)} ] \mathbb{E}_{\xi}h(\mathbf{x}^t,\mathbf{x}^*) + \frac{\eta}{2} \mathbb{E}_{\xi}\| \mathbf{x}^* - \mathbf{x}^t \|_2^2~, \end{align} where the right hand side uses $\mathbf{x}^t = \mathbf{x}_0 = \tilde{\mathbf{x}}$. Using~\myref{eq:orbcd_strong_h}, we have \begin{align} \frac{\eta - 2L}{J(\eta-L)} m \mathbb{E}_{\xi} h(\mathbf{x}^{t+1} ,\mathbf{x}^*) \leq [ \frac{L(m+1)}{J(\eta-L)} + 1- \frac{\eta -2 L}{J(\eta-L)} +\frac{\eta}{\gamma} ] \mathbb{E}_{\xi}h(\mathbf{x}^t,\mathbf{x}^*)~. \end{align} Dividing both sides by $\frac{\eta - 2L}{J(\eta-L)} m$, we have \begin{align}
\mathbb{E}_{\xi} h(\mathbf{x}^{t+1},\mathbf{x}^*) \leq \rho \mathbb{E}_{\xi}h(\mathbf{x}^t,\mathbf{x}^*)~, \end{align} where \begin{align} \rho = \frac{L(m+1)}{(\eta-2L)m} + \frac{(\eta-L)J}{(\eta-2L)m} - \frac{1}{m}+ \frac{\eta (\eta-L)J}{(\eta-2L)m\gamma} < 1~, \end{align} which completes the proof. \qed
\section{Conclusions}\label{sec:conclusion} We proposed online randomized block coordinate descent (ORBCD) which combines online/stochastic gradient descent and randomized block coordinate descent. ORBCD is well suited to large-scale, high-dimensional problems with non-overlapping composite regularizers. We established the rate of convergence for ORBCD, which has the same order as OGD/SGD. For stochastic optimization with strongly convex functions, ORBCD can converge at a geometric rate in expectation by reducing the variance of the stochastic gradient.
\section*{Acknowledgment} H.W. and A.B. acknowledge the support of NSF via IIS-0953274, IIS-1029711, IIS- 0916750, IIS-0812183, NASA grant NNX12AQ39A, and the technical support from the University of Minnesota Supercomputing Institute. A.B. acknowledges support from IBM and Yahoo. H.W. acknowledges the support of DDF (2013-2014) from the University of Minnesota. H.W. also thanks Renqiang Min and Mehrdad Mahdavi for mentioning the papers about variance reduction when the author was in the NEC Research Lab, America.
\end{document} |
\begin{document}
\title[\tiny Approximation with respect to polynomials with constant coefficients]{The best m-term approximation with respect to polynomials with constant coefficients} \author{Pablo M. Bern\'a} \address{Pablo M. Bern\'a \\ Instituto Universitario de Matem\'atica Pura y Aplicada \\ Universitat Polit\`ecnica de Val\`encia \\ 46022 Valencia, Spain} \email{[email protected]} \author{\'Oscar Blasco} \address{\'Oscar Blasco \\ Departamento de An\'alisis Matem\'atico \\ Universidad de Valencia, Campus de Burjassot \\ 46100 Valencia, Spain} \email{[email protected]}
\thanks{The first author is partially supported by GVA PROMETEOII/2013/013 and 19368/PI/14 (\textit{Fundaci\'on S\'eneca}, Regi\'on de Murcia, Spain). The second author is partially supported by MTM2014-53009-P (MINECO, Spain).} \subjclass{41A65, 41A46, 46B15.}
\keywords{thresholding greedy algorithm; m-term approximation; weight-greedy basis. }
\begin{abstract} In this paper we show that greedy bases can be defined as those where the error term using the $m$-greedy approximant is uniformly bounded by the best $m$-term approximation with respect to polynomials with constant coefficients in the context of the weak greedy algorithm and weights. \end{abstract}
\maketitle
\section{Introduction }
Let $(\SX,\Vert \cdot \Vert)$ be an infinite-dimensional real Banach space and let $\mathscr B = (e_n)_{n=1}^\infty$ be a normalized Schauder basis of $\SX$ with biorthogonal functionals $(e_n^*)_{n=1}^\infty$. Throughout the paper, for each finite set $A\subset \SN$ we write $|A|$ for the cardinality of the set $A$, $1_A=\sum_{j\in A} e_j$ and $P_A(x)=\sum_{n\in A}e_n^*(x) e_n$. Given a collection of signs $(\eta_j)_{j\in A}\in\lbrace\pm 1\rbrace$ with $|A|<\infty$, we write $1_{\eta A} = \sum_{j\in A}\eta_j e_j\in \SX$ and we use the notation $[1_{\eta A}]$ and $[e_n, n\in A]$ for the one-dimensional subspace and the $|A|-$dimensional subspace generated by $1_{\eta A}$ and by $\lbrace e_n, n\in A\rbrace$ respectively. For each $x\in\SX$ and $m\in \SN$, S.V. Konyagin and V.N. Temlyakov defined in \cite{VT} the \textbf{$m$-th greedy approximant} of $x$ by $$\mathcal{G}_m(x) = \sum_{j=1}^m e_{\rho(j)}^*(x)e_{\rho(j)},$$ where $\rho$ is a greedy ordering, that is $\rho : \SN \longrightarrow \SN$ is a permutation such that $supp(x) = \lbrace n: e_n^*(x)\neq 0\rbrace \subseteq \rho(\SN)$ and $\vert e_{\rho(j)}^*(x)\vert \geq \vert e_{\rho(i)}^*(x)\vert$ for $j\leq i$. The collection $(\mathcal{G}_m)_{m=1}^\infty$ is called the \textbf{Thresholding Greedy Algorithm} (TGA).
This algorithm is usually a good candidate to obtain the \textbf{best m-term approximation} with regard to $\mathscr B$, defined by $$ \sigma_m(x,\mathscr B)_\SX =\sigma_m(x) := \inf\lbrace d(x,[e_n, n\in A]) : A\subset \SN, \vert A\vert = m\rbrace.$$
The bases satisfying \begin{equation} \label{old}\Vert x-\mathcal{G}_m(x)\Vert \leq C\sigma_m(x),\;\; \forall x\in\SX, \forall m\in\SN,\end{equation} where $C$ is an absolute constant are called \textbf{greedy bases} (see \cite{VT}).
The first characterization of greedy bases was given by S.V. Konyagin and V. N. Temlyakov in \cite{VT} who established that a basis is greedy if and only if it is unconditional and democratic (where a basis is said to be democratic if there exists $C>0$ so that $\|1_A\|\le C \|1_B\|$ for any pair of finite sets $A$ and $B$ with $|A|=|B|$).
Let us also recall two possible extensions of the greedy algorithm and the greedy basis. The first one consists in taking the $m$ terms with near-biggest coefficients and generating the Weak Greedy Algorithm (WGA) introduced by V.N. Temlyakov in \cite{T}.
For each $t\in (0,1]$,
a finite set $\Ga\subset\SN$ is called a $t$-greedy set for $x\in\SX$, for short $\Ga\in\mathscr G(x,t)$, if \[
\min_{n\in \Ga}|{\be^*_n}(x)|\,\geq\,t\max_{n\notin\Ga}|{\be^*_n}(x)|, \]
and write $\Ga\in\mathscr G(x,t,N)$ if in addition $|\Ga|=N$. A \textbf{$t$-greedy operator of order $N$} is a mapping $G^t:\SX\to\SX$ such that \[ G^t(x)=\sum_{n\in \Ga_x}{\be^*_n}(x){\mathbf e}_n, \quad \mbox{for some }\Ga_x\in \mathscr G(x,t,N). \]
A basis is called \textbf{$t$-greedy} if there exists $C(t)>0$ such that \begin{eqnarray} \Vert x-G^t(x)\Vert \leq C(t)\sigma_m(x)\; \forall x\in\SX, \forall m\in\SN, \forall G^t\in \mathscr G(x,t,m). \end{eqnarray}
It was shown that a basis is $t$-greedy for some $0<t\le 1$ if and only if it is $t$-greedy for all $0<t\le 1$. From the proof it follows that greedy bases are also $t$-greedy bases with constant $C(t)= O(1/t)$ as $t\to 0$.
The second one consists in replacing $|A|$ by $w(A)=\sum_{n\in A} w_n$ and it was considered by G. Kerkyacharian, D. Picard and V.N. Temlyakov in \cite{KPT} (see also \cite[Definition 16]{Tem}). Given a weight sequence $\omega = \lbrace \omega_n\rbrace_{n=1}^\infty, \omega_n >0$ and a positive real number $\delta>0$, they defined $$\sigma_\delta ^\omega (x) = \inf \lbrace d(x,[e_n, n\in A]) : A\subset \SN, \omega(A)\leq \delta\rbrace$$ where $\omega(A) := \sum_{n\in A}\omega_n$, with $A\subset\mathbb{N}$.
They called \textbf{weight-greedy bases} ($\omega$-greedy bases) those bases satisfying \begin{eqnarray}\label{wt} \Vert x-\mathcal{G}_m(x)\Vert \leq C \sigma_{\omega(A_m)}^\omega (x),\; \forall x\in\SX, \forall m\in\SN, \end{eqnarray}
where $C>0$ is an absolute constant and $A_m = supp(\mathcal{G}_m(x))$. Moreover, they proved in \cite{KPT} that $\mathscr B$ is an $\omega$-greedy basis if and only if it is unconditional and $w$-democratic (where a basis is $w$-democratic whenever there exists $C>0$ so that $\|1_A\|\le C \|1_B\|$ for any pair of finite sets $A$ and $B$ with $w(A)\le w(B)$).
This generalization was motivated by the work of A. Cohen, R.A. DeVore and R. Hochmuth in \cite{CDH} where the basis was indexed by dyadic intervals and $w_\alpha (\Lambda)=\sum_{I\in \Lambda}|I|^\alpha$. Later in 2013, similar considerations were considered by E. Hern\'andez and D. Vera to prove some inclusions of approximation spaces (see \cite{HV}).
Let us summarize and use the following combined definition. \begin{defi} Let $\mathscr B$ be a normalized Schauder basis in $\mathbb{X}$, $0<t\le 1$ and weight sequence $\omega = \lbrace \omega_n\rbrace_{n=1}^\infty$ with $\omega_n >0$. We say that $\mathscr B$ is \textbf{$(t,\omega)$-greedy} if there exists $C(t)>0$ such that \begin{equation}\label{g} \Vert x-G^t(x)\Vert \leq C(t)\sigma^w_{m(t)}(x)\; \forall x\in\SX, \forall m\in\SN, \forall G^t\in \mathscr G(x,t,m) \end{equation} where $A_m(t)=supp (G^t(x))$ and $m(t)=w(A_m(t))$. \end{defi}
The authors introduced (see \cite{BB}) the best $m$-term approximation with respect to polynomials with constant coefficients as follows: $$\mathcal{D}^*_m(x) := \inf \lbrace d(x,[1_{\eta A}]) : A\subset \SN, (\eta_n)\in \{\pm 1\}, \vert A\vert = m\rbrace.$$
Obviously, $\sigma_m(x)\leq \mathcal{D}^*_m(x)$ but, while $\sigma_m(x)\to 0$ as $m\to\infty$ it was shown that for orthonormal bases in Hilbert spaces we have $\mathcal{D}^*_m(x)\to \|x\|$ as $m\to\infty$. The following result establishes a new description of greedy bases using the best $m$-term approximation with respect to polynomials with constant coefficients. \begin{theorem} (\cite[Theorem 3.6]{BB}) Let $\SX$ be a Banach space and $\mathscr B$ a Schauder basis of $\SX$.
(i) If there exists $C>0$ such that
\begin{equation}\label{new}\Vert x-\mathcal{G}_m(x)\Vert \leq C\mathcal{D}^*_m(x),\; \forall x\in \SX,\; \forall m\in \mathbb{N},\end{equation}
then $\mathscr B$ is $C$-suppression unconditional and $C$-symmetric for largest coefficients.
(ii) If $\mathscr B$ is $K_s$-suppression unconditional and $C_s$-symmetric for largest coefficients then
$$\Vert x-\mathcal{G}_m(x)\Vert \leq (K_s C_s)\sigma_m(x),\; \forall x\in \SX,\; \forall m\in \mathbb{N}.$$
\end{theorem}
The concepts of suppression unconditional and symmetric for largest coefficients bases can be found in \cite{BB,AA2,AW,DKOSS,VT}. We recall here that a basis is \textbf{$K_s$-suppression unconditional} if the projection operator is uniformly bounded, that is to say $$\Vert P_A(x)\Vert \leq K_s\Vert x\Vert,\; \forall x\in\SX,\forall A\subset \SN$$
and $\mathscr B$ is \textbf{$C_s$-symmetric for largest coefficients} if $$\Vert x+t1_{\varepsilon A}\Vert \leq C_s\Vert x+t1_{\varepsilon' B}\Vert,$$ for any $\vert A\vert = \vert B\vert$, $A\cap B=\emptyset$, $supp(x) \cap (A\cup B) = \emptyset$, $(\varepsilon_j), (\varepsilon'_j) \in \lbrace \pm 1\rbrace$ and $t = \max\lbrace \vert e_n^*(x)\vert : n\in supp(x)\rbrace$.
In this note we shall give a direct proof of the equivalence between condition (\ref{old}) and (\ref{new}) even in the setting of $(t,w)$-greedy basis.
Let us now introduce our best $m$-term approximation with respect to polynomials with constant coefficients associated to a weight sequence and the basic property to be considered in the paper.
\begin{defi} Let $\mathscr B$ be a normalized Schauder basis in $\mathbb{X}$, $0<t\le 1$ and a weight sequence $\omega = \lbrace \omega_n\rbrace_{n=1}^\infty$ with $\omega_n >0$. We denote by $$\mathcal{D}_{\delta}^\omega (x) := \inf \lbrace d(x,[1_{\eta A}]) : A\subset \SN, (\eta_n)\in \{\pm 1\}, \omega(A)\leq \delta\rbrace.$$
The basis $\mathscr B$ is said to be $(t,w)$-greedy for polynomials with constant coefficients, or to have the {\bf $(t,w)$-PCCG property}, if there exists $D(t)>0$ such that \begin{equation}\label{ng}\Vert x-G^t(x)\Vert \leq D(t)\mathcal{D}_{m(t)}^\omega (x), \forall x\in\SX, \forall m\in\SN, \forall G^t\in \mathscr G(x,t,m)\end{equation} where $A_m(t)=supp(G^t(x))$ and $m(t)=\omega(A_m(t))$.
In the case $t=1$ and $w(A)=|A|$ we simply call it the {\bf PCCG property}. \end{defi} Of course $\sigma_\delta ^\omega(x) \leq \mathcal{D}_\delta^\omega(x)$ for all $\delta>0$, hence if the basis is $(t,\omega)$-greedy then (\ref{ng}) holds with the $D(t)=C(t)$. We now formulate our main result which produces a direct proof of the result in \cite{BB} and give the extension to $t$-greedy and weighted greedy versions. \begin{theorem} Let $\mathscr B$ be a normalized Schauder basis in $\mathbb{X}$ and let $\omega = \lbrace \omega_n\rbrace_{n=1}^\infty$ be a weight sequence with $\omega_n >0$ for all $n\in \mathbb N$. The following are equivalent:
(i) There exist $0<s\le 1$ such that $\mathscr B$ has the $(s,w)$-PCCG property.
(ii) $\mathscr B$ is $(t,\omega)$-greedy for all $0<t\le 1$. \end{theorem}
\begin{proof} Only the implication (i) $\Longrightarrow$ (ii) needs a proof. Let us assume that (\ref{ng}) holds for some $0<s\le 1$. Let $0<t\le 1$, $x \in\SX$, $m\in \mathbb N$ and $G^t\in \mathscr G(x,t,m)$. We write $G^t(x) = P_{A_m(t)}(x)$ with $A_m(t)\in \mathcal G(x,t,m)$. For each $\varepsilon >0$ we choose $z = \sum_{n\in B}e_n^*(x)e_n$ with $\omega(B)\leq \omega(A_m(t))$ and $\Vert x-z\Vert \leq \sigma_{\omega(A_m)}^\omega (x) + \varepsilon$.
We write $$x- P_{A_m(t)}(x)= x- P_{A_m(t)\cup B} (x) +P_{B\setminus A_m(t)}(x).$$ Taking into account that $P_{B\setminus A_m(t)}(x)\in co(\lbrace S 1_{\eta (B\setminus A_m(t))} : \vert \eta_j\vert = 1\rbrace)$ for any $S\ge \underset{j\in B\setminus A_m(t)}{\max}\vert e_j^*(x)\vert$, it suffices to show that there exists $R\ge 1$ and $C(t)>0$ such that \begin{equation}\label{final}
\|x- P_{A_m(t)\cup B} (x) + R\gamma 1_{\eta(B\setminus A_m(t))}\|\le C(t) \|x-z\| \end{equation} for any choice of signs $(\eta_j)_{j\in B\setminus A_m(t)}$ where $\gamma = \underset{j\in B\setminus A_m(t)}{\max}\vert e_j^*(x)\vert$.
Let us assume first that $t\geq s$. We shall show that \begin{equation} \label{two}\Vert x- P_{(A_m(t)\cup B)}(x)+\frac{t}{s}\gamma 1_{\eta B\setminus A_m(t)}\Vert \le D(s)\Vert x- P_B(x)\Vert\end{equation} for any choice of signs $(\eta_j)_{j\in B\setminus A_m(t)}$.
Given $(\eta_j)_{j\in B\setminus A_m(t)}$ we consider $$y_{\eta}= x- P_B(x)+ \frac{t}{s}\gamma 1_{\eta(B\setminus A_m(t))}=\sum_{n\notin B} e_n^*(x)e_n+ \sum_{n\in B\setminus A_m(t)}\frac{t}{s}\gamma\eta_n e_n .$$
Note that $$\min_{n\in A_m(t)\setminus B}|e_n^*(y_\eta)|= \min_{n\in A_m(t)\setminus B}|e_n^*(x)|\ge \min_{n\in A_m(t)}|e_n^*(x)|$$ and $$s\max_{n\in (A_m(t)\setminus B)^c}|e_n^*(y_\eta)|=\max\{ s\max_{n\notin A_m(t)}|e_n^*(x)|, t\gamma\} .$$ Therefore, since $t\ge s$, we conclude that
$$\min_{n\in A_m(t)\setminus B}|e_n^*(y_\eta)|\ge s \max_{n\in (A_m(t)\setminus B)^c}|e_n^*(y_\eta)|.$$
Hence $A_m(t)\setminus B \in \mathcal G(y_\eta, s, N)$ with $N = \vert A_m(t)\setminus B\vert$. We write $G^s(y_\eta) = P_{A_m(t)\setminus B}(x)$ and notice that $$ y_\eta- G^s(y_\eta)= x- P_{A_m(t)\cup B}(x)+ \frac{t}{s}\gamma 1_{\eta(B\setminus A_m(t))}. $$
Since $\omega(B)\leq \omega(A_m(t))$ we have also that $\omega(B\setminus A_m(t))\leq \omega(A_m(t)\setminus B)$. Hence for $N(s)=\omega(A_m(t)\setminus B)$ we conclude \begin{eqnarray*} \Vert x- P_{(A_m(t)\cup B)}(x)+\frac{t}{s}\gamma 1_{\eta B\setminus A_m(t)}\Vert &\leq& D(s)\mathcal{D}_{N(s)}^\omega (y_\eta)\\\nonumber
&\leq& D(s) \Vert y_\eta-\frac{t}{s}\gamma1_{\eta B\setminus A_m(t)}\Vert\\\nonumber &=&D(s)\Vert x- P_B(x)\Vert. \end{eqnarray*}
Now, let $y=x-z+ \mu 1_B$ for $\mu = s \, \underset{j\notin B}{\max}\vert e_j^*(x-z)\vert +\underset{j\in B}{\max}\vert e_j^*(x-z)\vert.$\newline
Then $$\min_{j\in B} |\mu + e_j^*(x-z)|\ge s\max_{j\notin B} |e_j^*(x-z)|,$$ which gives that $B\in \mathcal G(y,s, |B|)$ and we obtain $G^s(y) = P_B(x-z)+\mu 1_{B}$. Hence \begin{equation}\label{three} \Vert x-P_B(x)\Vert = \Vert y-G^s(y)\Vert \le D(s)\Vert y - \mu 1_B\Vert= D(s)\Vert x-z\Vert. \end{equation}
Therefore, by $\eqref{two}$ and $\eqref{three}$ we obtain
$$\Vert x- P_{(A_m(t)\cup B)}(x)+\frac{t}{s}\gamma 1_{\eta B\setminus A_m(t)}\Vert\le D(s)^2\|x-z\|.$$ Then, for $s\le t$ we obtain that $\mathscr B$ is $(t,w)$-greedy with constant $C(t)\le D(s)^2$.
We now consider the case $s>t$. We use the following estimates: $$\Vert x- P_{(A_m(t)\cup B)}(x)+\gamma 1_{\eta B\setminus A_m(t)}\Vert\le \Vert x- P_{ B}(x)\Vert+\Vert P_{A_m(t)\setminus B}(x)\Vert+\gamma \Vert1_{\eta B\setminus A_m(t)}\Vert.$$ Arguing as above, using now $$\tilde y_{\eta}= P_{A_m(t)\setminus B}(x)+ \frac{t}{s}\gamma 1_{\eta(B\setminus A_m(t))},$$
we conclude that $\frac{t}{s}\gamma \Vert1_{\eta B\setminus A_m(t)}\Vert\le D(s)\|P_{A_m(t)\setminus B}(x)\|$.
The argument used to show (\ref{three}) gives $ \|z- P_C z\|\le D(s) \|z\|$ for all $z\in \mathbb{X}$ and finite set $C$. Therefore
$$\Vert P_{A_m(t)\setminus B}(x)\Vert= \Vert P_{A_m(t)}(x-P_{B}x)\Vert\le (1+ D(s))\|x-P_Bx\|.$$ Putting all together we have
$$\Vert x- P_{(A_m(t)\cup B)}(x)+\gamma 1_{\eta B\setminus A_m(t)}\Vert\le (2+\frac{t+s}{t}D(s))\|x-P_Bx\|,$$ and therefore $\mathscr B$ is $(t,w)$-greedy with constant $C(t)\le (2+\frac{t+s}{t}D(s))D(s).$ \end{proof}
\begin{corollary} If $t=1$ and $\omega(A) = \vert A\vert$, then $\mathscr B$ has the PCCG property if and only if $\mathscr B$ is greedy. \end{corollary}
\begin{corollary} If $\omega(A) = \vert A\vert$, then $\mathscr B$ has the $t$-PCCG property if and only if $\mathscr B$ is $t$-greedy. \end{corollary}
\section{A remark on the Haar system}
Throughout this section $|E|$ stands for the Lebesgue measure of a set in $[0,1]$, $\mathcal D$ for the family of dyadic intervals in $[0,1]$ and $card(\Lambda)$ for the number of dyadic elements in $\Lambda$. We denote by $\mathcal{H}:=\lbrace H_I\rbrace$ the Haar basis in $[0,1]$, that is to say $$H_{[0,1]} (x) = 1\; \text{for}\; x\in [0,1),$$ and for $I\in \mathcal D$ of the form $I = [(j-1)2^{-n}, j2^{-n})$, $j=1,..,2^n$, $n= 0,1,...$ we have \begin{displaymath} H_{I}(x) = \left\{ \begin{array}{ll} 2^{n/2} & \mbox{if $x\in [(j-1)2^{-n}, (j-\frac{1}{2})2^{-n})$,} \\ -2^{n/2} & \mbox{if $x\in [(j-\frac{1}{2})2^{-n}, j2^{-n})$,} \\ 0 & \mbox{otherwise.} \end{array} \right. \end{displaymath}
We write $$c_I(f) := \langle f,H_I\rangle = \int_0^1 f(x)H_I(x)dx \hbox{ and } c_I(f,p):= \Vert c_I(f)H_I\Vert_p, \quad 1\le p<\infty.$$
It is well known that $\mathcal H$ is an orthonormal basis in $L^2([0,1])$ and for $1<p<\infty$ we can use the Littlewood-Paley's Theorem which gives \begin{equation}\label{lp} c_p \left\Vert \left( \sum_I \vert c_I(f,p)\frac{H_I}{\Vert H_I\Vert_p}\vert^2\right)^{1/2}\right\Vert_p \leq \Vert f\Vert_p \leq C_p\left\Vert \left( \sum_I \vert c_I(f,p)\frac{H_I}{\Vert H_I\Vert_p}\vert^2\right)^{1/2}\right\Vert_p \end{equation}
to conclude that $(\frac{H_I}{\Vert H_I\Vert_p})_I$ is an unconditional basis in $L^p([0,1])$. Denoting $f<<_p g$ whenever $c_I(f,p)\le c_I(g,p)$ for all dyadic intervals $I$ we obtain from (\ref{lp}) the existence of a constant $K_p$ such that
\begin{equation}\label{uncon}
\|f\|_p\le K_p \|g\|_p \quad \forall f, g\in L^p([0,1]) \hbox{ with } f<<_p g,
\end{equation}
and also
\begin{equation}\label{h}
\|P_\Lambda g\|_p\le K_p \|g\|_p \quad \forall g\in L^p \quad \forall \Lambda\subset \mathcal D. \end{equation} Regarding the greediness of the Haar basis, V. N. Temlyakov was the first to prove (see \cite{T}) that every wavelet basis $L_p$-equivalent to the Haar basis is $t$-greedy in $L_p([0,1])$ with $1<p<\infty$ for any $0<t\le 1$.
Let $\omega:[0,1]\to \mathbb R^+$ be a measurable weight and, as usual, we denote $\omega(I)=\int_I \omega(x)dx$ and $m_I(\omega)=\frac{\omega(I)}{|I|}$ for any $I\in \mathcal D$. In the space $L^p(\omega)=L^p([0,1],\omega)$ we denote $\|f\|_{p, \omega}=(\int_0^1 |f(x)|^p\omega(x) dx)^{1/p}$ and $$ c_I(f,p,\omega):= \Vert c_I(f)H_I\Vert_{p,\omega}= |c_I(f)|\frac{\omega(I)^{1/p}}{|I|^{1/2}}. $$ Recall that $\omega$ is said to be a dyadic $A_p$-weight (denoted $\omega \in A^{d}_p$) if \begin{equation} A^{d}_p(\omega)= \sup_{I\in \mathcal D} m_I(\omega) \Big( m_I(\omega^{-1/(p-1)})\Big)^{p-1}<\infty. \end{equation}
As one may expect, Littlewood-Paley theory holds for weights in the dyadic $A_p$-class. \begin{theorem} (see \cite{ABM, I} for the multidimensional case) If $\omega \in A^{d}_p$ then \begin{eqnarray}\label{uncon} \Vert f\Vert_{p,\omega} \approx\left\Vert \left( \sum_I \vert c_I(f,p,\omega)\frac{H_I}{\Vert H_I\Vert_{p,\omega}}\vert^2\right)^{1/2}\right\Vert_{p,\omega}. \end{eqnarray} In particular $(\frac{H_I}{\Vert H_I\Vert_{p,\omega}})_I$ is an unconditional basis in $L^p(\omega)$ for $1<p<\infty$. \end{theorem}
The greediness of the Haar basis in $L^p(\omega)$ goes back to M. Izuki (see \cite{I, IS}) who showed that this holds for weights in the class $A_p^d$.
We shall use the ideas in these papers to show that the Haar basis satisfies the PCCG property for certain spaces defined using the Littlewood-Paley theory.
\begin{defi} Let $\omega:[0,1]\to \mathbb R^+$ be a measurable weight and $1\le p<\infty$. For each finite set of dyadic intervals $\Lambda$ we define
$f_\Lambda=\sum_{I\in \Lambda} c_I(f)H_I=\sum_{I\in \Lambda} c_I(f,p,\omega)\frac{H_I}{\Vert H_I\Vert_{p,\omega}}$ and write $$\|f\|_{X^p(\omega)}= \left\Vert\left( \sum_{I\in \Lambda} \vert c_I(f,p,\omega)\frac{H_I}{\Vert H_I\Vert_{p,\omega}}\vert^2\right)^{1/2}\right\Vert_{p,\omega}.$$
The closure of $span(f_\Lambda: card(\Lambda)<\infty)$ under this norm will be denoted $X^p(\omega)$.
\end{defi}
From the definition $(\frac{H_I}{\Vert H_I\Vert_{p,\omega}})_I$ is an unconditional basis with constant 1 in $X^p(\omega)$ and due to (\ref{uncon}) $X^p(\omega)=L^p(\omega)$ whenever $\omega\in A_p^d$. Our aim is to analyze conditions on the weight $\omega$ for the basis to be greedy. For such a purpose we do not need the weight to belong to $A_p^d$. In fact analyzing the proof in \cite{I, IS} one notices that only the dyadic reverse doubling condition (see \cite[p. 141]{GCRF}) was used. Recall that a weight $\omega$ is said to satisfy {\bf the dyadic reverse doubling condition} if there exists $\delta<1$ such that
\begin{equation}\label{dc}\omega(I')\le \delta \omega (I), \forall I,I'\in \mathcal D \hbox{ with } I'\subsetneq I.
\end{equation}
Let us introduce certain weaker conditions.
\begin{defi} Let $\alpha>0$ and $\omega$ be a measurable weight. We shall say that $\omega$ satisfies {\bf the dyadic reverse Carleson condition} of order $\alpha$ with constant $C>0$ whenever
\begin{equation}\label{cc}\sum_{I\in \mathcal D, J\subseteq I}\omega(I)^{-\alpha}\le C \omega(J)^{-\alpha}, \forall J\in \mathcal D .
\end{equation}
\end{defi}
\begin{defi} Let $\alpha>0$ and two sequences $(w_I)_{I\in \mathcal D}$ and $(v_I)_{I\in \mathcal D}$ of positive real numbers. We say that the pair $\Big((w_I)_{I\in \mathcal D}, (v_I)_{I\in \mathcal D}\Big)$ satisfies $\alpha-{\bf DRCC }$ with constant $C>0$ whenever
\begin{equation}\label{cc1}\sum_{I\in \mathcal D, J\subseteq I}w_I^{-\alpha}\le C v_J^{-\alpha}, \forall J\in \mathcal D .
\end{equation}
\end{defi}
\begin{Remark} \label{n} (i) If $\omega\in \cup_{p> 1}A_p^d$ then $\omega$ satisfies the dyadic reverse doubling condition (see \cite[p.~141]{GCRF}).
(ii) If $\omega$ satisfies the dyadic reverse doubling condition then $\omega$ satisfies the dyadic reverse Carleson condition of order $\alpha$ with constant $\frac{1}{1-\delta^\alpha}$ for any $\alpha>0$.
Indeed, $$\sum_{J\subseteq I}\omega(I)^{-\alpha}\le \omega(J)^{-\alpha}+ \omega(J)^{-\alpha}\sum_{m=1}^{\infty} \delta^{m\alpha}\le \frac{1}{1-\delta^\alpha}\omega(J)^{-\alpha}.$$
(iii) If $\omega$ satisfies the dyadic reverse Carleson condition of order $\alpha$ and $w_I=\omega(I)$ for each $I\in \mathcal D$ then $\Big((w_I)_{I\in \mathcal D},(w_I)_{I\in \mathcal D}\Big)$ satisfies $\alpha$-{\bf DRCC }. \end{Remark}
We need the following lemmas, whose proofs are essentially included in \cite{CDH, I, IS}.
\begin{lemma} \label{1c} Let $\omega$ be a weight and $(v_I)_{I\in \mathcal D}$ be a sequence of positive real numbers such that
$\Big((v_I)_{I\in \mathcal D}, (\omega(I))_{I\in \mathcal D}\Big)$ satisfies $ 1$-{\bf DRCC } with constant $C$. Then \begin{equation}\label{dem}
\left(\sum_{I\in \Lambda} \frac{\omega(I)}{v_I}\right)^{1/p}\le C\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}, \forall 1\le p<\infty. \end{equation} \end{lemma} \begin{proof} We first write
\begin{equation}\label{main} \left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}= \left(\int_0^1 (\sum_{I\in \Lambda} \omega(I)^{-2/p}\chi_I)^{p/2}\omega(x) dx\right)^{1/p}. \end{equation} Let $I(x)$ denote the minimal dyadic interval in $\Lambda$ with regard to the inclusion relation that contains $x$. Now we use that $$\sum_{I\in \mathcal D, I(x)\subseteq I}v_I^{-1}\le C \omega(I(x))^{-1}$$ to conclude that \begin{eqnarray*} (\sum_{I\in \Lambda} \frac{\omega(I)}{v_I})^{1/p}&=& \Big(\sum_{I\in\Lambda} \int_{I}v_I^{-1}\omega(x)dx\Big)^{1/p}= \Big(\int_0^1(\sum_{I\in \Lambda}v_I^{-1}\chi_I(x))\omega(x)dx\Big)^{1/p}\\ &\le & C\Big(\int_0^1 \omega(I(x))^{-1}\omega(x)dx\Big)^{1/p}\le C\Big(\int_0^1(\sum_{I\in \Lambda}\omega(I)^{-2/p}\chi_I(x))^{p/2}\omega(x)dx\Big)^{1/p}\\
&=& C\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\|_{X^p(\omega)}. \end{eqnarray*} The proof is complete. \end{proof} \begin{lemma} \label{2p}Let $1<p<\infty$, $\omega$ be a weight and $(v_I)_{I\in \mathcal D}$ a sequence of positive real numbers. If $\Big((\omega(I))_{I\in \mathcal D}, (v_I)_{I\in \mathcal D}\Big)$ satisfies $2/p$-{\bf DRCC } with constant $C>0$ then \begin{equation}
\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}\le C \left(\sum_{I\in \Lambda} \frac{\omega(I)}{v_I}\right)^{1/p} \end{equation} for all finite family $\Lambda$ of dyadic intervals. \end{lemma} \begin{proof}
Let $E = \cup_{I\in\Lambda}I$. As above
$I(x)$ stands for the minimal dyadic interval in $\Lambda$ with regard to the inclusion relation that contains $x$. From (\ref{cc1}) we have that
\begin{equation}\label{1}
\sum_{I\in \Lambda}\omega(I)^{-2/p}\chi_I(x)\le Cv_{I(x)}^{-2/p}, \quad x\in E.\end{equation} Now denote for each $I\in \Lambda$, $\tilde I=\{x\in E: I(x)=I\}$. Clearly $\tilde I\subseteq I$ and $E=\cup_{I\in \Lambda}\tilde I $. Hence applying (\ref{main}) and (\ref{1}) we obtain \begin{eqnarray*}
\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}&\le& C\left(\int_E
v_{I(x)}^{-1}\omega(x) dx\right)^{1/p} = C\left(\int_{\cup_{I\in \Lambda} \tilde I}
v_{I(x)}^{-1}\omega(x) dx\right)^{1/p} \\ &\le& C \Big(\sum_{I\in \Lambda} \int_{\tilde I}v_{I}^{-1}\omega(x)dx\Big)^{1/p}\le C \Big(\sum_{I\in \Lambda} v_I^{-1}\int_{I}\omega(x)dx\Big)^{1/p}\\ &=& C(\sum_{I\in \Lambda} \frac{\omega(I)}{v_I})^{1/p}. \end{eqnarray*} The proof is now complete. \end{proof}
Combining Remark \ref{n} and Lemmas \ref{1c} and \ref{2p} we obtain the following corollary. \begin{corollary} Let $1<p<\infty$ and $\omega$ be a weight satisfying the dyadic reverse doubling condition. Then \begin{equation} \label{basic}
\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}\approx card(\Lambda)^{1/p} \end{equation} for all finite family $\Lambda$ of dyadic intervals. \end{corollary}
\begin{corollary} Let $1<p<\infty$, $\omega$ be a weight and $(v_I)_{I\in \mathcal D}$ a sequence of positive real numbers. If $\Big((\omega(I))_{I\in \mathcal D}, (v_I)_{I\in \mathcal D}\Big)$ satisfies $2/p'$-{\bf DRCC } with constant $C>0$ then \begin{equation}\label{dem0}
\left(\sum_{I\in \Lambda} \frac{\omega(I)}{v_I}\right)^{1/p}\le C \Big(\max_{I\in \Lambda}\frac{\omega(I)}{v_I}\Big)\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)} \end{equation} for all finite family $\Lambda$ of dyadic intervals. \end{corollary} \begin{proof} Note that, using Lemma \ref{2p}, we have \begin{eqnarray*} \sum_{I\in \Lambda} \frac{\omega(I)}{v_I}&=& \int_0^1 (\sum_{I\in\Lambda} v_I^{-1}\chi_I(x))\omega(x)dx\\ &\le & \int_0^1 (\sum_{I\in\Lambda} \omega(I)^{-2/p}\chi_I)^{1/2}(\sum_{I\in\Lambda} v_I^{-2}\omega(I)^{2/p}\chi_I(x))^{1/2}\omega(x)dx\\ &\le & \Big(\int_0^1 (\sum_{I\in\Lambda} \omega(I)^{-2/p}\chi_I)^{p/2}\omega(x)dx\Big)^{1/p}\Big(\int_0^1(\sum_{I\in\Lambda} v_I^{-2}\omega(I)^{2/p}\chi_I(x))^{p'/2}\omega(x)dx\Big)^{1/p'}\\
&\le& \left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)} \left\|\sum_{I\in \Lambda} \frac{\omega(I)}{v_I}\frac{H_I}{\Vert H_I\Vert_{p',\omega}}\right\|_{X^{p'}(\omega)}\\
&\le& \left(\max_{I\in \Lambda}\frac{\omega(I)}{v_I}\right)\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p',\omega}}\right\|_{X^{p'}(\omega)}\\
&\le& C\left(\max_{I\in \Lambda}\frac{\omega(I)}{v_I}\right)\left\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\right\|_{X^p(\omega)}\left(\sum_{I\in \Lambda} \frac{\omega(I)}{v_I}\right)^{1/p'}. \end{eqnarray*} The result now follows. \end{proof} Taking into account that dyadic reverse Carleson condition of order $\alpha$ implies dyadic reverse Carleson condition of order $\beta$ for $\beta>\alpha$ we obtain the following fact. \begin{corollary} Let $1<p<\infty$, $\omega$ be a weight satisfying the dyadic reverse Carleson condition of order $\min\{2/p', 2/p\}$ then \begin{equation} \label{basic}
\|\sum_{I\in \Lambda} \frac{H_I}{\Vert H_I\Vert_{p,\omega}}\|_{X^p(\omega)}\approx card(\Lambda)^{1/p} \end{equation} for all finite family $\Lambda$ of dyadic intervals. \end{corollary}
\begin{theorem} Let $1<p<\infty$, $0<t\le 1$, $(w_I)_{I\in \mathcal D}$ be a sequence of real numbers such that $$0<m_0=\inf_{I\in \mathcal D}w_I\le \sup_{I\in \mathcal D}w_I=M_0<\infty$$ and let $\omega$ be a weight satisfying the dyadic reverse Carleson condition of order $\min\{1, 2/p\}$ with constant $C>0$. Then the Haar basis has the $(t, w_I)$-PCCG property in $X^p(\omega)$.
\end{theorem}
\begin{proof} Let $f\in X^p(\omega)$ and let $\Lambda^t_m$ be a set of $m$ dyadic intervals where $$\min_{I\in \Lambda^t_m}c_I(f,p, \omega)\ge t \max_{I'\notin \Lambda^t_m}c_{I'}(f,p, \omega) .$$
For each $\alpha\in \mathbb R$, $(\varepsilon_n)\in \{\pm 1\}$ and $\Lambda'$ with $\sum_{J\in \Lambda'}w_J\le \sum_{I\in \Lambda_m^t}w_I$ we need to show that $\|f-P_{\Lambda^t_m}(f)\|_{X^p(\omega)}\le C(t) \|f-\alpha 1_{\varepsilon \Lambda'}\|_{X^p(w)}$ for some constant $C(t)>0$. From triangular inequality $$\Vert f-P_{\Lambda^t_m}(f)\Vert_{X^p(\omega)} \leq \Vert P_{(\Lambda_m \cup \Lambda')^c}(f-\alpha 1_{\varepsilon \Lambda'})\Vert_{X^p(\omega)} + \Vert P_{\Lambda'\setminus \Lambda^t_m}(f)\Vert_{X^p(\omega)}$$ and the fact $\Vert P_{\Lambda}(f-\alpha 1_{\varepsilon B})\Vert_{X^p(\omega)}\le \Vert f-\alpha 1_{\varepsilon B}\Vert_{X^p(\omega)}$ for any $\Lambda$
we only need to show that there exists $C>0$ such that
$$\Vert P_{\Lambda'\setminus \Lambda^t_m}(f)\Vert_{X^p(\omega)}\le C \|f-\alpha 1_{\varepsilon \Lambda'}\|_{X^p(\omega)}.$$
Set $v_I=\frac{\omega(I)}{w_I}$ and observe that $\Big((\omega(I))_{I\in \mathcal D}, (v_I)_{I\in \mathcal D}\Big)$ satisfies $2/p$-{DRCC } with constant $M_0C$ and $\Big( (v_I)_{I\in \mathcal D}, \omega(I)_{I\in \mathcal D}\Big)$ satisfies $1$-{DRCC } with constant $C/m_0$. Note that $\sum_{J\in \Lambda'}w_J\le \sum_{I\in \Lambda_m^t}w_I$ implies that $$ \sum_{J\in \Lambda'\setminus \Lambda_m^t}\frac{\omega(J)}{v_{J}}\le \sum_{I\in \Lambda_m^t\setminus \Lambda'}\frac{\omega(I)}{v_{I}} $$ and then, invoking Lemma \ref{2p} and Lemma \ref{1c}, we get the estimates \begin{eqnarray*} \Vert P_{\Lambda'\setminus \Lambda_m^t}(f)\Vert_{X^p(\omega)} &\leq& \Vert \underset{I\in \Lambda'\setminus A_m}{\max} c_I(f,p,\omega)1_{\Lambda'\setminus A_m}\Vert_{X^p(\omega)}\\
&\le& C M_0 \underset{I\in \Lambda'\setminus \Lambda_m^t}{\max} c_I(f,p,\omega) (\sum_{J\in \Lambda'\setminus \Lambda_m^t}\frac{\omega(J)}{v_{J}})^{1/p}\\ &\leq&t^{-1} C M_0\underset{I\in \Lambda_m^t\setminus \Lambda'}{\min}c_I(f,p,\omega)(\sum_{I\in \Lambda_m^t\setminus \Lambda'}\frac{\omega(I)}{v_I})^{1/p} \\
&\leq& \frac{C^2 M_0}{tm_0}\Vert \underset{I\in \Lambda_m^t\setminus \Lambda'}{\min} c_I(f,p,\omega)1_{\Lambda_m^t\setminus \Lambda'}\Vert_{X^p(\omega)}\\ &\leq& \frac{C^2 M_0}{tm_0}\Vert P_{\Lambda_m^t\setminus \Lambda'}(f)\Vert_{X^p(\omega)} \\ &=& \frac{C^2 M_0}{tm_0}\Vert P_{\Lambda_m^t\setminus \Lambda'}(f-\alpha 1_{\varepsilon B})\Vert_{X^p(\omega)} \\
&\leq &\frac{C^2 M_0}{tm_0} \|f-\alpha 1_{\varepsilon \Lambda'}\|_{X^p(\omega)}. \end{eqnarray*}
This completes the proof with $C(t)= 1+\frac{C^2 M_0}{tm_0}$. \end{proof}
\begin{corollary} (i) If $\omega\in A_p^d$ then
the Haar basis has the $t$-PCCG property (and hence is $t$-greedy) in $L^p(\omega)$ with $1<p<\infty$.
(ii) The Haar basis has the $(t,w_I)$-PCCG property (and hence is $(t, w_I)$-greedy) in $L^p([0,1])$ for any sequence $(w_I)_{I\in\mathcal{D}}$ with $0<\inf w_I\le \sup w_I <\infty.$
\end{corollary} \noindent{\it \bf Acknowledgment:} The authors would like to thank G. Garrig\'os and E. Hern\'andez for useful conversations during the elaboration of this paper.
\vspace*{-1.5cm}
\end{document} |
\begin{document}
\title{A multiplicatively symmetrized version of the Chung-Diaconis-Graham random process} \author{Martin Hildebrand \footnote{Department of Mathematics and Statistics, University at Albany, State University of New York, Albany, NY 12222. {\tt [email protected]}}} \maketitle \begin{abstract} This paper considers random processes of the form $X_{n+1}=a_nX_n+b_n\pmod p$ where $p$ is odd, $X_0=0$, $(a_0,b_0), (a_1,b_1), (a_2,b_2),...$ are i.i.d., and $a_n$ and $b_n$ are independent with $P(a_n=2)=P(a_n=(p+1)/2)=1/2$ and $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$. This can be viewed as a multiplicatively symmetrized version of a random process of Chung, Diaconis, and Graham. This paper shows that order $(\log p)^2$ steps suffice for $X_n$ to be close to uniformly distributed on the integers mod $p$ for all odd $p$ while order $(\log p)^2$ steps are necessary for $X_n$ to be close to uniformly distributed on the integers mod $p$. \end{abstract}
\section{Introduction}
Chung, Diaconis, and Graham~\cite{cdg} considered random processes of the form $X_{n+1}=2X_n+b_n\pmod p$ where $p$ is odd, $X_0=0$, and $b_0, b_1, b_2,...$ are i.i.d. with $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$. They showed that order $(\log p)\log(\log p)$ steps suffice to make $X_n$ close to uniformly distributed on the integers mod $p$. Diaconis~\cite{diaconis} asked about random processes of the form $X_{n+1}=a_nX_n+b_n \pmod p$ where $p$ is odd, $X_0=0$, and $(a_0,b_0), (a_1,b_1), (a_2,b_2),...$ are i.i.d. with $a_n$ and $b_n$ being independent, $P(a_n=2)=P(a_n=(p+1)/2)=1/2$ and $P(b_n=1)=P(b_n=-1)=1/2$. In his Ph.D. thesis, the author~\cite{mvhphd} showed that order $(\log p)^2$ steps suffice to make $X_n$ close to uniformly distributed on the integers mod $p$ and that order $(\log p)^2$ steps
are necessary to make $X_n$ close to uniformly distributed on the integers mod $p$. The techniques used there can be readily adapted if the distribution is changed so that $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$; in this case, these techniques show that order $((\log p)(\log(\log p)))^2$ steps suffice to make $X_n$ close to uniformly distributed on the integers mod $p$ for all odd integers $p$ and order $(\log p)^2$ steps suffice for almost all odd integers $p$
while order $(\log p)^2$ steps are necessary to make $X_n$ close to uniformly distributed on the integers mod $p$. This paper shows that this result can be improved to show that order $(\log p)^2$ steps suffice to make $X_n$ close to uniformly distributed on the integers mod $p$ for all odd integers $p$.
\section{Some Background, Notation, and Main Result}
We let the integers mod $p$ be denoted by ${\mathbb Z}/p{\mathbb Z}$. We may denote elements of this group by $0, 1,..., p-1$ instead of $0+p{\mathbb Z}, 1+p{\mathbb Z},...,(p-1)+p{\mathbb Z}$.
A probability $P$ on the integers mod $p$ satisfies $P(s)\ge 0$ for $s\in{\mathbb Z}/p{\mathbb Z}$ and $\sum_{s\in{\mathbb Z}/p{\mathbb Z}}P(s)=1$.
We use the variation distance to measure how far a probability $P$ on ${\mathbb Z}/p{\mathbb Z}$ is from the uniform distribution on ${\mathbb Z}/p{\mathbb Z}$. This distance is given by \[
\|P-U\|=\frac{1}{2}\sum_{s\in{\mathbb Z}/p{\mathbb Z}}\left|P(s)-\frac{1}{p}\right|
=\max_{A\subset{\mathbb Z}/p{\mathbb Z}}|P(A)-U(A)| \]
where $P(A)=\sum_{s\in A}P(s)$ and $U(A)=\sum_{s\in A}1/p=|A|/p$. Note that $\|P-U\|\le 1$ for all probabilities $P$ on ${\mathbb Z}/p{\mathbb Z}$.
\begin{proposition} \label{probmixture} If $P=p_1P_1+p_2P_2+...+p_mP_m$ where $p_1, p_2,..., p_m$ are positive real numbers summing to $1$, then \[
\|P-U\|\le\sum_{i=1}^mp_i\|P_i-U\|. \] \end{proposition} This proposition can be readily shown using the triangle inequality.
If $P$ is a probability on ${\mathbb Z}/p{\mathbb Z}$, define the Fourier transform \[\hat P(k)=\sum_{j=0}^{p-1}P(j)e^{2\pi ijk/p}\]
for $k=0, 1,..., p-1$. The Upper Bound Lemma of Diaconis and Shahshahani (see, for example, Diaconis~\cite{diaconis}, p. 24) implies \[
\|P-U\|^2\le\frac{1}{4}\sum_{k=1}^{p-1}|\hat P(k)|^2. \]
The main theorem is \begin{theorem} \label{mainthm} Suppose $X_0=0$ and $p$ is an odd integer greater than $1$. Let $X_{n+1}=a_nX_n+b_n \pmod p$ where $(a_0,b_0), (a_1,b_1), (a_2,b_2),...$ are i.i.d. such that $a_n$ and $b_n$ are independent,
$P(a_n=2)=P(a_n=(p+1)/2)=1/2$, and $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$. Let $P_n(j)=P(X_n=j)$ for $j\in{\mathbb Z}/p{\mathbb Z}$. Let $\epsilon>0$ be given. For some $c>0$, if $n>c(\log p)^2$, then $\|P_n-U\|<\epsilon$. \end{theorem}
\section{Beginnings of the argument}
Observe that \begin{eqnarray*} X_0&=&0\\ X_1&=&b_0\\ X_2&=&a_1b_0+b_1\\ X_3&=&a_2a_1b_0+a_2b_1+b_2\\ &&...\\ X_n&=&a_{n-1}...a_2a_1b_0+a_{n-1}...a_2b_1+...+a_{n-1}b_{n-2}+b_{n-1} \end{eqnarray*}
We shall focus on the distribution of $X_n$ given values for $a_1, a_2,..., a_{n-1}$. In the case where $a_{n-1}=2$, $a_{n-2}=(p+1)/2$, $a_{n-3}=2$, $a_{n-4}=(p+1)/2$, etc., then \[ X_n=2(b_{n-2}+b_{n-4}+...)+(b_{n-1}+b_{n-3}+...) \pmod p . \] If $n=c(\log p)^2$, then $X_n$ lies between $-(3/2)c(\log p)^2$ and $(3/2)c(\log p)^2$ and, for large enough $p$, will not be close to uniformly distributed on the integers mod $p$. In the case where $a_{n-1}=2$, $a_{n-2}=2$, $a_{n-3}=2$, ..., $a_0=2$, then results of Chung, Diaconis, and Graham~\cite{cdg} show that order $(\log p)\log(\log p)$ steps suffice to make $X_n$ close to uniformly distributed on the integers mod $p$, and so order $(\log p)^2$ steps suffice as well.
Let $P_n(a_{n-1}, a_{n-2},...,a_1)(s)=P(a_{n-1}...a_1b_0+a_{n-1}...a_2b_1+...+a_{n-1}b_{n-2}+b_{n-1}=s \pmod p )$ where $b_0, b_1,...,b_{n-1}$ are i.i.d. uniform on $\{1,0,-1\}$.
We shall show \begin{theorem} \label{indiviudalcases} Let $\epsilon>0$ be given. There exists a constant $c>0$ such that if $n>c(\log p)^2$, then \[
\|P_n(a_{n-1}, a_{n-2},..., a_1)-U\|<\epsilon/2 \] except for a set $A$ of values $(a_{n-1}, a_{n-2},..., a_1)$ in
$\{2,(p+1)/2\}^{n-1}$ where $|A|<(\epsilon/2)2^{n-1}$. ($\{2,(p+1)/2\}^{n-1}$ is the set of $(n-1)$-tuples with entries in $\{2,(p+1)/2\}$.) \end{theorem}
By Proposition~\ref{probmixture}, Theorem~\ref{indiviudalcases} implies Theorem~\ref{mainthm}.
\section{Random Walk on the Exponent} \label{rwexp}
Suppose $a_0, a_1, a_2,...$ are i.i.d. with $P(a_1=2)=P(a_1=(p+1)/2)=1/2$. In the integers mod $p$, one can view $(p+1)/2$ as $2^{-1}$, the multiplicative inverse of $2$. So $1, a_{n-1}, a_{n-1}a_{n-2}, a_{n-1}a_{n-2}a_{n-3},...$ can be viewed as $2^{w_0}, 2^{w_1}, 2^{w_2}, 2^{w_3},...$ where $w_0=0$ and $w_{j+1}-w_j$ are i.i.d. for $j=0, 1, 2,...$ with $P(w_{j+1}-w_j=1)=P(w_{j+1}-w_j=-1)=1/2$.
Let $M_j=\max\{w_0, w_1,..., w_j\}$ and $m_j=\min\{w_0, w_1,..., w_j\}$.
By Theorem 1 of Section III.7 of Feller~\cite{feller}, $P(M_j=\ell)=p_{j,\ell}+p_{j,\ell+1}$ where $p_{j,\ell}={j \choose (j+\ell)/2}2^{-j}$ where the binomial coefficient is $0$ unless $(j+\ell)/2$ is an integer between $0$ and $j$, inclusive. Thus by Central Limit Theorem considerations, for some constant $c_1>0$, if $\epsilon_1>0$ and $j=\lceil c_1(\log p)^2\rceil$, then $P(M_j\le 0.5\log_2p)<\epsilon_1/4$ for sufficiently large $p$, and, by symmetry, $P(-m_j\le 0.5\log_2p)<\epsilon_1/4$ for sufficiently large $p$. Also by Central Limit Theorem considerations, for some constant $c_2>0$, $P(M_j\ge (c_2/2)\log_2p)<\epsilon_1/4$ and $P(-m_j\ge (c_2/2)\log_2p)<\epsilon_1/4$ for sufficiently large $p$. So if
$j=\lceil c_1(\log p)^2\rceil$, $P(\log_2p<M_j-m_j<c_2\log_2p)>1-\epsilon_1$ for sufficiently large $p$. If this event does not hold, then $(a_{n-1},a_{n-2},...,a_1)$ might be in the set $A$.
Exercise III.10 of Feller~\cite{feller} gives \[ z_{r,2n}=\frac{1}{2^{2n-r}}{2n-r\choose n} \] where $z_{r,2n}$ is the probability of exactly $r$ returns to the origin in the first $2n$ steps of the symmetric nearest neighbor random walk on the integers. Observe \[ z_{0,2n}=\frac{1}{2^{2n}}{2n\choose n}\sim \frac{1}{\sqrt{\pi n}}, \] which is approximately a multiple of $1/\log p$ if $n$ is approximately a multiple of $(\log p)^2$.
Observe that if $r\ge 0$, then \begin{eqnarray*} \frac{z_{r+1,2n}}{z_{r,2n}}&=&\frac{1/2^{2n-r-1}}{1/2^{2n-r}} \frac{{2n-r-1\choose n}}{{2n-r\choose n}}\\ &=&2\frac{n-r}{2n-r}\\ &\le &1. \end{eqnarray*} Thus $z_{r+1,2n}\le z_{r,2n}$.
For $k\in[m_j,M_j]$ with $j=\lceil c_1(\log p)^2\rceil$,
let $R(k)$ be the number of $i$ such that $w_i=k$ where $0<i-\min_i\{w_i=k\}\le(\log p)^2$. Observe that $P(R(k)\le f(p))\le c_3(f(p)+1)/\log p$ for some positive constant $c_3$.
For some positive constant $c_4$, observe that
$E(|\{k:R(k)\le f(p), m_j\le k\le M_j\}|\ | \log_2p<M_j-m_j<c_2(\log_2p)) \le c_4(f(p)+1)$. Thus by Markov's inequality,
$P(|\{k:R(k)\le f(p), m_j\le k\le M_j\}|\ge c_5(f(p)+1)| \log_2p<M_j-m_j< c_2(\log_2p))\le c_4/c_5$.
\section{Fourier transform argument}
Let $\tilde P_n(a_{n-1}, a_{n-2},..., a_1)(s)=P(2^n(a_{n-1}a_{n-2}...a_1b_0+ a_{n-1}a_{n-2}...a_2b_1+...+a_{n-1}b_{n-2}+b_{n-1})=s \pmod p )$
where $b_0, b_1,..., b_{n-1}$ are i.i.d. uniform on $\{1, 0, -1\}$. Observe $\|\tilde P_n(a_{n-1}, a_{n-2},..., a_1)-U\|=
\|P_n(a_{n-1}, a_{n-2},..., a_1)-U\|$ since $p$ is odd. Note that all powers of $2$ in $2^na_{n-1}a_{n-2}...a_1$, $2^na_{n-1}a_{n-2}...a_2$, ..., $2^na_{n-1}$, $2^n$ are nonnegative.
The Upper Bound Lemma implies \begin{eqnarray*}
\|\tilde P_n(a_{n-1}, a_{n-2},..., a_1)-U\|&\le&\frac{1}{4}\sum_{m=1}^{p-1} \prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p) \right)^{2R(\ell-n)}\\ &\times &\prod_{r=j+1}^{n-1}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{n+w_r}m/p) \right)^2. \end{eqnarray*} Note that the first product term is for times up to $j$ and the second product term is for times after $j$. Recall $j=\lceil c_1(\log p)^2\rceil$.
Note that \[ \left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)} \le \cases{9^{-R(\ell-n)}&if $1/4\le \{2^{\ell}m/p\}<3/4$\cr 1&otherwise} \] and \[ \left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{n+w_r}m/p)\right)^2 \le \cases{1/9&if $1/4\le \{2^{n+w_r}m/p\}<3/4$\cr 1&otherwise} \] where $\{x\}$ is the fractional part of $x$.
Assume
$|\{k:R(k)\le {c_6} \log(\log p), m_j\le k\le M_j\}|<c_5(\log(\log p)+1)$ where $c_5$ is such that $c_4c_6/c_5<\epsilon_2$ where $\epsilon_2>0$ is given and $j=\lceil c_1(\log p)^2\rceil$ and
$|\{k:R(k)<(\log(\log p))^{2.1}, m_j\le k\le M_j\}|<(\log(\log p))^{2.5}$. Also assume $\log_2p<M_j-m_j<c_2(\log_2p)$. If these assumptions don't hold, then
$(a_{n-1},a_{n-2},...,a_1)$ might be in the set $A$. We shall consider various cases for $m$.
{\underbar {Case 1}}: $m$ is such that for some $\ell\in[n+m_j,n+M_j]$, $1/4\le\{2^{\ell}m/p\}<3/4$ and $R(\ell-n)>(\log(\log p))^{2.1}$. Let $S_1$ be the set of such $m$ in $1, 2,..., p-1$. Then, by arguments similar to those in Chung, Diaconis, and Graham~\cite{cdg} \[ \sum_{m\in S_1}\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3} \cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)}<\epsilon. \] Details appear in Section~\ref{fouriervalues}.
{\underbar {Case 2}}: $m\notin S_1$ and for $b$ values of $\ell\in[n+m_j,n+M_j]$, $1/4\le\{2^{\ell}m/p\}<3/4$ and
${c_6} \log(\log p)<R(\ell-n)\le(\log(\log p))^{2.1}.$ Let $S_{2,b}$ be the set of such $m$ in $1, 2,..., p-1$.
Let's consider the binary expansion of $m/p$; in particular, consider the positions $n+m_j+1$ through $n+M_j+1$. If $1/4\le\{2^{\ell}m/p\}<3/4$, then there is an ``alternation'' between positions $(\ell+1)$ and $(\ell+2)$, i.e. there is a $1$ followed by a $0$ or a $0$ followed by a $1$. We say an alternation follows position $\ell$ if there is an alternation between positions $\ell+1$ and $\ell+2$. Alternations will start following $b$ of no more than $(\log(\log p))^{2.5}$ positions $\ell$ where ${c_6} \log(\log p)<R(\ell-n)<(\log(\log p))^{2.1}$, and alternations may or may not start following each of no more than $c_5(\log(\log p)+1)$ positions $\ell$ with $R(\ell-n)\le {c_6} \log(\log p)$. No other alternations may occur. Place $n+m_j+1$ may be either $0$ or $1$.
Places $n+m_j+1$ through $n+M_j+1$ of the binary expansion of $m/p$ are unique for each $m$ in $\{1,2,...,p-1\}$ since $M_j-m_j>\log_2p$ by an observation similar to the blocks in the argument of Chung, Diaconis, and Graham~\cite{cdg} being unique. So \begin{eqnarray*}
|S_{2,b}|&\le &2\cdot 2^{c_5(\log(\log p)+1)}{\lfloor(\log(\log p))^{2.5}\rfloor \choose b}\\ &\le &2\cdot 2^{c_5(\log(\log p)+1)}(\log(\log p))^{2.5b} \end{eqnarray*}
If $m\in S_{2,b}$, then \[ \prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p) \right)^{2R(\ell-n)} \le (1/9)^{b{c_6} \log(\log p)}. \] So \begin{eqnarray*} &&\sum_{m\in S_{2,b}}\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+ \frac{2}{3}\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)} \\ &\le & 2\cdot 2^{c_5(\log (\log p)+1)}((\log(\log p))^{2.5} (1/9)^{{c_6} \log(\log p)})^b \end{eqnarray*}
Note that for large enough $p$, $(\log(\log p))^{2.5}(1/9)^{{c_6} \log(\log p)}<1/2$. Also observe for $b\ge b_{\min}$ where $b_{\min}$ is a value depending on $c_5$ and ${c_6}$, \[ 2^{c_5(\log (\log p)+1)}((\log(\log p))^{2.5}(1/9)^{{c_6} \log(\log p)})^b\rightarrow 0 \] as $p\rightarrow\infty$. Thus \[ \sum_{b=b_{\min}}^{\infty} 2^{c_5(\log(\log p)+1)}((\log(\log p))^{2.5} (1/9)^{{c_6} \log(\log p)})^b\rightarrow 0 \] and \[ \sum_{b=b_{\min}}^{\infty}\sum_{m\in S_{2,b}}\prod_{\ell=n+m_j}^{n+M_j} \left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)} \rightarrow 0. \] So all we need to consider are $m\in S_{2,b}$ where $b<b_{\min}$.
To consider such $m$, we shall look at further steps in the Fourier transform. We shall use the following lemma. \begin{lemma} Let $\epsilon^{\prime}>0$ be given. Let $d$ be a positive number. For some constant $c_7>0$, except with probability no more than $\epsilon^{\prime}$, \[ \max_{\ell=d+1}^{d+\lfloor c_7(\log p)^2\rfloor}w_{\ell}-\min_{\ell=d+1}^{d+\lfloor c_7(\log p)^2\rfloor} w_{\ell}>2\log_2p. \] If this inequality holds, then, given $m\in\{1, 2,..., p-1\}$, $1/4\le \{2^{\ell}m/p\}<3/4$ for some $\ell\in\{d+1, d+2,..., d+\lfloor c_7(\log p)^2 \rfloor\}$. With probability at least $1-(\log(\log p))^{2.5}/\log p$, \[
|\{h:\ell+1\le h\le\ell+(\log p)^2, w_{\ell}=w_h\}|>(\log(\log p))^{2.1}. \] \end{lemma}
{\it Proof:} Similar to reasoning in section~\ref{rwexp}, the existence of $c_7$ follows by Central Limit Theorem considerations and Theorem 1 of Section III.7 of Feller~\cite{feller}. The existence of such $\ell$ follows since for each positive integer $k$, at least one of $\{2^km/p\}$, $\{2^{k+1}m/p\}$,...,$\{2^{k+\lfloor 2\log_2p\rfloor-1}m/p\}$ lies in $[1/4,3/4)$. The result
on $|\{h:\ell+1\le h\le\ell+(\log p)^2, w_{\ell}=w_h\}|$ follows similarly to the earlier argument that $P(R(k)\le f(p))\le c_3(f(p)+1)/\log p$.
$\Box$
Suppose $n_{before}$ is the number of $m$ being considered, i.e. need further Fourier transform terms before going an additional $\lfloor c_7(\log p)^2\rfloor +\lfloor(\log p)^2\rfloor$ terms. Afterwards, we will need to continue to consider only $m$ such that $\ell$ in the lemma exists and
$|\{h:\ell+1\le h\le \ell+(\log p)^2, w_{\ell}=w_h\}|<(\log(\log p))^{2.1}$; otherwise we have sufficient additional terms in the Fourier transform; see Section~\ref{fouriervalues}. Except for at most $(\epsilon^{\prime}+o(1))2^{n-1}$ $(n-1)$-tuples in $A$, $n_{after}\le n_{before}(\log(\log p))^{2.5}/\log p$ where $n_{after}$ is the number of $m$ still being considered after going the additional $\lfloor c_7(\log p)^2\rfloor+\lfloor(\log p)^2\rfloor$ steps. Repeating this a fixed number $f$ times will give $n_{after}<1$, i.e. $n_{after}=0$ except for at most $f(\epsilon^{\prime}+o(1))2^{n-1}$ $(n-1)$-tuples in $A$.
\section{Bounding the Fourier transform sums} \label{fouriervalues}
Some of the ideas in this section, for example ``alternations'', come from Chung, Diaconis, and Graham~\cite{cdg}.
Suppose $m\in S_1$. If \[ g(x)=\cases{1/9&if $1/4\le\{x\}<3/4$ \cr 1&otherwise,} \] then \begin{eqnarray*} \prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p) \right)^{2R(\ell-n)} &\le& \prod_{\ell=n+m_j}^{n+M_j}(g(2^{\ell}m/p))^{R(\ell-n)}\\ &\le&(1/9)^{{c_6} \log(\log p)A(B_m)} \end{eqnarray*} where $A(B_m)$ is the number of ``alternations'' in the first $M_j-m_j$ positions of the binary expansion of $\{2^{n+m_j}m/p\}$. An alternation in the binary expansion $.\alpha_1\alpha_2\alpha_3...$ occurs when $\alpha_i\ne\alpha_{i+1}$. There will be an alternation in the first $\lceil\log_2p\rceil$ positions of the binary expansion of $\{2^{n+m_j}m/p\}$ if $m\in\{1, 2,..., p-1\}$, and for different $m\in\{1, 2,..., p-1\}$, the first $\lceil \log_2p\rceil$ positions of the binary expansion of $\{2^{n+m_j}m/p\}$ will differ. The inequality ending $<(1/9)^{{c_6} \log(\log p)A(B_m)}$ occurs since for some $\ell\in[n+m_j,n+M_j]$ with $1/4\le\{2^{\ell}m/p\}<3/4$, $R(\ell-n)\ge(\log(\log p))^{2.1}$ and the $R(\ell-n)$ powers of $1/9$ also cover all $c_5(\log(\log p)+1)$ terms of the form $(1/9)^{R(\ell-n)}$ with $\ell$ such that
$R(\ell-n)\le{c_6}\log(\log p)$ if $p$ is large enough.
Observe \begin{eqnarray*} \sum_{m\in S_1}(1/9)^{{c_6} \log(\log p)A(B_m)}&\le& \sum_{m=1}^{p-1}(1/9)^{{c_6} \log(\log p)A(B_m)}\\ &\le& 2\sum_{s=1}^{M_j-m_j}{M_j-m_j\choose s}(1/9)^{{c_6} \log(\log p)s}\\ &\le& 2\sum_{s=1}^{M_j-m_j}(M_j-m_j)^s(1/9)^{{c_6} \log(\log p)s}\\ &\rightarrow&0 \end{eqnarray*} as $p\rightarrow\infty$ if $\log_2p<M_j-m_j<c_2(\log p)$ and ${c_6}$ is large enough.
Now suppose $m\in S_{2,0}$ and for some $\ell$ with $1/4\le\{2^{\ell}m/p\}<3/4$ where $\ell<n-(\log p)^2$ and
$|\{h:\ell+1\le h\le\ell+(\log p)^2,w_{\ell}=w_h\}|\ge(\log(\log p))^{2.1}$, then \begin{eqnarray*} && \prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p) \right)^{2R(\ell-n)} \\ && \times \prod_{r=j+1}^{n-1}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{n+w_r}m/p)\right)^2 \\ &\le& (1/9)^{{c_6} \log(\log p) A(B_m)}. \end{eqnarray*} In other words, the powers of $1/9$ for these values of $h$ cover all $c_5(\log(\log p)+1)$ terms of the form $(1/9)^{R(\ell-n)}$ with $\ell$ such that
$R(\ell-n)\le {c_6}\log(\log p)$ if $p$ is large enough. By reasoning similar to the sum involving $m\in S_1$, \[ \sum_{m\in S_{2,0}}(1/9)^{{c_6} \log(\log p) A(B_m)} \rightarrow 0 \] as $p\rightarrow\infty$.
\section{Lower Bound}
The argument for the lower bound is more straightforward and is based upon \cite{mvhphd}.
\begin{theorem} \label{lowerbound} Suppose $X_n$, $a_n$, $b_n$, and $p$ are as in Theorem~\ref{mainthm}. Let
$\epsilon>0$ be given. For some $c>0$, if $n<c(\log p)^2$ for large enough $p$, then $\|P_n-U\|>1-\epsilon$. \end{theorem}
{\it Proof:} Let $m_j$ and $M_j$ be as in Section~\ref{rwexp}. For some $c>0$, if $n=\lfloor c(\log p)^2\rfloor$, then $P(m_j\le -0.25\log_2p)<\epsilon/3$ and $P(M_j\ge 0.25\log_2p)<\epsilon/3$. If $m_j>-0.25\log_2p$ and $M_j<0.25\log_2p$, then $2^{\lceil 0.25\log_2p\rceil}X_n$ lies in the interval $[-{\sqrt p}c(\log p)^2, {\sqrt p}c(\log p)^2]$, and so
$\|P_n-U\|\ge(1-2\epsilon/3)-(2{\sqrt p}c(\log p)^2+1)/p>1-\epsilon$ for sufficiently large $p$.
\section{Discussion of Generalizations for $a_n$}
One can ask if the results generalize to the case where $a$ is a fixed integer greater than $1$, $(a,p)=1$, and $P(a_n=a)=P(a_n=a^{-1})=1/2$. The results indeed should generalize. Chapter 3 of Hildebrand~\cite{mvhphd} gives a result if $P(a_n=a)=1$. This result gives an upper bound similar to the original Chung-Diaconis-Graham result with $P(a_n=2)=1$ and involves an $a$-ary expansion along with a generalization of alternations in a Fourier transform argument. The random walk on the exponent should work with powers of $a$ instead of powers of $2$. The Fourier transform argument may consider the interval $[1/a^2,1-1/a^2)$ instead of $[1/4,3/4)$. The constant $1/9$ may be replaced by another constant less than $1$. One needs to be careful with the size of the analogue of $S_{2,b}$.
Also Breuillard and Varj\'u~\cite{bv} consider the Chung-Diaconis-Graham process with $P(a_n=a)=1$ where $a$ is not fixed. One might explore cases where $P(a_n=a)=P(a_n=a^{-1})=1/2$ where $a$ is not fixed but does have a multiplicative inverse in the integers mod $p$.
\section{Questions for Further Study}
Eberhard and Varj\'u~\cite{ev} prove and locate a cut-off phenomenon for most odd integers $p$ in the original Chung-Diaconis-Graham random process. However, the diffusive nature of the random walk on the exponent suggests that a cut-off phenomenon might not appear in the multiplicatively symmetrized version. Exploring this question more rigorously is a problem for further study.
The Chung-Diaconis-Graham random process can be extended to multiple dimensions. Klyachko~\cite{klyachko} considers random processes of the form $X_{N+1}=A_NX_N+B_N \pmod p$ where $X_N$ is a random vector in $({\mathbb Z}/p{\mathbb Z})\times({\mathbb Z}/p{\mathbb Z})$ and $A_N$ is a fixed $2\times 2$ matrix with some conditions. Perhaps techniques in this paper could be combined with Klyachko's result to get a result for the case where $A_N$ is a fixed $2\times 2$ matrix or its inverse with probability $1/2$ each.
\section{Acknowledgment}
The author would like to thank the referee for some suggestions.
This is a preprint of an article published in {\it Journal of Theoretical Probability}. The final authenticated version is available online at
{\tt https://doi.org/10.1007/s10959-021-01088-3}.
\end{document} |
\begin{document}
\title{Sharp asymptotic behavior of solutions of the $3d$ Vlasov-Maxwell system with small data}
\begin{abstract} We study the asymptotic properties of the small data solutions of the Vlasov-Maxwell system in dimension three. No neutral hypothesis nor compact support assumptions are made on the data. In particular, the initial decay in the velocity variable is optimal. We use vector field methods to obtain sharp pointwise decay estimates in null directions on the electromagnetic field and its derivatives. For the Vlasov field and its derivatives, we obtain, as in \cite{FJS3}, optimal pointwise decay estimates by a vector field method where the commutators are modification of those of the free relativistic transport equation. In order to control high velocities and to deal with non integrable source terms, we make fundamental use of the null structure of the system and of several hierarchies in the commuted equations. \end{abstract}
\tableofcontents \section{Introduction}
This article is concerned with the asymptotic behavior of small data solutions to the three-dimensional Vlasov-Maxwell system. These equations, used to model collisionless plasma, describe, for one species of particles\footnote{Our results can be extended without any additional difficulty to several species of particles.}, a distribution function $f$ and an electromagnetic field which will be represented by a two-form $F_{\mu \nu}$. The equations are given by\footnote{We will use all along this paper the Einstein summation convention so that, for instance, $v^i \partial_i f = \sum_{i=1}^3 v^i \partial_i f$ and $\nabla^{\mu} F_{\mu \nu} = \sum_{\mu=0}^3 \nabla^{\mu} F_{\mu \nu}$. The Latin indices go from $1$ to $3$ and the Greek indices from $0$ to $3$.} \begin{eqnarray}\label{VM1} v^0\partial_t f+v^i \partial_i f +ev^{\mu}{ F_{\mu}}^{ j} \partial_{v^j} f & = & 0, \\ \label{VM2} \nabla^{\mu} F_{\mu \nu} & = & e J(f)_{\nu} \hspace{2mm} := \hspace{2mm} e\int_{v \in \mathbb{R}^3} \frac{v_{\nu}}{v^0} f dv, \\ \label{VM3} \nabla^{\mu} {}^* \! F_{\mu \nu} & = & 0, \end{eqnarray}
where $v^0=\sqrt{m^2+|v|^2}$, $m>0$ is the mass of the particles and $e \in \mathbb{R}^*$ their charge. For convenience, we will take $m=1$ and $e=1$ for the remainder of this paper. The particle density $f$ is a non-negative\footnote{In this article, the sign of $f$ does not play any role.} function of $(t,x,v) \in \mathbb{R}_+ \times \mathbb{R}^3 \times \mathbb{R}^3$, while the electromagnetic field $F$ and its Hodge dual ${}^* \! F $ are $2$-forms depending on $(t,x) \in \mathbb{R}_+ \times \mathbb{R}^3$. We can recover the more common form of the Vlasov-Maxwell system using the relations $$E^i=F_{0i} \hspace{8mm} \text{and} \hspace{8mm} B^i=-{}^* \! F_{0i},$$ so that the equations can be rewritten as \begin{flalign*}
& \hspace{3cm} \sqrt{1+|v|^2} \partial_t f+v^i \partial_i f + (\sqrt{1+|v|^2} E+v \times B) \cdot \nabla_v f = 0, & \\
& \hspace{3cm} \nabla \cdot E = \int_{v \in \mathbb{R}^3}fdv, \hspace{1.1cm} \partial_t E^j = (\nabla \times B)^j -\int_{v \in \mathbb{R}^3} \frac{v^j}{\sqrt{1+|v|^2}}fdv, & \\ & \hspace{3cm} \nabla \cdot B = 0, \hspace{2,2cm} \partial_t B = - \nabla \times E. & \end{flalign*} We refer to \cite{Glassey} for a detailed introduction to this system. \subsection{Small data results for the Vlasov-Maxwell system}
The first result on global existence with small data for the Vlasov-Maxwell system in $3d$ was obtained by Glassey-Strauss in \cite{GSt} and then extended to the nearly neutral case in \cite{Sc}. This result required compactly supported data (in $x$ and in $v$) and shows that $\int_v f dv \lesssim \frac{\epsilon}{(1+t)^3}$, which coincides with the linear decay. They also obtain estimates for the electromagnetic field and its derivatives of first order, but they do not control higher order derivatives of the solutions. The result established by Schaeffer in \cite{Sc} allows particles with high velocity but still requires the data to be compactly supported in space\footnote{Note also that when the Vlasov field is not compactly supported (in $v$), the decay estimate obtained in \cite{Sc} on its velocity average contains a loss.}.
In \cite{dim4}, using vector field methods, we proved optimal decay estimates on small data solutions and their derivatives of the Vlasov-Maxwell system in high dimensions $d \geq 4$ without any compact support assumption on the initial data. We also obtained that similar results hold when the particles are massless ($m=0$) under the additional assumption that $f$ vanishes for small velocities\footnote{Note that there exists initial data violating this condition and such that the system does not admit a local classical solution (see Section $8$ of \cite{dim4}).}.
A better understanding of the null condition of the system led us in our recent work \cite{massless} to an extension of these results to the massless 3d case. In \cite{ext} we study the asymptotic properties of solutions to the massive Vlasov-Maxwell in the exterior of a light cone for mildly decaying initial data. Due to the strong decay satisfied by the particle density in such a region, we are able to lower the initial decay hypothesis on the electromagnetic field and thus avoid any difficulty related to the presence of a non-zero total charge.
The results of this paper establish sharp decay estimates on the small data solutions to the three-dimensional Vlasov-Maxwell system. The hypotheses on the particle density in the variable $v$ are optimal in the sense that we merely suppose $f$ (as well as its derivatives) to be initially integrable in $v$, which is a necessary condition for the source term of the Maxwell equations to be well defined.
Recently, Wang proved independently in \cite{Wang} a similar result for the $3d$ massive Vlasov-Maxwell system. Using both vector field methods and Fourier analysis, he does not require compact support assumptions on the initial data but strong polynomial decay hypotheses in $(x,v)$ on $f$ and obtained optimal pointwise decay estimates on $\int_v f dv$ and its derivatives.
\subsection{Vector fields and modified vector fields for the Vlasov equations}
The vector field method of Klainerman was first introduced in \cite{Kl85} for the study of nonlinear wave equations. It relies on energy estimates, the algebra $\mathbb{P}$ of the Killing vector fields of the Minkowski space and conformal Killing vector fields, which are used as commutators and multipliers, and weighted functional inequalities now known as Klainerman-Sobolev inequalities.
In \cite{FJS}, the vector field method was adapted to relativistic transport equations and applied to the small data solutions of the Vlasov-Nordstr\"om system in dimensions $d \geq 4$. It provided sharp asymptotics on the solutions and their derivatives. Key to the extension of the method is the fact that even if $Z \in \mathbb{P}$ does not commute with the free transport operator $T:= v^{\mu} \partial_{\mu}$, its complete lift\footnote{The expression of the complete lift of a vector field of the Minkowski space is presented in Definition \ref{defliftcomplete}.} $\widehat{Z}$ does. The case of the dimension $3$, studied in \cite{FJS2}, required considering modifications of the commutation vector fields of the form $Y=\widehat{Z}+\Phi^{\nu} \partial_{\nu}$, where $\widehat{Z}$ is a complete lift of a Killing field (and thus commutes with the free transport operator) while the coefficients $\Phi$ are constructed by solving a transport equation depending on the solution itself. In \cite{Poisson} (see also \cite{Xianglong}), similar results were proved for the Vlasov-Poisson equations and, again, the three-dimensional case required modifying the set of commutation vector fields in order to compensate the worst source terms in the commuted transport equations. Let us also mention \cite{rVP}, where the asymptotic behavior of the spherically symmetric small data solutions of the massless relativistic Vlasov-Poisson system is studied\footnote{Note that the Lorentz boosts cannot be used as commutation vector fields for this system since the Vlasov equation and the Poisson equation have different speed of propagation.}. Vector field methods led to a proof of the stability of the Minkowski spacetime for the Einstein-Vlasov system, obtained independently by \cite{FJS3} and \cite{Lindblad}.
Note that vector field methods can also be used to derive integrated decay for solutions to the the massless Vlasov equation on curved background such as slowly rotating Kerr spacetime (see \cite{ABJ}).
\subsection{Charged electromagnetic field}
In order to present our main result, we introduce in this subsection the pure charge part and the chargeless part of a $2$-form. \begin{Def} Let $G$ be a sufficiently regular $2$-form defined on $[0,T[ \times \mathbb{R}^3$. The total charge $Q_G(t)$ of $G$ is defined as $$ Q_G(t) \hspace{2mm} = \hspace{2mm} \lim_{r \rightarrow + \infty} \int_{\mathbb{S}_{t,r}} \frac{x^i}{r}G_{0i} d \mathbb{S}_{t,r},$$ where $\mathbb{S}_{t,r}$ is the sphere of radius $r$ of the hypersurface $\{t \} \times \mathbb{R}^3$ which is centered at the origin $x=0$. \end{Def} If $(f,F)$ is a sufficiently regular solution to the Vlasov-Maxwell system, $Q_F$ is a conserved quantity. More precisely, $$ \forall \hspace{0.5mm} t \in [0,T[, \hspace{2cm} Q_F(t)=Q_F(0)= \int_{x \in \mathbb{R}^3} \int_{v \in \mathbb{R}^3} f(0,x,v) dv dx.$$
Note that the derivatives of $F$ are automatically chargeless (see Appendix $C$ of \cite{massless}). The presence of a non-zero charge implies $\int_{\mathbb{R}^3} r|F|^2 dx = +\infty$ and prevents us from propagating strong weighted $L^2$ norms on the electromagnetic field. This leads us to decompose $2$-forms into two parts. For this, let $\chi : \mathbb{R} \rightarrow [0,1]$ be a cut-off function such that $$ \forall \hspace{0.5mm} s \leq -2, \hspace{3mm} \chi(s) =1 \hspace{1cm} \text{and} \hspace{1cm} \forall \hspace{0.5mm} s \geq -1, \hspace{3mm} \chi(s) =0.$$ \begin{Def}\label{defpure1} Let $G$ be a sufficiently regular $2$-form with total charge $Q_G$. We define the pure charge part $\overline{G}$ and the chargeless part $\widetilde{G}$ of $G$ as $$\overline{G}(t,x) := \chi(t-r) \frac{Q_G(t)}{4 \pi r^2} \frac{x_i}{r} dt \wedge dx^i \hspace{1cm} \text{and} \hspace{1cm} \widetilde{G} := G-\overline{G}.$$ \end{Def}
One can then verify that $Q_{\overline{G}}=Q_G$ and $Q_{\widetilde{G}}=0$, so that the hypothesis $\int_{\mathbb{R}^3} r|\widetilde{G}|^2 dx < +\infty$ is consistent. Notice moreover that $G=\widetilde{G}$ in the interior of the light cone.
The study of nonlinear systems in the presence of a non-zero charge was initiated by \cite{Shu} in the context of the Maxwell-Klein Gordon equations. The first complete proof of such a result was given by Lindblad and Sterbenz in \cite{LS} and improved later by Yang (see \cite{Yang}). Let us also mention the work of \cite{Bieri}.
\subsection{Statement of the main result}
\begin{Def} We say that $(f_0,F_0)$ is an initial data set for the Vlasov-Maxwell system if $f_0 : \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ and the $2$-form $F_0$ are both sufficiently regular and satisfy the constraint equations $$\nabla^i (F_{0})_{i0} =- \int_{v \in \mathbb{R}^3} f_0 dv \hspace{10mm} \text{and} \hspace{10mm} \nabla^i {}^* \! (F_0)_{i0} =0.$$ \end{Def} The main result of this article is the following theorem.
\begin{Th}\label{theorem} Let $N \geq 11$, $\epsilon >0$, $(f_0,F_0)$ be an initial data set for the Vlasov-Maxwell equations \eqref{VM1}-\eqref{VM3} and $(f,F)$ be the unique classical solution to the system arising from $(f_0,F_0)$. If
$$ \sum_{ |\beta|+|\kappa| \leq N+3} \int_{x \in \mathbb{R}^3} \int_{v \in \mathbb{R}^3} (1+|x|)^{2N+3}(1+|v|)^{|\kappa|} \left| \partial_{x}^{\beta} \partial_v^{\kappa} f_0 \right| dv dx + \sum_{ |\gamma| \leq N+2} \int_{x \in \mathbb{R}^3} (1+|x|)^{2 |\gamma|+1} \left| \nabla_x^{\gamma} \widetilde{F}_0 \right|^2 dx \leq \epsilon ,$$ then there exists $C>0$, $M \in \mathbb{N}$ and $\epsilon_0>0$ such that, if $\epsilon \leq \epsilon_0$, $(f,F)$ is a global solution to the Vlasov-Maxwell system and verifies the following estimates. \begin{itemize} \item Energy bounds for the electromagnetic field $F$ and its chargeless part: $\forall$ $t \in \mathbb{R}_+$,
$$ \sum_{\begin{subarray}{} Z^{\gamma} \in \mathbb{K}^{|\gamma|} \\ \hspace{1mm} |\gamma| \leq N \end{subarray}} \int_{|x| \geq t} \tau_+ \left( | \alpha ( \mathcal{L}_{ Z^{\gamma}}(\widetilde{F}) ) |^2 + | \rho ( \mathcal{L}_{ Z^{\gamma}}(\widetilde{F}) )|^2 + |\sigma ( \mathcal{L}_{ Z^{\gamma}}(\widetilde{F}) ) |^2 \right)+\tau_- |\underline{\alpha} ( \mathcal{L}_{ Z^{\gamma}}(\widetilde{F}) ) |^2 dx \leq C\epsilon ,$$
$$ \hspace{-0.3cm} \sum_{\begin{subarray}{} Z^{\gamma} \in \mathbb{K}^{|\gamma|} \\ \hspace{1mm} |\gamma| \leq N \end{subarray}} \int_{|x| \leq t} \hspace{-0.2mm} \tau_+ \left( \left| \alpha \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right|^2 \hspace{-0.2mm} + \hspace{-0.2mm} \left| \rho \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right|^2 \hspace{-0.2mm} + \hspace{-0.2mm} \left|\sigma \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right|^2 \right)+\tau_- \left|\underline{\alpha} \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right|^2 dx \leq C\epsilon \log^{2M}(3+t) .$$
\item Pointwise decay estimates for the null components of\footnote{If $|x| \geq t+1$, the logarithmical growth can be removed for the components $\alpha$ and $\underline{\alpha}$.} $\mathcal{L}_{Z^{\gamma}}(F)$: $\forall$ $|\gamma| \leq N-6$, $(t,x) \in \mathbb{R}_+ \times \mathbb{R}^3$, \begin{flalign*}
& \hspace{0.5cm} |\alpha(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon}\frac{\log(3+t)}{\tau_+^2} , \hspace{30mm} |\underline{\alpha}(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon}\frac{\log(3+t)}{\tau_+\tau_-} ,& \\
& \hspace{0.5cm} |\rho(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon} \frac{\log^2(3+t)}{\tau_+^2}, \hspace{29mm} |\sigma(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon}\frac{\log^2(3+t)}{\tau_+^2}.& \end{flalign*} \item Energy bounds for the Vlasov field: $\forall$ $t \in \mathbb{R}_+$,
$$\sum_{\begin{subarray}{} \hspace{0.5mm} Y^{\beta} \in \mathbb{Y}^{|\beta|} \\ \hspace{1mm} |\beta| \leq N \end{subarray}} \int_{x \in \mathbb{R}^3} \int_{v \in \mathbb{R}^3} \left|Y^{\beta} f \right| dv dx \leq C\epsilon.$$
\item Pointwise decay estimates for the velocity averages of $Y^{\beta} f$: $\forall$ $|\beta| \leq N-3$, $(t,x) \in \mathbb{R}_+ \times \mathbb{R}^3$,
$$\int_{ v \in \mathbb{R}^3} \left| Y^{\beta} f \right| dv \lesssim \frac{\epsilon}{\tau_+^2 \tau_-} \hspace{5mm} \text{and} \hspace{5mm} \int_{ v \in \mathbb{R}^3} \left| Y^{\beta} f \right| \frac{dv}{(v^0)^2} \lesssim \epsilon \frac{1}{\tau_+^3} \mathds{1}_{t \geq |x|}+ \epsilon\frac{\log^2(3+t)}{\tau_+^3\tau_-} \mathds{1}_{|x| \geq t}.$$ \end{itemize} \end{Th} \begin{Rq}
For the highest derivatives of $f_0$, those of order at least $N-2$, we could save four powers of $|x|$ in the condition on the initial norm and even more for those of order at least $N+1$. We could also avoid any hypothesis on the derivatives of order $N+1$ and $N+2$ of $F_0$ (see Remark \ref{rqgainH}). \end{Rq} \begin{Rq} Assuming more decay on $\widetilde{F}$ and its derivatives at $t=0$, we could use the Morawetz vector field as a multiplier, propagate a stronger energy norm and obtain better decay estimates on its null components in the exterior of the light cone. We could recover the decay rates of the free Maxwell equations (see \cite{CK}) on $\alpha (F)$, $\underline{\alpha} (F)$ and $\sigma (F)$, but not on $\rho(F)$. We cannot obtain a better decay rate than $\tau_+^{-2}$ on $\rho (F)$ because of the presence of the charge. With our approach, we cannot recover the sourceless behavior in the interior region because of the slow decay of $\int_v f dv$. \end{Rq}
\subsection{Key elements of the proof} \subsubsection{Modified vector fields} In \cite{dim4}, we observed that commuting \eqref{VM1} with the complete lift of a Killing vector field gives problematic source terms. More precisely, if $Z \in \mathbb{P}$, \begin{equation}\label{sourcetermintro} [T_F, \widehat{Z} ] f= -v^{\mu} {\mathcal{L}_Z(F)_{\mu}}^{ j} \partial_{v^j} f, \hspace{10mm} \text{with} \hspace{3mm} T_F = v^{\mu}\partial_{\mu}+v^{\mu} {F_{\mu}}^{ j} \partial_{v^j}. \end{equation} The difficulty comes from the presence of $\partial_v$, which is not part of the commutation vector fields, since in the linear case ($F=0$) $\partial_v f$ essentially behaves as $t\partial_{t,x} f$. However, one can see that the source term has the same form as the non-linearity $v^{\mu} {F_{\mu}}^{ j} \partial_{v^j} f$. In \cite{dim4}, we controlled the error terms by taking advantage of their null structure and the strong decay rates given by high dimensions. Unfortunately, our method does not apply in dimension $3$ since even assuming a full understanding of the null structure of the system, we would face logarithmic divergences. The same problem arises for other Vlasov systems and was solved using modified vector fields in order to cancel the worst source terms in the commutation formula. Let us mention again the works of \cite{FJS2} for the Vlasov-Nordstr\"om system, \cite{Poisson} for the Vlasov-Poisson equations, \cite{FJS3} and \cite{Lindblad} for the Einstein-Vlasov system. We will thus consider vector fields of the form $Y=\widehat{Z}+\Phi^{\nu}\partial_{\nu}$, where the coefficients $\Phi^{\nu}$ are themselves solutions to transport equations, growing logarithmically. As a consequence, we will need to adapt the Klainerman-Sobolev inequalities for velocity averages and the result of Theorem $1.1$ of \cite{dim4} in order to replace the original vector fields by the modified ones.
\subsubsection{The electromagnetic field and the non-zero total charge} Because of the presence of a non-zero total charge, i.e. $ \lim_{r \rightarrow + \infty} \int_{ \mathbb{S}_{0,r} } \frac{x^i}{r} (F_0)_{0i} d \mathbb{S}_{0,r} \neq 0$, we have, at $t=0$,
$$\int_{\mathbb{R}^3} (1+r) \left| \frac{x^i}{r} F_{0i} \right|^2 dx = \int_{\mathbb{R}^3} (1+r) |\rho(F)|^2 dx= + \infty$$
and we cannot propagate $L^2$ bounds on $\int_{\mathbb{R}^3} (1+t+r) |\rho(F)(t,x)|^2 dx$. However, provided that we can control the flux of the electromagnetic field on the light cone $t=r$, we can propagate weighted $L^2$ norms of $F$ in the interior region. To deal with the exterior of the light cone, recall from Definition \ref{defpure1} the decomposition \begin{equation}\label{explicit} F = \widetilde{F}+\overline{F}, \hspace{1cm} \text{with} \hspace{1cm} \overline{F}(t,x) := \chi(t-r) \frac{Q_F}{4 \pi r^2} dr \wedge dt . \end{equation}
The hypothesis $\int_{\mathbb{R}^3} (1+|x|) | \widetilde{F} (0,.)|^2 dx < + \infty$ is consistent with the chargelessness of $\widetilde{F}$ and we can then propagate weighted energy norms of $\widetilde{F}$ and bound the flux of $F$ on the light cone. On the other hand, we have at our disposal pointwise estimates on $\overline{F}$ and its derivatives through the explicit formula \eqref{explicit}. This information will allow us to deduce pointwise decay estimates on the null components of $F$ in both the exterior and the interior regions.
Another problem arises from the source terms of the commuted Maxwell equations, which need to be written with our modified vector fields. This leads us, as \cite{FJS2} and \cite{FJS3}, to rather consider them of the form $Y=\widehat{Z}+\Phi^{i}X_i$, where $X_i=\partial_i+\frac{v^i}{v^0}\partial_t$. The $X_i$ vector fields enjoy a kind of null condition\footnote{Note that they were also used in \cite{dim4} to improve the decay estimate on $\partial \int_v f dv$.} and allow us to avoid a small growth on the electromagnetic field norms which would prevent us from closing our energy estimates\footnote{We make similar manipulations to recover the standard decay rate on the modified Klainerman-Sobolev inequalities.}. However, at the top order, a loss of derivative does not allow us to take advantage of them and creates a $t^{\eta}$-loss, with $\eta >0$ a small constant. A key step is to make sure that $\| \left| Y^{\kappa} \Phi \right|^2 Y f \|_{L^1_{x,v}}$, for $|\kappa|=N-1$, does not grow faster than $t^{\eta}$. \subsubsection{High velocities and null structure of the system}
After commuting the transport equation satisfied by the coefficients $\Phi^i$ and in order to prove energy estimates, we are led to control integrals such as
$$\int_0^t \int_{\mathbb{R}^3} \int_{v \in \mathbb{R}^3}(s+|x|) \left| \mathcal{L}_Z(F) f \right| dv dx ds.$$
If $f$ vanishes for high velocities, the characteristics of the transport equations have velocities bounded away from $1$. If $f$ is moreover initially compactly supported in space, its spatial support is ultimately disjoint from the light cone and, assuming enough decay on the Maxwell field, one can prove $$|\mathcal{L}_Z(F) f| \lesssim (1+t+r)^{-1}(1+|t-r|)^{-1}| f | \lesssim (1+t+r)^{-2}| f |,$$ so that \begin{equation}\label{eq:pb1}
\int_0^t \int_{\mathbb{R}^3} \int_{v \in \mathbb{R}^3}(s+|x|) \left| \mathcal{L}_Z(F) f \right| dv dx ds \lesssim \int_0^t (1+s)^{-1} ds,
\end{equation}
which is almost uniformly bounded in time\footnote{Dealing with this small growth is the next problem addressed.}. As we do not make any compact support assumption on the initial data, we cannot expect $f$ to vanish for high velocities and certain characteristics of the transport operator ultimately approach those of the Maxwell equations. We circumvent this difficulty by taking advantage of the null structure of the error term given in \eqref{sourcetermintro}, which, in some sense, allows us to transform decay in $|t-r|$ into decay in $t+r$. The key is that certain null components of $v$, $\mathcal{L}_Z(F)$ and $\nabla_v f :=(0,\partial_{v^1} f,\partial_{v^2}f,\partial_{v^3}f)$ behave better than others and we will see in Lemma \ref{nullG} that no product of three bad components appears. More precisely, writing $c \prec d$ if $d$ is expected to behave better than $c$, we have, $$v^L \prec v^A, \hspace{1mm} v^{\underline{L}}, \hspace{10mm} \underline{\alpha}(\mathcal{L}_Z(F)) \prec \rho (\mathcal{L}_Z(F)) \sim \sigma( \mathcal{L}_Z(F) ) \prec \alpha( \mathcal{L}_Z(F) ) \hspace{10mm} \text{and} \hspace{6mm} \left( \nabla_v f \right)^A \prec \left( \nabla_v f \right)^{r}.$$
In the exterior of the light cone (and for the massless relativistic transport operator), we have $v^A \prec v^{\underline{L}}$ since $v^{\underline{L}}$ makes it possible to integrate along outgoing null cones\footnote{The angular component $v^A$ can, in some sense, merely do half of it since $|v^A| \lesssim \sqrt{v^0 v^{\underline{L}}}$.} and they are both bounded by $(1+t+r)^{-1}v^0\sum_{z \in \mathbf{k}_1} |z|$, where $\mathbf{k}_1$ is a set of weights preserved by the free transport operator. In the interior region, the angular components still satisfy the same properties whereas $v^{\underline{L}}$ merely satisfies the inequality \begin{equation}\label{eq:intro1}
v^{\underline{L}} \lesssim \frac{|t-r|}{1+t+r}v^0+\frac{v^0}{1+t+r} \sum_{z \in \mathbf{k}_1} |z| \hspace{10mm} \text{(see Lemma \ref{weights1})}. \end{equation}
This inequality is crucial for us to close the energy estimates on the electromagnetic field without assuming more initial decay in $v$ on $f$. It gives a decay rate of $(1+t+r)^{-3}$ on $\int_v \frac{v^{\underline{L}}}{v^0} |f| dv$ by only using a Klainerman-Sobolev inequality (Theorem \ref{decayopti} and Proposition \ref{decayopti2} would cost us two powers of $v^0$). As $1 \lesssim v^0 v^{\underline{L}}$ for massive particles, we obtain, combining \eqref{eq:intro1} and Theorem \ref{decayopti}, for $g$ a solution to $v^{\mu} \partial_{\mu} g =0$,
$$ \forall \hspace{0.5mm} t \geq |x|, \hspace{10mm} \int_{v \in \mathbb{R}^3} |g|(t,x,v)dv \lesssim \frac{(1+|t-r|)^k}{(1+t+r)^{3+k}} \sum_{|\beta| \leq 3} \left\| (v^0)^{2k+2}(1+r)^k \widehat{Z}^{\beta}g \right\|_{L^1_{x,v}}(t=0).$$
In the exterior region, the estimate can be improved by removing the factor $(1+|t-r|)^k$ (however one loses one power of $r$ in the initial norm). This remarkable behavior reflects that the particles do not reach the speed of light so that $\int_{v \in \mathbb{R}^3} |g| dv$ enjoys much better decay properties along null rays than along time-like directions and should be compared with solutions to the Klein-Gordon equation (see \cite{Kl93}).
\subsubsection{Hierarchy in the equations}
Because of certain source terms of the commuted transport equation, we cannot avoid a small growth on certain $L^1$ norms as suggested by \eqref{eq:pb1}. In order to close the energy estimates, we then consider several hierarchies in the energy norms of the particle density, in the spirit of \cite{LR} for the Einstein equations or \cite{FJS3} for the Einstein-Vlasov system. Let us show how a hierarchy related to the weights $z \in \mathbf{k}_1$ preserved by the free massive transport operator (which are defined in Subsection \ref{sectionweights}) naturally appears. \begin{itemize} \item The worst source terms of the transport equation satisfied by $Yf$ are of the form $(t+r)X_i(F_{\mu \nu})\partial_{t,x} f$. \item Using the improved decay properties given by $X_i$ (see \eqref{eq:X}), we have
$$ \left| (t+r)X_i(F_{\mu \nu})\partial_{t,x} f \right| \lesssim \sum_{Z \in \mathbb{K}} |\nabla_Z F| \sum_{z \in \mathbf{k}_1} |z\partial_{t,x} f|.$$
\item Then, we can obtain a good bound on $\| Yf \|_{L^1_{x,v}}$ provided we have a satisfactory one on $\| z \partial_{t,x} f \|_{L^1_{x,v}}$. We will then work with energy norms controlling $\| z^{N_0-\beta_P} Y^{\beta} f \|_{L^1_{x,v}}$, where $\beta_P$ is the number of non-translations composing $Y^{\beta}$.
\item At the top order, we will have to deal with terms such as $(t+r)z^{N_0}\partial_{t,x}^{\gamma}(F_{\mu \nu})\partial_{t,x}^{\beta} f$ and we will this time use the extra decay $(1+|t-r|)^{-1}$ given by the translations $\partial_{t,x}^{\gamma}$. \end{itemize}
\subsection{Structure of the paper}
In Section \ref{sec2} we introduce the notations used in this article. Basic results on the electromagnetic field as well as fundamental relations between the null components of the velocity vector $v$ and the weights preserved by the free transport operator are also presented. Section \ref{sec3} is devoted to the commutation vector fields. The construction and basic properties of the modified vector fields are in particular presented. Section \ref{sec4} contains the energy estimates and the pointwise decay estimates used to control both fields. Section \ref{secpurecharge} is devoted to properties satisfied by the pure charge part of the electromagnetic field. In Section \ref{sec6} we describe the main steps of the proof of Theorem \ref{theorem} and present the bootstrap assumptions. In Section \ref{sec7}, we derive pointwise decay estimates on the solutions and the $\Phi$ coefficients of the modified vector fields using only the bootstrap assumptions. Section \ref{sec8} (respectively Section \ref{sec12}) concerns the improvement of the bootstrap assumptions on the norms of the particle density (respectively the electromagnetic field). A key step consists in improving the estimates on the velocity averages near the light cone (cf. Proposition \ref{Xdecay}). In Section \ref{sec11}, we prove $L^2$ estimates for $\int_v|Y^{\beta}f|dv$ in order to improve the energy estimates on the Maxwell field.
\section{Notations and preliminaries}\label{sec2}
\subsection{Basic notations}
In this paper we work on the $3+1$ dimensional Minkowski spacetime $(\mathbb{R}^{3+1},\eta)$. We will use two sets of coordinates, the Cartesian $(t,x^1,x^2,x^3)$, in which $\eta=diag(-1,1,1,1)$, and null coordinates $(\underline{u},u,\omega_1,\omega_2)$, where $$\underline{u}=t+r, \hspace{5mm} u=t-r$$ and $(\omega_1,\omega_2)$ are spherical variables, which are spherical coordinates on the spheres $(t,r)=constant$. These coordinates are defined globally on $\mathbb{R}^{3+1}$ apart from the usual degeneration of spherical coordinates and at $r=0$. We will also use the following classical weights, $$\tau_+:= \sqrt{1+\underline{u}^2} \hspace{8mm} \text{and} \hspace{8mm} \tau_-:= \sqrt{1+u^2}.$$ We denote by $(e_1,e_2)$ an orthonormal basis on the spheres and by $\slashed{\nabla}$ the intrinsic covariant differentiation on the spheres $(t,r)=constant$. Capital Latin indices (such as $A$ or $B$) will always correspond to spherical variables. The null derivatives are defined by $$L=\partial_t+\partial_r \hspace{3mm} \text{and} \hspace{3mm} \underline{L}=\partial_t-\partial_r, \hspace{3mm} \text{so that} \hspace{3mm} L(\underline{u})=2, \hspace{2mm} L(u)=0, \hspace{2mm} \underline{L}( \underline{u})=0 \hspace{2mm} \text{and} \hspace{2mm} \underline{L}(u)=2.$$
The velocity vector $(v^{\mu})_{0 \leq \mu \leq 3}$ is parametrized by $(v^i)_{1 \leq i \leq 3}$ and $v^0=\sqrt{1+|v|^2}$ since we take the mass to be $1$. We introduce the operator $$T : f \mapsto v^{\mu} \partial_{\mu} f,$$ defined for all sufficiently regular functions $f : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$, and we denote $(0,\partial_{v^1}g, \partial_{v^2}g,\partial_{v^3}g)$ by $\nabla_v g$ so that \eqref{VM1} can be rewritten $$T_F(f) := v^{\mu} \partial_{\mu} f +F \left( v, \nabla_v f \right) =0.$$ We will use the notation $D_1 \lesssim D_2$ for an inequality such as $ D_1 \leq C D_2$, where $C>0$ is a positive constant independent of the solutions but which could depend on $N \in \mathbb{N}$, the maximal order of commutation. Finally we will raise and lower indices using the Minkowski metric $\eta$. For instance, $\nabla^{\mu} = \eta^{\nu \mu} \nabla_{\nu}$ so that $\nabla^{\partial_t}=-\nabla_{\partial_t}$ and $\nabla^{\partial_i}=\nabla_{\partial_i}$ for all $1 \leq i \leq 3$.
\subsection{Basic tools for the study of the electromagnetic field}\label{basicelec}
As we describe the electromagnetic field in geometric form, it will be represented, throughout this article, by a $2$-form. Let $F$ be a $2$-form defined on $[0,T[ \times \mathbb{R}^3_x$. Its Hodge dual ${}^* \! F$ is the $2$-form given by $${}^* \! F_{\mu \nu} = \frac{1}{2} F^{\lambda \sigma} \varepsilon_{ \lambda \sigma \mu \nu},$$ where $\varepsilon_{ \lambda \sigma \mu \nu}$ are the components of the Levi-Civita symbol. The null decomposition of $F$, introduced by \cite{CK}, is denoted by $(\alpha(F), \underline{\alpha}(F), \rho(F), \sigma (F))$, where $$\alpha_A(F) = F_{AL}, \hspace{5mm} \underline{\alpha}_A(F)= F_{A \underline{L}}, \hspace{5mm} \rho(F)= \frac{1}{2} F_{L \underline{L} } \hspace{5mm} \text{and} \hspace{5mm} \sigma(F) =F_{12}.$$ Finally, the energy-momentum tensor of $F$ is $$T[F]_{\mu \nu} := F_{\mu \beta} {F_{\nu}}^{\beta}- \frac{1}{4}\eta_{\mu \nu} F_{\rho \sigma} F^{\rho \sigma}.$$ Note that $T[F]_{\mu \nu}$ is symmetric and traceless, i.e. $T[F]_{\mu \nu}=T[F]_{\nu \mu}$ and ${T[F]_{\mu}}^{\mu}=0$. This last point is specific to the dimension $3$ and engenders additional difficulties in the analysis of the Maxwell equations in high dimension (see Section $3.3.2$ in \cite{dim4} for more details).
We have the following alternative form of the Maxwell equations (for a proof, see \cite{CK} or Lemmas $2.2$ and $D.3$ of \cite{massless}).
\begin{Lem}\label{maxwellbis} Let $G$ be a $2$-form and $J$ be a $1$-form both sufficiently regular and such that \begin{eqnarray} \nonumber \nabla^{\mu} G_{\mu \nu} & = & J_{\nu} \\ \nonumber \nabla^{\mu} {}^* \! G_{\mu \nu} & = & 0. \end{eqnarray} Then, $$\nabla_{[ \lambda} G_{\mu \nu ]} = 0 \hspace{4mm} \text{and} \hspace{4mm} \nabla_{[ \lambda} {}^* \! G_{\mu \nu ]} = \varepsilon_{\lambda \mu \nu \kappa} J^{\kappa}.$$ We also have, if $(\alpha, \underline{\alpha}, \rho, \sigma)$ is the null decomposition of $G$, \begin{eqnarray} \nabla_{\underline{L}} \hspace{0.5mm} \rho-\frac{2}{r} \rho+ \slashed{\nabla}^A \underline{\alpha}_A & = & J_{\underline{L}}, \label{eq:nullmax1} \\ \nabla_{\underline{L}} \hspace{0.5mm} \sigma-\frac{2}{r} \sigma+ \varepsilon^{AB} \slashed{\nabla}_A \underline{\alpha}_B & = & 0, \label{eq:nullmax2} \\ \nabla_{\underline{L}} \hspace{0.5mm} \alpha_A-\frac{\alpha_A}{r}+\slashed{\nabla}_{e_A} \rho+\varepsilon_{BA} \slashed{\nabla}_{e_B} \sigma &=& J_A. \label{eq:nullmax4} \end{eqnarray} \end{Lem}
We can then compute the divergence of the energy momentum tensor of a $2$-form.
\begin{Cor}\label{tensorderiv} Let $G$ and $J$ be as in the previous lemma. Then, $\nabla^{\mu} T[G]_{\mu \nu}=G_{\nu \lambda} J^{\lambda}$. \end{Cor}
\begin{proof} Using the previous lemma, we have \begin{eqnarray} \nonumber G_{\mu \rho} \nabla^{\mu} {G_{\nu}}^{\rho}& = & G^{\mu \rho} \nabla_{\mu} G_{\nu \rho} \\ \nonumber & = & \frac{1}{2} G^{\mu \rho} (\nabla_{\mu} G_{\nu \rho}-\nabla_{\rho} G_{\nu \mu}) \\ \nonumber & = & \frac{1}{2} G^{\mu \rho} \nabla_{\nu} G_{\mu \rho} \\ \nonumber & = & \frac{1}{4} \nabla_{\nu} (G^{\mu \rho} G_{\mu \rho}). \end{eqnarray}
Hence, $$\nabla^{\mu} T[G]_{\mu \nu} = \nabla^{\mu} (G_{\mu \rho}){G_{\nu}}^{\rho}+\frac{1}{4} \nabla_{\nu} (G^{\mu \rho} G_{\mu \rho})-\frac{1}{4}\eta_{\mu \nu} \nabla^{\mu} (G^{\sigma \rho} G_{\sigma \rho})=G_{\nu \rho} J^{\rho}.$$ \end{proof}
Finally, we recall the values of the null components of the energy-momentum tensor of a $2$-form. \begin{Lem}\label{tensorcompo} Let $G$ be a $2$-form. We have
$$T[G]_{L L}=|\alpha(G)|^2, \hspace{8mm} T[G]_{\underline{L} \underline{L} }=|\underline{\alpha}(G)|^2 \hspace{8mm} \text{and} \hspace{8mm} T[G]_{L \underline{L}}=|\rho(G)|^2+|\sigma(G)|^2.$$ \end{Lem}
\subsection{Weights preserved by the flow and null components of the velocity vector}\label{sectionweights}
Let $(v^L,v^{\underline{L}},v^A,v^B)$ be the null components of the velocity vector, so that $$v=v^L L+ v^{\underline{L}} \underline{L}+v^Ae_A, \hspace{8mm} v^L=\frac{v^0+\frac{x_i}{r}v^i}{2} \hspace{8mm} \text{and} \hspace{8mm} v^{\underline{L}}=\frac{v^0-\frac{x_i}{r}v^i}{2}.$$ As in \cite{FJS}, we introduce the following set of weights, $$ \mathbf{k}_1 := \left\{\frac{v^{\mu}}{v^0} \hspace{1mm} / \hspace{1mm} 0 \leq \mu \leq 3 \right\} \cup \left\{ z_{\mu \nu} \hspace{1mm} / \hspace{1mm} \mu \neq \nu \right\}, \hspace{1cm} \text{with} \hspace{1cm } z_{\mu \nu} := x^{\mu}\frac{v^{\nu}}{v^0}-x^{\nu}\frac{v^{\mu}}{v^0}.$$ Note that \begin{equation}\label{weightpreserv} \forall \hspace{0.5mm} z \in \mathbf{k}_1, \hspace{8mm} T(z)=0. \end{equation}
Recall that if $\mathbf{k}_0 := \mathbf{k}_1 \cup \{ x^{\mu} v_{\mu} \}$, then $v^{\underline{L}} \lesssim \tau_+^{-1} \sum_{w \in \mathbf{k}_0} |w|$. Unfortunately, $x^{\mu} v_{\mu}$ is not preserved by\footnote{Note however that $x^{\mu} v_{\mu}$ is preserved by $|v| \partial_t+x^i \partial_i$, the massless relativistic transport operator.} $T$ so we will not be able to take advantage of this inequality in this paper. In the following lemma, we try to recover (part of) this extra decay. We also recall inequalities involving other null components of $v$, which will be used all along this paper. \begin{Lem}\label{weights1} The following estimates hold,
$$ 1 \leq 4v^0v^{\underline{L}}, \hspace{8mm} |v^A| \lesssim \sqrt{v^Lv^{\underline{L}}}, \hspace{8mm} |v^A| \lesssim \frac{v^0}{\tau_+} \sum_{z \in \mathbf{k}_1} |z|, \hspace{8mm} \text{and} \hspace{8mm} v^{\underline{L}} \lesssim \frac{\tau_-}{\tau_+} v^0+\frac{v^0}{\tau_+}\sum_{z \in \mathbf{k}_1}|z|.$$ \end{Lem} \begin{proof}
Note first that, as $v^0= \sqrt{1+|v|^2}$,
$$ 4r^2v^Lv^{\underline{L}} \hspace{2mm} = \hspace{2mm} r^2+r^2 |v|^2-|x^i|^2|v_i|^2-2\sum_{1 \leq k < l \leq 3}x^kx^lv^kv^l \hspace{2mm} = \hspace{2mm} r^2+\sum_{1 \leq k < l \leq 3} |z_{kl}|^2.$$
It gives us the first inequality since $v^L \leq v^0$. For the second one, use also that $rv^A=v^0C_A^{i,j} z_{ij}$, where $C_A^{i,j}$ are bounded functions on the sphere such that $re_A = C_A^{i,j} (x^i \partial_j-x^j \partial_i)$. The third one follows from $|v^A| \leq v^0$ and
$$|v^A| \lesssim \frac{v^0}{r} \sum_{1 \leq i < j \leq 3} |z_{ij}| = \frac{v^0}{tr} \sum_{1 \leq i < j \leq 3} \left| x^i\left( \frac{v^j}{v^0}t-x^j+x^j \right)-x^j\left( \frac{v^i}{v^0}t-x^i+x^i \right) \right| \lesssim \frac{v^0}{t} \sum_{q=1}^3 |z_{0q}|.$$
For the last inequality, note first that $v^{\underline{L}} \leq v^0$, which treats the case $t+|x| \leq 2$. Otherwise, use $$2tv^{\underline{L}}=tv^0-\frac{x^i}{r}tv_i = tv^0-v^0\frac{x^iz_{0i}}{r}-v^0r=(t-r)v^0-\frac{x^i}{r}z_{0i}v^0 \hspace{5mm} \text{and} \hspace{5mm} r v^{\underline{L}} =(r-t) v^{\underline{L}}+tv^{\underline{L}}.$$ \end{proof} \begin{Rq}\label{rqweights1}
Note that $v^{\underline{L}} \lesssim \frac{v^0}{\tau_+} \sum_{z \in \mathbf{k}_1} |z|$ holds in the exterior region. Indeed, if $r \geq t$,
$$v^0(r-t) \leq v^0|x|-|v|t \leq |v^0 x-tv| \leq \sum_{i=1}^3 |v^0x^i-tv^i|= v^0 \sum_{i=1}^3 |z_{0i}|.$$ We also point out that $1 \lesssim v^0 v^{\underline{L}}$ is specific to massive particles. \end{Rq}
Finally, we consider an ordering on $\mathbf{k}_1$ such that $\mathbf{k}_1 = \{ z_i \hspace{1mm} / \hspace{1mm} 1 \leq i \leq |\mathbf{k}_1| \}$. \begin{Def}\label{orderk1}
If $ \kappa \in \llbracket 1, |\mathbf{k}_1| \rrbracket^r$, we define $z^{\kappa} := z_{\kappa_1}...z_{\kappa_r}$. \end{Def} \subsection{Various subsets of the Minkowski spacetime}\label{secsubsets}
We now introduce several subsets of $\mathbb{R}_+ \times \mathbb{R}^3$ depending on $t \in \mathbb{R}_+$, $r \in \mathbb{R}_+$ or $u \in \mathbb{R}$. Let $\Sigma_t$, $\mathbb{S}_{t,r}$, $C_u(t)$ and $V_u(t)$ be defined as
\begin{flalign*}
& \hspace{0.5cm} \Sigma_t := \{t\} \times \mathbb{R}^3, \hspace{5.4cm} C_u(t):= \{(s,y) \in \mathbb{R}_+ \times \mathbb{R}^3 / \hspace{1mm} s \leq t, \hspace{1mm} s-|y|=u \}, & \\
& \hspace{5mm} \mathbb{S}_{t,r}:= \{ (s,y) \in \mathbb{R}_+ \times \mathbb{R}^3 \hspace{1mm} / \hspace{1mm} (s,|y|)=(t,r) \} \hspace{4mm} \text{and} \hspace{4mm} V_u(t) := \{ (s,y) \in \mathbb{R}_+ \times \mathbb{R}^3 / \hspace{1mm} s \leq t, \hspace{1mm} s-|y| \leq u \}. & \end{flalign*} The volume form on $C_u(t)$ is given by $dC_u(t)=\sqrt{2}^{-1}r^{2}d\underline{u}d \mathbb{S}^{2}$, where $ d \mathbb{S}^{2}$ is the standard metric on the $2$ dimensional unit sphere.
\begin{tikzpicture} \draw [-{Straight Barb[angle'=60,scale=3.5]}] (0,-0.3)--(0,5); \fill[color=gray!35] (2,0)--(5,3)--(9.8,3)--(9.8,0)--(1,0); \node[align=center,font=\bfseries, yshift=-2em] (title)
at (current bounding box.south)
{The sets $\Sigma_t$, $C_u(t)$ and $V_u(t)$}; \draw (0,3)--(9.8,3) node[scale=1.5,right]{$\Sigma_t$}; \draw (2,0.2)--(2,-0.2); \draw [-{Straight Barb[angle'=60,scale=3.5]}] (0,0)--(9.8,0) node[scale=1.5,right]{$\Sigma_0$}; \draw[densely dashed] (2,0)--(5,3) node[scale=1.5,left, midway] {$C_u(t)$}; \draw (6,1.5) node[ color=black!100, scale=1.5] {$V_u(t)$}; \draw (0,-0.5) node[scale=1.5]{$r=0$}; \draw (2,-0.5) node[scale=1.5]{$-u$}; \draw (-0.5,4.7) node[scale=1.5]{$t$}; \draw (9.5,-0.5) node[scale=1.5]{$r$}; \end{tikzpicture}
We will use the following subsets, given for $ \underline{u} \in \mathbb{R}_+$, specifically in the proof of Proposition \ref{Phi1},
$$ \underline{V}_{\underline{u}}(t) := \{ (s,y) \in \mathbb{R}_+ \times \mathbb{R}^3 / \hspace{1mm} s \leq t, \hspace{1mm} s+|y| \leq \underline{u} \}.$$ For $b \geq 0$ and $t \in \mathbb{R}_+$, define $\Sigma^b_t$ and $\overline{\Sigma}^b_t$ as
$$\Sigma^b_t:= \{ t \} \times \{ x \in \mathbb{R}^3 \hspace{1mm} / \hspace{1mm} |x| \leq t-b \} \hspace{6mm} \text{and} \hspace{6mm} \overline{\Sigma}^b_t:= \{ t \} \times \{ x \in \mathbb{R}^3 \hspace{1mm} / \hspace{1mm} |x| \geq t-b \}.$$ We also introduce a dyadic partition of $\mathbb{R}_+$ by considering the sequence $(t_i)_{i \in \mathbb{N}}$ and the functions $(T_i(t))_{i \in \mathbb{N}}$ defined by $$t_0=0, \hspace{5mm} t_i = 2^i \hspace{5mm} \text{if} \hspace{5mm} i \geq 1, \hspace{5mm} \text{and} \hspace{5mm} T_{i}(t)= t \mathds{1}_{t \leq t_i}(t)+t_i \mathds{1}_{t > t_i}(t).$$ We then define the truncated cones $C^i_u(t)$ adapted to this partition by
$$C_u^i(t) := \left\{ (s,y) \in \mathbb{R}_+ \times \mathbb{R}^3 \hspace{2mm} / \hspace{2mm} t_i \leq s \leq T_{i+1}(t), \hspace{2mm} s-|y| =u \right\}= \left\{ (s,y) \in C_u(t) \hspace{2mm} / \hspace{2mm} t_i \leq s \leq T_{i+1}(t) \right\}.$$ The following lemma will be used several times during this paper. It depicts that we can foliate $[0,t] \times \mathbb{R}^3$ by $(\Sigma_s)_{0 \leq s \leq t}$, $(C_u(t))_{u \leq t}$ or $(C^i_u(t))_{u \leq t, i \in \mathbb{N}}$. \begin{Lem}\label{foliationexpli} Let $t>0$ and $g \in L^1([0,t] \times \mathbb{R}^3)$. Then $$ \int_0^t \int_{\Sigma_s} g dx ds \hspace{2mm} = \hspace{2mm} \int_{u=-\infty}^t \int_{C_u(t)} g dC_u(t) \frac{du}{\sqrt{2}} \hspace{2mm} = \hspace{2mm} \sum_{i=0}^{+ \infty} \int_{u=-\infty}^t \int_{C^i_u(t)} g dC^i_u(t) \frac{du}{\sqrt{2}}.$$ \end{Lem}
Note that the sum over $i$ is in fact finite. The second foliation will allow us to exploit $t-r$ decay since $\| \tau_-^{-1} \|_{L^{\infty}(C_u(t))}=\tau_-^{-1}$ whereas $\|\tau_-^{-1}\|_{L^{\infty}(\Sigma_s)}=1$. The last foliation will be used to take advantage of time decay on $C_u(t)$ (the problem comes from $\|\tau_+^{-1}\|_{L^{\infty}(C_u(t))} = \tau_-^{-1}$). More precisely, let $0 < \delta < a$ and suppose for instance that, $$\forall \hspace{0.5mm} t \in [0,T[, \hspace{6mm} \int_{C_u(t)} g dC_u(t) \leq C (1+t)^{\delta}, \hspace{5mm} \text{so that} \hspace{5mm} \int_{C_u^i(t)} g dC^i_u(t) \leq C (1+T_{i+1}(t))^{\delta} \leq C (1+t_{i+1})^{\delta} .$$ Then, $$ \int_{C_u(t)} \tau_+^{-a}g dC_u(t) \leq \sum_{i=0}^{+ \infty} \int_{C^i_u(t)} (1+s)^{-a} g dC^i_u(t) \leq \sum_{i=0}^{+ \infty} (1+t_{i})^{-a} \int_{C^i_u(t)} g dC^i_u(t) \leq 3^aC \sum_{i=0}^{+ \infty} (1+2^{i+1})^{\delta-a}.$$ As $\delta-a <0$, we obtain a bound independent of $T$. \subsection{An integral estimate}
A proof of the following inequality can be found in the appendix $B$ of \cite{FJS}.
\begin{Lem}\label{intesti}
Let $m \in \mathbb{N}^*$ and let $a$, $b \in \mathbb{R}$, such that $a+b >m$ and $b \neq 1$. Then $$\exists \hspace{0.5mm} C_{a,b,m} >0, \hspace{0.5mm} \forall \hspace{0.5mm} t \in \mathbb{R}_+, \hspace{1.5cm} \int_0^{+ \infty} \frac{r^{m-1}}{\tau_+^a \tau_-^b}dr \leq C_{a,b,m} \frac{1+t^{b-1}}{1+t^{a+b-m}} .$$ \end{Lem} \section{Vector fields and modified vector fields}\label{sec3}
Throughout this section, we consider a sufficiently regular $2$-form $F$.
\subsection{The vector fields of the Poincaré group and their complete lift}
We present in this section the commutation vector fields of the Maxwell equations and those of the relativistic transport operator (we will modify them to study the Vlasov equation). Let $\mathbb{P}$ be the generators of the Poincaré group of the Minkowski spacetime, i.e. the set containing \begin{flalign*} & \hspace{1cm} \bullet \text{the translations\footnotemark} \hspace{18mm} \partial_{\mu}, \hspace{2mm} 0 \leq \mu \leq 3, & \\ & \hspace{1cm} \bullet \text{the rotations} \hspace{25mm} \Omega_{ij}=x^i\partial_{j}-x^j \partial_i, \hspace{2mm} 1 \leq i < j \leq 3, & \\ & \hspace{1cm} \bullet \text{the hyperbolic rotations} \hspace{8mm} \Omega_{0k}=t\partial_{k}+x^k \partial_t, \hspace{2mm} 1 \leq k \leq 3. \end{flalign*} \footnotetext{In this article, we will denote $\partial_{x^i}$, for $1 \leq i \leq 3$, by $\partial_{i}$ and sometimes $\partial_t$ by $\partial_0$.} We also consider $\mathbb{T}:= \{ \partial_{t}, \hspace{1mm} \partial_1, \hspace{1mm} \partial_2, \hspace{1mm} \partial_3\}$ and $\mathbb{O} := \{ \Omega_{12}, \hspace{1mm} \Omega_{13}, \hspace{1mm} \Omega_{23} \}$, the subsets of $\mathbb{P}$ containing respectively the translations and the rotational vector fields as well as $\mathbb{K}:= \mathbb{P} \cup \{ S \}$, where $S=x^{\mu} \partial_{\mu}$ is the scaling vector field. The set $\mathbb{K}$ is well known for commuting with the wave and the Maxwell equations (see Subsection \ref{subseccomuMax}). However, to commute the operator $T=v^{\mu} \partial_{\mu}$, one should consider the complete lifts of the elements of $\mathbb{P}$. \begin{Def}\label{defliftcomplete}
Let $W=W^{\beta} \partial_{\beta}$ be a vector field. Then, the complete lift $\widehat{W}$ of $W$ is defined by $$\widehat{W}=W^{\beta} \partial_{\beta}+v^{\gamma} \frac{\partial W^i}{\partial x^{\gamma}} \partial_{v^i}.$$ We then have $\widehat{\partial}_{\mu}=\partial_{\mu}$ for all $0 \leq \mu \leq 3$ and $$\widehat{\Omega}_{ij}=x^i \partial_j-x^j \partial_i+v^i \partial_{v^j}-v^j \partial_{v^i}, \hspace{2mm} \text{for} \hspace{2mm} 1 \leq i < j \leq 3, \hspace{6mm} \text{and} \hspace{6mm} \widehat{\Omega}_{0k} = t\partial_k+x^k \partial_t+v^0 \partial_{v^k}, \hspace{2mm} \text{for} \hspace{2mm} 1 \leq k \leq 3.$$ \end{Def} One can check that $[T,\widehat{Z}]=0$ for all $Z \in \mathbb{P}$. Since $[T,S]=T$, we consider $$\widehat{\mathbb{P}}_0 := \{ \widehat{Z} \hspace{1mm} / \hspace{1mm} Z \in \mathbb{P} \} \cup \{ S \}$$ and we will, for simplicity, denote by $\widehat{Z}$ an arbitrary vector field of $\widehat{\mathbb{P}}_0$, even if $S$ is not a complete lift. The weights introduced in Subsection \ref{sectionweights} are, in a certain sense, preserved by the action of $\widehat{\mathbb{P}}_0$.
\begin{Lem}\label{weights} Let $z \in \mathbf{k}_1$, $\widehat{Z} \in \widehat{\mathbb{P}}_0$ and $j \in \mathbb{N}$. Then
$$\widehat{Z}(v^0z) \in v^0 \mathbf{k}_1 \cup \{ 0 \} \hspace{8mm} \text{and} \hspace{8mm} \left| \widehat{Z} (z^j) \right| \leq 3j \sum_{w \in \mathbf{k}_1} |w|^j.$$ \end{Lem}
\begin{proof} Let us consider for instance $tv^1-x^1v^0$, $x^1v^2-x^2v^1$, $\widehat{\Omega}_{01}$ and $\widehat{\Omega}_{02}$. We have \begin{eqnarray} \nonumber \widehat{\Omega}_{01}(x^1v^2-x^2v^1 ) & = & tv^2-x^2v^0, \hspace{2cm} \widehat{\Omega}_{01}(tv^1-x^1v^0) \hspace{2mm} = \hspace{2mm} 0, \\ \nonumber \widehat{\Omega}_{02}(x^1v^2-x^2v^1 ) & = & x^1v^0-tv^1 \hspace{8mm} \text{and} \hspace{8mm} \widehat{\Omega}_{02}(tv^1-x^1v^0) \hspace{2mm} = \hspace{2mm} x^2v^1-x^1v^2. \end{eqnarray} The other cases are similar. Consequently,
$$\left| \widehat{Z} (z^j) \right| = \left| \widehat{Z} \left(\frac{1}{(v^0)^j}(v^0z)^j \right) \right| \leq j|z|^j+\frac{j}{(v^0)^j}\left| \widehat{Z} \left(v^0z \right) \right| |v^0z|^{j-1} \leq j|z|^j +j\frac{|\widehat{Z}(v^0z)|^j}{(v^0)^j}+j|z|^j,$$
since $|w||z|^{a-1} \leq |w|^a+|z|^a$ when $a \geq 1$. \end{proof} The vector fields introduced in this section and the averaging in $v$ almost commute in the following sense (we refer to \cite{FJS} or to Lemma \ref{lift2} below for a proof). \begin{Lem}\label{lift} Let $f : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R} $ be a sufficiently regular function. We have, almost everywhere,
$$\forall \hspace{0.5mm} Z \in \mathbb{K}, \hspace{8mm} \left|Z\left( \int_{v \in \mathbb{R}^3 } |f| dv \right) \right| \lesssim \sum_{ \begin{subarray}{} \widehat{Z}^{\beta} \in \widehat{\mathbb{P}}_0^{|\beta|} \\ \hspace{1mm} |\beta| \leq 1 \end{subarray}} \int_{v \in \mathbb{R}^3 } |\widehat{Z}^{\beta} f | dv .$$
Similar estimates hold for $\int_{v \in \mathbb{R}^3} (v^0)^k |f| dv$. For instance,
$$\left| S\left( \int_{v \in \mathbb{R}^3 } (v^0)^{-2}|f| dv \right) \right| \lesssim \int_{v \in \mathbb{R}^3 } (v^0)^{-2}|Sf| dv.$$ \end{Lem} The vector spaces generated by each of the sets defined in this section are actually algebras. \begin{Lem} Let $\mathbb{L}$ be either $\mathbb{K}$, $\mathbb{P}$, $\mathbb{O}$, $\mathbb{T}$ or $\widehat{\mathbb{P}}_0$. Then for all $(Z_1,Z_2) \in \mathbb{L}^2$, $[Z_1,Z_2]$ is a linear combination of vector fields of $\mathbb{L}$. Note also that if $Z_2=\partial \in \mathbb{T}$, then $[Z_1,\partial]$ can be written as a linear combination of translations. \end{Lem}
We consider an ordering on each of the sets $\mathbb{O}$, $\mathbb{P}$, $\mathbb{K}$ and $\widehat{\mathbb{P}}_0$. We take orderings such that, if $\mathbb{P}= \{ Z^i / \hspace{2mm} 1 \leq i \leq |\mathbb{P}| \}$, then $\mathbb{K}= \{ Z^i / \hspace{2mm} 1 \leq i \leq |\mathbb{K}| \}$, with $Z^{|\mathbb{K}|}=S$, and
$$ \widehat{\mathbb{P}}_0= \left\{ \widehat{Z}^i / \hspace{2mm} 1 \leq i \leq |\widehat{\mathbb{P}}_0| \right\}, \hspace{2mm} \text{with} \hspace{2mm} \left( \widehat{Z}^i \right)_{ 1 \leq i \leq |\mathbb{P}|}=\left( \widehat{Z^i} \right)_{ 1 \leq i \leq |\mathbb{P}|} \hspace{2mm} \text{and} \hspace{2mm} \widehat{Z}^{|\widehat{\mathbb{P}}_0|}=S .$$
If $\mathbb{L}$ denotes $\mathbb{O}$, $\mathbb{P}$, $\mathbb{K}$ or $\widehat{\mathbb{P}}_0$, and $\beta \in \{1, ..., |\mathbb{L}| \}^r$, with $r \in \mathbb{N}^*$, we will denote the differential operator $\Gamma^{\beta_1}...\Gamma^{\beta_r} \in \mathbb{L}^{|\beta|}$ by $\Gamma^{\beta}$. For a vector field $W$, we denote the Lie derivative with respect to $W$ by $\mathcal{L}_W$ and if $Z^{\gamma} \in \mathbb{K}^{r}$, we will write $\mathcal{L}_{Z^{\gamma}}$ for $\mathcal{L}_{Z^{\gamma_1}}... \mathcal{L}_{Z^{\gamma_r}}$. The following definition will be useful to lighten the notations in the presentation of commutation formulas.
\begin{Def}\label{goodcoeff} We call good coefficient $c(t,x,v)$ any function $c$ of $(t,x,v)$ such that
$$ \forall \hspace{0.5mm} Q \in \mathbb{N}, \hspace{1mm} \exists \hspace{0.5mm} C_Q >0, \hspace{2mm} \forall \hspace{0.5mm} |\beta| \leq Q, \hspace{2mm} (t,x,v) \in \mathbb{R}_+ \times \mathbb{R}_x^3 \times \mathbb{R}_v^3 \setminus \{ 0 \} \times \{ 0 \} \times \mathbb{R}_v^3, \hspace{8mm} \left| \widehat{Z}^{\beta} \left( c(t,x,v) \right) \right| \leq C_Q.$$ Similarly, we call good coefficient $c(v)$ any function $c$ such that
$$ \forall \hspace{0.5mm} Q \in \mathbb{N}, \hspace{2mm} \exists \hspace{0.5mm} C_Q >0, \hspace{2mm} \forall \hspace{0.5mm} |\beta| \leq Q, \hspace{2mm} v \in \mathbb{R}^3, \hspace{8mm} \left| \widehat{Z}^{\beta} \left( c(v) \right) \right| \leq C_Q.$$ Finally, we will say that $B$ is a linear combination, with good coefficients $c(v)$, of $(B^i)_{1 \leq i \leq M}$ if there exist good coefficients $(c_i(v))_{1 \leq i \leq M}$ such that $B=c_i B^i$. We define similarly a linear combination with good coefficients $c(t,x,v)$. \end{Def}
The good coefficients introduced here are to be thought of as bounded functions which remain bounded when they are differentiated (by $\widehat{\mathbb{P}}_0$ derivatives) or multiplied together. In the remainder of this paper, we will denote by $c(t,x,v)$ (or $c_Z(t,x,v)$, $c_i(t,x,v)$) any such function. Note that $\widehat{Z}^{\beta} \left( c(t,x,v) \right)$ is not necessarily defined on $\{ 0 \} \times \{ 0 \} \times \mathbb{R}_v^3$ as, for instance, $c(t,x,v)=\frac{x^1}{t+r} \frac{v^2}{v^0}$ satisfies these conditions. Typically, the good coefficients $c(v)$ will be of the form $\widehat{Z}^{\gamma} \left( \frac{v^i}{v^0} \right)$.
Let us recall, by the following classical result, that the derivatives tangential to the cone behave better than others.
\begin{Lem}\label{goodderiv} The following relations hold, $$(t-r)\underline{L}=S-\frac{x^i}{r}\Omega_{0i}, \hspace{3mm} (t+r)L=S+\frac{x^i}{r}\Omega_{0i} \hspace{3mm} \text{and} \hspace{3mm} re_A=\sum_{1 \leq i < j \leq 3} C^{i,j}_A \Omega_{ij},$$ where the $C^{i,j}_A$ are uniformly bounded and depend only on spherical variables. In the same spirit, we have $$(t-r)\partial_t =\frac{t}{t+r}S-\frac{x^i}{t+r}\Omega_{0i} \hspace{3mm} \text{and} \hspace{3mm} (t-r) \partial_i = \frac{t}{t+r} \Omega_{0i}- \frac{x^i}{t+r}S- \frac{x^j}{t+r} \Omega_{ij}.$$ \end{Lem}
As mentioned in the introduction, we will crucially use the vector fields $(X_i)_{1 \leq i \leq 3}$, defined by \begin{equation}\label{eq:defXi} X_i := \partial_i+\frac{v^i}{v^0}\partial_t. \end{equation} They provide extra decay in particular cases since \begin{equation}\label{eq:Xi} X_i= \frac{1}{t} \left( \Omega_{0i}+z_{0i} \partial_t \right). \end{equation} We also have, using Lemma \ref{goodderiv} and $(1+t+r)X_i=X_i+2tX_i+(r-t)X_i$, that there exist good coefficients $c_Z(t,x,v)$ such that \begin{equation}\label{eq:X} (1+t+r)X_i=2z_{0i} \partial_t +\sum_{Z \in \mathbb{K}} c_Z(t,x,v) Z. \end{equation} By a slight abuse of notation, we will write $\mathcal{L}_{X_i}(F)$ for $\mathcal{L}_{\partial_i}(F)+\frac{v^i}{v^0} \mathcal{L}_{\partial_t}(F)$. We are now interested in the compatibility of this extra decay with the Lie derivative of a $2$-form and its null decomposition. \begin{Pro}\label{ExtradecayLie} Let $G$ be a sufficiently regular $2$-form. Then, with $z=t\frac{v^i}{v^0}-x^i$ if $X=X_i$ and $\zeta \in \{ \alpha, \underline{\alpha}, \rho, \sigma \}$, we have \begin{eqnarray}
\left| \mathcal{L}_{\partial}(G) \right| & \lesssim & \frac{1}{\tau_-} \sum_{Z \in \mathbb{K} } \left|\nabla_Z G \right| \hspace{2mm} \lesssim \hspace{2mm} \frac{1}{\tau_-} \sum_{ |\gamma| \leq 1 } \left| \mathcal{L}_{Z^{\gamma}}(G) \right| , \label{eq:goodlie} \\
\left| \mathcal{L}_{X}(G) \right| & \lesssim & \frac{1}{\tau_+} \left( |z| | \nabla_{\partial_t} G|+ \sum_{Z \in \mathbb{K}} \left|\nabla_Z G \right| \right), \label{eq:Xdecay} \\
\tau_-\left| \nabla_{\underline{L}} \zeta \right|+\tau_+\left| \nabla_L \zeta \right|+(1+r) \left| \slashed{\nabla} \zeta \right| & \lesssim & \sum_{|\gamma| \leq 1 } \left| \zeta \left( \mathcal{L}_{Z^{\gamma}}(G) \right) \right|, \label{eq:zeta} \\
\left| \zeta \left( \mathcal{L}_{\partial} (G) \right) \right| & \lesssim & \sum_{|\gamma| \leq 1 } \frac{1}{\tau_-} \left| \zeta \left( \mathcal{L}_{Z^{\gamma}}(G) \right) \right|+\frac{1}{\tau_+} \left| \mathcal{L}_{Z^{\gamma}}(G) \right|. \label{eq:zeta2} \end{eqnarray} \end{Pro} \begin{proof} To obtain the first two estimates, use Lemma \ref{goodderiv} as well as \eqref{eq:X} and then remark that if $\Gamma$ is a translation or a homogeneous vector field,
$$ |\nabla_{\Gamma}(G)| \lesssim \left| \mathcal{L}_{\Gamma}(G) \right|+|G|.$$ For \eqref{eq:zeta}, we refer to Lemma $D.2$ of \cite{massless}. Finally, the last inequality comes from \eqref{eq:goodlie} if $2t \leq \max(r,1)$ and from $$\partial_i=\frac{\Omega_{0i}}{t}-\frac{x^i}{2t} L-\frac{x^i}{2t} \underline{L} \hspace{1cm} \text{and} \hspace{1cm} \eqref{eq:zeta} \hspace{6mm} \text{if} \hspace{3mm} 2t \geq \max(r,1).$$ \end{proof}
\begin{Rq}
We do not have, for instance, $\left| \rho \left( \mathcal{L}_{\partial_k} (G) \right) \right| \lesssim \sum_{|\gamma| \leq 1} \tau_-^{-1} \left| \rho \left( \mathcal{L}_{Z^{\gamma}} (G) \right) \right|$, for $1 \leq k \leq 3$. \end{Rq} \begin{Rq}
If $G$ solves the Maxwell equations $\nabla^{\mu} G_{\mu \nu} = J_{\nu}$ and $\nabla^{\mu} {}^* \! G_{\mu \nu} =0$, a better estimate can be obtained on $\alpha( \mathcal{L}_{\partial} (G) )$. Indeed, as $|\nabla_{\partial} \alpha | \leq | \nabla_{L} \alpha |+ |\nabla_{\underline{L}} \alpha |+ | \slashed{\nabla} \alpha|$, \eqref{eq:zeta} and Lemma \ref{maxwellbis} give us,
$$\forall \hspace{0.5mm} |x| \geq 1+\frac{t}{2}, \hspace{0.6cm} |\alpha( \mathcal{L}_{\partial} (G) ) |(t,x) \lesssim |J_A|+ \frac{1}{\tau_+} \sum_{|\gamma| \leq 1} \Big( |\alpha ( \mathcal{L}_{Z^{\gamma}} (G) ) |(t,x)+|\sigma ( \mathcal{L}_{Z^{\gamma}} (G) ) |(t,x)+|\rho ( \mathcal{L}_{Z^{\gamma}} (G) ) |(t,x) \Big).$$ We make the choice to work with \eqref{eq:zeta2} since it does not directly require a bound on the source term of the Maxwell equation, which lightens the proof of Theorem \ref{theorem} (otherwise we would have, among others, to consider more bootstrap assumptions). \end{Rq} \subsection{Modified vector fields and the first order commutation formula}
We start this section with the following commutation formula and we refer to Lemma $2.8$ of \cite{massless} for a proof\footnote{Note that a similar result is proved in Lemma \ref{calculF} below.}.
\begin{Lem}\label{basiccomuf} If $\widehat{Z} \in \widehat{\mathbb{P}}_0 \setminus \{ S \}$, then $$[T_F,\widehat{Z}]( f) = -\mathcal{L}_{Z}(F)(v,\nabla_v f) \hspace{8mm} \text{and} \hspace{8mm} [T_F,S]( f) = F(v,\nabla_v f)-\mathcal{L}_{S}(F)(v,\nabla_v f).$$ \end{Lem} In order to estimate quantities such as $\mathcal{L}_{Z}(F)(v,\nabla_v f)$, we rewrite $\nabla_v f$ in terms of the commutation vector fields (i.e. the elements of $\widehat{\mathbb{P}}_0$). Schematically, if we neglect the null structure of the system, we have, since $v^0\partial_{v^i}= \widehat{\Omega}_{0i}-t\partial_i-x^i\partial_t$, \begin{eqnarray}
\nonumber \left| \mathcal{L}_{Z}(F)(v,\nabla_v f) \right| & \lesssim & v^0\left| \mathcal{L}_{Z}(F) \right| |\partial_{v} f | \\ \nonumber
& \sim & \tau_+ \left| \mathcal{L}_{Z}(F) \right| |\partial_{t,x} f |+\text{l.o.t.}, \end{eqnarray} so that the $v$ derivatives engender a $\tau_+$-loss. The modified vector fields, constructed below, will allow us to absorb the worst terms in the commuted equations. \begin{Def}\label{defphi} Let $\mathbb{Y}_0$ be the set of vector fields defined by $$\mathbb{Y}_0:=\{ \widehat{Z}+\Phi_{\widehat{Z}}^j X_j \hspace{2mm} / \hspace{2mm} \widehat{Z} \in \widehat{\mathbb{P}}_0 \setminus \mathbb{T} \},$$ where $\Phi_{\widehat{Z}}^j : [0,T] \times \mathbb{R}^n_x \times \mathbb{R}^n_v $ are smooth functions which will be specified below and the $X_j$ are defined in \eqref{eq:defXi}. We will denote $\widehat{\Omega}_{0k}+\Phi_{\widehat{\Omega}_{0k}}^j X_j$ by $Y_{0k}$ and, more generally, $\widehat{Z}+\Phi_{\widehat{Z}}^j X_j$ by $Y_{\widehat{Z}}$. We also introduce the sets $$\mathbb{Y} := \mathbb{Y}_0 \cup \mathbb{T} \hspace{8mm} \text{and} \hspace{8mm} \mathbb{Y}_X:= \mathbb{Y} \cup \{ X_1,X_2,X_3 \}.$$
We consider an ordering on $\mathbb{Y}$ and $\mathbb{Y}_X$ compatible with $\widehat{\mathbb{P}}_0$ in the sense that if $\mathbb{Y} = \{ Y^i \hspace{1mm} / \hspace{1mm} 1 \leq i \leq |\mathbb{Y}| \}$, then $Y^i=\widehat{Z}^i+\Phi^k_{\widehat{Z}^i}X_k$ or $Y^i=\partial_{\mu}=\widehat{Z}^i$. We suppose moreover that $X_j$ is the $(|\mathbb{Y}|+j)^{th}$ element of $\mathbb{Y}_X$. Most of the time, for a vector field $Y \in \mathbb{Y}_0$, we will simply write $Y=\widehat{Z}+\Phi X$.
Let $\widehat{Z} \in \widehat{\mathbb{P}}_0 \setminus \{S \}$ and $1 \leq k \leq 3$. $\Phi_{\widehat{Z}}^k$ and $ \Phi^k_S$ are defined such that \begin{equation}\label{defPhicoeff} \hspace{-1.5mm} T_F(\Phi^k_{\widehat{Z}} )=-t\frac{v^{\mu}}{v^0}\mathcal{L}_Z(F)_{\mu k}, \hspace{7.5mm} T_F(\Phi^k_S)=t\frac{v^{\mu}}{v^0}\left(F_{\mu k}-\mathcal{L}_S(F)_{\mu k} \right) \hspace{6mm} \text{and} \hspace{6mm} \Phi_{\widehat{Z}}^k(0,.,.)=\Phi_{S}^k(0,.,.)=0. \end{equation} \end{Def} As explained in the introduction, we consider the $X_i$ vector fields rather than translations in view of \eqref{eq:X}. We are then led to compute $[T_F,X_i]$. \begin{Lem}\label{ComuX} Let $1 \leq i \leq 3$. We have $$[T_F,X_i]=-\mathcal{L}_{X_i}(F)(v,\nabla_v )+\frac{v^{\mu}}{v^0} F_{\mu X_i} \partial_t.$$ \end{Lem} \begin{proof} One just has to notice that $$[T_F,X_i]=\frac{v^i}{v^0}[T_F,\partial_t]+[T_F,\partial_i]+F\left(v,\nabla_v \left( \frac{v^i}{v^0} \right) \right) \partial_t$$ and $v^{\mu} v^j F_{\mu j} =-v^{\mu} v^0 F_{\mu 0}$, as $F$ is antisymmetric. \end{proof} Finally, we study the commutator between the transport operator and these modified vector fields. The following relation, \begin{equation}\label{eq:vderiv} \partial_{v^i}=\frac{1}{v^0} \left( Y_{0i}-\Phi^j_{\widehat{\Omega}_{0i}} X_j-t X_i+ z_{0i}\partial_t \right), \end{equation} will be useful to express the $v$ derivatives in terms of the commutation vector fields. \begin{Pro}\label{Comfirst} Let $Y \in \mathbb{Y}_0 \backslash \{ Y_S \}$. 
We have, using \eqref{defPhicoeff} \begin{eqnarray} \nonumber [T_F,Y] & = & -\frac{v^{\mu}}{v^0}{\mathcal{L}_Z(F)_{\mu}}^j \left(Y_{0j}-\Phi^k_{\widehat{\Omega}_{0j}} X_k+z_{0j}\partial_t \right) -\Phi^j_{\widehat{Z}}\mathcal{L}_{X_j}(F)(v,\nabla_v )+\Phi^j_{\widehat{Z}}\frac{v^{\mu}}{v^0} F_{\mu X_j} \partial_t, \\ \nonumber [T_F,Y_S] & = & \frac{v^{\mu}}{v^0}\left({F_{\mu}}^j-{\mathcal{L}_S(F)_{\mu}}^j \right) \left(Y_{0j}-\Phi^k_{\widehat{\Omega}_{0j}} X_k+z_{0j}\partial_t \right)-\Phi^j_{S}\mathcal{L}_{X_j}(F)(v,\nabla_v ) +\Phi^j_{S}\frac{v^{\mu}}{v^0} F_{\mu X_j} \partial_t. \end{eqnarray} \end{Pro} \begin{proof} We only treat the case $Y \in \mathbb{Y}_0 \setminus \{ Y_S \}$ (the computations are similar for $Y_S$). Using Lemmas \ref{basiccomuf} and \ref{ComuX} as well as \eqref{eq:vderiv}, we have \begin{eqnarray} \nonumber [T_F,Y] & = & [T_F,\widehat{Z}]+[T_F,\Phi^j_{\widehat{Z}} X_j] \\ \nonumber & = & -\mathcal{L}_Z(F)(v,\nabla_v )+T_F(\Phi^j_{\widehat{Z}} ) X_j +\Phi^j_{\widehat{Z}} [T_F,X_j]. \\ \nonumber & = & -\mathcal{L}_Z(F)(v,\nabla_v )+T_F(\Phi^j_{\widehat{Z}} )X_j -\Phi^j_{\widehat{Z}}\mathcal{L}_{X_j}(F)(v,\nabla_v )+\Phi^j_{\widehat{Z}}\frac{v^{\mu}}{v^0} F_{\mu X_j} \partial_t \\ \nonumber & = & -\frac{v^{\mu}}{v^0}{\mathcal{L}_Z(F)_{\mu}}^j \left(Y_{0j}-\Phi^k_{\widehat{\Omega}_{0j}} X_k+z_{0j}\partial_t \right)+ \left(t\frac{v^{\mu}}{v^0}{\mathcal{L}_Z(F)_{\mu}}^j+T_F(\Phi^j_{\widehat{Z}} ) \right) X_j \\ \nonumber & & -\Phi^j_{\widehat{Z}}\mathcal{L}_{X_j}(F)(v,\nabla_v )+\Phi^j_{\widehat{Z}}\frac{v^{\mu}}{v^0} F_{\mu X_j} \partial_t. \end{eqnarray} To conclude, recall from \eqref{defPhicoeff} that $t\frac{v^{\mu}}{v^0}{\mathcal{L}_Z(F)_{\mu}}^j+T_F(\Phi^j_{\widehat{Z}} )=0$. \end{proof} \begin{Rq}
As we will have $|\Phi| \lesssim \log^2(1+\tau_+)$ and a good control on $z_{0j} \partial_t f$, and in view of the improved decay given by $X_j$ (see Proposition \ref{ExtradecayLie}), it holds schematically
$$ \left| [T_F,Y](f) \right| \lesssim \log^2 (1+\tau_+) \left| \mathcal{L}_Z(F)\right| |Y f|,$$
which is much better than $ \left| [T_F,\widehat{Z}](f) \right| \lesssim \tau_+ \left| \mathcal{L}_Z(F) \right| |\partial_{t,x} f|$. \end{Rq} Let us introduce some notations for the presentation of the higher order commutation formula. \begin{Def}
Let $Y^{\beta} \in \mathbb{Y}^{|\beta|}$. We denote by $\beta_T$ the number of translations composing $Y^{\beta}$ and by $\beta_P$ the number of modified vector fields (the elements of $\mathbb{Y}_0$). Note that $\beta_T$ denotes also the number of translations composing $\widehat{Z}^{\beta}$ and $Z^{\beta}$ and $\beta_P$ the number of elements of $\widehat{\mathbb{P}}_0 \setminus \mathbb{T}$ or $\mathbb{K} \setminus \mathbb{T}$. We have
$$|\beta|= \beta_T+\beta_P$$
and, for instance, if $Y^{\beta}=\partial_t Y_1 \partial_3$, $|\beta|=3$, $\beta_T=2$ and $\beta_P=1$. We define similarly $\beta_X$ if $Y^{\beta} \in \mathbb{Y}^{|\beta|}_X$. \end{Def} \begin{Def}\label{Pkp} Let $k=(k_T,k_P) \in \mathbb{N}^2$ and $ p \in \mathbb{N}$. We will denote by $P_{k,p}(\Phi)$ any linear combination of terms such as
$$ \prod_{j=1}^p Y^{\beta_j}(\Phi), \hspace{3mm} \text{with} \hspace{3mm} Y^{\beta_j} \in \mathbb{Y}^{|\beta_j|}, \hspace{3mm} \sum_{j=1}^p |\beta_j| = |k|, \hspace{3mm} \sum_{j=1}^p \left(\beta_j \right)_P = k_P$$
and where $\Phi$ denotes any of the $\Phi$ coefficients. Note that $\sum_{j=1}^p \left(\beta_j \right)_T = k_T$. Finally, if $ \min_{j} |\beta_j| \geq 1$, we will denote $\prod_{j=1}^p Y^{\beta_j}(\Phi)$ by $P_{\beta}(\Phi)$, where $\beta=(\beta_1,...,\beta_p)$. \end{Def} \begin{Def} Let $k=(k_T,k_P,k_X) \in \mathbb{N}^3$ and $ p \in \mathbb{N}$. We will denote by $P^X_{k,p}(\Phi)$ any linear combination of terms such as
$$ \prod_{j=1}^p Y^{\beta_j}(\Phi), \hspace{3mm} \text{with} \hspace{3mm} Y^{\beta_j} \in \mathbb{Y}^{|\beta_j|}, \hspace{3mm} \sum_{j=1}^p |\beta_j| = |k|, \hspace{3mm} \sum_{j=1}^p \left(\beta_j \right)_P = k_P, \hspace{3mm} \sum_{j=1}^p \left(\beta_j \right)_X = k_X \hspace{3mm} \text{and} \hspace{3mm} \min_{1 \leq j \leq p} \left( \beta_j \right)_X \geq 1.$$ We will also denote $ \prod_{j=1}^p Y^{\beta_j}(\Phi)$ by $P^X_{\beta}(\Phi)$. \end{Def} \begin{Rq}
For convenience, if $p=0$, we will take $P_{k,p}(\Phi)=1$. Similarly, if $|\beta|=0$, we will take $P_{\beta}(\Phi)=P^X_{\beta}(\Phi)=1$. \end{Rq} In view of presenting the higher order commutation formulas, let us gather the source terms in different categories. \begin{Pro}\label{Comufirst} Let $Y \in \mathbb{Y} \setminus \mathbb{T}$. In what follows, $0 \leq \nu \leq 3$. The commutator $[T_F,Y]$ can be written as a linear combination, with $c(v)$ coefficients, of terms such as \begin{itemize}
\item $ \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \Gamma $, where $|\gamma| \leq 1$ and $\Gamma \in \mathbb{Y}_0$.
\item $\Phi \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \partial_{t,x} $, where $|\gamma| \leq 1$.
\item $z \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \partial_{t,x} $, where $|\gamma| \leq 1$ and $z \in \mathbf{k}_1$. \item $\Phi \mathcal{L}_{X}(F)(v,\nabla_v )$. \end{itemize} \end{Pro} Finally, let us adapt Lemma \ref{lift} to our modified vector fields. \begin{Lem}\label{lift2}
Let $f : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R} $ be a sufficiently regular function and suppose that for all $|\beta| \leq 1$, $|Y^{\beta} \Phi| \lesssim \log^{\frac{7}{2}}(1+\tau_+)$. Then, we have, almost everywhere,
$$\forall \hspace{0.5mm} Z \in \mathbb{K}, \hspace{8mm} \left|Z\left( \int_{v \in \mathbb{R}^3 } |f| dv \right) \right| \lesssim \sum_{ \begin{subarray}{} Y \in \mathbb{Y} \\ z \in \mathbf{k}_1 \end{subarray}} \int_{v \in \mathbb{R}^3 } \left( |Yf|+|f| +|X(\Phi)f| + \frac{ \log^7 (1 + \tau_+)}{\tau_+} \left(|z \partial_t f|+|zf| \right) \right) dv .$$ \end{Lem}
\begin{proof} Consider, for instance, the rotation $\Omega_{12}$. We have by integration by parts, as $\Omega_{12}=\widehat{\Omega}_{12}-v^{1} \partial_{v^2}+v^2 \partial_{v^1}$,
$$ \Omega_{12}\left( \int_{v \in \mathbb{R}^3 } |f| dv \right) = \int_{v \in \mathbb{R}^3} \widehat{\Omega}_{12} (|f|) dv -\int_{v \in \mathbb{R}^3} \left( v^1\partial_{v^2} -v^2 \partial_{v^1} \right)(|f|) dv= \int_{v \in \mathbb{R}^3} \widehat{\Omega}_{12} (|f|) dv.$$
This proves Lemma \ref{lift} for $\Omega_{12}$ since $| \widehat{\Omega}_{12} (|f|) |= | \frac{f}{|f|}\widehat{\Omega}_{12} (f) | \leq |\widehat{\Omega}_{12} (f)|$. On the other hand, \begin{eqnarray}\label{45:eq}
\int_{v \in \mathbb{R}^3} \widehat{\Omega}_{12} (|f|) dv & = & \int_{v \in \mathbb{R}^3} \left( \widehat{\Omega}_{12} +\Phi_{\widehat{\Omega}_{12}}^k X_k -\Phi_{\widehat{\Omega}_{12}}^k X_k \right)(|f|) dv \\
& = & \int_{v \in \mathbb{R}^3} \frac{f}{|f|} Y_{\widehat{\Omega}_{12}} f dv+\int_{v \in \mathbb{R}^3} X_k \left( \Phi^k_{\widehat{\Omega}_{12}} \right) |f| dv -\int_{v \in \mathbb{R}^3} X_k \left( \Phi^k_{\widehat{\Omega}_{12}} |f| \right) dv \label{eq:lif33}. \end{eqnarray} \eqref{45:eq} implies the result if $t+r \leq 1$. Otherwise, if $t \geq r$, note that by \eqref{eq:Xi}, \begin{eqnarray}
\nonumber \int_{v \in \mathbb{R}^3} X_k \left( \Phi^k_{\widehat{\Omega}_{12}} |f| \right) dv & = & \frac{1}{t}\int_{v \in \mathbb{R}^3} \left( \Omega_{0k}+z_{0k} \partial_t \right) \left( \Phi^k_{\widehat{\Omega}_{12}} |f| \right) dv \\ \nonumber
& = & \frac{1}{t}\int_{v \in \mathbb{R}^3} \left( Y_{0k}-v^0\partial_{v^k}-\Phi^q_{\widehat{\Omega}_{0k}}X_q+z_{0k} \partial_t \right) \left( \Phi^k_{\widehat{\Omega}_{12}} |f| \right)dv \\ \nonumber & = & \frac{1}{t}\int_{v \in \mathbb{R}^3} \left( Y_{0k}+\frac{v_k}{v^0}-\Phi^q_{\widehat{\Omega}_{0k}}X_q+z_{0k} \partial_t \right) \left( \Phi^k_{\widehat{\Omega}_{12}} |f| \right)dv . \end{eqnarray}
Consequently, in view of the bounds on $Y^{\beta} \Phi$ for $|\beta| \leq 1$,
$$ \left| \int_{v \in \mathbb{R}^3} X_k \left( \Phi^k_{\widehat{\Omega}_{12}} |f| \right) dv \right| \lesssim \sum_{Y \in \mathbb{Y}} \sum_{z \in \mathbf{k}_1} \int_{v \in \mathbb{R}^3} |Yf|+|f|+\frac{|z| \log^7 (1+t)}{t} \left( |\partial_t f|+|f| \right) dv ,$$ and it remains to combine it with \eqref{eq:lif33}. When $t \leq r$, one can use $rX_k=tX_k+(r-t)X_k$ and Lemma \ref{goodderiv}. \end{proof}
\begin{Rq}\label{lift3}
If moreover $|\Phi| \lesssim \log^2(1+\tau_+)$, one can prove similarly that, for $Z \in \mathbb{K}$, $z \in \mathbf{k}_1$ and $j \in \mathbb{N}^*$,
$$ \left|Z \left( \int_{v } |z^jf| dv \right) \right| \lesssim \hspace{1mm} j \hspace{-1mm} \sum_{ \begin{subarray}{} |\xi|+|\beta| \leq 1 \\ \hspace{2.5mm} w \in \mathbf{k}_1 \end{subarray}} \int_{v } |w^jP^X_{\xi}(\Phi) Y^{\beta} f|+\log^2(3+t)|w^{j-1}f|+\frac{ \log^7(1+\tau_+)|w|^{j+1}}{\tau_+} \left(| \partial_t f|+|f| \right) dv .$$ To prove this inequality, apply Lemma \ref{lift2} to $z^j f$ and use the two following properties,
$$|Y(z^j)| \leq |\widehat{Z}(z^j)|+|\Phi X(z^j)| \lesssim j \left( \sum_{w \in \mathbf{k}_1} |w|^j+ \log^2(1+\tau_+) |z|^{j-1} \right) \hspace{6mm} \text{and} \hspace{6mm} \sum_{w \in \mathbf{k}_1} |w||z|^j \lesssim \sum_{w \in \mathbf{k}_1} |w|^{j+1}.$$ It remains to apply Remark \ref{rqweights1} in order to get
$$\forall \hspace{0.5mm} |x| \geq 1+2t, \hspace{1cm} \log^2(1+\tau_+) |z|^{j-1} \lesssim \frac{\log^2(3+r)}{r} \sum_{w \in \mathbf{k}_1} |w z^{j-1} | \lesssim \sum_{w \in \mathbf{k}_1} |w^j|$$
and to note that $\log (1+\tau_+) \lesssim \log (3+t)$ if $|x| \leq 1+2t$. \end{Rq}
\subsection{Higher order commutation formula}
The following lemma will be useful for upcoming computations. \begin{Lem}\label{calculF} Let $G$ be a sufficiently regular $2$-form and $g$ a sufficiently regular function defined respectively on $[0,T[ \times \mathbb{R}^3$ and $[0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v$. Let also $Y=\widehat{Z}+\Phi X \in \mathbb{Y}_0$ and $\nu \in \llbracket 0,3 \rrbracket$. We have, with $n_Z=0$ if $Z \in \mathbb{P}$ and $n_S=-1$, \begin{eqnarray} \nonumber Y \left( v^{\mu}G_{\mu \nu} \right) \hspace{-1.5mm} & = & \hspace{-1.5mm} v^{\mu}\mathcal{L}_{Z }(G)_{\mu \nu}+n_Zv^{\mu} G_{\mu \nu} +\Phi v^{\mu}\mathcal{L}_{X }(G)_{\mu \nu}+v^{\mu}G_{\mu [Z,\partial_{\nu}]}, \\ \nonumber Y \left( G \left( v , \nabla_v g \right) \right) \hspace{-1.5mm} & = & \hspace{-1.5mm} \mathcal{L}_Z(G) \left( v , \nabla_v g \right)+2n_ZG \left( v ,\nabla_v g \right)+\Phi \mathcal{L}_X(G) \left( v , \nabla_v g \right)+G \left( v , \nabla_v \widehat{Z} g \right)+c(v)\Phi G \left( v , \nabla_v \partial g \right) . \end{eqnarray} For $i \in \llbracket 1,3 \rrbracket$, $ Y \left( v^{\mu} \mathcal{L}_{X_i}(G)_{\mu \nu} \right)$ can be written as a linear combination, with $c(v)$ coefficients, of terms of the form
$$ \Phi^p v^{\mu} \mathcal{L}_{X Z^{\gamma} }(G)_{\mu \theta}, \hspace{3mm} \text{with} \hspace{3mm} 0 \leq \theta \leq 3 \hspace{3mm} \text{and} \hspace{3mm} \max(p,|\gamma|) \leq 1.$$ Finally, $Y \left( \mathcal{L}_{X_i}(G) \left( v , \nabla_v g \right) \right)$ can be written as a linear combination, with $c(v)$ coefficients, of terms of the form
$$ \Phi^p \mathcal{L}_{X Z^{\gamma} }(G) \left( v, \nabla \left( \widehat{Z}^{\kappa} g \right) \right), \hspace{3mm} \text{with} \hspace{3mm} \max(|\gamma|+|\kappa|,p+\kappa_P) \leq 1.$$ \end{Lem}
\begin{proof} Let $Z_v=\widehat{Z}-Z$ so that $Y=Z+Z_v+\Phi X$. We prove the second and the fourth properties (the first and the third ones are easier). We have \begin{eqnarray} \nonumber Y \left( G \left( v , \nabla_v g \right) \right) & = & \mathcal{L}_Z(G) \left( v, \nabla_v g \right)+G \left( [Z,v], \nabla_v g \right)+G \left( v,[Z,\nabla_v g] \right)+G \left( Z_v(v), \nabla_v g\right)+G \left( v, Z_v \left(\nabla_v g \right) \right) \\ \nonumber & &+\Phi \mathcal{L}_{X}(G) \left( v, \nabla_v g \right)+c(v) \Phi G \left( v , \nabla_v \partial g \right). \end{eqnarray} Note now that \begin{itemize} \item $S_v=0$ and $[S,v]=-v$, \item $[Z,v]=-Z_v(v)$ if $Z \in \mathbb{P}$. \end{itemize} The second identity is then implied by \begin{itemize} \item $[\partial, \nabla_v g]=\nabla_v \partial(g)$ and $[S, \nabla_v g ]= \nabla_v S(g)-\nabla_v g$. \item $[Z, \nabla_v g]+Z_v \left( \nabla_v g \right)= \nabla_v \widehat{Z}(g)$ if $Z \in \mathbb{O}$. \item $[\Omega_{0i}, \nabla_v g]+(\Omega_{0i})_v \left( \nabla_v g \right)= \nabla_v \widehat{Z}(g)-\frac{v}{v^0} \partial_{v^i}$ and $G(v,v)=0$ as $G$ is a $2$-form. \end{itemize} We now prove the fourth identity. We treat the case $Y=\widehat{Z}+\Phi X \in \mathbb{Y}_0 \setminus \{ Y_S \}$ as the computations are similar for $Y_S$. On the one hand, since $[\partial,X_i]=0$ and $X_k= \partial_k+\frac{v^k}{v^0}\partial_t$, one can easily check that $\Phi X_k \left( \mathcal{L}_{X_i}(G) \left( v , \nabla_v g \right) \right)$ gives four terms of the expected form. 
On the other hand, $$\widehat{Z} \left( \mathcal{L}_{X_i}(G) \left( v , \nabla_v g \right) \right)=\widehat{Z} \left( \mathcal{L}_{\partial_i}(G) \left( v , \nabla_v g \right) \right) +\widehat{Z} \left(\frac{v^i}{v^0} \mathcal{L}_{\partial_t}(G) \left( v , \nabla_v g \right) \right).$$ Applying the second equality of this Lemma to $\mathcal{L}_{\partial}(G)$, $g$ and $\widehat{Z}$ (which is equal to $Y$ when $\Phi=0$), we have \begin{eqnarray} \nonumber \widehat{Z} \left( \mathcal{L}_{\partial_i}(G) \left( v , \nabla_v g \right) \right) & = & \mathcal{L}_{Z \partial_i}(G) \left( v , \nabla_v g \right) +\mathcal{L}_{\partial_i}(G) \left( v , \nabla_v \widehat{Z} g \right) \\ \nonumber \widehat{Z} \left(\frac{v^i}{v^0} \mathcal{L}_{\partial_t}(G) \left( v , \nabla_v g \right) \right) & = & \widehat{Z} \left( \frac{v^i}{v^0} \right) \mathcal{L}_{\partial_t}(G) \left( v , \nabla_v g \right)+\frac{v^i}{v^0}\mathcal{L}_{Z \partial_t}(G) \left( v , \nabla_v g \right) +\frac{v^i}{v^0}\mathcal{L}_{\partial_t}(G) \left( v , \nabla_v \widehat{Z} g \right) \end{eqnarray} The sum of the last terms of these two identities is of the expected form. 
The same holds for the sum of the three other terms since \begin{eqnarray} \nonumber [\Omega_{0j},\partial_i]+\frac{v^i}{v^0}[\Omega_{0j},\partial_t]+v^0 \partial_{v^j}\left( \frac{v^i}{v^0} \right) \partial_t \hspace{-2mm} & = & \hspace{-2mm} -\delta_{j}^{i} \partial_t-\frac{v^i}{v^0}\partial_j-\frac{v^i v^j}{(v^0)^2} \partial_t+\delta_{j}^{i} \partial_t= -\frac{v^i}{v^0} X_j=c(v) X_j, \\ \nonumber [\Omega_{kj},\partial_i]+\frac{v^i}{v^0}[\Omega_{kj},\partial_t]+\left( v^k \partial_{v^j}-v^j \partial_{v^k} \right) \left( \frac{v^i}{v^0} \right) \partial_t \hspace{-2mm} & = & \hspace{-2mm} \delta_{j}^i \partial_k-\delta_k^i \partial_j+\left(\frac{v^k \delta_j^i-v^j \delta_k^i}{v^0} \right)\partial_t= \delta_j^i X_k -\delta_k^i X_j,\\ \nonumber [S,\partial_i]+\frac{v^i}{v^0}[S,\partial_t] \hspace{-2mm} & = & \hspace{-2mm} - \partial_i-\frac{v^i}{v^0}\partial_t=- X_i . \end{eqnarray} \end{proof}
We are now ready to present the higher order commutation formula. To lighten its presentation and facilitate its future usage, we introduce $\mathbb{G}:= \widehat{\mathbb{P}}_0 \cup \mathbb{Y}_0$, on which we consider an ordering. A combination of vector fields of $\mathbb{G}$ will always be denoted by $\Gamma^{\sigma}$ and we will also denote by $\sigma_T$ its number of translations and by $\sigma_P= |\sigma|-\sigma_T$ its number of homogeneous vector fields. In Lemma \ref{GammatoYLem} below, we will express $\Gamma^{\sigma}$ in terms of $\Phi$ coefficients and $\mathbb{Y}$ vector fields.
\begin{Pro}\label{ComuVlasov} Let $\beta$ be a multi-index. In what follows, $\nu \in \llbracket 0 , 3 \rrbracket$. The commutator $[T_F,Y^{\beta}]$ can be written as a linear combination, with $c(v)$ coefficients, of the following terms. \begin{itemize} \item \begin{equation}\label{eq:com1}
z^d P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} Y^{\sigma}, \tag{type 1-$\beta$}
\end{equation}
where \hspace{2mm} $z \in \mathbf{k}_1$, \hspace{2mm} $d \in \{ 0,1 \}$, \hspace{2mm} $|\sigma| \geq 1$, \hspace{2mm} $\max ( |\gamma|, |k|+|\gamma|, |k|+|\sigma| ) \leq |\beta|$, \hspace{2mm} $|k|+|\gamma|+|\sigma| \leq |\beta|+1$ \hspace{2mm} and \hspace{2mm} $p+k_P+\sigma_P+d \leq \beta_P$. Note also that, as \hspace{2mm} $|\sigma| \geq 1$, \hspace{2mm} $|k| \leq |\beta|- 1$. \item \begin{equation}\label{eq:com2} P_{k,p}(\Phi) \mathcal{L}_{X Z^{\gamma_0}}(F) \left( v, \nabla_v \Gamma^{\sigma} \right), \tag{type 2-$\beta$} \end{equation}
where \hspace{2mm} $|k|+|\gamma_0|+|\sigma| \leq |\beta|-1$, \hspace{2mm} $p+k_P+\sigma_P \leq \beta_P$ \hspace{2mm} and \hspace{2mm} $p \geq 1$. \item \begin{equation}\label{eq:com4}
P_{k,p}(\Phi) \mathcal{L}_{ \partial Z^{\gamma_0}}(F) \left( v,\nabla_v \Gamma^{\sigma} \right), \tag{type 3-$\beta$}
\end{equation}
where \hspace{2mm} $|k|+|\gamma_0|+|\sigma| \leq |\beta|-1$, \hspace{2mm} $p+|\gamma_0| \leq |\beta|-1$ \hspace{2mm} and \hspace{2mm} $p+k_P+\sigma_P \leq \beta_P$.
\end{itemize} \end{Pro}
\begin{proof}
The result follows from an induction on $|\beta|$, Proposition \ref{Comufirst} (which treats the case $|\beta| =1$) and $$[T_F,YY^{\beta_0}]=Y[T_F,Y^{\beta_0}]+[T_F,Y]Y^{\beta_0}.$$
Let $ Q \in \mathbb{N}$ and suppose that the commutation formula holds for all $|\beta_0| \leq Q$. We then fix a multi-index $\beta_0$ with $|\beta_0|=Q$, consider $Y \in \mathbb{Y}$ and denote the multi-index corresponding to $YY^{\beta_0}$ by $\beta$. Then, $|\beta|=|\beta_0|+1$.
Suppose first that $Y=\partial$ is a translation so that $\beta_P=(\beta_0)_P$. Then, using Lemma \ref{basiccomuf}, we have $$ [T_F,\partial]Y^{\beta_0} = -\mathcal{L}_{\partial}(F)(v,\nabla_v Y^{\beta_0}), $$
which is a term of \eqref{eq:com4} as $|\beta_0| = |\beta|-1$ and $(\beta_0)_P=\beta_P$. Using the induction hypothesis, $\partial[T_F,Y^{\beta_0}]$ can be written as a linear combination with good coefficients $c(v)$ of terms of the form\footnote{We do not mention the $c(v)$ coefficients here since $\partial \left( c(v) \right) =0$.} \begin{itemize}
\item $ \partial \left( z^d P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} Y^{\sigma} \right) $, with $z \in \mathbf{k}_1$, $d \in \{0,1 \}$, $|\sigma| \geq 1$, $\max ( |\gamma|, |k|+|\gamma|, |k|+ |\sigma| ) \leq |\beta_0|$, $|k|+|\gamma|+|\sigma| \leq |\beta_0|+1$ and $p+k_P+\sigma_P+d \leq (\beta_0)_P$. This leads to the sum of the following terms. \begin{itemize} \item $\partial(z^d) P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} Y^{\sigma}$, which is of \eqref{eq:com1} since $\partial(z)=0$ or $\frac{v^{\lambda}}{v^0}$. \item $z^d P_{(k_T+1,k_P),p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} Y^{\sigma}+z^dP_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{\partial Z^{\gamma}}(F)_{ \mu \nu} Y^{\sigma}+z^dP_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} \partial Y^{\sigma},$ which is the sum of terms of \eqref{eq:com1} (as, namely, $k_P$ does not increase and $(\sigma_0)_P=\sigma_P$ if $Y^{\sigma_0}=\partial Y^{\sigma}$). \end{itemize}
\item $\partial \left( P_{k,p}(\Phi) \mathcal{L}_{ \partial Z^{\gamma_0}}(F) \left( v,\nabla_v \Gamma^{\sigma} \right) \right)$, with $|k|+|\gamma_0|+|\sigma| \leq |\beta_0|-1$, $p+|\gamma_0| \leq |\beta_0|-1$ and $p+k_P+\sigma_P \leq (\beta_0)_P$. We then obtain $$ P_{(k_T+1,k_P),p}(\Phi) \mathcal{L}_{ \partial Z^{\gamma_0}}(F) \hspace{-0.2mm} \left( v,\nabla_v \Gamma^{\sigma} \right), \hspace{2.3mm} P_{k,p}(\Phi)\mathcal{L}_{ \partial \partial Z^{\gamma_0}}(F) \hspace{-0.2mm} \left( v,\nabla_v \Gamma^{\sigma} \right) \hspace{2.3mm} \text{and} \hspace{2.3mm} P_{k,p}(\Phi) \mathcal{L}_{ \partial Z^{\gamma_0}}(F) \hspace{-0.2mm} \left( v,\nabla_v \partial \Gamma^{\sigma} \right),$$
which are all of \eqref{eq:com4} since $|k|+|\gamma_0|+|\sigma|+1 \leq |\beta_0|=|\beta|-1$, $p+|\gamma_0|+1 \leq |\beta|-1$ and, if $\Gamma^{\overline{\sigma}} = \partial \Gamma^{\sigma}$, $p+k_P+\overline{\sigma}_P=p+k_P+\sigma_P \leq \left( \beta_0 \right)_P = \beta_P$.
\item $\partial \left( P_{k,p}(\Phi) \mathcal{L}_{ X Z^{\gamma_0}}(F) \left( v,\nabla_v \Gamma^{\sigma} \right) \right)$, with $|k|+|\gamma_0|+|\sigma| \leq |\beta_0|-1$, $p+k_P+\sigma_P \leq (\beta_0)_P$ and $p \geq1$. We then obtain, as $[\partial,X]=0$, $$ P_{(k_T+1,k_P),p}(\Phi) \mathcal{L}_{ X Z^{\gamma_0}}(F) \hspace{-0.2mm} \left( v,\nabla_v \Gamma^{\sigma} \right) \hspace{-0.3mm} , \hspace{1.7mm} P_{k,p}(\Phi)\mathcal{L}_{X \partial Z^{\gamma_0}}(F) \hspace{-0.2mm} \left( v,\nabla_v \Gamma^{\sigma} \right) \hspace{1.7mm} \text{and} \hspace{1.7mm} P_{k,p}(\Phi)\mathcal{L}_{ X Z^{\gamma_0}}(F) \hspace{-0.2mm} \left( v,\nabla_v \partial \Gamma^{\sigma} \right) \hspace{-0.3mm} ,$$
which are all of \eqref{eq:com2} since, for instance, $|k|+|\gamma_0|+|\sigma|+1 \leq |\beta_0| = |\beta|-1$. \end{itemize} We now suppose that $Y \in \mathbb{Y} \setminus \mathbb{T}$, so that $\beta_P = (\beta_0)_P+1$. We will write schematically that $Y=\widehat{Z}+\Phi X$. Using Proposition \ref{Comufirst}, we have that $[T_F,Y]Y^{\beta_0}$ can be written as a linear combination, with $c(v)$ coefficients, of the following terms. \begin{itemize}
\item $ \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \Gamma Y^{\beta_0} $, where $|\gamma| \leq 1$ and $\Gamma \in \mathbb{Y}$, which is of \eqref{eq:com1}.
\item $\Phi^{1-d}z^d \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \partial Y^{\beta_0}$, where $|\gamma| \leq 1$, $d \in \{0,1 \}$ and $z \in \mathbf{k}_1$, which is of \eqref{eq:com1} since, if $\xi$ is the multi-index corresponding to $\partial Y^{\beta_0}$, $\xi_P = (\beta_0)_P < \beta_P$.
\item $ \Phi \mathcal{L}_{X}(F)(v,\nabla_v Y^{\beta_0} )$, which is of \eqref{eq:com2} since $|\beta_0| \leq |\beta|-1$ and $1+(\beta_0)_P \leq \beta_P$. \end{itemize} It then remains to compute $Y[T_F,Y^{\beta_0}]$. Using the induction hypothesis, it can be written as a linear combination of terms of the form \begin{itemize}
\item $ Y \left(c(v) z^d P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} Y^{\sigma} \right),$ with $z \in \mathbf{k}_1$, $d \in \{0,1 \}$, $|\sigma| \geq 1$, $\max ( |\gamma|,|k|+|\gamma|, |k|+ |\sigma| ) \leq |\beta_0|$, $|k|+|\gamma|+|\sigma| \leq |\beta_0|+1$ and $p+k_P+\sigma_P+d \leq (\beta_0)_P$. It leads to the following error terms. \begin{itemize} \item $ Y\left( \frac{c(v)}{v^0} \right) z^dP_{k,p}(\Phi) v^{\mu}\mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} Y^{\sigma} $, which is of \eqref{eq:com1} since $Y\left( \frac{c(v)}{v^0} \right) = \widehat{Z} \left( \frac{c(v)}{v^0} \right) = \frac{c_0(v)}{v^0} $. \item $c(v)Y \left( z^d \right) P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F) Y^{\sigma}$, which is a linear combination of terms of \eqref{eq:com1} since, by Lemma \ref{weights}, $$Y(z)=\widehat{Z}(z)+\Phi^i_{\widehat{Z}} X_i(z)=c_0(v)z+z'+\Phi^i_{\widehat{Z}}c_i(v), \hspace{2mm} \text{where} \hspace{2mm} z' \in \mathbf{k}_1, \hspace{2mm} \text{and} \hspace{2mm} p+1+k_P+\sigma_P+1 \leq \beta_P.$$ \item $c(v)z^d P_{(k_T,k_P+1),p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} Y^{\sigma}+c(v)z^d P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} YY^{\sigma}$, which is the sum of terms of \eqref{eq:com1}, since $p+k_P+\sigma_P+d+1 \leq (\beta_0)_P+1 = \beta_P$.
\item $c(v)z^dP_{k,p+p_0}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\xi}Z^{\gamma}}(F)_{ \mu \theta} Y^{\sigma}$, with $\max (p_0 ,|\xi| ) \leq 1$, which is given by the first identity of Lemma \ref{calculF}. These terms are of \eqref{eq:com1} since $|k|+|\gamma|+|\xi|+|\sigma| \leq |\beta_0|+2 = |\beta|+1$ and $|\gamma|+|\xi| \leq |\beta|$. \end{itemize} For the remaining terms, we suppose for simplicity that $c(v)=1$, as we have just seen that $Y \left(c(v) \right)$ is a good coefficient.
\item $ Y \Big( P_{k,p}(\Phi) \mathcal{L}_{ X Z^{\gamma_0}}(F) \left( v , \nabla_v \Gamma^{\sigma} \right) \Big) $, with $|k|+|\gamma_0|+|\sigma| \leq |\beta_0|-1$, $p+k_P+\sigma_P \leq (\beta_0)_P$ and $p \geq 1$. It gives us $$P_{(k_T,k_P+1),p}(\Phi) \mathcal{L}_{X Z^{\gamma_0}}(F) \left( v , \nabla_v \Gamma^{\sigma} \right), $$ which is of \eqref{eq:com2} since, $p+k_P+1+\sigma_P \leq (\beta_0)_P+1=\beta_P$. We also obtain, using the fourth identity of Lemma \ref{calculF},
$$c(v)P_{k,p+p_0}(\Phi)\mathcal{L}_{X Z^{\delta} Z^{\gamma_0}} (F) \left( v , \nabla_v \widehat{Z}^{\xi}\Gamma^{\sigma} \right), \hspace{3mm} \text{with} \hspace{3mm} \max(|\delta|+|\xi|,p_0+ \xi_P) \leq 1.$$
They are all of \eqref{eq:com2} since $|k|+|\gamma_0|+|\delta|+|\sigma|+|\xi| \leq |\beta_0|=|\beta|-1$, $p+p_0+k_P+\sigma_P+\xi_P \leq (\beta_0)_P+1=\beta_P$ and $p+p_0 \geq p \geq 1$.
\item $ Y \Big(P_{k,p}(\Phi) \mathcal{L}_{\partial Z^{\gamma_0}}(F) \left( v , \nabla_v \Gamma^{\sigma} \right) \Big) $, with $|k|+|\gamma_0|+|\sigma| \leq |\beta_0|-1$, $p+|\gamma_0| \leq |\beta_0|-1$ and $p+k_P+\sigma_P \leq (\beta_0)_P$. We obtain \begin{itemize} \item $P_{(k_T,k_P+1),p}(\Phi) \mathcal{L}_{\partial Z^{\gamma_0}}(F) \left( v , \nabla_v \Gamma^{\sigma} \right) $, clearly of \eqref{eq:com4}, \end{itemize} and, using the second identity of Lemma \ref{calculF}, \begin{itemize} \item $ P_{k,p+1}(\Phi)\mathcal{L}_{X \partial Z^{\gamma_0}} (F) \left( v , \nabla_v \Gamma^{\sigma} \right)$, which is of \eqref{eq:com2}, and
$$c(v)P_{k,p+p_0}(\Phi)\mathcal{L}_{Z^{\delta} \partial Z^{\gamma_0}} (F) \left( v , \nabla_v \widehat{Z}^{\xi}\Gamma^{\sigma} \right), \hspace{3mm} \text{with} \hspace{3mm} |\delta|+|\xi| \leq 1, \hspace{3mm} p_0+|\delta| \leq 1 \hspace{3mm} \text{and} \hspace{3mm} p_0+ \xi_P \leq 1.$$
As $p+p_0+|\gamma_0|+|\delta| \leq p+|\gamma_0|+1 \leq |\beta|-1$, $p+p_0+k_P+\sigma_P+\xi_P \leq (\beta_0)_P+1=\beta_P$ and, if $|\delta|=1$, $[Z^{\delta}, \partial ] \in \mathbb{T} \cup \{ 0 \}$, we can conclude that these terms are of \eqref{eq:com4}. \end{itemize} \end{itemize} \end{proof}
\begin{Rq}\label{rqjustifnorm}
To deal with the weight $\tau_+$ in the terms of \eqref{eq:com2} and \eqref{eq:com4} (hidden by the $v$ derivatives), we will take advantage of the extra decay given by the $X$ vector fields or the translations $\partial_{\mu}$ through Proposition \ref{ExtradecayLie}. To deal with the terms of \eqref{eq:com1}, when $d=1$, we will need to control the $L^1$ norm of $\sum_{w \in \mathbf{k}_1} |w|^{q+1}P_{k,p}(\Phi)Y^{\sigma}f$, with $k_P+\sigma_P < \beta_P$, in order to control $\||z|^q Y^{\beta}f\|_{L^1_{x,v}}$. \end{Rq}
As we will need to bound norms such as $\| P_{\xi}(\Phi) Y^{\beta} f \|_{L^1_{x,v}}$, we will apply Proposition \ref{ComuVlasov} to $\Phi$ and we then need to compute the derivatives of $T_F(\Phi)$. This is the purpose of the next proposition. \begin{Pro}\label{sourcePhi}
Let $Y^{\beta} \in \mathbb{Y}^{|\beta|}$ and $Z^{\gamma_1} \in \mathbb{K}^{|\gamma_1|}$ (we will apply the result for $|\gamma_1| \leq 1$). Then, $$ Y^{\beta} \left( t \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma_1}}(F)_{\mu \zeta} \right)$$ can be written as a linear combination, with $c(v)$ coefficients, of the following terms, with $0 \leq \theta, \nu \leq 3$ and $p \leq |\beta|$.
\begin{equation}\label{equa1}
\hspace{-0.5cm} x^{\theta} \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}, \hspace{18mm} \text{where} \hspace{6mm} |\gamma| \leq |\beta| \hspace{17mm} \text{and} \hspace{6mm} \gamma_T=\beta_T. \tag{family $\beta-1$}
\end{equation} \begin{equation}\label{equa1bis}
\hspace{-0.3cm} P_{k,p}(\Phi)\frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}, \hspace{10mm} \text{where} \hspace{5mm} |k|+|\gamma| \leq |\beta|-1 \hspace{6mm} \text{and} \hspace{6mm} k_P \leq \beta_P. \tag{family $\beta-2$}
\end{equation}
\begin{equation}\label{equa2}
x^{\theta}P_{k,p}(\Phi) \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}, \hspace{6mm} \text{where} \hspace{5mm} |k|+|\gamma| \leq |\beta|-1 \hspace{6mm} \text{and} \hspace{6mm} k_P < \beta_P. \tag{family $\beta-3$}
\end{equation} \end{Pro}
\begin{proof}
Let us prove this by induction on $|\beta|$. The result holds for $|\beta|=0$. We then consider $Y^{\beta}=YY^{\beta_0} \in \mathbb{Y}^{|\beta|}$ and we suppose that the Proposition holds for $\beta_0$. Suppose first that $Y= \partial$, so that $\beta_P=(\beta_0)_P$. Using the induction hypothesis, $\partial Y^{\beta_0} \left( t \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma_1}}(F)_{\mu \nu} \right)$ can be written as a linear combination, with good coefficients $c(v)$, of the following terms. \begin{itemize}
\item $ \partial (x^{\theta}) \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}$, with $|\gamma| \leq |\beta_0| < |\beta|$, which is part of \eqref{equa1bis}.
\item $x^{\theta} \frac{v^{\mu}}{v^0} \mathcal{L}_{\partial Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}$, with $1+|\gamma| \leq 1+|\beta_0|=|\beta|$. Denoting $\partial Z^{\gamma}$ by $Z^{\xi}$, we have $\xi_T=1+\gamma_T=1+(\beta_0)_T=\beta_T$ and this term is part of \eqref{equa1}.
\item $ P_{(k_T+1,k_P),p}(\Phi)\frac{v^{\mu}}{v^0} \mathcal{L}_{ Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}$, with $|k|+1+|\gamma| \leq |\beta_0|-1+1=|\beta|-1$ and $k_P \leq (\beta_0)_P = \beta_P$, which is part of \eqref{equa1bis}.
\item $ P_{k,p}(\Phi)\frac{v^{\mu}}{v^0} \mathcal{L}_{\partial Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}$, with $|k|+|\gamma|+1 \leq |\beta_0|-1+1=|\beta|-1$ and $k_P \leq (\beta_0)_P = \beta_P$, which is part of \eqref{equa1bis}.
\item $\partial(x^{\theta}) P_{k,p}(\Phi) \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}$, with $|k|+|\gamma| \leq |\beta_0|-1 \leq |\beta|-2$ and $k_P < (\beta_0)_P=\beta_P$, which is then equal to $0$ or part of \eqref{equa1bis}.
\item $x^{\theta} P_{(k_T+1,k_P),p}(\Phi) \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}$, with $|k|+1+|\gamma| \leq |\beta_0|-1+1=|\beta|-1$ and $k_P < (\beta_0)_P=\beta_P$, which is then part of \eqref{equa2}.
\item $x^{\theta}P_{k,p}(\Phi) \frac{v^{\mu}}{v^0} \mathcal{L}_{\partial X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}$, with $|k|+|\gamma|+1 \leq |\beta|-1$ and $k_P < \beta_P$, which is part of \eqref{equa2}, as $[\partial, X ]=0$. \end{itemize} Suppose now that $Y=\widehat{Z}+\Phi X \in \mathbb{Y}_0$. We then have $\beta_P=(\beta_0)_P+1$ and $(\beta_0)_T=\beta_T$. In the following, we will skip the case where $Y$ hits $c(v)(v^0)^{-1}$ and we suppose for simplicity that $c(v)=1$. Note however that this case is straightforward since $$ Y\left( \frac{c(v)}{v^0} \right)= \widehat{Z} \left( \frac{c(v)}{v^0} \right)= \frac{\widehat{Z}(c(v))}{v^0}+c(v) \widehat{Z} \left( \frac{1}{v^0} \right) = \frac{c_1(v)}{v^0} .$$ Using again the induction hypothesis, $Y Y^{\beta_0} \left( t \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma_1}}(F)_{\mu \zeta} \right)$ can be written as a linear combination of the following terms.
\begin{itemize}
\item $ Y (x^{\theta}) \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu}$, with $|\gamma| \leq |\beta_0| < |\beta|$ and $\gamma_T=(\beta_0)_T=\beta_T$. As, schematically (with $\delta=0$ or $\delta=1$), \begin{equation}\label{eq:53} Y(x^{\theta})=\widehat{Z}(x^{\theta})+\Phi X(x^{\theta})=\delta x^{\kappa}+c(v)\Phi, \end{equation} this leads to terms of \eqref{equa1} and \eqref{equa1bis}.
\item $x^{\theta} \frac{1}{v^0} Y \left( v^{\mu} \mathcal{L}_{ Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu} \right)$, with $|\gamma| \leq |\beta_0|$ and $\gamma_T=(\beta_0)_T=\beta_T$. Using the first identity of Lemma \ref{calculF}, we have that $Y \left( v^{\mu} \mathcal{L}_{ Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \theta} \right)$ is a linear combination of terms such as
$$v^{\mu}\mathcal{L}_{ Z^{\gamma_0} Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \lambda} , \hspace{3mm} \text{with} \hspace{3mm} |\gamma_0| \leq 1, \hspace{3mm} (\gamma_0)_T=0, \hspace{3mm} \text{and} \hspace{3mm} 0 \leq \lambda \leq 3,$$ leading to terms of \eqref{equa1}, and $$\Phi v^{\mu}\mathcal{L}_{ X Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu},$$
giving terms of \eqref{equa2}, as $|\gamma| \leq |\beta_0|=|\beta|-1$.
\item $\frac{1}{v^0} Y \left( P_{k,p}(\Phi) \right) v^{\mu} \mathcal{L}_{ Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu} $, with $|k|+|\gamma| \leq |\beta_0|-1$ and $k_P \leq \beta_P$. We obtain terms of \eqref{equa1bis}, since
$$Y \left( P_{k,p}(\Phi) \right)=P_{(k_T,k_P+1),p}(\Phi), \hspace{3mm} |k|+1+|\gamma| \leq |\beta|-1 \hspace{3mm} \text{and} \hspace{3mm} k_P+1 \leq (\beta_0)_P+1 = \beta_P .$$
\item $\frac{1}{v^0} P_{k,p}(\Phi) Y \left( v^{\mu} \mathcal{L}_{ Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu} \right)$, with $|k|+|\gamma| \leq |\beta_0|-1$ and $k_P \leq (\beta_0)_P$. Using the first identity of Lemma \ref{calculF}, we have that $Y \left( v^{\mu} \mathcal{L}_{ Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \nu} \right)$ is a linear combination of terms of the form
$$c(v) \Phi^r v^{\mu}\mathcal{L}_{ Z^{\gamma_0} Z^{\gamma} Z^{\gamma_1}}(F)_{\mu \lambda} , \hspace{6mm} \text{with} \hspace{6mm} \max(r,|\gamma_0|) \leq 1 \hspace{6mm} \text{and} \hspace{6mm} 0 \leq \lambda \leq 3.$$
We then obtain terms of \eqref{equa1bis}, as $|k|+|\gamma|+|\gamma_0| \leq |\beta_0|=|\beta|-1$ and $k_P \leq \beta_P$.
\item $Y\left(x^{\theta} \right)P_{k,p}(\Phi) \frac{v^{\mu}}{v^0} \mathcal{L}_{XZ^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}$, with $|k|+|\gamma| \leq |\beta_0|-1$ and $k_P < (\beta_0)_P$, which, using \eqref{eq:53}, gives terms of \eqref{equa1bis} and \eqref{equa2}.
\item $ x^{\theta}P_{(k_T,k_P+1),p}(\Phi) \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}$, with $|k|+1+|\gamma| \leq |\beta_0|-1+1=|\beta|-1$ and $k_P+1 < (\beta_0)_P+1=\beta_P$, which is part of \eqref{equa2}.
\item $x^{\theta}P_{k,p}(\Phi)\frac{1}{v^0} Y \left( v^{\mu} \mathcal{L}_{ X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu} \right)$, with $|k|+|\gamma| \leq |\beta_0|-1$ and $k_P < (\beta_0)_P$. By the third point of Lemma \ref{calculF}, we can write $Y \left( v^{\mu} \mathcal{L}_{X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu} \right)$ as a linear combination of terms such as
$$c(v) \Phi^r v^{\mu} \mathcal{L}_{ X Z^{\gamma_0} Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \lambda}, \hspace{3mm} \text{with} \hspace{3mm} \max(r,|\gamma_0|) \leq 1 \hspace{3mm} \text{and} \hspace{3mm} 0 \leq \lambda \leq 3.$$
It gives us terms of \eqref{equa2}, as $|k|+|\gamma_0|+|\gamma| \leq |\beta_0|=|\beta|-1$ and $k_P < \beta_P$. \end{itemize} \end{proof}
The worst terms are those of \eqref{equa1}. They do not appear in the source term of $T_F \left( P^X_{\zeta}(\Phi) \right)$, which explains why our estimate on $\| P^X_{\zeta}(\Phi) Y^{\beta} f \|_{L^1_{x,v}}$ will be better than the one on $\| P_{\xi}(\Phi) Y^{\beta} f \|_{L^1_{x,v}}$.
\begin{Pro}\label{sourceXPhi}
Let $Y^{\overline{\beta}} \in \mathbb{Y}_X^{|\overline{\beta}|}$, with $\overline{\beta}_X \geq 1$, $Z^{\gamma_1} \in \mathbb{K}^{|\gamma_1|}$ and $\beta$ be a multi-index associated to $\mathbb{Y}$ such that $\beta_P=\overline{\beta}_P$ and $\beta_T=\overline{\beta}_T+\overline{\beta}_X$. Then, $ Y^{\overline{\beta}} \left( t \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma_1}}(F)_{\mu \zeta} \right)$ can be written as a linear combination of terms of \eqref{equa1bis}, \eqref{equa2} and, \begin{equation}\label{equa2bis}
\text{if} \hspace{6mm} \beta_P=0, \hspace{6mm} x^{\theta} \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma} Z^{\gamma_1} }(F)_{\mu \nu}, \hspace{6mm} \text{where} \hspace{5mm} |\gamma| \leq |\beta|-1. \tag{family $\beta-3-bis$} \end{equation} \end{Pro} \begin{proof} The proof is similar to the previous one. The difference comes from the fact that an $X$ vector field necessarily has to hit a term of the first family, giving either a term of the second family or of the third-bis family, where we do not have the condition $k_P < \beta_P$ since $k_P$ and $\beta_P$ could both be equal to $0$. \end{proof}
\subsection{The null structure of $G(v,\nabla_v g)$} In this subsection, we consider $G$, a $2$-form defined on $[0,T[ \times \mathbb{R}^3$, and $g$, a function defined on $[0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v$, both sufficiently regular. We investigate in this subsection the null structure of $G(v,\nabla_v g)$ in view of studying the error terms obtained in Proposition \ref{ComuVlasov}. Let us denote by $(\alpha, \underline{\alpha}, \rho, \sigma)$ the null decomposition of $G$. Then, expressing $G \left( v, \nabla_v g \right)$ in null coordinates, we obtain a linear combination of the following terms. \begin{itemize} \item The terms with the radial component of $\nabla_v g$ (remark that $\left( \nabla_v g \right)^L =- \left( \nabla_v g \right)^{\underline{L}}=\left( \nabla_v g \right)^r$), \begin{equation}\label{eq:radi} v^L \rho \left( \nabla_v g \right)^{\underline{L}}, \hspace{8mm} v^{\underline{L}} \rho \left( \nabla_v g \right)^{L}, \hspace{8mm} v^A \alpha_A \left( \nabla_v g \right)^{L} \hspace{8mm} \text{and} \hspace{8mm} v^A \underline{\alpha}_A \left( \nabla_v g \right)^{\underline{L}}. \end{equation} \item The terms with an angular component of $\nabla g$, \begin{equation}\label{eq:angu} \varepsilon_{BA} v^B \sigma \left( \nabla_v g \right)^{A}, \hspace{12mm} v^{L} \alpha_A \left( \nabla_v g \right)^{A} \hspace{8mm} \text{and} \hspace{8mm} v^{\underline{L}} \underline{\alpha}_A \left( \nabla_v g \right)^{A}. \end{equation} \end{itemize} We are then led to bound the null components of $\nabla_v g$. A naive estimate, using $v^0\partial_{v^k}= Y_k-\Phi X-t\partial_k-x^k \partial_t$, gives \begin{equation}\label{naive2}
\left| \left( \nabla_v g \right)^{L} \right|, \hspace{1mm} \left| \left( \nabla_v g \right)^{\underline{L}} \right|, \hspace{1mm} \left| \left( \nabla_v g \right)^{A} \right| \leq \left| \nabla_v g \right| \lesssim \frac{\tau_++|\Phi|}{v^0} |\nabla_{t,x} g |+\frac{1}{v^0}\sum_{Y \in \mathbb{Y}} |Y g|. \end{equation} With these inequalities, using our schematic notations $c \prec d$ if $d$ is expected to behave better than $c$, we have $v^L \rho \left( \nabla_v g \right)^{\underline{L}} \prec \varepsilon_{BA} v^B \sigma \left( \nabla_v g \right)^{A}$, since $v^L \prec v^B$ and $\rho \sim \sigma$. The purpose of the following result is to improve \eqref{naive2} for the radial component in order to have a better control on $v^L \rho \left( \nabla_v g \right)^{\underline{L}}$. \begin{Lem}\label{vradial} Let $g$ be a sufficiently regular function, $z \in \mathbf{k}_1$ and $j \in \mathbb{N}^*$. We have
$$ \left| \left( \nabla_v g \right)^{r} \right| \lesssim \frac{\tau_-+|\Phi|}{v^0} |\nabla_{t,x} g |+\frac{1}{v^0}\sum_{Y \in \mathbb{Y}} |Y g| \hspace{10mm} \text{and} \hspace{10mm} \left| \left( \nabla_v z^j \right)^r \right| \lesssim \frac{\tau_-}{v^0}|z|^{j-1}+\frac{1}{v^0} \sum_{w \in \mathbf{k}_1} |w |^j.$$ \end{Lem} \begin{proof} We have $$( \nabla_v g )^r=\frac{x^i}{r}\partial_{v^i} g \hspace{8mm} \text{and} \hspace{8mm} \frac{x^i}{rv^0}(t\partial_i+x_i\partial_t)=\frac{1}{v^0}(t\partial_r+r\partial_t)=\frac{1}{v^0}(S+(r-t)\underline{L}),$$ so that, using $\partial_{v^i}=\frac{1}{v^0}(\widehat{\Omega}_{0i}-t\partial_i-x_i\partial_t)$, \begin{equation}\label{equ:proof} ( \nabla_v g )^r = \frac{x^i }{rv^0}\widehat{\Omega}_{0i} \left(g \right)-\frac{1}{v^0}S \left( g \right) +\frac{t-r}{v^0}\underline{L} \left( g \right). \end{equation}
To prove the first inequality, it only remains to write schematically that $\widehat{\Omega}_{0i}=Y_{0i}-\Phi X$, $S=Y_S-\Phi X$ and to use the triangle inequality. To complete the proof of the second inequality, apply \eqref{equ:proof} to $g=z^j$, recall from Lemma \ref{weights} that $ \left| \widehat{Z} \left( z^j \right) \right| \lesssim \sum_{w \in \mathbf{k}_1} |w|^j$ and use that $\left| \underline{L} \left( z^j \right) \right| \lesssim |z|^{j-1}$. \end{proof} For the terms containing an angular component, note that they are also composed by either $\alpha$, the better null component of the electromagnetic field, $v^A$ or $v^{\underline{L}}$. The following lemma is fundamental for us to estimate the energy norms of the Vlasov field. \begin{Lem}\label{nullG}
We can bound $\left| G(v, \nabla_v g ) \right|$ either by
$$ \left( |\rho|+|\underline{\alpha}| \right) \left( \sum_{ Y \in \mathbb{Y}} |Y(g)| \hspace{-0.2mm}+ \hspace{-0.2mm} \left( \tau_-+|\Phi|+\sum_{w \in \mathbf{k}_1} |w| \right) |\nabla_{t,x} g | \hspace{-0.5mm} \right)+ \left(|\alpha|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\sigma| \right) \hspace{-1mm} \left( \sum_{ Y \in \mathbb{Y}} |Y(g)| \hspace{-0.2mm} + \hspace{-0.2mm} (\tau_++|\Phi|) |\nabla_{t,x} g | \right)$$ or by
$$ \left( |\alpha|+|\rho|+\sqrt{\frac{v^{\underline{L}}}{v^0} }|\sigma|+\sqrt{\frac{v^{\underline{L}}}{v^0} }|\underline{\alpha}| \right) \left( \sum_{ Y \in \mathbb{Y}} |Y(g)|+ \left( \tau_++|\Phi| \right) |\nabla_{t,x} g | \right)$$ \end{Lem} \begin{proof}
The proof consists in bounding the terms given in \eqref{eq:radi} and \eqref{eq:angu}. By Lemma \ref{vradial} and $|v^A| \lesssim \sqrt{v^0v^{\underline{L}}}$, one has
$$ \left| v^L \rho \left( \nabla_v g \right)^{\underline{L}}-v^{\underline{L}} \rho \left( \nabla_v g \right)^L+v^A \underline{\alpha}_A \left( \nabla_v g \right)^{\underline{L}} \right| \lesssim \left( |\rho|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\underline{\alpha}| \right)\left( \sum_{ Y \in \mathbb{Y}} |Y(g)|+ \left( \tau_-+|\Phi| \right) |\nabla_{t,x} g | \right) .$$
As $v^0 \partial_{v^i} = Y_i-\Phi X-x^i \partial_t-t \partial_i$ and $|v^B | \lesssim \sqrt{v^0 v^{\underline{L}}}$, we obtain
$$ \left| v^L \alpha_A \left( \nabla_v g \right)^A+v^A \alpha_A \left( \nabla_v g \right)^{L}+v^B \sigma_{BA} \left( \nabla_v g \right)^A \right| \lesssim \left(|\alpha|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\sigma| \right) \left( \sum_{ Y \in \mathbb{Y}} |Y(g)|+ (\tau_++|\Phi|) |\nabla_{t,x} g | \right).$$ Finally, using $v^0 \partial_{v^i} = Y_i-\Phi X-x^i \partial_t-t \partial_i$ and Lemma \ref{weights1} (for the first inequality), we get \begin{eqnarray}
\nonumber \left| v^{\underline{L}} \underline{\alpha}_A \left( \nabla_v g \right)^A \right| & \lesssim & |\underline{\alpha}| \left( \sum_{ Y \in \mathbb{Y}} |Y(g)|+ \left( \tau_-+|\Phi|+\sum_{w \in \mathbf{k}_1} |w| \right) |\nabla_{t,x} g | \right) \\ \nonumber
\left| v^{\underline{L}} \underline{\alpha}_A \left( \nabla_v g \right)^A \right| & \lesssim & \sqrt{\frac{v^{\underline{L}}}{v^0}} |\underline{\alpha}| \left( \sum_{ Y \in \mathbb{Y}} |Y(g)|+ \left( \tau_++|\Phi| \right) |\nabla_{t,x} g | \right). \end{eqnarray} \end{proof} \begin{Rq}
The second inequality will be used in extremal cases of the hierarchies considered, where we will not be able to take advantage of the weights $w \in \mathbf{k}_1$ in front of $|\nabla_{t,x} g|$ and where the terms $\sum_{Y \in \mathbb{Y}_0} |Y g |$ will force us to estimate a weight $z \in \mathbf{k}_1$ by $\tau_+$ (see Proposition \ref{ComuPkp} below). \end{Rq} \subsection{Source term of $T_F(z^jP_{\xi}(\Phi) Y^{\beta}f)$}
In view of Remark \ref{rqjustifnorm}, we will consider hierarchised energy norms controlling, for $Q$ a fixed integer, $\| z^{Q-\xi_P-\beta_P} P_{\xi}(\Phi) Y^{\beta} f \|_{L^1_{x,v}}$. In order to estimate them, we compute in this subsection the source term of $T_F(z^jP_{\xi}(\Phi) Y^{\beta}f)$. We start with the following technical result. \begin{Lem}\label{GammatoYLem}
Let $h : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ be a sufficiently regular function and $\Gamma^{\sigma} \in \mathbb{G}^{|\sigma|}$. Then, \begin{eqnarray}
\nonumber \Gamma^{\sigma} h & = & \sum_{\begin{subarray}{} \hspace{1mm} |g|+|\overline{\sigma}| \leq |\sigma| \\ \hspace{2mm} |g| \leq |\sigma|-1 \\ r+g_P+\overline{\sigma}_P \leq \sigma_P \end{subarray} } c^{g,r}_{\overline{\sigma}}(v) P_{g,r}(\Phi) Y^{\overline{\sigma}} h ,\\ \nonumber
\left| \partial_{v^i} \left( \Gamma^{\sigma} h \right) \right| & \lesssim & \sum_{\delta=0}^1 \sum_{\begin{subarray}{} \hspace{3.5mm} |g|+|\overline{\sigma}| \leq |\sigma|+1 \\ \hspace{9mm} |g| \leq |\sigma| \\ r+g_P+\overline{\sigma}_P+\delta \leq \sigma_P+1 \end{subarray} } \tau_+^\delta \left| P_{g,r}(\Phi) Y^{\overline{\sigma}} h \right|. \end{eqnarray} \begin{proof}
The first formula can be proved by induction on $|\sigma|$, using that $\widehat{Z}=Y-\Phi X$ for each $\widehat{Z}$ composing $\Gamma^{\sigma}$. The inequality then follows using $v^0 \partial_{v^i}=Y_i-\Phi X-t \partial_i-x^i \partial_t$. \end{proof} \end{Lem} \begin{Pro}\label{ComuPkp}
Let $N \in \mathbb{N}$ and $N_0 \geq N$. Consider $\zeta^0$ and $\beta$ multi-indices such that $|\zeta^0|+|\beta| \leq N$ and $|\zeta^0| \leq N-1$. Let also $z \in \mathbf{k}_1$ and $j \leq N_0-\zeta^0_P-\beta_P$. Then, $T_F(z^jP_{\zeta^0}(\Phi) Y^{\beta} f)$ can be bounded by a linear combination of the following terms, where $|\gamma|+|\zeta| \leq |\zeta^0|+|\beta|$. \begin{itemize} \item \begin{equation}\label{eq:cat0}
\left| F \left(v, \nabla_v \left( z^j \right) \right) P_{\zeta^0}(\Phi) Y^{\beta} f \right|. \tag{category $0$} \end{equation} \item \begin{equation}\label{eq:cat1}
\left( \left| \nabla_{Z^{\gamma}} F \right|+\frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| +\frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \right)\left| \Phi \right|^n \left|w^i P_{\zeta}(\Phi) Y^{\kappa} f \right|, \tag{category $1$} \end{equation}
where \hspace{2mm} $n \leq 2N$, \hspace{2mm} $w \in \mathbf{k}_1$, \hspace{2mm} $|\zeta|+|\gamma|+|\kappa| \leq |\zeta^0|+|\beta|+1$, \hspace{2mm} $i \leq N_0 -\zeta_P-\kappa_P$, \hspace{2mm} $\max( |\gamma|, |\zeta|+|\kappa|) \leq |\zeta^0|+|\beta|$ \hspace{2mm} and \hspace{2mm} $|\zeta| \leq N-1$. \item \begin{equation}\label{eq:cat3}
\hspace{-10mm} \frac{\tau_+}{\tau_-} |\rho \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) | \left|z^{j-1} P_{\zeta}(\Phi) Y^{\sigma} f \right| \hspace{5mm} \text{and} \hspace{5mm} \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}}\left| \underline{\alpha} \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right| \left| z^i P_{\zeta}(\Phi) Y^{\kappa} f \right|, \tag{category $2$} \end{equation}
where \hspace{2mm} $|\zeta|+|\gamma|+|\kappa| \leq |\zeta^0|+|\beta|+1$, \hspace{2mm} $j-1$, $i=N_0-\zeta_P-\kappa_P$, \hspace{2mm} $\max( |\gamma|, |\zeta|+|\kappa|) \leq |\zeta^0|+|\beta|$ \hspace{2mm} and \hspace{2mm} $|\zeta| \leq N-1$. Moreover, we have $i \leq j$. \item \begin{equation}\label{eq:cat4}
\tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \theta} z^j P_{\zeta}(\Phi) Y^{\beta} f \right|, \tag{category $3$} \end{equation}
with \hspace{2mm}$ |\zeta| < |\zeta^0|$, \hspace{2mm} $\zeta_T+\gamma_T = \zeta^0_T$, \hspace{2mm} $\zeta_P \leq \zeta^0_P$, and \hspace{2mm} $|\zeta|+|\gamma| \leq |\zeta^0|+1$. This implies $j \leq N_0-\zeta_P-\beta_P$. \end{itemize}
Note that the terms of \eqref{eq:cat3} only appear when $j=N_0-\zeta^0_P-\beta_P$ and the ones of \eqref{eq:cat4} when $|\zeta^0| \geq 1$. \end{Pro} \begin{proof} The first thing to remark is that $$T_F(z^jP_{\zeta^0}(\Phi) Y^{\beta} f)=F \left(v, \nabla_v \left( z^j \right) \right) P_{\zeta^0}(\Phi) Y^{\beta} f +z^jT_F(P_{\zeta^0}(\Phi))Y^{\beta} f+z^jP_{\zeta^0}(\Phi) T_F(Y^{\beta} f ).$$
We immediately obtain the terms of \eqref{eq:cat0}. Let us then consider $z^jP_{\zeta^0}(\Phi) T_F(Y^{\beta} f )$. Using Proposition \ref{ComuVlasov}, it can be written as a linear combination of terms of \eqref{eq:com1}, \eqref{eq:com2} or \eqref{eq:com4} (applied to $f$), multiplied by $z^jP_{\zeta^0}(\Phi)$. Consequently, $|z^jP_{\zeta^0}(\Phi) T_F(Y^{\beta} f )|$ can be bounded by a linear combination of \begin{itemize}
\item $|z|^j\left| w^d Z^{\gamma}(F_{\mu \nu}) \right| \left| P_{k,p}(\Phi)P_{\zeta^0}(\Phi) Y^{\kappa} f \right|$, with $d \in \{0,1 \}$, $w \in \mathbf{k}_1$, $|\kappa| \geq 1$, $\max( |\gamma|, |k|+|\gamma|, |k|+|\kappa|,|k|+1 ) \leq |\beta|$, $|k|+|\gamma|+|\kappa| \leq |\beta|+1$ and $p+k_P+\kappa_P+d \leq \beta_P$. Now, note that
$$ \exists \hspace{0.5mm} n, \hspace{0.5mm} \zeta \hspace{3mm} \text{such that} \hspace{3mm} P_{k,p}(\Phi) P_{\zeta^0}(\Phi) = \Phi^n P_{\zeta}(\Phi), \hspace{3mm} n \leq |\beta|, \hspace{3mm} \zeta_T=k_T+\zeta^0_T \hspace{3mm} \text{and} \hspace{3mm} \zeta_P=k_P+\zeta^0_P.$$
Consequently, $|\zeta|=|k|+|\zeta^0| \leq |\zeta^0|+|\beta|-1 \leq N-1$, \hspace{2mm} $|\zeta|+|\gamma| =|k|+|\zeta^0|+|\gamma| \leq |\zeta^0|+|\beta|$, $$|\zeta|+|\kappa|=|k|+|\zeta^0|+|\kappa| \leq |\zeta^0| + |\beta| \hspace{3mm} \text{and} \hspace{3mm} |\zeta|+|\gamma|+|\kappa| \leq |k|+|\zeta^0|+|\gamma|+|\kappa| \leq |\zeta^0|+|\beta|+1.$$ Since $$k_P+\kappa_P+d \leq \beta_P \hspace{3mm} \text{and} \hspace{3mm} \zeta_P=k_P+ \zeta^0 _P, \hspace{3mm} \text{we have} \hspace{3mm} j+d \leq N_0-\zeta_P-\kappa_P.$$ Finally, as $|z^j w^d| \leq |z|^{j+d}+|w|^{j+d}$, we obtain terms of \eqref{eq:cat1}.
\item $|z|^j\left| P_{k,p}(\Phi) \mathcal{L}_{ X Z^{\gamma_0}}(F)\left( v, \nabla_v \left( \Gamma^{\sigma} f \right) \right) P_{\zeta^0}(\Phi) \right|$, with $|k|+|\gamma_0|+|\sigma| \leq |\beta|-1$, $p+k_P+\sigma_P \leq \beta_P$ and $p \geq 1$. Then, apply Lemma \ref{GammatoYLem} in order to get
$$\left| \nabla_v \left( \Gamma^{\sigma} f \right) \right| \lesssim \sum_{\delta=0}^1 \sum_{\begin{subarray}{} \hspace{3mm} |g|+|\overline{\sigma}| \leq |\sigma|+1 \\ \hspace{5mm} |g| \leq |\sigma| \\ r+g_P+\overline{\sigma}_P+\delta \leq \sigma_P+1 \end{subarray} } \tau_+^\delta \left| P_{g,r}(\Phi) Y^{\overline{\sigma}} f \right|.$$
Fix parameters $(\delta, g , r, \overline{\sigma})$ as in the right hand side of the previous inequality and consider first the case $\delta=0$. Then, $|z|^j\left| \mathcal{L}_{ X Z^{\gamma_0}}(F) \right| \left| P_{k,p}(\Phi) P_{g,r}(\Phi)P_{\zeta^0}(\Phi) Y^{\overline{\sigma}} f \right|$ can be bounded by terms such as
$$ |z|^j\left| Z^{\gamma}(F_{\mu \nu}) \right| \left|\Phi^n P_{\zeta}(\Phi) Y^{\overline{\sigma}} f \right| \hspace{-0.3mm} , \hspace{1.9mm} \text{with} \hspace{1.9mm} |\gamma| \leq |\gamma_0|+1, \hspace{1.9mm} n \leq p+r, \hspace{2mm} \zeta_T=k_T+g_T+ \zeta^0_T , \hspace{1.9mm} \zeta_P=k_P+g_P+\zeta^0_P .$$
We then have $n \leq 2|\beta|$, $|\zeta|+|\gamma|+|\overline{\sigma}| \leq |k|+|g|+|\zeta^0|+|\gamma_0|+1+|\overline{\sigma}| \leq |\zeta^0|+|\beta|+1$, $|\zeta|+|\overline{\sigma}| \leq |\zeta^0|+ |\beta|$ and $|\zeta| \leq |\zeta^0|+|\beta|-1$. As $$\zeta_P+\overline{\sigma}_P =k_P+g_P+\zeta^0_P+\overline{\sigma}_P \leq k_P+\sigma_P+1+\zeta^0_P \leq \zeta^0_P+\beta_P,$$ we have $j \leq N_0-\zeta_P-\overline{\sigma}_P$. If $\delta=1$, use the inequality \eqref{eq:Xdecay} of Proposition \ref{ExtradecayLie} to compensate the weight $\tau_+$. The only difference is that it brings a weight $w \in \mathbf{k}_1$. To handle it, use $|z^j w | \leq |z|^{j+1}+|w|^{j+1}$ and $$\zeta_P+\overline{\sigma}_P =k_P+g_P+\zeta^0_P+\overline{\sigma}_P \leq k_P+\sigma_P+1-\delta+\zeta^0_P \leq \zeta^0_P+\beta_P-1,$$ so that $j+1 \leq N_0-\zeta_P-\beta_P$. In both cases, we then have terms of \eqref{eq:cat1}.
\item $|z|^j\left| P_{k,p}(\Phi) \mathcal{L}_{ \partial Z^{\gamma_0}}(F)\left( v, \nabla_v \left( \Gamma^{\sigma^0} f \right) \right) P_{\zeta_0}(\Phi) \right|$, with $|k|+|\gamma_0|+|\sigma^0| \leq |\beta|-1$, $p+|\gamma_0| \leq |\beta|-1$ and $p+k_P+\sigma^0_P \leq \beta_P$, which arises from a term of \eqref{eq:com4}. Applying Lemma \ref{GammatoYLem}, we can schematically suppose that
$$ \Gamma^{\sigma^0} = c(v) \Phi^r P_{\chi}(\Phi) Y^{\kappa} \hspace{3mm} \text{with} \hspace{3mm} |\chi|+|\kappa| \leq |\sigma^0|, \hspace{3mm} |\chi| \leq |\sigma^0|-1 \hspace{3mm} \text{and} \hspace{3mm} r+r_{\chi} +\chi_P+\kappa_P \leq \sigma^0_P,$$ where $r_{\chi}$ is the number of $\Phi$ coefficients in $P_{\chi}(\Phi)$. As $Y \left( c(v) \right)$ is a good coefficient, $c(v)$ does not play any role in what follows and we then suppose for simplicity that $c(v)=1$. We suppose moreover, in order to not have a weight in excess, that \begin{equation}\label{condihyp} j+k_P+\chi_P+\kappa_P < N_0-\zeta^0_P \end{equation} and we will treat the remaining cases below. Using the first inequality of Lemma \ref{nullG} and denoting by $(\alpha, \underline{\alpha}, \rho, \sigma)$ the null decomposition of $\mathcal{L}_{\partial Z^{\gamma_0}}(F)$, we can bound the quantity considered here by the sum of the three following terms \begin{equation}\label{eq:unus}
|z|^j\left| P_{k,p}(\Phi) P_{\zeta_0}(\Phi) \right| \left( |\alpha|+|\rho|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\sigma|+|\underline{\alpha}| \right) \sum_{ Y \in \mathbb{Y}_0} \left| Y \left( \Phi^r P_{\chi}(\Phi) Y^{\kappa} f \right) \right|, \end{equation} \begin{equation}\label{eq:duo}
|z|^j\left| P_{k,p}(\Phi) P_{\zeta_0}(\Phi) \right| \left( |\rho|+ |\underline{\alpha}| \right) \left( \tau_- +|\Phi|+ \hspace{-1mm} \sum_{w \in \mathbf{k}_1} |w| \right) \left| \nabla_{t,x} \left( \Phi^r P_{\chi}(\Phi) Y^{\kappa} f \right) \right|, \end{equation} \begin{equation}\label{eq:tres}
|z|^j \left| P_{k,p}(\Phi) P_{\zeta_0}(\Phi) \right| \left(\tau_++|\Phi| \right)\left(|\alpha|+\sqrt{\frac{v^{\underline{L}}}{v^0}} |\sigma| \right) \left| \nabla_{t,x} \left( \Phi^r P_{\chi}(\Phi) Y^{\kappa} f \right) \right|. \end{equation} Let us start by \eqref{eq:unus}. We have schematically, for $Y \in \mathbb{Y}_0$, $Y^{\kappa^1}=Y^{\kappa}$ and $Y^{\kappa^2}=Y Y^{\kappa}$,
$$P_{k,p}(\Phi) P_{\zeta^0}(\Phi) Y \left( \Phi^r P_{\chi}(\Phi) Y^{\kappa} f \right) = \Phi^{n_1}P_{\zeta^1}(\Phi) Y^{\kappa^1} f+\Phi^{n_2}P_{\zeta^2}(\Phi) Y^{\kappa^2} f,$$ $$\text{with} \hspace{3mm} |n_i| \leq p+r, \hspace{3mm} |\zeta^i|=|k|+|\zeta^0|+|\chi|+\delta_1^{i} \hspace{3mm} \text{and} \hspace{3mm} \zeta^i_P=k_P+\zeta^0_P+\chi_P+\delta_{1}^{i}.$$ We have, according to \eqref{condihyp}, $$j+\zeta^i_P+\kappa^i_P = \zeta^0_P+j +k_P+\chi_P+\kappa_P+1 \leq N_0.$$ Consequently, as \begin{equation}\label{bound45}
|\alpha|+|\rho|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\sigma|+ |\underline{\alpha}| \lesssim \left| \mathcal{L}_{ \partial Z^{\gamma}}(F) \right| \lesssim \sum_{|\gamma| \leq |\gamma_0|+1} \left|\nabla_{ Z^{\gamma}} F \right| \hspace{3mm} \text{and} \hspace{3mm} |\zeta^i|+|\gamma|+|\kappa^i| \leq |\beta|+|\zeta^0|+1,
\end{equation} we obtain terms of \eqref{eq:cat1} (the other conditions are easy to check).
Let us focus now on \eqref{eq:duo} and \eqref{eq:tres}. Defining $Y^{\kappa^3}=Y^{\kappa}$ and $Y^{\kappa^4}= \partial Y^{\kappa}$, we have schematically
$$P_{k,p}(\Phi) P_{\zeta^0}(\Phi) \partial \left( \Phi^r P_{\chi}(\Phi) Y^{\kappa} f \right)= \Phi^{n_3}P_{\zeta^3}(\Phi) Y^{\kappa^3} f+\Phi^{n_4}P_{\zeta^4}(\Phi) Y^{\kappa^4} f,$$ $$\text{with} \hspace{3mm} |n_i| \leq p+r \leq 2|\beta|-2, \hspace{3mm} |\zeta^i|=|k|+|\zeta^0|+|\chi|+\delta_{i}^{3} \hspace{3mm} \text{and} \hspace{3mm} \zeta^i_P=k_P+\zeta^0_P+\chi_P.$$ This time, one obtains $j +1 \leq N_0-\zeta^i_P-\kappa^i_P $. As, by inequality \eqref{eq:zeta2} of Proposition \ref{ExtradecayLie},
$$\left( |\rho|+ |\underline{\alpha}| \right) \lesssim \frac{1}{\tau_-}\sum_{|\gamma| \leq |\gamma_0|+1} \left| \nabla_{Z^{\gamma}} F \right|, \hspace{5mm} |\alpha| \lesssim \sum_{|\gamma| \leq |\gamma_0|+1}\frac{1}{\tau_-} |\alpha (\mathcal{L}_{Z^{\gamma}}(F))|+ \frac{1}{\tau_+}\left| \nabla_{Z^{\gamma}} F \right| ,$$
$$ |\sigma| \lesssim \sum_{|\gamma| \leq |\gamma_0|+1}\frac{1}{\tau_-} |\sigma (\mathcal{L}_{Z^{\gamma}}(F))|+ \frac{1}{\tau_+}\left| \nabla_{Z^{\gamma}} F \right| \hspace{5mm} \text{and} \hspace{5mm} |z^j w | \leq |z|^{j+1}+|w|^{j+1},$$ \eqref{eq:duo} and \eqref{eq:tres} also give us terms of \eqref{eq:cat1}. \item We now treat the remaining terms arising from those of \eqref{eq:com4}, for which $$j+k_P+\chi_P+\kappa_P=N_0-\zeta^0_P.$$ This equality can only occur if $j=N_0-\zeta^0_P-\beta_P$ and $k_P+\chi_P+\kappa_P=\beta_P$. It implies $p+r+r_{\chi}=0$ and we then have to study terms of the form
$$|z|^j\left| \mathcal{L}_{ \partial Z^{\gamma_0}}(F)\left( v, \nabla_v \left( Y^{\kappa} f \right) \right) P_{\zeta^0}(\Phi) \right|, \hspace{2mm} \text{with} \hspace{2mm} |\gamma_0|+|\kappa| \leq |\beta|-1.$$ Using the second inequality of Lemma \ref{nullG}, and denoting again the null decomposition of $\mathcal{L}_{\partial Z^{\gamma_0}}(F)$ by $(\alpha, \underline{\alpha}, \rho, \sigma)$, we can bound it by quantities such as
$$\left| \Phi \right| \left| \mathcal{L}_{ \partial Z^{\gamma_0}}(F) \right| \left| z^j P_{\zeta^0}(\Phi) \partial Y^{\kappa} f \right|, \hspace{3mm} \text{leading to terms of} \hspace{3mm} \eqref{eq:cat1}, $$ \begin{equation}\label{3:eq}
|\rho| \left| P_{\zeta^0}(\Phi) \right| \left( \tau_+|z|^{j-1}\left| Y Y^{\kappa} f \right|+\tau_- |z|^j \left| \partial Y^{\kappa} f \right| \right), \hspace{3mm} \text{with} \hspace{3mm} Y \in \mathbb{Y}_0, \hspace{3mm} \text{and}
\end{equation} \begin{equation}\label{2:eq}
\left( |\alpha|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\sigma|+\sqrt{\frac{v^{\underline{L}}}{v^0}} |\underline{\alpha}| \right) \left| P_{\zeta^0}(\Phi) \right| \left( \tau_+|z|^{j-1}\left| Y Y^{\kappa} f \right|+\tau_+ |z|^j \left| \partial Y^{\kappa} f \right| \right), \hspace{3mm} \text{with} \hspace{3mm} Y \in \mathbb{Y}_0.
\end{equation}
If $Y Y^{\kappa}=Y^{\chi^1}$ and $\partial Y^{\kappa}=Y^{\chi^2}$, we have $$|\zeta^0|+|\chi^i| \leq |\zeta^0|+|\beta|, \hspace{8mm} j-1 = N_0-\zeta^0_P-\chi^1_P \hspace{8mm} \text{and} \hspace{8mm} j = N_0-\zeta^0_P-\chi_P^2.$$ Thus, \eqref{3:eq} and \eqref{2:eq} give terms of \eqref{eq:cat1} and \eqref{eq:cat3} since we have, according to inequality \eqref{eq:zeta2} of Proposition \ref{ExtradecayLie} and for $\varphi \in \{\alpha, \underline{\alpha}, \rho, \sigma \}$,
$$ |\varphi| \lesssim \sum_{|\gamma| \leq |\gamma_0|+1} \tau_-^{-1} \left| \varphi \left( \mathcal{L}_{Z^{\gamma}} (F) \right) \right|+\tau_+^{-1} \left| \nabla_{Z^{\gamma}} F \right| .$$ \end{itemize}
It then remains to bound $T_F(P_{\zeta^0}(\Phi))z^jY^{\beta}f$. If $|\zeta^0| \geq 1$, there exists $ 1 \leq p \leq |\zeta^0|$ and $\left( \xi^i \right)_{1 \leq i \leq p}$ such that
$$P_{\zeta^0}(\Phi) = \prod_{i=1}^p Y^{\xi^i} \Phi, \hspace{8mm} \min_{1 \leq i \leq p} |\xi^i| \geq 1, \hspace{8mm} \sum_{i=1}^p |\xi^i|=|\zeta^0| \hspace{8mm} \text{and} \hspace{8mm} \sum_{i=1}^p (\xi^i)_T=\zeta^0_T.$$ Then, $T_F(P_{\zeta^0}(\Phi))=\sum_{i=1}^p T_F(Y^{\xi^i} \Phi ) \prod_{j \neq i} Y^{\xi^j} \Phi$ and let us, for instance, bound $T_F(Y^{\xi^1} \Phi) Y^{\beta} f \prod_{j =2}^p Y^{\xi^j} \Phi$. To lighten the notation, we define $\chi$ such that $$P_{\chi}(\Phi)=\prod_{j =2}^p Y^{\xi^j} \Phi, \hspace{8mm} \text{so that} \hspace{8mm} (\chi_T,\chi_P)=\left(\zeta^0_T-\xi^1_T,\zeta^0_P-\xi^1_P \right).$$
Using Propositions \ref{ComuVlasov} and \ref{sourcePhi} (with $|\gamma_1| \leq 1$), $T_F(Y^{\xi_1} \Phi) P_{\chi}(\Phi) Y^{\beta} f $ can be written as a linear combination of terms of $(type \hspace{1mm} 1-\xi_1)$, $(type \hspace{1mm} 2-\xi_1)$, $(type \hspace{1mm} 3-\xi_1)$ (applied to $\Phi$), $(family \hspace{1mm} 1-\xi_1)$, $(family \hspace{1mm} 2-\xi_1)$ and $(family \hspace{1mm} 3-\xi_1)$, multiplied by $P_{\chi}(\Phi) Y^{\beta} f$. The treatment of the first three type of terms is similar to those which arise from $z^j P_{\zeta^0}(\Phi)T_F(Y^{\beta} f )$, so we only give details for the first one. We then have to bound \begin{itemize}
\item $|z|^j\left| Z^{\gamma}(F_{\mu \nu}) \right| \left|w^d P_{k,p}(\Phi) Y^{\kappa} \Phi P_{\chi}(\Phi) Y^{\beta} f \right|$, with $d \in \{0,1 \}$, $w \in \mathbf{k}_1$, $|\kappa| \geq 1$, $\max( |\gamma|, |k|+|\gamma|, |k|+|\kappa| ) \leq |\xi^1|$, $|k|+|\gamma|+|\kappa| \leq |\xi^1|+1$ and $p+k_P+\kappa_P+d \leq \xi^1_P$. Note now that
$$P_{k,p}(\Phi) Y^{\kappa} \Phi P_{\chi}(\Phi)= \Phi^n P_{\zeta}(\Phi), \hspace{3mm} \text{with} \hspace{3mm} n \leq p \leq |\xi^1| , \hspace{3mm} \zeta_T=k_T+\kappa_T+\chi_T \hspace{3mm} \text{and} \hspace{3mm} \zeta_P=k_P+\kappa_P+\chi_P.$$ Note moreover that
$$|\zeta|+|\gamma|+|\beta|=|k|+|\gamma|+|\kappa|+|\chi|+|\beta| \leq |\xi^1|+|\chi|+|\beta|+1 = |\zeta^0|+|\beta|+1, \hspace{3mm} |\zeta|+|\beta| \leq |\zeta^0|+|\beta|$$ and $\zeta_P+\beta_P+d=k_P+\kappa_P+d+\chi_P+\beta_P \leq \xi^1_P+\chi_P+\beta_P= \zeta^0_P+\beta_P$, which proves that this is a term of \eqref{eq:cat1}.
\item $\tau_+|z|^j \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \theta} P_{\chi}(\Phi) Y^{\beta} f \right| $, with $|\gamma| \leq |\xi^1|+1$ and $\gamma_T=\xi^1_T$. It is part of \eqref{eq:cat4} as
$$|\chi| < |\zeta^0|, \hspace{6mm} \chi_T+\gamma_T=\chi_T+\xi^1_T = \zeta^0_T, \hspace{6mm} \chi_P \leq \zeta^0_P \hspace{6mm} \text{and} \hspace{6mm} |\chi|+|\gamma| \leq |\chi|+|\xi^1|+1 =|\zeta^0|+1.$$
\item $\left| Z^{\gamma}(F_{\mu \nu})\right| \left| z^j P_{k,p}(\Phi) P_{\chi} (\Phi)Y^{\beta} f \right| $, with $|k|+ |\gamma| \leq |\xi^1|-1$, $k_P \leq \xi^1_P$ and $p \leq |\xi^1|$, which is part of \eqref{eq:cat1}. Indeed, we can write
$$P_{k,p}(\Phi) P_{\chi} (\Phi) = \Phi^r P_{\zeta}(\Phi), \hspace{3mm} \text{with} \hspace{3mm} r \leq p \leq |\xi^1|, \hspace{3mm} \left( \zeta_T, \zeta_P \right) = \left( k_T+\chi_T,k_P+\chi_P \right)$$
and we then have $|\zeta|+|\gamma| = |k|+|\gamma|+|\chi| \leq |\xi^1|+|\chi| \leq |\zeta^0|$,
$$|\zeta|+|\gamma|+|\beta| \leq |\xi^1|+|\chi| +|\beta| \leq |\zeta^0|+|\beta| \hspace{3mm} \text{and} \hspace{3mm} \zeta_P+\beta_P \leq \xi^1_P+\chi_P+\beta_P = \zeta^0_P+\beta_P$$
\item $\tau_+ \left| \mathcal{L}_{X Z^{\gamma_0}}(F)\right| \left|z^j P_{k,p}(\Phi) P_{\chi} (\Phi) Y^{\beta} f \right|$, with $|k|+ |\gamma_0| \leq |\xi_1|-1$, $k_P < \xi^1_P$ and $p \leq |\xi^1|$. By inequality \eqref{eq:Xdecay} of Proposition \ref{ExtradecayLie}
$$\exists \hspace{1mm} w \in \mathbf{k}_1, \hspace{8mm} \tau_+\left| \mathcal{L}_{X Z^{\gamma_0}}(F)\right| \lesssim (1+|w|) \sum_{|\gamma| \leq |\gamma_0|+1} \left| \nabla_{Z^{\gamma}} F \right|.$$
Note moreover that $k_P+\chi_P+\beta_P \leq \xi^1_P-1+\chi_P+\beta_P < \zeta^0_P+\beta_P$, as\footnote{Note that this term could appear only if $\xi^1_P \geq 1$.} $k_P < \xi^1_P$. We then have $j+1 \leq N_0-k_P-\chi_P-\beta_P$ and we obtain, using $|z^jw| \leq |z|^{j+1}+|w|^{j+1}$ and writing again $P_{k,p}(\Phi) P_{\chi} (\Phi) = \Phi^r P_{\zeta}(\Phi)$, terms which are in \eqref{eq:cat1} (the other conditions can be checked as previously). \end{itemize} \end{proof}
\begin{Rq}\label{hierarchyjustification} There are three types of terms which bring us to consider a hierarchy on the quantities of the form $z^j P_{\xi}(\Phi) Y^{\beta} f$. \begin{itemize} \item Those of \eqref{eq:cat0}, as $\nabla_v \left( z^j \right)$ creates (at least) a $\tau_-$-loss and since $\tau_- F \sim \tau_+^{-1}$.
\item The first ones of \eqref{eq:cat3}. Indeed, we will have $|\rho| \lesssim \tau_+^{- \frac{3}{2}}\tau_-^{-\frac{1}{2}}$, so, using\footnote{We will be able to lose one power of $v^0$ as it is suggested by the energy estimate of Proposition \ref{energyf}.} $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$,
$$\frac{\tau_+}{\tau_-}|\rho| \lesssim \frac{v^0}{\tau_+}+\frac{v^{\underline{L}}}{\tau_-^3}.$$ $v^{\underline{L}} \tau_-^{-3}$ will give an integrable term, as the component $v^{\underline{L}}$ will allow us to use the foliation $(u,C_u(t))$ of $[0,t] \times \mathbb{R}^3_x$. However, $v^0 \tau_+^{-1}$ will create a logarithmic growth. \item The ones of \eqref{eq:cat4}, because of the $\tau_+$ weight and the fact that even the better component of $\mathcal{L}_{Z^{\gamma}}(F)$ will not have a better decay rate than $\tau_+^{-2}$. \end{itemize}
We will then classify them by $|\xi|+|\beta|$ and $j$, as one of these quantities is lowered in each of these terms. \end{Rq} \begin{Rq}\label{deuxblocs}
Let $\beta$ and, for $i \in \{1,2\}$, $\zeta^i$ be multi-indices such that $|\zeta^i|+|\beta| \leq N$, $|\zeta^1| \leq N-1$ and $N_0 \geq 2N-1$. We can adapt the previous proposition to $T_F \left( z^j P_{\zeta^1}(\Phi) P_{\zeta^2}(\Phi) Y^{\beta} f \right)$. One just has \begin{itemize} \item to add the factor $P_{\zeta^2}(\Phi)$ (or $P_{\zeta^1}(\Phi)$) in the terms of each category and \item to replace conditions such as $j \leq N_0-\zeta_P-\sigma_P$ by $j \leq N_0-\zeta_P - \zeta^2_P-\sigma_P$ (or $j \leq N_0-\zeta_P - \zeta^1_P-\sigma_P$). \end{itemize} \end{Rq}
The worst terms are those of \eqref{eq:cat4} as they are responsible for the stronger growth of the top order energy norms. However, as suggested by the following proposition, we will have better estimates on $\| z^j P_{\xi}^X(\Phi) Y^{\beta} f \|_{L^1_{x,v}}$.
\begin{Pro}\label{ComuPkpX}
Let $N \in \mathbb{N}$, $z \in \mathbf{k}_1$, $N_0 \geq N$, $\xi^0$, $\beta$ and $j \in \mathbb{N}$ be such that $|\xi^0| \leq N-1$, $|\xi^0|+|\beta| \leq N$ and $j \leq N_0-\xi^0_P-\beta_P$. Then, $T_F(z^j P^X_{\xi^0}(\Phi) Y^{\beta} f)$ can be bounded by a linear combination of terms of \eqref{eq:cat0}, \eqref{eq:cat1}\hspace{-0.1mm}, \eqref{eq:cat3} and \begin{equation}\label{eq:cat4bis}
\frac{\tau_+}{\tau_-} \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{ Z^{\gamma}}(F)_{\mu \nu} w^{j} P^X_{\xi}(\Phi) Y^{\beta} f \right|, \tag{category $3-X$} \end{equation}
with \hspace{2mm} $\xi_X < \xi^0_X$, \hspace{2mm} $\xi_T \leq \xi^0_T$, \hspace{2mm} $\xi_P \leq \xi^0_P$, \hspace{2mm} $|\xi|+|\gamma|+|\beta| \leq |\xi^0|+|\beta|+1$, \hspace{2mm} $|\gamma| \leq |\xi|+1$, \hspace{2mm} $w \in \mathbf{k}_1$ \hspace{2mm} and \hspace{2mm} $j = N_0-\xi_P-\beta_P$.
Note that the terms of \eqref{eq:cat3} only appear when $j=N_0-\xi^0_P-\beta_P$ and those of \eqref{eq:cat4bis} if $j=N_0-\xi^0_P-\beta_P$ and $|\xi^0| \geq 1$. \end{Pro} \begin{proof} Proposition \ref{ComuVlasov} also holds for $Y^{\beta} \in \mathbb{Y}_X$ in view of Lemma \ref{ComuX} and the fact that $X$ can be considered as $c(v) \partial$. Then, one only has to follow the proof of the previous proposition and to apply Proposition \ref{sourceXPhi} where we used Proposition \ref{sourcePhi}. Hence, instead of terms of \eqref{eq:cat4}, we obtain
$$ \tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma}} (F)_{\mu \nu} z^j P_{\chi}^X(\Phi) Y^{\beta} f \right|, \hspace{3mm} \text{with} \hspace{3mm} |\gamma| \leq |\xi^1|, \hspace{3mm} \chi_X < \xi^0_X, \hspace{3mm} \chi_T \leq \xi^0_T \hspace{3mm} \text{and} \hspace{3mm} \chi_P \leq \xi^0_P.$$ Apply now the second and then the first inequality of Proposition \ref{ExtradecayLie} to obtain that
$$\tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{X Z^{\gamma}} (F)_{\mu \theta} z^j P_{\chi}^X(\Phi) Y^{\beta} f \right| \lesssim \left| P_{\chi}^X(\Phi) Y^{\beta} f \right| \sum_{|\delta| \leq |\xi_1|+1} \hspace{-0.6mm} \left( \sum_{w \in \mathbf{k}_1 } \frac{ |w|^{j+1}}{\tau_-}\left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\delta}}(F)_{\mu \theta} \right|+|z|^j\left| \mathcal{L}_{Z^{\delta}}(F)\right| \hspace{-0.6mm} \right)$$ which leads to terms of \eqref{eq:cat4bis} (if $j=N_0-\chi_P-\beta_P$) and \eqref{eq:cat1} (as $P_{\chi}^X(\Phi)$ can be bounded by a linear combination of $P_{\chi^0}(\Phi)$ with $\chi^0_T = \chi_T+\chi_X$ and $\chi^0_P \leq \chi_P$). \end{proof} \begin{Rq}
As we will mostly apply this commutation formula with a lower $N_0$ than for our utilizations of Proposition \ref{ComuPkp} or for $|\xi^0|=0$, we will have to deal with terms of \eqref{eq:cat4bis} only once (for \eqref{Auxenergy}). \end{Rq}
\subsection{Commutation of the Maxwell equations}\label{subseccomuMax}
We recall the following property (see Lemma $2.8$ of \cite{massless} for a proof). \begin{Lem}\label{basiccom} Let $G$ and $M$ be respectively a $2$-form and a $1$-form such that $\nabla^{\mu} G_{\mu \nu}=M_{\nu}$. Then, $$\forall \hspace{0.5mm} Z \in \mathbb{P}, \hspace{5mm} \nabla^{\mu} \mathcal{L}_{Z}(G)_{\mu \nu} = \mathcal{L}_{Z} (M)_{\nu} \hspace{10mm} \text{and} \hspace{10mm} \nabla^{\mu} \mathcal{L}_{S}(G)_{\mu \nu} = \mathcal{L}_{S} (M)_{\nu} +2M_{\nu}.$$ If $g$ is a sufficiently regular function such that $\nabla^{\mu} G_{\mu \nu} = J(g)_{\nu}$, then $$\forall \hspace{0.5mm} Z \in \mathbb{P}, \hspace{5mm} \nabla^{\mu} \mathcal{L}_{Z}(G)_{\mu \nu} = J(\widehat{Z} g)_{\nu} \hspace{10mm} \text{and} \hspace{10mm} \nabla^{\mu} \mathcal{L}_{S}(G)_{\mu \nu} = J(Sg)_{\nu}+3J(g)_{\nu}.$$ \end{Lem}
We need to adapt this formula since we will control $Yf$ and not $\widehat{Z}f$. We cannot close the estimates using only the formula
$$ J(\widehat{Z} f)=J(Y f ) -J(\Phi^k_{\widehat{Z}} X_k f )$$ as we will have $\|\Phi \|_{L^{\infty}_{v}} \lesssim \log^2 (\tau_+ )$ and since this small loss would prevent us from closing the energy estimates. \begin{Pro}\label{ComuMax1} Let $Z \in \mathbb{K}$. Then, for $0 \leq \nu \leq 3$, $\nabla^{\mu} \mathcal{L}_Z(F)_{\mu \nu}$ can be written as a linear combination of the following terms. \begin{itemize}
\item $\int_v \frac{v_{\nu}}{v^0}( X \Phi )^j Y^{\kappa} f dv$, with $j+|\kappa| \leq 1$.
\item $\frac{1}{\tau_+} \int_v c(t,x,v) z P_{k,p}(\Phi) Y^{\kappa} f dv$, with $z \in \mathbf{k}_1$, $p+|k|+|\kappa| \leq 3$ and $|k|+|\kappa| \leq 1$. \end{itemize} \end{Pro} \begin{Rq}\label{rq11} We would obtain a similar proposition if $J(f)_{\nu}$ was equal to $\int_v c_{\nu}(v) f dv$, except that we would have to replace $\frac{v_{\nu}}{v^0}$, in the first terms, by certain good coefficients $c(v)$. \end{Rq} \begin{proof} If $Z \in \mathbb{T}$, the result ensues from Lemma \ref{basiccom}. Otherwise, we have, using \eqref{eq:X} \begin{eqnarray} \nonumber J(\widehat{Z} f) & = & J(Yf)-J(\Phi^{k} X_{k} f) \\ \nonumber & = & J(Yf)_{\nu}+J(X_k(\Phi^k) f)_{\nu}-J(X_k(\Phi^k f)) \\ \nonumber & = & J(Yf)+J(X_k(\Phi^k) f)-\frac{1}{1+t+r} \sum_{k=1}^3 J\left( \left(2z_{0k}\partial_t+\sum_{Z \in \mathbb{K}} c_Z(t,x,v) Z \right)(\Phi^k f) \right) . \end{eqnarray}
Now, note that $J(z_{0k} \partial_t ( \Phi^k f ))= J(z_{0k} \Phi \partial_t f+z_{0k} \partial_t(\Phi) f)$ and, for $Z \in \mathbb{K} \setminus \mathbb{T}$ (in the computations below, we consider $Z=\Omega_{0i}$, but the other cases are similar), by integration by parts in $v$, \begin{eqnarray} \nonumber J\left( Z(\Phi^k f) \right) \hspace{-1.4mm} & = & \hspace{-1.4mm} J\left((Y-v^{0}\partial_{v^i}-\Phi^q X_q)(\Phi^k f) \right) \\ \nonumber & = & \hspace{-1.4mm} J \left(Y(\Phi^k)f+\Phi^k Y(f)-\Phi^q X_q ( \Phi^k ) f +\Phi^q \Phi^k X_q (f) \right) \hspace{-0.3mm} + \hspace{-0.3mm} \left(\int_v \Phi^k f dv \right) dx^{i} \hspace{-0.3mm} - \hspace{-0.3mm} \left( \int_v \Phi^k f \frac{v_i}{v^0}dv \right) dx^{0} \hspace{-0.2mm} , \end{eqnarray} where $dx^{\mu}$ is the differential of $x^{\mu}$. \end{proof} We are now ready to establish the higher order commutation formula. \begin{Pro}\label{ComuMaxN} Let $R \in \mathbb{N}$ and $Z^{\beta} \in \mathbb{K}^{R}$. Then, for all $0 \leq \nu \leq 3$, $\nabla^{\mu} \mathcal{L}_{Z^{\beta}}(F)_{\mu \nu}$ can be written as a linear combination of terms such as \begin{equation}\label{eq:comu1}
\int_v \frac{v_{\nu}}{v^0} P^X_{\xi}( \Phi ) Y^{\kappa} f dv, \hspace{3mm} \text{with} \hspace{3mm} |\xi|+|\kappa| \leq R, \tag{type $1-R$} \end{equation} \begin{equation}\label{eq:comu2}
\frac{1}{\tau_+}\int_v c(t,x,v) zP_{k,p}(\Phi) Y^{\kappa} f dv, \hspace{3mm} \text{with} \hspace{3mm} p+|k|+|\kappa| \leq 3R \hspace{3mm} \text{and} \hspace{3mm} |k|+|\kappa| \leq R.\tag{type $2-R$} \end{equation} \end{Pro}
\begin{proof} We will use during the proof the following properties, arising from Lemma \ref{weights} and the definition of the $X_i$ vector field, \begin{equation}\label{eq:Yz} \forall \hspace{0.5mm} (Y,z) \in \mathbb{Y} \times \mathbf{k}_1, \hspace{3mm} \exists \hspace{0.5mm} z' \in \mathbf{k}_1, \hspace{3mm} Y(z)=c_1(v)z+z'+c_2(v)\Phi, \end{equation} \begin{equation}\label{eq:PX} P^X_{\xi}(\Phi) = \sum_{\begin{subarray}{} \zeta_T = \xi_T+\xi_X \\ \hspace{2mm} \zeta_P \leq \xi_P \end{subarray}} c^\zeta(v) P_{\zeta}(\Phi). \end{equation}
Let us suppose that the formula holds for all $|\beta_0| \leq R-1 $, with $R \geq 2$ (for $R-1=1$, see Proposition \ref{ComuMax1}). Let $(Z,Z^{\beta_0}) \in \mathbb{K} \times \mathbb{K}^{|\beta_0|}$ with $|\beta_0|=R-1$ and consider the multi-index $\beta$ such that $Z^{\beta}=Z Z^{\beta_0}$. We fix $\nu \in \llbracket 0,3 \rrbracket$. By the first order commutation formula, Remark \ref{rq11} and the induction hypothesis, $\nabla^{\mu} \mathcal{L}_{Z^{\beta}}(F)_{\mu \nu}$ can be written as a linear combination of the following terms (to lighten the notations, we drop the good coefficients $c(t,x,v)$ in the integrands of the terms given by Proposition \ref{ComuMax1}). \begin{itemize}
\item $\int_v \frac{v_{\nu}}{v^0} \left( X \Phi \right)^j Y^{\kappa^0} \left( P_{\xi}^X( \Phi ) Y^{\kappa} f \right) dv$, with $j+|\kappa^0| \leq 1$ and $|\xi|+|\kappa| \leq R-1$. It leads to $\int_v \frac{v_{\nu}}{v^0} P_{\xi}^X( \Phi ) Y^{\kappa} f dv$, $$ \int_v \frac{v_{\nu}}{v^0} X(\Phi) P^X_{\xi}( \Phi ) Y^{\kappa} f dv, \hspace{5mm} \int_v \frac{v_{\nu}}{v^0} Y \left(P^X_{\xi}( \Phi ) \right) Y^{\kappa} f dv \hspace{5mm} \text{and} \hspace{5mm} \int_v \frac{v_{\nu}}{v^0} P^X_{\xi}( \Phi ) Y^{\kappa^0} Y^{\kappa} f dv,$$
which are all of \eqref{eq:comu1} since $Y\left(P^X_{\xi}( \Phi ) \right)=P_{\zeta}^X(\Phi)$, with $|\zeta|=|\xi|+1$, and $|\xi|+1+|\kappa| \leq R$.
\item $\int_v c(v) \left( X \Phi \right)^j Y^{\kappa^0} \left( \frac{z}{\tau_+} c(t,x,v) P_{k,p}(\Phi) Y^{\kappa} f \right) dv$, with $j+|\kappa^0| \leq 1$, $z \in \mathbf{k}_1$, $p+|k|+|\kappa| \leq 3R-3$ and $|k|+|\kappa| \leq R-1$. For simplicity, we suppose $c(v)=1$. As $$ Y \left( \frac{1}{\tau_+} c(t,x,v) \right) = \frac{1}{\tau_+} c_1(t,x,v)+ \frac{1}{\tau_+} c_2(t,x,v) \Phi,$$ we obtain, dropping the dependance in $(t,x,v)$ of the good coefficients, the following terms (with the first one corresponding to $j=1$ and the other ones to $j=0$). $$\frac{1}{\tau_+}\int_v c zP_{(k_T+1,k_P),p+1}(\Phi) Y^{\kappa} f dv, \hspace{5mm} \frac{1}{\tau_+}\int_v (c+c_1) zP_{k,p}(\Phi) Y^{\kappa} f dv, \hspace{5mm} \frac{1}{\tau_+}\int_v c_2 zP_{k,p+1}(\Phi) Y^{\kappa} f dv, $$ $$\frac{1}{\tau_+}\int_v c zP_{(k_T+\kappa^0_T,k_P+\kappa^0_P),p}(\Phi) Y^{\kappa} f dv, \hspace{5mm} \frac{1}{\tau_+}\int_v c Y(z) P_{k,p}(\Phi) Y^{\kappa} f dv, \hspace{5mm} \frac{1}{\tau_+}\int_v c z P_{k,p}(\Phi) Y^{\kappa^0} Y^{\kappa} f dv.$$ It is now easy to check that all these terms are of \eqref{eq:comu2} (for the penultimate term, recall in particular \eqref{eq:Yz}). For instance, for the first one, we have
$$(p+1)+(|k|+1)+|\kappa|=(p+|k|+|\kappa|)+2 \leq 3R-1 \leq 3R \hspace{3mm} \text{and} \hspace{3mm} (|k|+1)+|\kappa| \leq (|k|+|\kappa|)+1 \leq R.$$
\item $\frac{1}{\tau_+} \int_v zP_{k^0,p^0}(\Phi) Y^{\kappa^0} \left( P_{\xi}^X( \Phi ) Y^{\kappa} f \right) dv$, with $p^0+|k^0|+|\kappa^0| \leq 3$, $|k^0|+|\kappa^0| \leq 1$ and $|\xi|+|\kappa| \leq R-1$. According to \eqref{eq:PX}, we can suppose without loss of generality that $P_{\xi}^X( \Phi )=c(v) P_{\zeta}( \Phi )$, with $|\zeta| \leq |\xi|$. If $|k^0|=1$, we obtain
$$ \frac{1}{\tau_+} \int_v c(v) zP_{(\zeta_T+k^0_T,\zeta_P+k^0_P),r}( \Phi ) Y^{\kappa} f dv, \hspace{1cm} \text{with} \hspace{1cm} r \leq |\zeta|+p^0,$$
which is of \eqref{eq:comu2} since $$(|\zeta|+p^0)+(|\zeta|+|k^0|)+|\kappa| \leq (p^0+|k^0|)+2(|\xi|+|\kappa|)\leq 2R+1 \leq 3R \hspace{3mm} \text{and} \hspace{3mm} (|\zeta|+|k^0|)+|\kappa| \leq R.$$
If $|k^0|=0$, we obtain, with $r \leq |\zeta|+p^0$ and since $Y^{\kappa^0}(c(v))=c_1(v)$, $$ \frac{1}{\tau_+} \int_v (c+c_1)(v) zP_{(\zeta_T,\zeta_P),r}(\Phi ) Y^{\kappa} f dv, \hspace{5mm} \frac{1}{\tau_+} \int_v c(v) zP_{(\zeta_T,\zeta_P),r}( \Phi ) Y^{\kappa^0} Y^{\kappa} f dv \hspace{5mm} \text{and} \hspace{5mm}$$ $$\frac{1}{\tau_+} \int_v c(v) zP_{(\zeta_T+\kappa^0_T,\zeta_P+\kappa^0_P),r}( \Phi ) Y^{\kappa} f dv, $$ which are of \eqref{eq:comu2} since
$$|\zeta|+1+|\kappa| \leq R \hspace{3mm} \text{and} \hspace{3mm} |\zeta|+p^0+|\zeta|+|\kappa^0|+|\kappa| \leq 3+2R-2 \leq 3R .$$
\item $\frac{1}{\tau_+} \int_v w P_{k^0,p^0}(\Phi) Y^{\kappa^0} \left( \frac{z}{\tau_+} c(t,x,v) P_{k,p}(\Phi) Y^{\kappa} f \right) dv$, with $(w,z) \in \mathbf{k}_1^2$, $p^0+|k^0|+|\kappa^0| \leq 3$, $|k^0|+|\kappa^0| \leq 1$, $p+|k|+|\kappa| \leq 3R-3$ and $|k|+|\kappa| \leq R-1$.
If $|k^0|=1$, we obtain the term $$\frac{1}{\tau_+} \int_v c_0(t,x,v) wP_{k+k^0,p+p^0}(\Phi) Y^{\kappa} f dv, \hspace{3mm} \text{where} \hspace{3mm} c_0(t,x,v):=c(t,x,v)\frac{z}{\tau_+},$$ which is of \eqref{eq:comu2} since
$$|k+k^0|+(p+p^0)+|\kappa| \leq (p+|k|+|\kappa|)+(p^0+|k^0|) \leq 3R \hspace{3mm} \text{and} \hspace{3mm} |k+k^0|+|\kappa|=(|k|+|\kappa|)+1 \leq R.$$
If $|k^0|=0$, using that $$ \frac{z}{\tau_+}c(t,x,v)+Y^{\kappa^0} \left( \frac{z}{\tau_+} c(t,x,v) \right) = c_3(t,x,v)+c_4(t,x,v)\Phi,$$ we obtain the following terms of \eqref{eq:comu2}, $$\frac{1}{\tau_+} \int_v \left( c_3(t,x,v) P_{k,p+p^0}(\Phi)+c_4(t,x,v) P_{k,p+p^0+1}(\Phi) \right) w Y^{\kappa} f dv, $$ $$\frac{1}{\tau_+} \int_v c_0(t,x,v)w P_{k,p+p^0}(\Phi) Y^{\kappa^0}Y^{\kappa} f dv \hspace{6mm} \text{and} \hspace{6mm} \frac{1}{\tau_+} \int_v c_0(t,x,v)w P_{(k_T+\kappa^0_T,k_P+\kappa^0_P),p+p^0}(\Phi) Y^{\kappa} f dv.$$ \end{itemize} \end{proof}
Recall from the transport equation satisfied by the $\Phi$ coefficients that, in order to estimate $Y^{\gamma} \Phi$, we need to control $\mathcal{L}_{Z^{\beta}}(F)$ with $|\beta|=|\gamma|+1$. Consequently, at the top order, we will rather use the following commutation formula.
\begin{Pro}\label{CommuFsimple}
Let $Z^{\beta} \in \mathbb{K}^{|\beta|}$. Then,
$$\nabla^{\mu} \mathcal{L}_{Z^{\beta}}(F)_{\mu \nu} = \sum_{\begin{subarray}{} |q|+|\kappa| \leq |\beta| \\ \hspace{1.5mm} |q| \leq |\beta|-1 \\ \hspace{1mm} p \leq q_X+\kappa_T \end{subarray}} J \left( c^{k,q}_{\kappa}(v) P_{q,p}(\Phi) Y^{\kappa} f \right),$$ where $P_{q,p}(\Phi)$ can contain $\mathbb{Y}_X$, and not merely $\mathbb{Y}$, derivatives of $\Phi$. We then denote by $q_X$ its number of $X$ derivatives. \end{Pro} \begin{proof} Iterating Lemma \ref{basiccom}, we have \begin{equation}\label{comuiterbasic}
\nabla^{\mu} \mathcal{L}_{Z^{\beta}}(F)_{\mu \nu} = \sum_{|\gamma| \leq |\beta| } C^{\beta}_{\gamma} J \left( \widehat{Z}^{\gamma} f \right). \end{equation}
The result then follows from an induction on $|\gamma|$. Indeed, write $\widehat{Z}^{\gamma}=\widehat{Z} \widehat{Z}^{\gamma_0}$ and suppose that \begin{equation}\label{lifttomodif}
\widehat{Z}^{\gamma_0} f=\sum_{\begin{subarray}{} |q|+|\kappa| \leq |\gamma_0| \\ \hspace{1.5mm} |q| \leq |\gamma_0|-1 \\ \hspace{1mm} p \leq q_X+\kappa_T \end{subarray}} c^{k,q}_{\kappa}(v) P_{q,p}(\Phi) Y^{\kappa} f . \end{equation} If $\widehat{Z}=\partial \in \mathbb{T}$, then
$$\widehat{Z}^{\gamma} f = \sum_{\begin{subarray}{} |q|+|\kappa| \leq |\gamma_0| \\ \hspace{1.5mm} |q| \leq |\gamma_0|-1 \\ \hspace{1mm} p \leq q_X+\kappa_T \end{subarray}} c^{k,q}_{\kappa}(v) P_{(q_T+1,q_P,q_X),p}(\Phi) Y^{\kappa} f+c^{k,q}_{\kappa}(v) P_{q,p}(\Phi) \partial Y^{\kappa} f = \sum_{\begin{subarray}{} |q|+|\kappa| \leq |\gamma| \\ \hspace{1.5mm} |q| \leq |\gamma|-1 \\ \hspace{1mm} p \leq q_X+\kappa_T \end{subarray}} c^{k,q}_{\kappa}(v) P_{q,p}(\Phi) Y^{\kappa} f.$$ Otherwise $\gamma_P=(\gamma_0)_P+1$ and write $\widehat{Z}=Y-\Phi X$ with $Y \in \mathbb{Y}_0$. Hence, using $X Y^{\kappa} f=c(v) \partial Y^{\kappa} f$, \begin{eqnarray}
\nonumber \widehat{Z}^{\gamma} f \hspace{-2mm} & = & \hspace{-2mm} \sum_{\begin{subarray}{} |q|+|\kappa| \leq |\gamma_0| \\ \hspace{1.5mm} |q| \leq |\gamma_0|-1 \\ \hspace{1mm} p \leq q_X+\kappa_T \end{subarray}} \Big( Y \left(c^{k,q}_{\kappa}(v) \right) P_{q,p}(\Phi) Y^{\kappa} f+ c^{k,q}_{\kappa}(v) P_{(q_T,q_P+1,q_X),p}(\Phi) Y^{\kappa} f +c^{k,q}_{\kappa}(v) P_{q,p}(\Phi) YY^{\kappa} f \\ \nonumber
& & \hspace{-4.5mm} +c^{k,q}_{\kappa}(v) P_{(q_T,q_P,q_X+1),p+1}(\Phi) Y^{\kappa} f +c^{k,q}_{\kappa}(v) P_{(q_T,q_P,q_X),p+1}(\Phi) c(v) \partial Y^{\kappa} f \Big) \lesssim \hspace{-0.2mm} \sum_{\begin{subarray}{} |q|+|\kappa| \leq |\gamma| \\ \hspace{1.5mm} |q| \leq |\gamma|-1 \\ \hspace{1mm} p \leq q_X+\kappa_T \end{subarray}} c^{k,q}_{\kappa}(v) P_{q,p}(\Phi) Y^{\kappa} f . \end{eqnarray} \end{proof}
\section{Energy and pointwise decay estimates}\label{sec4}
In this section, we recall classical energy estimates for both the electromagnetic field and the Vlasov field and how to obtain pointwise decay estimates from them. For that purpose, we need to prove Klainerman-Sobolev inequalities for velocity averages, similar to Theorem $8$ of \cite{FJS} or Theorem $1.1$ of \cite{dim4}, adapted to modified vector fields.
\subsection{Energy estimates}\label{energy} For the particle density, we will use the following approximate conservation law. \begin{Pro}\label{energyf} Let $H : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R} $ and $g_0 : \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ be two sufficiently regular functions and $F$ a sufficiently regular $2$-form defined on $[0,T[ \times \mathbb{R}^3$. Then, $g$, the unique classical solution of \begin{eqnarray} \nonumber T_F(g)&=&H \\ \nonumber g(0,.,.)&=&g_0, \end{eqnarray} satisfies the following estimate,
$$\forall \hspace{0.5mm} t \in [0,T[, \hspace{1cm} \| g \|_{L^1_{x,v}}(t)+ \sup_{u \in \mathbb{R}} \left\|\frac{v^{\underline{L}}}{v^0} g \right\|_{L^1(C_u(t))L^1_{v}} \leq 2 \| g_0 \|_{L^1_{x,v}}+2 \int_0^t \int_{\Sigma_s} \int_v |H | \frac{dv}{v^0}dxds.$$ \end{Pro}
\begin{proof}
The estimate follows from the divergence theorem, applied to $\int_v \frac{v^{\mu}}{v^0}|g|dv$ in $[0,t] \times \mathbb{R}^3$ and $V_u(t)$, for all $u \leq t$. We refer to Proposition $3.1$ of \cite{massless} for more details. \end{proof}
We consider, for the remainder of this section, a $2$-form $G$ and a $1$-form $J$, both defined on $[0,T[ \times \mathbb{R}^3$ and sufficiently regular, such that \begin{eqnarray} \nonumber \nabla^{\mu} G_{\mu \nu} & = & J_{\nu} \\ \nonumber \nabla^{\mu} {}^* \! G_{\mu \nu} & = & 0. \end{eqnarray}
We denote by $(\alpha,\underline{\alpha},\rho,\sigma)$ the null decomposition of $G$. As $\int_{\Sigma_0} r |\rho(G)|^2(0,x)dx=+\infty$ when the total charge is non-zero, we cannot control norms such as $\left\|\sqrt{\tau_+} \rho \right\|_{L^2(\Sigma_t)}$ and we then separate the study of the electromagnetic field in two parts. \begin{itemize} \item The exterior of the light cone, where we propagate $L^2$ norms on the chargeless part $\widetilde{F}$ of $F$ (introduced, as $\overline{F}$, in Definition \ref{defpure1}), which has a finite initial weighted energy norm. The pure charge part $\overline{F}$ is given by an explicit formula, which describes directly its asymptotic behavior. As $F=\widetilde{F}+\overline{F}$, we are then able to obtain pointwise decay estimates on the null components of $F$. \item The interior of the light cone, where we can propagate $L^2$ weighted norms of $F$ since we control its flux on $C_0(t)$ with the bounds obtained on $\widetilde{F}$ in the exterior region. \end{itemize} We then introduce the following energy norms.
\begin{Def}\label{defMax1} Let $N \in \mathbb{N}$. We define, for $t \in [0,T[$, \begin{eqnarray}
\nonumber \mathcal{E}^0[G](t) & : = & \int_{\Sigma_t}\left( |\alpha|^2+|\underline{\alpha}|^2+2|\rho|^2+2|\sigma|^2 \right)dx +\sup_{u \leq t} \int_{C_u(t)} \left( |\alpha|^2+|\rho|^2+|\sigma|^2 \right) dC_u(t), \\
\nonumber \mathcal{E}_N^0[G](t) & : = & \sum_{\begin{subarray}{l} \hspace{0.5mm} Z^{\gamma} \in \mathbb{K}^{|\gamma|} \\ \hspace{1mm} |\gamma| \leq N \end{subarray}} \mathcal{E}^0[\mathcal{L}_{ Z^{\gamma}}(G)](t), \\
\nonumber \mathcal{E}^{S, u \geq 0}[G](t) & := & \int_{\Sigma^0_t} \tau_+ \left( |\alpha|^2+|\rho|^2+|\sigma|^2 \right)+\tau_- |\underline{\alpha}|^2 dx+ \sup_{0 \leq u \leq t} \int_{C_u(t)} \tau_+ |\alpha|^2+\tau_-\left( |\rho|^2+|\sigma|^2 \right) d C_u(t) . \\
\nonumber \mathcal{E}_N[G](t) & := &\sum_{\begin{subarray}{l} \hspace{0.5mm} Z^{\gamma} \in \mathbb{K}^{|\gamma|} \\ \hspace{1mm} |\gamma| \leq N \end{subarray}} \mathcal{E}^{S, u \geq 0}[\mathcal{L}_{ Z^{\gamma}}(G)](t) \\
\nonumber \mathcal{E}^{S,u \leq 0}[G](t) & := & \int_{\overline{\Sigma}^{0}_t} \tau_+ \left( |\alpha|^2+|\rho|^2+|\sigma|^2 \right)+\tau_- |\underline{\alpha}|^2 dx+ \sup_{ u \leq 0} \int_{C_u(t)} \tau_+ |\alpha|^2+\tau_-\left( |\rho|^2+|\sigma|^2 \right) d C_u(t) \\
\nonumber \mathcal{E}^{Ext}_N[G](t) & : = & \sum_{\begin{subarray}{l} \hspace{0.5mm} Z^{\gamma} \in \mathbb{K}^{|\gamma|} \\ \hspace{1mm} |\gamma| \leq N \end{subarray}} \mathcal{E}^{S,u \leq 0}[\mathcal{L}_{Z^{\gamma}}(G)](t). \end{eqnarray} \end{Def} The following estimates hold. \begin{Pro}\label{energyMax1} Let $\overline{S} := S+ \partial_t \mathds{1}_{u >0}+2 \tau_- \partial_t \mathds{1}_{u \leq 0}$. For all $ t \in [0,T[$, \begin{eqnarray}
\nonumber \mathcal{E}^0[G](t) & \leq & 2\mathcal{E}^0[G](0) + 8\int_0^t \int_{\Sigma_s} |G_{\mu 0} J^{\mu}| dx ds \\ \nonumber
\mathcal{E}^{S,u \leq 0}[G](t) & \leq & 6\mathcal{E}^{S,u \leq 0}[G](0) + 8\int_0^t \int_{\overline{\Sigma}^{0}_s } \left| \overline{S}^{\nu} G_{\nu \mu } J^{\mu} \right| dx ds \\ \nonumber
\mathcal{E}^{S,u \geq 0}[G](t) & \leq & 3\mathcal{E}^{S,u \leq 0}[\widetilde{G}](t) + 8\int_0^t \int_{\Sigma^{0}_s } \left| \overline{S}^{\nu} G_{\nu \mu } J^{\mu} \right| dx ds. \end{eqnarray} \end{Pro} \begin{proof} For the first inequality, apply the divergence theorem to $T_{\mu 0}[G]$ in $[0,t] \times \mathbb{R}^3$ and $V_u(t)$, for all $u \leq t$. Let us give more details for the other ones. Denoting $ T[G]$ by $T$ and using Lemma \ref{tensorcompo}, we have, if $u \leq 0$, \begin{eqnarray} \nonumber \nabla^{\mu} \left( \tau_- T_{\mu 0} \right) & = & \tau_-\nabla^{\mu} T_{\mu 0}-\frac{1}{2}\underline{L} \left( \tau_- \right) T_{L 0} \\ \nonumber
& = & \tau_- \nabla^{\mu} T_{\mu 0} -\frac{u}{2\tau_-} \left( \left| \alpha \right|^2+\left| \rho \right|^2+\left| \sigma \right|^2 \right) \hspace{2mm} \geq \hspace{2mm} \tau_- \nabla^{\mu} T_{\mu 0}. \end{eqnarray} Consequently, applying Corollary \ref{tensorderiv} and the divergence theorem in $V_{u_0}(t)$, for $u_0 \leq 0$, we obtain \begin{equation}\label{eq:1}
\int_{\overline{\Sigma}^{u_0}_t} \tau_- T_{00}dx + \frac{1}{\sqrt{2}} \int_{C_{u_0}(t)} \tau_-T_{L0}dC_{u_0}(t) \leq \int_{\overline{\Sigma}^{u_0}_0} \sqrt{1+r^2} T_{00}dx-\int_0^t \int_{\overline{\Sigma}^{u_0}_s } \tau_- G_{0 \nu} J^{\nu} dx ds.
\end{equation} On the other hand, as $\nabla^{\mu} S^{\nu}+\nabla^{\nu} S^{\mu}=2\eta^{\mu \nu}$ and ${T_{\mu}}^{\mu}=0$, we have \begin{eqnarray} \nonumber \nabla^{\mu} \left( T_{\mu \nu} S^{\nu} \right) & = & \nabla^{\mu} T_{\mu \nu}S^{\nu}+T_{\mu \nu} \nabla^{\mu} S^{\nu} \\ \nonumber & = & G_{\nu \lambda} J^{\lambda} S^{\nu} +\frac{1}{2} T_{\mu \nu} \left( \nabla^{\mu} S^{\nu}+\nabla^{\nu} S^{\mu} \right) \\ \nonumber & = & G_{\nu \lambda} J^{\lambda} S^{\nu}. \end{eqnarray} Applying again the divergence theorem in $V_{u_0}(t)$, for all $u_0 \leq 0$, we get \begin{equation}\label{eq:2}
\int_{\overline{\Sigma}^{u_0}_t} T_{0 \nu} S^{\nu} dx + \frac{1}{\sqrt{2}} \int_{C_{u_0}(t)} T_{L \nu} S^{\nu}dC_{u_0}(t) = \int_{\overline{\Sigma}^{u_0}_0} T_{0 \nu} S^{\nu} dx-\int_0^t \int_{\overline{\Sigma}^{u_0}_s } G_{\mu \nu} J^{\mu} S^{\nu} dx ds.
\end{equation} Using Lemma \ref{tensorcompo} and $2S=(t+r)L+(t-r) \underline{L}$, notice that \begin{flalign*}
& \hspace{1.3cm} 4\tau_-T_{00} = \tau_-\left( |\alpha|^2+|\underline{\alpha}|^2+2|\rho|^2+2|\sigma|^2 \right), \hspace{10mm} 4T_{0 \nu} S^{\nu} = (t+r)|\alpha|^2+(t-r)|\underline{\alpha}|^2+2t(|\rho|^2+|\sigma|^2), & \\
& \hspace{1.3cm} 2 \tau_- T_{L0}= \tau_- \left( |\alpha|^2+|\rho|^2+|\sigma|^2 \right), \hspace{23mm} 2 T_{L \nu} S^{\nu} = (t+r) |\alpha|^2+(t-r)|\rho|^2+(t-r)|\sigma|^2, & \end{flalign*} and then add twice \eqref{eq:1} to \eqref{eq:2}. The second estimate then follows and we now turn to the last one. Recall that $\nabla^{\mu} T_{\mu \nu} =G_{\nu \lambda} J^{\lambda}$ and $\nabla^{\mu} \left( T_{\mu \nu} S^{\nu} \right) = G_{\nu \lambda} J^{\lambda} S^{\nu}$. Hence, by the divergence theorem applied in $[0,t] \times \mathbb{R}^3 \setminus V_{0}(t)$, we obtain \begin{equation}\label{eq:1bis}
\int_{\Sigma^0_t} \left( T_{00} +T_{0 \nu} S^{\nu} \right)dx = \frac{1}{\sqrt{2}} \int_{C_{0}(t)} \left( T_{L0} +T_{L \nu} S^{\nu} \right) d C_{0}(t) - \int_0^t \int_{\Sigma^{0}_s } G_{0 \nu} J^{\nu} + S^{\nu} G_{\nu \mu } J^{\mu} dx ds.
\end{equation}
By Lemma \ref{tensorcompo}, we have $4T_{00} = \left( |\alpha|^2+|\underline{\alpha}|^2+2|\rho|^2+2|\sigma|^2 \right) $, so that \begin{equation}\label{eq:1bbis}
4T_{00} +4T_{0 \nu} S^{\nu} \geq \tau_+|\alpha|^2+\tau_-|\underline{\alpha}|^2+\tau_+|\rho|^2+\tau_+|\sigma|^2 \geq 0 \hspace{1cm} \text{on} \hspace{5mm} \Sigma^{0}_t. \end{equation} Consequently, the divergence theorem applied in $ V_{u}(t) \setminus V_0(t)$, for $0 \leq u \leq t$, gives \begin{equation}\label{eq:2bis} \frac{1}{\sqrt{2}} \int_{C_{u}(t)} \left( T_{L0} +T_{L \nu} S^{\nu} \right) dC_u(t) \leq \frac{1}{\sqrt{2}} \int_{C_{0}(t)} \left( T_{L0} +T_{L \nu} S^{\nu} \right) d C_0(t) - \int_{V_u(t) \setminus V_0(t)} \left( G_{0 \nu} J^{\nu} + S^{\nu} G_{\nu \mu } J^{\mu} \right).
\end{equation}
Note now that $ T_{L0} +T_{L \nu} S^{\nu} \geq \tau_+|\alpha|^2+\tau_-|\rho|^2+\tau_-|\sigma|^2$ if $u \geq 0$ since \begin{flalign*}
& \hspace{0.8cm} 2 T_{L0}= |\alpha|^2+|\rho|^2+|\sigma|^2 \hspace{7mm} \text{and} \hspace{7mm} 2 T_{L \nu} S^{\nu} = (t+r) |\alpha|^2+(t-r)|\rho|^2+(t-r)|\sigma|^2. & \end{flalign*} It then remains to take the $\sup$ over all $0 \leq u \leq t$ in \eqref{eq:2bis}, to combine it with \eqref{eq:1bis}, \eqref{eq:1bbis} and to remark that \begin{eqnarray}
\nonumber 2\int_{C_{0}(t)} T_{L0} +T_{L \nu} S^{\nu} d C_0(t) & \leq & \int_{C_{0}(t)} |\rho|^2+|\sigma|^2 d C_{0}(t)+\int_{C_0(t)} \tau_+ |\alpha|^2 d C_0(t) \\ \nonumber & \leq & \mathcal{E}^{S, u \leq 0}[\widetilde{G}](t), \end{eqnarray} since $G=\widetilde{G}$ on $C_0(t)$. \end{proof} \subsection{Pointwise decay estimates} \subsubsection{Decay estimates for velocity averages} As the set of our commutation vector fields is not $\widehat{\mathbb{P}}_0$, we need to modify the following standard Klainerman-Sobolev inequality, which was proved in \cite{FJS} (see Theorem $8$).
\begin{Pro}\label{KSstandard} Let $g$ be a sufficiently regular function defined on $[0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v$. Then,
$$\forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{1cm} \int_{v \in \mathbb{R}^3} |g(t,x,v)| dv \lesssim \frac{1}{\tau_+^2 \tau_-} \sum_{\begin{subarray}{l} \widehat{Z}^{\beta} \in \widehat{\mathbb{P}}_0^{|\beta|} \\ \hspace{1mm} |\beta| \leq 3 \end{subarray}}\|\widehat{Z}^{\beta} g \|_{L^1_{x,v}}(t).$$ \end{Pro}
We need to rewrite it using the modified vector fields. For the remainder of this section, $g$ will be a sufficiently regular function defined on $[0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v$. We also consider $F$, a regular $2$-form, so that we can consider the $\Phi$ coefficients introduced in Definition \ref{defphi} and we suppose that they satisfy the following pointwise estimates, with $M_1 \geq 7$ a fixed integer. For all $(t,x,v) \in [0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3$,
$$|Y \Phi|(t,x,v) \lesssim \log^{\frac{7}{2}}(1+\tau_+), \hspace{8mm} |\Phi|(t,x,v) \lesssim \log^2(1+\tau_+) \hspace{8mm} \text{and} \hspace{8mm} \sum_{ |\kappa| \leq 3} |Y^{\kappa} \Phi|(t,x,v) \lesssim \log^{M_1}(1+\tau_+).$$
\begin{Pro}\label{KS1} For all $(t,x) \in [0,T[ \times \mathbb{R}^3$,
$$\tau_+^2 \tau_- \int_{v \in \mathbb{R}^3} |g(t,x,v)| dv \lesssim \sum_{ |\xi|+|\beta| \leq 3 } \left\| P^X_{\xi}(\Phi)Y^{\beta} g \right\|_{L^1_{x,v}} \hspace{-0.8mm} (t)+ \sum_{ |\kappa| \leq \min(2+\kappa_T,3)} \sum_{z \in \mathbf{k}_1}\frac{\log^{6M_1}(3+t)}{1+t} \left\| z Y^{\kappa} g \right\|_{L^1_{x,v}} \hspace{-0.8mm} (t) .$$ \end{Pro} \begin{Rq}
This inequality is suitable for us since we will bound $\left\| P^X_{\xi}(\Phi)Y^{\beta} g \right\|_{L^1_{x,v}} $ without any growth in $t$. Moreover, observe that $Y^{\kappa}$ contains at least a translation if $|\kappa|=3$, which is compatible with our hierarchy on the weights $z \in \mathbf{k}_1$ (see Remark \ref{rqjustifnorm}). \end{Rq} \begin{proof}
Let $(t,x) \in [0,T[ \times \mathbb{R}^3$. Consider first the case $|x| \leq \frac{1+t}{2}$, so that, with $\tau := 1+t$,
$$\forall \hspace{0.5mm} |y| \leq \frac{1}{4}, \hspace{3mm} \tau \leq 10(1+|t-|x+\tau y||).$$ For a sufficiently regular function $h$, we then have, using Lemmas \ref{goodderiv} and then \ref{lift2}, \begin{eqnarray} \nonumber
\left| \partial_{y^i} \left( \int_v |h|(t,x+\tau y,v) dv \right) \right| & = & \left| \tau \partial_i \int_v |h|(t,x+\tau y,v) dv \right| \\ \nonumber
& \lesssim & \left| (1+|t-|x+\tau y||) \partial_i \int_v |h|(t,x+\tau y,v) dv \right| \\ \nonumber & \lesssim & \sum_{Z \in \mathbb{K}} \left| Z \int_v |h|(t,x+\tau y,v) dv \right| \\ \nonumber & \lesssim & \sum_{\begin{subarray}{l} |\xi|+|\beta| \leq 1 \\ \hspace{3mm} p \leq 1 \end{subarray}} \sum_{z \in \mathbf{k}_1} \int_v \hspace{-0.5mm} \left( |P^X_{\xi}(\Phi) Y^{\beta} h|+\frac{\log^7 (1+\tau_+)}{\tau_+} |z \partial^p_t h| \right) (t,x+\tau y,v) dv . \end{eqnarray}
Using a one dimensional Sobolev inequality, we obtain, for $\delta=\frac{1}{4 \sqrt{3}}$ (so that $|y| \leq \frac{1}{4}$ if $|y^i| \leq \delta$ for all $1 \leq i \leq 3$), \begin{eqnarray}
\nonumber \int_v |g|(t,x,v) dv & \lesssim & \sum_{n=0}^1 \int_{|y^1| \leq \delta} \left| \left(\partial_{y^1} \right)^n \int_v |g|(t,x+\tau(y^1,0,0),v) dv \right| dy^1 \\ \nonumber
& \lesssim & \sum_{\begin{subarray}{l} |\xi|+|\beta| \leq 1 \\ \hspace{3mm} p \leq 1 \\ \hspace{2.5mm} z \in \mathbf{k}_1 \end{subarray}} \int_{|y^1| \leq \delta} \int_v \left( |P^X_{\xi}(\Phi) Y^{\beta}g|+\frac{\log^7 ( 3+t)}{1+t} |z \partial^p_t g| \right) (t,x+\tau (y^1,0,0),v) dv dy^1 \hspace{-0.7mm}. \end{eqnarray}
Repeating the argument for $y^2$ and the functions $\int_v P^X_{\xi}(\Phi) Y^{\beta}g dv$ and $ \int_v z \partial^p_t g dv$, we get, as $|z| \leq 2t$ in the region considered and dropping the dependence in $(t,x+\tau(y^1,y^2,0),v)$ of the functions in the integral,
$$\int_v |g|(t,x,v) dv \lesssim \sum_{\begin{subarray}{l} |\xi|+|\beta| \leq 2 \\ \hspace{2.5mm} z \in \mathbf{k}_1 \end{subarray}} \sum_{\begin{subarray}{l} |\zeta|+|\kappa| \leq 2 \\ |\kappa| \leq 1+\kappa_T \end{subarray}} \int_{|y^1| \leq \delta} \int_{|y^2| \leq \delta} \int_v |P^X_{\xi}(\Phi) Y^{\beta} g|+\frac{\log^{14} (3+t)}{1+t} |z P^X_{\zeta}(\Phi) Y^{\kappa} g| dv dy^1 dy^2.$$ Repeating again the argument for the variable $y^3$, we finally obtain
$$\int_v |g|(t,x,v) dv \lesssim \sum_{\begin{subarray}{l} |\xi|+|\beta| \leq 3 \\ \hspace{2.5mm} z \in \mathbf{k}_1 \end{subarray}} \sum_{\begin{subarray}{l} |\zeta|+|\kappa| \leq 3 \\ |\kappa| \leq 2+\kappa_T \end{subarray}} \int_{|y| \leq \frac{1}{4}} \int_v |P^X_{\xi}(\Phi) Y^{\beta} g|+ \frac{\log^{21} (3+t)}{1+t} |z P_{\zeta}^X(\Phi) Y^{\kappa} g|dv(t,x+\tau y)dy.$$
It then remains to remark that $\left| P^X_{\zeta}(\Phi) \right| \lesssim \log^{3M_1}(3+t)$ on the domain of integration and to make the change of variables $z=\tau y$. Note now that one can prove similarly that, for a sufficiently regular function $h$, \begin{equation}\label{eq:sobsphere}
\int_v |h|(t,r,\theta,\phi)dv \lesssim \sum_{\begin{subarray}{l} |\xi|+|\beta| \leq 2 \\ \hspace{1mm} z \in \mathbf{k}_1 \end{subarray}} \sum_{ |\kappa| \leq \min(1+\kappa_T,2)} \int_{\mathbb{S}^2} \int_v |P_{\xi}^X(\Phi)Y^{\beta}h|+\frac{\log^{14+2M_1} (1+\tau_+)}{\tau_+}|zY^{\kappa} h | dv d\mathbb{S}^2 (t,r). \end{equation} Indeed, by a one dimensional Sobolev inequality, we have
$$\int_v |h|(t,r,\theta,\phi,v)dv \lesssim \sum_{n=0}^1 \int_{\omega_1} \left| \left(\partial_{\omega_1} \right)^n \int_v |h|(t,r,\theta+\omega_1,\phi,v)dv \right| d\omega_1.$$
Then, since $\partial_{\omega_1}$ ( and $\partial_{\omega_2}$) can be written as a combination with bounded coefficients of the rotational vector fields $\Omega_{ij}$, we can repeat the previous argument. Finally, let us suppose that $\frac{1+t}{2} \leq |x |$. We have, using again Lemmas \ref{goodderiv} and \ref{lift2},
\begin{eqnarray}
\nonumber |x|^2 \tau_- \int_v |g|(t,x,v)dv & = & -|x|^2\int_{|x|}^{+ \infty} \partial_r \left( \tau_- \int_v |g|(t,r,\theta,\phi,v)dv \right)dr \\ \nonumber
& \lesssim & \int_{|x|}^{+ \infty} \int_v | g|(t,r,\theta,\phi,v)dvr^2dr+\int_{|x|}^{+ \infty}\left| \tau_- \partial_r \int_v |g|(t,r,\theta,\phi,v)dv \right| r^2 dr \\ \nonumber
& \leq & \sum_{\begin{subarray}{l} |\xi|+|\beta| \leq 1 \\ \hspace{3mm} p \leq 1 \end{subarray}} \sum_{ w \in \mathbf{k}_1} \int_{0}^{+ \infty} \int_v \hspace{-0.5mm} \left( \hspace{-0.5mm} |P^X_{\xi}(\Phi) Y^{\beta} g|+\frac{\log^7 (3+t)}{1+t} |w \partial^p_t g| \right)(t,r,\theta,\phi,v) dv r^2 dr.
\end{eqnarray}
It then remains to apply \eqref{eq:sobsphere} to the functions $P^X_{\xi}(\Phi) Y^{\beta} g$ and $w \partial^p_t g$ and to remark that $|w| \leq 2\tau_+$. \end{proof} A similar, but more general, result holds. \begin{Cor}\label{KS2} Let $z \in \mathbf{k}_1$ and $j \in \mathbb{N}$. Then, for all $(t,x) \in [0,T[ \times \mathbb{R}^3$, \begin{eqnarray}
\nonumber \int_{v \in \mathbb{R}^3} |z|^j|g(t,x,v)| dv & \lesssim & \frac{1}{\tau_+^2 \tau_-} \sum_{w \in \mathbf{k}_1} \Bigg( \sum_{d=0}^{\min(3,j)} \sum_{|\xi|+|\beta| \leq 3-d} \log^{2d}(3+t) \left\| w^{j-d} P^X_{\xi}(\Phi) Y^{\beta} g \right\|_{L^1_{x,v}} \hspace{-0.8mm} (t) \\ \nonumber
& & \hspace{4.5cm} +\frac{\log^{6M_1}(3+t)}{1+t} \sum_{ |\kappa| \leq \min(2+\kappa_T,3) } \|w^{j+1} Y^{\kappa} g \|_{L^1_{x,v}} \hspace{-0.8mm} (t) \Bigg) . \end{eqnarray} \end{Cor} \begin{proof} One only has to follow the proof of Proposition \ref{KS1} and to use Remark \ref{lift3} instead of Lemma \ref{lift2}. \end{proof} A weaker version of this inequality will be used in Subsection \ref{subsecH}. \begin{Cor}\label{KS3} Let $z \in \mathbf{k}_1$ and $j \in \mathbb{N}$. Then, for all $(t,x) \in [0,T[ \times \mathbb{R}^3$, \begin{eqnarray}
\nonumber \int_{v \in \mathbb{R}^3} |z|^j|g(t,x,v)| dv & \lesssim & \frac{1}{\tau_+^2 \tau_-} \sum_{w \in \mathbf{k}_1} \Bigg( \sum_{d=0}^{\min(3,j)} \sum_{|\beta| \leq 3-d} \log^{2d+M_1}(3+t) \left\| w^{j-d} Y^{\beta} g \right\|_{L^1_{x,v}} \hspace{-0.8mm} (t) \\ \nonumber
& & \hspace{4.5cm} +\frac{\log^{6M_1}(3+t)}{1+t} \sum_{ |\kappa| \leq \min(2+\kappa_T,3) } \|w^{j+1} Y^{\kappa} g \|_{L^1_{x,v}} \hspace{-0.8mm} (t) \Bigg) . \end{eqnarray} \end{Cor} \begin{proof} Start by applying Corollary \ref{KS2}. It remains to bound the terms of the form
$$\left\| w^{j-d} P^X_{\xi}(\Phi) Y^{\beta} g \right\|_{L^1_v L^1(\Sigma_t)}, \hspace{1cm} \text{with} \hspace{1cm} d \leq \min(3,j), \hspace{5mm} |\xi|+|\beta| \leq 3-d \hspace{5mm} \text{and} \hspace{5mm} |\xi| \geq 1.$$
For this, we divide $\Sigma_t$ in two regions, the one where $r \leq 1+2t$ and its complement. As $|P^X_{\xi}(\Phi)| \lesssim \log^{M_1}(1+\tau_+)$ and $\tau_+ \lesssim 1+t$ if $r \leq 1+2t$, we have
$$\left\| w^{j-d} P^X_{\xi}(\Phi) Y^{\beta} g \right\|_{L^1_v L^1(|y| \leq 2t)} \lesssim \log^{M_1}(3+t) \left\| w^{j-d} Y^{\beta} g \right\|_{L^1_v L^1(\Sigma_t)}.$$
Now recall from Remark \ref{rqweights1} that $1+r \lesssim \sum_{z_0 \in \mathbf{k}_1} |z_0|$ and $|P^X_{\xi}(\Phi)|(1+r)^{-1} \lesssim \frac{\log^{M_1}(3+t)}{1+t}$ if $r \geq 1+ 2t$, so that
$$\left\| w^{j-d} P^X_{\xi}(\Phi) Y^{\beta} g \right\|_{L^1_v L^1(|y| \geq 2t)} \lesssim \frac{\log^{M_1}(3+t)}{1+t} \sum_{z_0 \in \mathbf{k}_1} \left\| z_0^{j+1} Y^{\beta} g \right\|_{L^1_v L^1(\Sigma_t)}.$$
The result follows from $|\beta| \leq 2-d \leq 2+\beta_T$. \end{proof}
We are now interested in adapting Theorem $1.1$ of \cite{dim4} to the modified vector fields.
\begin{Th}\label{decayopti}
Suppose that $\sum_{|\kappa| \leq 3} \|Y^{\kappa} \Phi\|_{L^{\infty}_{x,v}} (0) \lesssim 1$. Let $H : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ and $h_0 : \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ be two sufficiently regular functions and $h$ the unique classical solution of \begin{eqnarray} \nonumber T_F(h) & = & H \\ \nonumber h(0,.,.) & = & h_0. \end{eqnarray}
Consider also $z \in \mathbf{k}_1$ and $j \in \mathbb{N}$. Then, for all $(t,x) \in [0,T[ \times \mathbb{R}^3$ such that $t \geq |x|$, \begin{eqnarray}
\nonumber \tau_+^3\int_v |z^jh|(t,x,v)\frac{dv}{(v^0)^2} & \lesssim & \sum_{ |\beta| \leq 3 } \| (1+r)^{|\beta|+j} \partial^{\beta}_{t,x} h \|_{L^1_x L^1_v} (0) \\ \nonumber
& &+\sum_{\begin{subarray}{} |\xi|+|\beta| \leq 3 \\ \hspace{1.5mm} w \in \mathbf{k}_1 \end{subarray} } \hspace{1.5mm} \sum_{\begin{subarray}{} 0 \leq d \leq 3 \\ \delta \in \{0,1\} \end{subarray} }\frac{ \log^{2d}(3+t)}{\sqrt{1+t}^{\delta}} \int_0^t \int_{\Sigma_s} \int_v \left| T_F \left(w^{j-d+\delta} P^X_{\xi}(\Phi)Y^{\beta} h \right) \right| \frac{dv}{v^0} dx ds, \end{eqnarray}
where $|\xi|=0$ and $|\beta| \leq \min(2+\beta_T,3)$ if $\delta=1$. \end{Th} \begin{proof}
If $|x| \leq \frac{t}{2}$, the result follows from Corollary \ref{KS2} and the energy estimate of Proposition \ref{energyf}. If $\frac{t}{2} \leq |x| \leq t$, we refer to Section $5$ of \cite{dim4}, where Lemma $5.2$ can be rewritten in the same spirit as we rewrite Proposition \ref{KSstandard} with modified vector fields. \end{proof} To deal with the exterior, we use the following result. \begin{Pro}\label{decayopti2}
For all $(t,x) \in [0,T[ \times \mathbb{R}^3$ such that $|x| \geq t$, we have
$$\int_v |g|(t,x,v) \frac{dv}{(v^0)^2} \lesssim \frac{1}{\tau_+} \sum_{w \in \mathbf{k}_1} \int_v |w|| g|(t,x,v) dv.$$ \end{Pro} \begin{proof}
Let $|x| \geq t$. If $|x| \leq 1$, $\tau_+ \leq 3$ and the estimate holds. Otherwise, $\tau_+ \leq 3|x|$ so, as $\left( x^i-t \frac{v^i}{v^0} \right) \in \mathbf{k}_1$ and
$$\left| x-t\frac{v}{v^0} \right| \geq |x|-t\frac{|v|}{v^0} \geq |x| \frac{(v^0)^2-|v|^2}{v^0(v^0+|v|)} \geq \frac{|x|}{2(v^0)^2}, \hspace{3mm} \text{we have} \hspace{3mm} \int_v |g|(t,x,v) \frac{dv}{(v^0)^2} \lesssim \frac{1}{|x|} \sum_{w \in \mathbf{k}_1} \int_v |w||g|(t,x,v)dv.$$ \end{proof} \begin{Rq} Using $1 \lesssim v^0 v^{\underline{L}}$ and Lemma \ref{weights1}, we can obtain a similar inequality for the interior of the light cone, at the cost of a $\tau_-$-loss. Note however that because of the presence of the weights $w \in \mathbf{k}_1$, this estimate, combined with Corollary \ref{KS2}, is slightly weaker than Theorem \ref{decayopti}. During the proof, this difference will lead to a slower decay rate insufficient to close the energy estimates. \end{Rq}
\subsubsection{Decay estimates for the electromagnetic field}
We start by presenting weighted Sobolev inequalities for general tensor fields. Then we will use them in order to obtain improved decay estimates for the null components of a $2$-form\footnote{Note however that our improved estimates on the components $\alpha$, $\rho$ and $\sigma$ require the $2$-form $G$ to satisfy $\nabla^{\mu} {}^* \! G_{\mu \nu} =0$.}. In order to treat the interior of the light cone (or rather the domain in which $|x| \leq 1+\frac{1}{2}t$), we will use the following result.
\begin{Lem}\label{decayint} Let $U$ be a smooth tensor field defined on $[0,T[ \times \mathbb{R}^3$. Then,
$$\forall \hspace{0.5mm} t \in [0,T[, \hspace{8mm} \sup_{|x| \leq 1+\frac{t}{2}} |U(t,x)| \lesssim \frac{1}{(1+t)^2} \sum_{|\gamma| \leq 2} \| \sqrt{\tau_-} \mathcal{L}_{Z^{\gamma}}(U)(t,y) \|_{L^2 \left( |y| \leq 2+\frac{3}{4}t \right)}.$$ \end{Lem} \begin{proof}
As $|\mathcal{L}_{Z^{\gamma}}(U)| \lesssim \sum_{|\beta| \leq |\gamma|} \sum_{\mu, \nu} | Z^{\beta} (U_{\mu \nu})|$, we can restrict ourselves to the case of a scalar function. Let $t \in [0,T[$ and $|x| \leq 1+ \frac{1}{2}t$. Apply a standard $L^2$ Sobolev inequality to $V: y \mapsto U(t,x+\frac{1+t}{4}y)$ and then make a change of variables to get
$$|U(t,x)|=|V(0)| \lesssim \sum_{|\beta| \leq 2} \| \partial_x^{\beta} V \|_{L^2_y(|y| \leq 1)} \lesssim \left( \frac{1+t}{4} \right)^{-\frac{3}{2}} \sum_{|\beta| \leq 2} \left( \frac{1+t}{4} \right)^{|\beta|} \| \partial_x^{\beta} U(t,.) \|_{L^2_y(|y-x| \leq \frac{1+t}{4})}.$$
Observe now that $|y-x| \leq \frac{1+t}{4}$ implies $|y| \leq 2+\frac{3}{4}t$ and that $1+t \lesssim \tau_-$ on that domain. By Lemma \ref{goodderiv} and since $[Z, \partial] \in \mathbb{T} \cup \{0 \}$, it follows
$$( 1+t )^{|\beta|+\frac{1}{2}} \| \partial_x^{\beta} U(t,.) \|_{L^2_y(|y-x| \leq \frac{1+t}{4})} \hspace{1.5mm} \lesssim \hspace{1.5mm} \| \tau_-^{|\beta|+\frac{1}{2}} \partial_x^{\beta} U(t,.) \|_{L^2_y(|y| \leq 2+\frac{3}{4}t)} \hspace{1.5mm} \lesssim \hspace{1.5mm} \sum_{|\gamma| \leq |\beta|} \| \sqrt{\tau_-} Z^{\gamma} U(t,.) \|_{L^2_y(|y| \leq 2+\frac{3}{4}t)}.$$ \end{proof} For the remaining region, we have the three following inequalities, coming from Lemma $2.3$ (or rather from its proof for the second estimate) of \cite{CK}. We will use, for a smooth tensor field $V$, the pointwise norm
$$ |V|^2_{\mathbb{O},k} := \sum_{p \leq k} \sum_{\Omega^{\gamma} \in \mathbb{O}^{p}} | \mathcal{L}_{\Omega^{\gamma}}(V)|^2.$$ \begin{Lem}\label{Sob} Let $U$ be a sufficiently regular tensor field defined on $\mathbb{R}^3$. Then, for $t \in \mathbb{R}_+$, \begin{eqnarray}
\nonumber \forall \hspace{0.5mm} |x| \geq \frac{t}{2}+1, \hspace{10mm} |U(x)| & \lesssim & \frac{1}{|x|\tau_-^{\frac{1}{2}}} \left( \int_{ |y| \geq \frac{t}{2}+1} |U(y)|^2_{\mathbb{O},2}+\tau_-^2|\nabla_{\partial_r} U(y) |^2_{\mathbb{O},1} dy \right)^{\frac{1}{2}}, \\ \nonumber
\forall \hspace{0.5mm} |x| > t, \hspace{10mm} |U(x)| & \lesssim & \frac{1}{|x|\tau_-^{\frac{1}{2}}} \left( \int_{ |y| \geq t} |U(y)|^2_{\mathbb{O},2}+\tau_-^2|\nabla_{\partial_r} U(y) |^2_{\mathbb{O},1} dy \right)^{\frac{1}{2}}, \\ \nonumber
\forall \hspace{0.5mm} x \neq 0, \hspace{10mm} |U(x)| & \lesssim & \frac{1}{|x|^{\frac{3}{2}}} \left( \int_{|y| \geq |x|} |U(y)|^2_{\mathbb{O},2}+|y|^2|\nabla_{\partial_r} U(y) |^2_{\mathbb{O},1} dy \right)^{\frac{1}{2}}. \end{eqnarray} \end{Lem}
Recall that $G$ and $J$ satisfy \begin{eqnarray} \nonumber \nabla^{\mu} G_{\mu \nu} & =& J_{\nu} \\ \nonumber \nabla^{\mu} {}^* \! G_{ \mu \nu } & = & 0 \end{eqnarray} and that $(\alpha, \underline{\alpha}, \rho, \sigma)$ denotes the null decomposition of $G$. Before proving pointwise decay estimates on the components of $G$, we recall the following classical result and we refer, for instance, to Lemma $D.1$ of \cite{massless} for a proof. Concretely, it means that $\mathcal{L}_{\Omega}$, for $\Omega \in \mathbb{O}$, $\nabla_{\partial_r}$, $\nabla_{\underline{L}}$ and $\nabla_L$ commute with the null decomposition. \begin{Lem}\label{randrotcom} Let $\Omega \in \mathbb{O}$. Then, denoting by $\zeta$ any of the null component $\alpha$, $\underline{\alpha}$, $\rho$ or $\sigma$, $$ [\mathcal{L}_{\Omega}, \nabla_{\partial_r}] G=0, \hspace{1.2cm} \mathcal{L}_{\Omega}(\zeta(G))= \zeta ( \mathcal{L}_{\Omega}(G) ) \hspace{1.2cm} \text{and} \hspace{1.2cm} \nabla_{\partial_r}(\zeta(G))= \zeta ( \nabla_{\partial_r}(G) ) .$$ Similar results hold for $\mathcal{L}_{\Omega}$ and $\nabla_{\partial_t}$, $\nabla_L$ or $\nabla_{\underline{L}}$. For instance, $\nabla_{L}(\zeta(G))= \zeta ( \nabla_{L}(G) )$. \end{Lem} \begin{Pro}\label{decayMaxwell} We have, for all $(t,x) \in \mathbb{R}_+ \times \mathbb{R}^3$, \begin{eqnarray}
\nonumber |\rho|(t,x) , \hspace{2mm} |\sigma|(t,x) & \lesssim & \frac{ \sqrt{\mathcal{E}_2[G](t)+\mathcal{E}_2^{Ext}[G](t)}}{\tau_+^{\frac{3}{2}}\tau_-^{\frac{1}{2}}}, \\ \nonumber
|\alpha|(t,x) & \lesssim & \frac{\sqrt{ \mathcal{E}_2[G](t)+\mathcal{E}_2^{Ext}[G](t)}+\sum_{|\kappa| \leq 1} \|r^{\frac{3}{2}} \mathcal{L}_{Z^{\kappa}}(J)_A\|_{L^2(\Sigma_t)}}{\tau_+^2} \\ \nonumber
|\underline{\alpha}|(t,x) & \lesssim & \min\left( \frac{\sqrt{\mathcal{E}_2[G](t)+\mathcal{E}_2^{Ext}[G](t)}}{\tau_+ \tau_-}, \frac{\sqrt{\mathcal{E}^0_2[G](t)}}{\tau_+ \tau_-^{\frac{1}{2}}} \right). \end{eqnarray}
Moreover, if $|x| \geq \max (t,1)$, the term involving $\mathcal{E}_2[G](t)$ on the right hand side of each of these three estimates can be removed. \end{Pro}
\begin{Rq} As we will have a small loss on $\mathcal{E}_2[F]$ and not on $\mathcal{E}^0_2[F]$, the second estimate on $\underline{\alpha}$ is here for certain situations, where we will need a decay rate of degree at least $1$ in the $t+r$ direction. \end{Rq}
\begin{proof}
Let $(t,x) \in [0,T[ \times \mathbb{R}^3$. If $|x| \leq 1+\frac{1}{2}t$, $\tau_- \leq \tau_+ \leq 2+2t$ so the result immediately follows from Lemma \ref{decayint}. We then focus on the case $|x| \geq 1+\frac{t}{2}$. During this proof, $\Omega^{\beta}$ will always denote a combination of rotational vector fields, i.e. $\Omega^{\beta} \in \mathbb{O}^{|\beta|}$. Let $\zeta$ be either $\alpha$, $ \rho$ or $ \sigma$. As, by Lemma \ref{randrotcom}, $\nabla_{\partial_r}$ and $\mathcal{L}_{\Omega}$ commute with the null decomposition, we have, applying Lemma \ref{Sob},
$$r^3 \tau_- |\zeta|^2 \lesssim \int_{ |y| \geq \frac{t}{2}+1} |\sqrt{r} \zeta |^2_{\mathbb{O},2}+\tau_-^2|\nabla_{\partial_r} (\sqrt{r} \zeta) |_{\mathbb{O},1}^2 dy \lesssim \sum_{\begin{subarray}{} |\gamma| \leq 2 \\ |\beta| \leq 1 \end{subarray}} \int_{ |y| \geq \frac{t}{2}+1} r| \zeta ( \mathcal{L}_{Z^{\gamma}} (G)) |^2+r\tau_-^2| \zeta ( \mathcal{L}_{\Omega^{\beta}} (\nabla_{\partial_r} G)) |^2 dy.$$ As $\nabla_{\partial_r}$ commutes with $\mathcal{L}_{\Omega}$ and since $\nabla_{\partial_r}$ commutes with the null decomposition (see Lemma \ref{randrotcom}), we have, using $2\partial_r= L-\underline{L}$ and \eqref{eq:zeta}, \begin{equation}\label{zetaeq2}
| \zeta ( \mathcal{L}_{\Omega} (\nabla_{\partial_r} G)) |+| \zeta ( \nabla_{\partial_r} G) | \hspace{2mm} \lesssim \hspace{2mm} | \nabla_{\partial_r} \zeta ( \mathcal{L}_{\Omega} (G)) |+| \nabla_{\partial_r} \zeta ( G) | \hspace{2mm} \lesssim \hspace{2mm} \frac{1}{\tau_-}\sum_{ |\gamma| \leq 2} | \zeta ( \mathcal{L}_{Z^{\gamma}} (G)) |.
\end{equation} As $\tau_+ \lesssim r \leq \tau_+$ in the region considered, we finally obtain
$$\tau_+^3 \tau_- |\zeta|^2 \lesssim \sum_{|\gamma| \leq 2} \int_{ |y| \geq \frac{t}{2}+1} \tau_+| \zeta ( \mathcal{L}_{Z^{\gamma}} (G)) |^2 dy \lesssim \mathcal{E}_2[G](t)+\mathcal{E}^{Ext}_2[G](t).$$ Let us now improve the estimate on $\alpha$. As, by Lemma \ref{basiccom}, $\nabla^{\mu} \mathcal{L}_{\Omega} (G)_{\mu \nu} = \mathcal{L}_{\Omega}(J)_{\nu}$ and $\nabla^{\mu} {}^* \! \mathcal{L}_{\Omega} (G)_{\mu \nu} = 0$ for all $\Omega \in \mathbb{O}$, we have according to Lemma \ref{maxwellbis} that
$$\forall \hspace{0.5mm} |\beta| \leq 1, \hspace{15mm} \nabla_{\underline{L}} \alpha(\mathcal{L}_{\Omega^{\beta}} (G))_A=\frac{1}{r}\alpha(\mathcal{L}_{\Omega^{\beta}} (G))_A-\slashed{\nabla}_{e_A}\rho (\mathcal{L}_{\Omega^{\beta}} (G)) +\varepsilon_{AB} \slashed{\nabla}_{e_B} \sigma (\mathcal{L}_{\Omega^{\beta}} (G))+\mathcal{L}_{\Omega^{\beta}}(J)_A.$$ Thus, using \eqref{eq:zeta}, we obtain, for all $\Omega \in \mathbb{O}$, \begin{equation}\label{alphaeq2}
| \alpha ( \nabla_{\partial_r} G) |+| \alpha ( \mathcal{L}_{\Omega} (\nabla_{\partial_r} G)) | \lesssim \left|J_A \right| +\left| \mathcal{L}_{\Omega} (J)_A \right|+ \frac{1}{r}\sum_{ |\gamma| \leq 2} \left( | \alpha ( \mathcal{L}_{Z^{\gamma}} (G)) |+| \rho ( \mathcal{L}_{Z^{\gamma}} (G)) |+| \sigma ( \mathcal{L}_{Z^{\gamma}} (G)) | \right).
\end{equation} Hence, utilizing this time the third inequality of Lemma \ref{Sob} and \eqref{alphaeq2} instead of \eqref{zetaeq2}, we get
$$\tau_+^4 |\alpha|^2 \lesssim r^4 |\alpha|^2 \lesssim \int_{ |y| \geq |x|} |\sqrt{r} \alpha|^2_{\mathbb{O},2}+r^2|\nabla_{\partial_r} ( \sqrt{r} \alpha) |_{\mathbb{O},1}^2 dy \lesssim \mathcal{E}_2[G](t)+\mathcal{E}_2^{Ext}[G](t)+\sum_{|\kappa| \leq 1} \|r^{\frac{3}{2}} \mathcal{L}_{Z^{\kappa}}(J)_A\|^2_{L^2(\Sigma_t)}.$$ Using the same arguments as previously, one has \begin{eqnarray}
\nonumber \int_{|y| \geq \frac{t}{2}+1} \left| \underline{\alpha} \right|^2_{\mathbb{O},2} +\tau_-^2 \left| \nabla_{\partial_r} \underline{\alpha} \right|_{\mathbb{O},1}^2 dy & \lesssim & \mathcal{E}^0_2[G](t), \\ \nonumber
\int_{ |y| \geq \frac{t}{2}+1} \left| \sqrt{\tau_-} \underline{\alpha} \right|^2_{\mathbb{O},2} +\tau_-^2 \left| \nabla_{\partial_r} \left( \sqrt{\tau_-} \underline{\alpha} \right) \right|_{\mathbb{O},1}^2 dy & \lesssim & \mathcal{E}_2[G](t)+\mathcal{E}_2^{Ext}[G](t)
\end{eqnarray}
and a last application of Lemma \ref{Sob} gives us the result. The estimates for the region $|x| \geq \max (t,1)$ can be obtained similarly, using the second inequality of Lemma \ref{Sob} instead of the first one. \end{proof} Losing two derivatives more, one can improve the decay rate of $\rho$ and $\sigma$ near the light cone. \begin{Pro}\label{Probetteresti} Let $M \in \mathbb{N}$, $C>0$ and assume that
\begin{equation}\label{eq:imprdecay} \forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{1cm} \sum_{|\gamma| \leq 1} |\mathcal{L}_{Z^{\gamma}}(G)|(t,x)+|J_{\underline{L}}|(t,x) \hspace{2mm} \leq \hspace{2mm} C\frac{\log^M(3+t)}{\tau_+ \tau_-}. \end{equation} Then, we have
\begin{equation}\label{eq:imprdecay2} \forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{1cm} |\rho|(t,x)+|\sigma|(t,x) \hspace{2mm} \lesssim \hspace{2mm} C\frac{\log^{M+1}(3+t)}{\tau_+^2}. \end{equation} \end{Pro} \begin{proof} Let $(t,x)=(t,r \omega) \in [0,T[ \times \mathbb{R}^3$. If $r \leq \frac{t+1}{2}$ or $t \leq \frac{r+1}{2}$ the inequalities follow from \eqref{eq:imprdecay} since $\tau_+ \lesssim \tau_-$ in these two cases. We then suppose that $\frac{t+1}{2} \leq r \leq 2t-1$, so that $\tau_+ \leq 10\min(r,t)$. Hence, we obtain from equations \eqref{eq:nullmax1}-\eqref{eq:nullmax2} of Lemma \ref{maxwellbis} and \eqref{eq:zeta} that \begin{equation}\label{eq:fortheproof3}
|\nabla_{\underline{L}} \hspace{0.5mm} \rho|(t,x)+|\nabla_{\underline{L}} \hspace{0.5mm} \sigma|(t,x) \hspace{2mm} \lesssim \hspace{2mm} |J_{\underline{L}}|(t,x)+ \frac{1}{\tau_+} \sum_{|\gamma| \leq 1} |\mathcal{L}_{Z^{\gamma}}(G)|(t,x).
\end{equation}
Let $\zeta$ be either $\rho$ or $\sigma$ and $$\varphi ( \underline{u}, u) := \zeta \left( \frac{\underline{u}+u}{2}, \frac{\underline{u}-u}{2} \omega \right), \quad \text{so that, by \eqref{eq:fortheproof3} and \eqref{eq:imprdecay}}, \quad |\nabla_{\underline{L}} \varphi |(\underline{u},u) \lesssim C\frac{\log^M \left(3+\frac{\underline{u}+u}{2} \right)}{(1+\underline{u})^2(1+|u|)}.$$ \begin{itemize} \item If $r \geq t$, we then have \begin{eqnarray}
\nonumber |\zeta|(t,x) & = & |\varphi|(t+r,t-r) \hspace{2mm} \leq \hspace{2mm} \int_{u=-t-r}^{t-r} |\nabla_{\underline{L}} \varphi |(t+r,u) du + |\varphi|(t+r,-t-r) \\ \nonumber
& \lesssim & \int_{u=-t-r}^{t-r} |\nabla_{\underline{L}} \varphi |(t+r,u) du + |\zeta|(0,(t+r)\omega) \\ \nonumber
& \lesssim & C \frac{ \log^M \left(3+t \right)}{(1+t+r)^2} \int_{u=-t-r}^{t-r} \frac{du}{1+|u|} + \frac{C}{(1+t+r)^2} \hspace{2mm} \lesssim \hspace{2mm} C \frac{\log^M \left(3+t \right)}{(1+t+r)^2} \log(1+t+r). \end{eqnarray} It then remains to use that $t+r \lesssim 1+t$ in the region studied. \item If $r \leq t$, we obtain using the previous estimate, \begin{eqnarray}
\nonumber |\zeta|(t,x) & = & |\varphi|(t+r,t-r) \hspace{2mm} \leq \hspace{2mm} \int_{u=0}^{t-r} |\nabla_{\underline{L}} \varphi |(t+r,u) du + |\varphi|(t+r,0) \\ \nonumber
& \lesssim & \int_{u=0}^{t-r} |\nabla_{\underline{L}} \varphi |(t+r,u) du + |\zeta|\left( \frac{t+r}{2}, \frac{t+r}{2} \right) \\ \nonumber
& \lesssim & C \frac{ \log^M \left(3+t \right)}{(1+t+r)^2} \int_{u=0}^{t-r} \frac{du}{1+|u|} + C \frac{\log^{M+1} \left(3+\frac{t+r}{2} \right)}{(1+t+r)^2} \hspace{2mm} \lesssim \hspace{2mm} C \frac{\log^{M+1} \left(3+t \right)}{(1+t+r)^2} . \end{eqnarray} \end{itemize} This concludes the proof. \end{proof}
\begin{Rq}
Assuming enough decay on $|F|(t=0)$ and on the spherical components of the source term $J_A$, one could prove similarly that $|\alpha| \lesssim \log^{M+2}(3+t) \frac{\tau_-}{\tau_+^3}$. \end{Rq}
\section{The pure charge part of the electromagnetic field}\label{secpurecharge}
As we will consider an electromagnetic field with a non-zero total charge, $\int_{\mathbb{R}^3} r|\rho(F)| dx$ will be infinite and we will not be able to apply the results of the previous section to $F$ and its derivatives. As mentioned earlier, we will split $F$ in $\widetilde{F}+\overline{F}$, where $\widetilde{F}$ and $\overline{F}$ are introduced in Definition \ref{defpure1}. We will then apply the results of the previous section to the chargeless field $\widetilde{F}$, which will allow us to derive pointwise estimates on $F$ since the field $\overline{F}$ is completely determined. More precisely, we will use the following properties of the pure charge part $\overline{F}$ of $F$. \begin{Pro}\label{propcharge} Let $F$ be a $2$-form with a constant total charge $Q_F$ and $\overline{F}$ its pure charge part $$\overline{F}(t,x) := \chi(t-r) \frac{Q_F}{4 \pi r^2} \frac{x_i}{r} dt \wedge dx^i.$$
Then, \begin{enumerate} \item $\overline{F}$ is supported in $\cup_{t \geq 0} V_{-1}(t)$ and $\widetilde{F}$ is chargeless. \item $\rho(\overline{F})(t,x)=-\frac{Q_F}{4 \pi r^2} \chi(t-r)$, \hspace{2mm} $\alpha(\overline{F})=0$, \hspace{2mm} $\underline{\alpha}(\overline{F})=0$ \hspace{2mm} and \hspace{2mm} $\sigma(\overline{F})=0$.
\item $\forall \hspace{0.5mm} Z^{\gamma} \in \mathbb{K}^{|\gamma|}$, \hspace{1mm} $\exists \hspace{0.5mm} C_{\gamma} >0$, \hspace{5mm} $|\mathcal{L}_{Z^{\gamma}} (\overline{F}) | \leq C_{\gamma} |Q_F| \tau_+^{-2}$. \item $\overline{F}$ satisfies the Maxwell equations $\nabla^{\mu} \overline{F}_{\mu \nu} = \overline{J}_{\nu}$ and $\nabla^{\mu} {}^* \! \overline{F}_{\mu \nu} =0$, with $\overline{J}$ such that $$\overline{J}_0(t,x)= \frac{Q_F}{4 \pi r^2} \chi'(t-r) \hspace{5mm} \text{and} \hspace{5mm} \overline{J}_i(t,x) =-\frac{Q_F}{4 \pi r^2} \frac{x_i}{r} \chi'(t-r).$$
$\overline{J}$ is then supported in $\{ (s,y) \in \mathbb{R}_+ \times \mathbb{R}^3 \hspace{1mm} / \hspace{1mm} -2 \leq t-|y| \leq -1 \}$ and its derivatives satisfy
$$ \forall \hspace{0.5mm} Z^{\gamma} \in \mathbb{K}^{|\gamma|}, \hspace{1mm} \exists \hspace{0.5mm} \widetilde{C}_{\gamma} >0, \hspace{10mm} |\mathcal{L}_{Z^{\gamma}} (\overline{J})^L |+\tau_+|\mathcal{L}_{Z^{\gamma}} (\overline{J})^A |+\tau_+^2|\mathcal{L}_{Z^{\gamma}} (\overline{J})^{\underline{L}} | \leq \frac{\widetilde{C}_{\gamma} |Q_F|}{ \tau_+^2}.$$ \end{enumerate} \end{Pro} \begin{proof} The first point follows from the definitions of $\overline{F}$, $\chi$ and $$ Q_{\widetilde{F}}(t) \hspace{1mm} = \hspace{1mm} Q_F-Q_{\overline{F}}(t) \hspace{1mm} = \hspace{1mm} Q_F-\lim_{r \rightarrow + \infty} \left( \int_{\mathbb{S}_{t,r}} \frac{x^i}{r} \overline{F}_{0i} d \mathbb{S}_{t,r} \right) \hspace{1mm} = \hspace{1mm} Q_F- \frac{Q_F}{4 \pi r^2} \int_{\mathbb{S}_{t,r}} d \mathbb{S}_{t,r} \hspace{1mm} = \hspace{1mm} 0.$$ The second point is straightforward and depicts that $\overline{F}$ has a vanishing magnetic part and a radial electric part. The third point can be obtained using that, \begin{itemize} \item for a $2$-form $G$ and a vector field $\Gamma$, $\mathcal{L}_{\Gamma}(G)_{\mu \nu} = \Gamma(G_{\mu \nu})+\partial_{\mu} (\Gamma^{\lambda} ) G_{\lambda \nu}+\partial_{\nu} ( \Gamma^{\lambda} ) G_{\mu \lambda}$. \item For all $Z \in \mathbb{K}$, $Z$ is either a translation or a homogeneous vector field. \item For a function $\chi_0 : u \mapsto \chi_0(u)$, we have $\Omega_{ij}(\chi_0(u))=0$, $$ \partial_{t} (\chi_0(u))= \chi_0'(u), \hspace{0.6cm} \partial_{i} (\chi_0(u))= -\frac{x^i}{r}\chi_0'(u), \hspace{0.6cm} S(\chi_0(u))= u \chi_0'(u), \hspace{0.6cm} \Omega_{0i} (\chi(u)) = -\frac{x^i}{r}u \chi_0'(u).$$
\item $1+t \leq \tau_+ \lesssim r$ on the support of $\overline{F}$ and $ |u| \leq \tau_- \leq \sqrt{5}$ on the support of $\chi'$. \end{itemize} Consequently, one has
$$\forall \hspace{0.5mm} Z^{\xi} \in \mathbb{K}^{|\xi|}, \hspace{5mm} Z^{\xi} \left( \frac{x^i}{r^3} \chi(t-r) \right) \leq C_{\xi,\chi} \tau_+^{-2} \hspace{1cm} \text{and} \hspace{1cm} \left| \mathcal{L}_{Z^{\gamma}}(\overline{F}) \right| \lesssim \sum_{|\kappa| \leq |\gamma| } \sum_{\mu=0}^3 \sum_{\nu = 0}^3 \left| Z^{\kappa}(\overline{F}_{\mu \nu}) \right| \lesssim \frac{C_{\gamma}}{\tau_+^2}.$$ The equations $\nabla^{\mu} {}^* \! \overline{F}_{\mu \nu}=0$, equivalent to $\nabla_{[ \lambda} \overline{F}_{\mu \nu]}=0$ by Proposition \ref{maxwellbis}, follow from $\overline{F}_{ij}=0$ and the fact that the electric part of $\overline{F}$ is radial, so that $\nabla_i \overline{F}_{ 0j}-\nabla_j \overline{F}_{0i} =0$. The others follow from straightforward computations, \begin{eqnarray} \nonumber \nabla^{i} \overline{F}_{i0} \hspace{-1mm} & = & \hspace{-1mm} -\frac{Q_F}{4 \pi} \partial_i \hspace{-0.2mm} \left( \frac{x^i}{r^3} \chi(t-r) \hspace{-0.2mm} \right) \hspace{0.8mm} = \hspace{0.8mm} -\frac{Q_F}{4 \pi} \left( \hspace{-0.2mm} \left( \frac{3}{r^3} -3\frac{x_i x^i}{r^5} \right) \hspace{-0.2mm} \chi(t-r)- \frac{x^i}{r^3} \times \frac{x_i}{r} \chi'(t-r) \hspace{-0.2mm} \right) \hspace{0.8mm} = \hspace{0.8mm} \frac{Q_F}{4 \pi r^2} \chi'(t-r), \\ \nonumber \nabla^{\mu} \overline{F}_{\mu i} \hspace{-1mm} & = & \hspace{-1mm} -\partial_t \overline{F}_{0i} \hspace{1mm} = \hspace{1mm} -\frac{Q_F}{4 \pi} \frac{x^i}{r^3} \chi'(t-r). \end{eqnarray} For the estimates on the derivatives of $\overline{J}$, we refer to \cite{LS} (equations $(3.52a)-(3.52c)$). \end{proof}
\section{Bootstrap assumptions and strategy of the proof}\label{sec6}
Let, for the remainder of this article, $N \in \mathbb{N}$ such that $N \geq 11$ and $M \in \mathbb{N}$ which will be fixed during the proof. Let also $0 < \eta < \frac{1}{16}$ and $(f_0,F_0)$ be an initial data set satisfying the assumptions of Theorem \ref{theorem}. By a standard local well-posedness argument, there exists a unique maximal solution $(f,F)$ of the Vlasov-Maxwell system defined on $[0,T^*[$, with $T^* \in \mathbb{R}_+^* \cup \{+ \infty \}$. Let us now introduce the energy norms used for the analysis of the particle density. \begin{Def}\label{normVlasov} Let $Q \leq N$, $q \in \mathbb{N}$ and $a = M+1$. For $g$ a sufficiently regular function, we define the following energy norms, \begin{eqnarray}
\nonumber \mathbb{E}[g](t) & := & \| g \|_{L^1_{x,v} }(t) +\int_{C_u(t)} \int_v \frac{v^{\underline{L}}}{v^0} \left| g \right| dv dC_u(t), \\ \nonumber
\mathbb{E}^{q}_Q[g](t) & := & \sum_{\begin{subarray}{l} 1 \leq i \leq 2 \\ \hspace{1mm} z \in \mathbf{k}_1 \end{subarray}} \sum_{ \begin{subarray}{} |\xi^i|+|\beta| \leq Q \\ \hspace{1mm} |\xi^i| \leq Q-1 \end{subarray}} \sum_{j=0}^{2N-1+q- \xi^1_P-\xi^2_P-\beta_P} \log^{- (j+ |\xi^1|+|\xi^2|+|\beta|)a}(3+t) \mathbb{E} \left[ z^j P_{\xi^1}(\Phi)P_{\xi^2}(\Phi)Y^{\beta} f \right] \hspace{-1mm} (t), \\ \nonumber
\overline{\mathbb{E}}_N[g](t) & := & \sum_{\begin{subarray}{l} 1 \leq i \leq 2 \\ \hspace{1mm} z \in \mathbf{k}_1 \end{subarray}} \sum_{ \begin{subarray}{} |\xi^i|+|\beta| \leq Q \\ \hspace{1mm} |\xi^i| \leq Q-1 \end{subarray}} \sum_{j=0}^{2N-1- \xi^1_P-\xi^2_P-\beta_P} \hspace{-1mm} \log^{-aj}(3+t) \mathbb{E} \left[ z^j P_{\xi^1}(\Phi)P_{\xi^2}(\Phi)Y^{\beta} f \right] \hspace{-1mm} (t), \\ \nonumber
\mathbb{E}^X_{N-1}[f](t) & := & \sum_{\begin{subarray}{l} 1 \leq i \leq 2 \\ \hspace{1mm} z \in \mathbf{k}_1 \end{subarray}} \sum_{ |\zeta^i|+|\beta| \leq N-1} \sum_{j=0}^{2N-2-\zeta^1_P-\zeta^2_P-\beta_P} \log^{-2j}(3+t) \mathbb{E} \left[ z^j P^X_{\zeta^1}(\Phi)P^X_{\zeta^2}(\Phi)Y^{\beta} f \right]\hspace{-1mm} (t), \\ \nonumber
\mathbb{E}^X_{N}[f](t) & := & \sum_{ z \in \mathbf{k}_1} \sum_{ \begin{subarray}{} |\zeta|+|\beta| \leq N \\ \hspace{1mm} |\zeta| \leq N-1 \end{subarray}} \sum_{j=0}^{2N-2-\zeta_P-\beta_P} \log^{-2j}(3+t) \mathbb{E} \left[ z^j P^X_{\zeta}(\Phi)Y^{\beta} f \right]\hspace{-1mm} (t) . \end{eqnarray} To understand the presence of the logarithmical weights, see Remark \ref{hierarchyjustification}. \end{Def} In order to control the derivatives of the $\Phi$ coefficients and $\overline{\mathbb{E}}_N[f]$ at $t=0$, we prove the following result. \begin{Pro}\label{Phi0}
Let $|\beta| \leq N-1$ be a multi-index and $Y^{\beta} \in \mathbb{Y}^{|\beta|}$. Then, at $t=0$, \begin{eqnarray}
\nonumber \max \left( |Y^{\beta} \Phi |, | \widehat{Z}^{\beta} \Phi | \right) & \lesssim & \frac{1+r^2}{v^0} \sum_{|\gamma| \leq |\beta| -1} \left| \mathcal{L}_{Z^{\gamma}}(F) \right| \\ \nonumber & \lesssim & \frac{\sqrt{\epsilon}}{v^0 }. \end{eqnarray} \end{Pro} \begin{proof} Note that the second inequality ensues from \begin{equation}\label{decayF00}
\sum_{|\gamma| \leq N-2} \left\| \mathcal{L}_{Z^{\gamma}}(F) \right\|_{L^{\infty}(\Sigma_0)} \lesssim \frac{\sqrt{\epsilon}}{1+r^2}, \end{equation}
which comes from Proposition \ref{decayMaxwell}. Let us now prove the first inequality. Unless the opposite is mentioned explicitly (as in \eqref{equa7}), all functions considered here will be evaluated at $t=0$. As $\Phi(0,.,.)=0$, the result holds for $|\beta|=0$. Let $1 \leq |\beta| \leq N-1$ and suppose that the result holds for all $|\sigma| < |\beta|$. Note that, for instance, $$ Y_2 Y_1 \Phi = \widehat{Z}_2 \widehat{Z}_1 \Phi +\Phi X \widehat{Z}_1 \Phi+Y_2(\Phi) X \Phi+\Phi \widehat{Z}_2 X \Phi + \Phi \Phi X X \Phi.$$ More generally, we have, \begin{equation}\label{equa5}
\left| Y^{\beta} \Phi \right| \lesssim \sum_{\begin{subarray}{} p \leq |k|+|\sigma| \leq |\beta| \\ \hspace{2.5mm} |k| < |\beta| \end{subarray} } \left| P_{k,p}(\Phi) \widehat{Z}^{\sigma} \Phi \right|. \end{equation}
Consequently, using the induction hypothesis, we only have to prove the result for $\widehat{Z}^{\beta} \Phi$. Indeed, as $|k| < |\beta|$, by \eqref{decayF00}, \begin{equation}\label{equa3}
|P_{k,p}(\Phi) \widehat{Z}^{\sigma}(\Phi) | \lesssim |\widehat{Z}^{\sigma}(\Phi)| \left|\frac{1+r^{2}}{v^0}\right|^p \sum_{|\gamma| \leq N-2 } \left| \mathcal{L}_{Z^{\gamma}}(F) \right|^p \lesssim |\widehat{Z}^{\sigma}(\Phi)|. \end{equation}
Combining \eqref{equa5} and \eqref{equa3}, we would then obtain the inequality on $|Y^{\beta} \Phi|$ if we had it on $\widehat{Z}^{\sigma} \Phi$ for all $|\sigma| \leq |\beta|$. Let us then prove that the result holds for $\widehat{Z}^{\beta} \Phi$ and suppose, for simplicity, that $\Phi=\Phi^k_{\widehat{Z}}$, with $\widehat{Z} \neq S$. Remark that
$$|\widehat{Z}^{\beta} \Phi | \lesssim \sum_{|\alpha_2|+|\alpha_1|+q \leq |\beta|}(1+|x|)^{|\alpha_1|+q}(v^0)^{|\alpha_2|} |\partial_{v}^{\alpha_2} \partial_x^{\alpha_1} \partial_t^q \Phi|$$ and let us prove by induction on $q$ that \begin{equation}\label{equa6}
\forall \hspace{0.5mm} |\alpha_2|+|\alpha_1|+q \leq |\beta|, \hspace{8mm} (1+|x|)^{|\alpha_1|+q}(v^0)^{|\alpha_2|} |\partial_{v}^{\alpha_2} \partial_x^{\alpha_1} \partial_t^q \Phi| \lesssim \frac{1+r^2}{v^0} \sum_{|\gamma| \leq |\beta| -1} \left| \mathcal{L}_{Z^{\gamma}}(F) \right|. \end{equation} Recall that for $t \in [0,T^*[$, \begin{equation}\label{equa7} T_F(\Phi)=v^{\mu} \partial_{\mu} \Phi + F(v,\nabla_v \Phi)=-t\frac{v^{\mu}}{v^0} \mathcal{L}_Z(F)_{\mu k}. \end{equation}
As $\Phi(0,.,.)=0$ and $v^0\partial_t \Phi=-v^i \partial_i \Phi-F(v,\nabla_v \Phi)$, implying $\partial_t \Phi(0,.,.)=0$, \eqref{equa6} holds for $q \leq 1$. Let $2 \leq q \leq |\beta|$ and suppose that \eqref{equa6} is satisfied for all $q_0 < q$. Let $|\alpha_2|+|\alpha_1| \leq |\beta|-q$. Using the commutation formula given by Lemma \ref{basiccomuf}, we have (at $t=0$),
$$v^0\partial_x^{\alpha_1} \partial_t^q \Phi=-v^i \partial_i\partial_x^{\alpha_1} \partial_t^{q-1} \Phi-\frac{v^{\mu}}{v^0}\mathcal{L}_{\partial_x^{\alpha_1} \partial_t^{q-2} Z}(F)_{\mu k}+\sum_{|\gamma_1|+q_1+|\gamma_2| = |\alpha_1|+q-1} C^1_{\gamma_1, \gamma_2} \mathcal{L}_{\partial^{\gamma_2}}(F)(v,\nabla_v \partial_{x}^{\gamma_1} \partial_t^{q_1} \Phi).$$ Dividing the previous equality by $v^0$, taking the $\partial_v^{\alpha_2}$ derivatives of each side and using Lemma \ref{goodderiv}, we obtain \begin{eqnarray}
\nonumber |\partial_v^{\alpha_2} \partial_x^{\alpha_1} \partial_t^q \Phi| & \lesssim & \sum_{|\alpha_3| \leq |\alpha_2|} (v^0)^{-|\alpha_2|+|\alpha_3|}|\partial^{\alpha_3}_v\partial_x\partial_x^{\alpha_1} \partial_t^{q-1} \Phi|+\sum_{|\gamma| \leq |\alpha_1|+q-2} \frac{1}{(v^0)^{1+|\alpha_2|}(1+r)^{|\alpha_1|+q-2}}\left|\mathcal{L}_{Z^{\gamma} Z}(F)\right|\\ \nonumber & & +\sum_{\begin{subarray}{} |\gamma_1|+q_1+n = |\alpha_1|+q-1 \\ \hspace{5mm} 1 \leq |\alpha_4| \leq |\alpha_2|+1 \end{subarray}} \sum_{|\gamma_2| \leq n} \frac{1}{(v^0)^{|\alpha_2|-|\alpha_4|+1}(1+r)^n}\left| \mathcal{L}_{Z^{\gamma_2}}(F) \right| |\partial_v^{\alpha_4}\partial_{x}^{\gamma_1} \partial_t^{q_1} \Phi|. \end{eqnarray}
It then remains to multiply both sides of the inequality by $(v^0)^{|\alpha_2|}(1+r)^{|\alpha_1|+q}$ and \begin{itemize}
\item To bound $(v^0)^{|\alpha_2|}(1+r)^{|\alpha_1|+q} (v^0)^{-|\alpha_2|+|\alpha_3|}|\partial^{\alpha_3}_v\partial_x\partial_x^{\alpha_1} \partial_t^{q-1} \Phi|$ with the induction hypothesis.
\item To remark that $(v^0)^{|\alpha_2|}(1+r)^{|\alpha_1|+q} \frac{1}{(v^0)^{1+|\alpha_2|}(1+r)^{|\alpha_1|+q-2}}\left|\mathcal{L}_{Z^{\gamma} Z}(F)\right|$ has the desired form.
\item To note that, using $|\gamma_1|+q_1+1 = |\alpha_1|+q-n$ and the induction hypothesis, \begin{eqnarray}
\nonumber \frac{(v^0)^{|\alpha_2|}(1+r)^{|\alpha_1|+q}}{(v^0)^{|\alpha_2|-|\alpha_4|+1}(1+r)^n} |\partial_v^{\alpha_4}\partial_{x}^{\gamma_1} \partial_t^{q_1} \Phi|\left| \mathcal{L}_{Z^{\gamma_2}}(F) \right| \hspace{-2mm} & = & \hspace{-2mm} \frac{1+r}{v^0}(v^0)^{|\alpha_4|}(1+r)^{|\gamma_1|+q_1}|\partial_v^{\alpha_4}\partial_{x}^{\gamma_1} \partial_t^{q_1} \Phi|\left| \mathcal{L}_{Z^{\gamma_2}}(F) \right| \\ \nonumber
& \lesssim & \hspace{-2mm} \frac{1+r}{v^0} \left| \mathcal{L}_{Z^{\gamma_2}}(F) \right| \hspace{-1mm} \sum_{|\zeta| \leq |\alpha_4|+|\gamma_1|+q_1-1} \hspace{-1mm} \frac{(1+r)^2}{v^0} \left| \mathcal{L}_{Z^{\zeta}}(F) \right| \\ \nonumber
& \lesssim & \hspace{-2mm} \sum_{|\zeta| \leq |\alpha_2|+|\alpha_1|+q-1} \frac{(1+r)^2}{v^0} \left| \mathcal{L}_{Z^{\zeta}}(F) \right| , \end{eqnarray}
since $\left| \mathcal{L}_{Z^{\gamma_2}}(F) \right| \lesssim (1+r)^{-2}$, as $|\gamma_2| \leq |\alpha_1|+q-1 \leq |\beta|-1 \leq N-2$. This concludes the proof of the Proposition. \end{itemize} \end{proof} \begin{Cor}\label{coroinit} There exists $\widetilde{C} >0$ a constant depending only on $N$ such that $ \mathbb{E}^{4}_N[f](0) \leq \widetilde{C} \epsilon = \widetilde{\epsilon}$. Without loss of generality and in order to lighten the notations, we suppose that $\mathbb{E}^{4}_N[f](0) \leq \epsilon$. \end{Cor} \begin{proof}
All the functions considered here are evaluated at $t=0$. Consider multi-indices $\xi^1$, $\xi^2$ and $\beta$ such that, for $i \in \{1 , 2 \}$, $\max(|\xi^i|+1,|\xi^i|+|\beta|) \leq N$ and $j \leq 2N+3-\xi^1_P-\xi^2_P-\beta_P$. Then,
$$\left|z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) Y^{\beta} f \right| \leq \left|z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) \widehat{Z}^{\beta} f \right|+ \sum_{\begin{subarray}{} \hspace{1mm} |k|+|\kappa| \leq |\beta| \\ \hspace{1mm} |k| \leq |\beta|-1 \\ p+k_P+\kappa_P < \beta_P \end{subarray} } \left|z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) P_{k,p}(\Phi) \widehat{Z}^{\kappa} f \right|.$$ Using the previous proposition and the assumptions on $f_0$, one gets, with $C_1 >0$ a constant,
$$ \mathbb{E}^{4}_N[f](0) \leq (1+C_1 \sqrt{\epsilon} )\sum_{\begin{subarray}{l} \hspace{0.5mm} \widehat{Z}^{\beta} \in \widehat{\mathbb{P}}_0^{|\beta|} \\ |\beta| \leq N \end{subarray}} \| z^{2N+3-\beta_P} \widehat{Z}^{\beta} f \|_{L^1_{x,v}}(0) .$$ By computations similar to those in Appendix $B$ of \cite{massless}, we can bound the right hand side of the last inequality by $\widetilde{C} \epsilon$ using the smallness hypothesis on $(f_0,F_0)$. \end{proof} By a continuity argument and the previous corollary, there exists a largest time $T \in ]0,T^*[$ such that, for all $t \in [0,T[$, \begin{eqnarray}\label{bootf1} \mathbb{E}^4_{N-3}[f](t) & \leq & 4\epsilon, \\ \mathbb{E}^{0}_{N-1}[f](t) & \leq & 4\epsilon, \label{bootf2} \\ \overline{\mathbb{E}}_{N}[f](t) & \leq & 4\epsilon(1+t)^{\eta}, \label{bootf3} \\
\sum_{|\beta| \leq N-2} \left\| r^{\frac{3}{2}} \int_v \frac{v^A}{v^0} \widehat{Z}^{\beta} f dv \right\|_{L^2(\Sigma_t)} & \leq & \sqrt{\epsilon}, \label{bootL2} \\ \mathcal{E}^0_{N}[F](t) & \leq & 4\epsilon, \label{bootF1} \\ \mathcal{E}^{Ext}_N[\widetilde{F}](t) & \leq & 8 \epsilon, \label{bootext} \\ \mathcal{E}_{N-3}[F](t) & \leq & 30\epsilon \log^2(3+t), \label{bootF2} \\ \mathcal{E}_{N-1}[F](t) & \leq & 30\epsilon \log^{2M}(3+t), \label{bootF3} \\ \mathcal{E}_{N}[F](t) & \leq & 30\epsilon(1+t)^{\eta}. \label{bootF4} \end{eqnarray}
The remainder of the proof will then consist in improving our bootstrap assumptions, which will prove that $(f,F)$ is a global solution to the $3d$ massive Vlasov-Maxwell system. The other points of the theorem will be obtained during the proof, which is divided into four main parts.
\begin{enumerate} \item First, we will obtain pointwise decay estimates on the particle density, the electromagnetic field and then on the derivatives of the $\Phi$ coefficients, using the bootstrap assumptions.
\item Then, we will improve the bootstrap assumptions \eqref{bootf1}, \eqref{bootf2} and \eqref{bootf3} by several applications of the energy estimate of Proposition \ref{energyf} and the commutation formula of Proposition \ref{ComuPkp}. The computations will also lead to optimal pointwise decay estimates on $\int_v |Y^{\beta} f | \frac{dv}{(v^0)^2}$.
\item The next step consists in proving enough decay on the $L^2$ norms of $\int_v |zY^{\beta} f | dv$, which will permit us to improve the bootstrap assumption \eqref{bootL2}. \item Finally, we will improve the bootstrap assumptions \eqref{bootF1}-\eqref{bootF4} by using the energy estimates of Proposition \ref{energyMax1}. \end{enumerate} \section{Immediate consequences of the bootstrap assumptions}\label{sec7}
In this section, we prove pointwise estimates on the Maxwell field, the $\Phi$ coefficients and the Vlasov field. We start with the electromagnetic field.
\begin{Pro}\label{decayF}
We have, for all $|\gamma| \leq N-3$ and $(t,x) \in [0,T[ \times \mathbb{R}^3$, \begin{flalign*}
& \hspace{0.5cm} |\alpha(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon}\frac{\log^M(3+t)}{\tau_+^2}, \hspace{17mm} |\underline{\alpha}(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon}\min \left( \frac{1}{\tau_+\tau_-^{\frac{1}{2}}}, \frac{\log^M(3+t)}{\tau_+\tau_-} \right) \hspace{-0.5mm} ,& \\
& \hspace{0.5cm} |\sigma(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon} \frac{\log^M(3+t)}{\tau_+^{\frac{3}{2}}\tau_-^{\frac{1}{2}}}, \hspace{17mm} |\rho(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \sqrt{\epsilon}\frac{\log^M(3+t)}{\tau_+^{\frac{3}{2}}\tau_-^{\frac{1}{2}}}.& \end{flalign*}
Moreover, if $|x| \geq t$, \begin{flalign*}
& \hspace{0.5cm} |\alpha(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \frac{\sqrt{\epsilon}}{\tau_+^2}, \hspace{35mm} |\underline{\alpha}(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \frac{\sqrt{\epsilon}}{\tau_+\tau_-}, & \\
& \hspace{0.5cm} |\sigma(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \frac{\sqrt{\epsilon}}{\tau_+^{\frac{3}{2}}\tau_-^{\frac{1}{2}}}, \hspace{31mm} |\rho(\mathcal{L}_{Z^{\gamma}}(F))|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \frac{\sqrt{\epsilon}}{\tau_+^{\frac{3}{2}}\tau_-^{\frac{1}{2}}}. & \end{flalign*} We also have
$$ \forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{1.5cm} \sum_{|\kappa| \leq N} \left| \mathcal{L}_{Z^{\kappa}}(\overline{F}) \right|(t,x) \hspace{1mm} \lesssim \hspace{1mm} \frac{\epsilon}{\tau_+^2}.$$ \end{Pro}
\begin{Rq}\label{lowderiv}
If $|\gamma| \leq N-5$, we can replace the $\log^M(3+t)$-loss in the interior of the lightcone by a $\log(3+t)$-loss (for this, use the bootstrap assumption \eqref{bootF2} instead of \eqref{bootF3} in the proof below). \end{Rq} \begin{Rq}\label{decayoftheo}
Applying Proposition \ref{Probetteresti} and using the estimate \eqref{decayf} proved below, we can also improve the decay rates of the components $\rho$ and $\sigma$ near the light cone. We have, for all $|\gamma| \leq N-6$, \begin{eqnarray}
\forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{1cm} |\rho(\mathcal{L}_{Z^{\gamma}}(F))|(t,x)+|\sigma (\mathcal{L}_{Z^{\gamma}}(F))|(t,x) & \lesssim & \sqrt{\epsilon} \frac{\log^2(3+t)}{\tau_+^2} . \label{kevatal} \end{eqnarray} \end{Rq}
\begin{proof}
The last estimate, concerning $\overline{F}$, ensues from Proposition \ref{propcharge} and $|Q_F| \leq \| f_0 \|_{L^1_{x,v}} \leq \epsilon$. The estimate $\tau_+ \sqrt{\tau_-}|\underline{\alpha}| \lesssim \sqrt{\epsilon}$ follows from Proposition \ref{decayMaxwell} and the bootstrap assumption \eqref{bootF1}. Note that the other estimates hold with $F$ replaced by $\widetilde{F}$ since $\mathcal{E}_{N-1}[F]=\mathcal{E}_{N-1}[\widetilde{F}]$ and according to Proposition \ref{decayMaxwell} and the bootstrap assumptions \eqref{bootext}, \eqref{bootF3} and \eqref{bootL2}. It then remains to use $F=\widetilde{F}+\overline{F}$ and the estimates obtained on $\overline{F}$ and $\widetilde{F}$. \end{proof} \begin{Rq}\label{justif} Even if the pointwise decay estimates \eqref{kevatal}, which correspond to the ones written in Theorem \ref{theorem}, are stronger than the ones given by Proposition \ref{decayF} (or Remark \ref{lowderiv}) in the region located near the light cone, we will not work with them for two reasons. \begin{enumerate} \item Using these stronger decay rates do not simplify the proof. We compensate the lack of decay in $t+r$ of the estimates given by Proposition \ref{decayF} for the components $\rho$ and $\sigma$ by taking advantage of the inequality\footnote{We are able to use this inequality in the energy estimates as the degree in $v$ of the source terms of $T_F(Y^{\beta} f)$ is $0$ whereas the one of $v^{\mu} \partial_{\mu}Y^{\beta}f$ is equal to $1$.} $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$ and the good properties of $v^{\underline{L}}$. \item Compared to the estimates given by Remark \ref{lowderiv}, \eqref{kevatal} requires to control one derivative more of the electromagnetic field in $L^2$. Working with them would then force us to take $N \geq 12$. \end{enumerate} \end{Rq} We now turn on the $\Phi$ coefficients and start by the following lemma. \begin{Lem}\label{LemPhi}
Let $G$, $G_1$, $G_2 : [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ and $\varphi_0 : \mathbb{R}^3_x \times \mathbb{R}^3_v \rightarrow \mathbb{R}$ be four sufficiently regular functions such that $|G| \leq G_1+G_2$. Let $\varphi$, $\widetilde{\varphi}$, $\varphi_1$ and $\varphi_2$ be such that $$ T_F( \varphi ) =G, \hspace{5mm} \varphi(0,.,.)=\varphi_0, \hspace{12mm} T_F(\widetilde{\varphi})=0, \hspace{5mm} \widetilde{\varphi}(0,.,.)=\varphi_0$$ and, for $i \in \{1,2 \}$, $$ T_F(\varphi_i)=G_i, \hspace{5mm} \varphi_i(0,.,.)=0.$$ Then, on $[0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v$,
$$|\varphi| \leq |\widetilde{\varphi}|+|\varphi_1|+|\varphi_2|.$$ \end{Lem} \begin{proof} Denoting by $X(s,t,x,v)$ and $V(s,t,x,v)$ the characteristics of the transport operator, we have by Duhamel's formula, \begin{eqnarray}
\nonumber |\varphi|(t,x,v) & = & \left| \widetilde{\varphi}(t,x,v)+\int_0^t \frac{G}{v^0} \left( s,X(s,t,x,v),V(s,t,x,v) \right) ds \right| \\ \nonumber
& \leq & |\widetilde{\varphi}|(t,x,v)+\int_0^t \frac{G_1+G_2}{v^0} \left( s,X(s,t,x,v),V(s,t,x,v) \right) ds \\ \nonumber
& = & |\widetilde{\varphi}|(t,x,v)+|\varphi_1|(t,x,v)+|\varphi_2|(t,x,v). \end{eqnarray} \end{proof} \begin{Pro}\label{Phi1} We have, $\forall \hspace{0.5mm} (t,x,v) \in [0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v$
$$ |\Phi|(t,x,v) \lesssim \sqrt{\epsilon} \log^2 (1+\tau_+), \hspace{3mm} |\partial_{t,x} \Phi| (t,x,v) \lesssim \sqrt{\epsilon} \log^{\frac{3}{2}}(1+\tau_+) \hspace{3mm} \text{and} \hspace{3mm} |Y \Phi|(t,x,v) \lesssim \sqrt{\epsilon} \log^{\frac{7}{2}} (1+\tau_+).$$ \end{Pro} \begin{proof}
We will obtain this result through the previous Lemma and by parameterizing the characteristics of the operator $T_F$ by $t$ or by $u$. Let us start with $\Phi$ and recall that, schematically, $T_F(\Phi)=-t\frac{v^{\mu}}{v^0} \mathcal{L}_Z(F)_{\mu k}$. Denoting by $(\alpha, \underline{\alpha}, \rho, \sigma)$ the null decomposition of $\mathcal{L}_Z(F)$ and using $|v^A| \lesssim \sqrt{v^0 v^{\underline{L}}}$ (see Lemma \ref{weights1}), we have \begin{eqnarray}
\nonumber \left| \frac{v^{\mu}}{v^0} \mathcal{L}_Z(F)_{\mu k} \right| & \lesssim & \frac{v^L+|v^A|}{v^0}|\alpha|+\frac{v^L+v^{\underline{L}}}{v^0}|\rho|+\frac{|v^A|}{v^0}|\sigma|+\frac{v^{\underline{L}}+|v^A|}{v^0}|\underline{\alpha}| \\ \nonumber
& \lesssim & |\alpha|+|\rho|+|\sigma|+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\underline{\alpha}|. \end{eqnarray} Using the pointwise estimates given by Remark \ref{lowderiv} as well as the inequalities $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$, which comes from Lemma \ref{weights1}, and $2ab \leq a^2+b^2$, we get \begin{equation}\label{eq:firstphiesti}
\tau_+\left| \frac{v^{\mu}}{v^0} \mathcal{L}_Z(F)_{\mu k} \right| \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\frac{\epsilon v^0 v^{\underline{L}}}{\tau_+ \tau_-}}\log(3+t)+v^{\underline{L}}\frac{\sqrt{\epsilon}}{ \tau_-} \log(3+t) \hspace{2mm} \lesssim \hspace{2mm} \frac{v^0 \sqrt{\epsilon}}{\tau_+}\log(3+t)+\frac{v^{\underline{L}}\sqrt{\epsilon}}{\tau_-} \log(3+t). \end{equation} Consider now the functions $\varphi_1$ and $\varphi_2$ such that $$T_F(\varphi_1) = \frac{v^0 \sqrt{\epsilon}}{\tau_+}\log(3+t), \hspace{8mm} T_F(\varphi_2) = \frac{v^{\underline{L}}\sqrt{\epsilon}}{\tau_-}\log(3+t) \hspace{6mm} \text{and} \hspace{6mm} \varphi_1(0,.,.)=\varphi_2(0,.,.)=0.$$
According to Lemma \ref{LemPhi}, we have $|\Phi| \lesssim |\varphi_1|+|\varphi_2|$. In order to estimate $\varphi_1$, we will parametrize the characteristics of the operator $T_F$ by $t$. More precisely, let $(X_{s,y,v}(t),V_{s,y,v}(t))$ be the value at $t$ of the characteristic which is equal to $(y,v)$ at $t=s$, with $s < T$. Dropping the indices $s$, $y$ and $v$, we have $$ \frac{dX^i}{dt}(t) = \frac{V^i(t)}{V^0(t)} \hspace{1.5cm} \text{and} \hspace{1.5cm} \frac{d V^i}{dt}(t) = \frac{V^{\mu}(t)}{V^0(t)} {F_{ \mu}}^{ i}(t,X(t)).$$ Duhamel's formula gives
$$ |\varphi_1|(s,y,v) \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \int_0^s \frac{\log(3+t) }{\tau_+ \big( t, X_{s,y,v}(t) \big)} dt \hspace{2mm} \leq \hspace{2mm} \sqrt{\epsilon} \int_0^s \frac{\log(3+t)}{1+t} dt \hspace{2mm} \leq \hspace{2mm} \sqrt{\epsilon} \log^2 (3+s).$$ For $\varphi_2$, we parameterize the characteristics of $T_F$ by\footnote{Note that $T_F=2v^{\underline{L}} \partial_u +2v^L \partial_{\underline{u}}+v^A e_A+F(v,\nabla_v)$} $u$. For a point $(s,y) \in [0,T[ \times \mathbb{R}^3$, we will write its coordinates in the null frame as $(z,\underline{z},\omega_1, \omega_2)$. Let $(\underline{U}_{z,\underline{z},\omega_1, \omega_2,v}(u), \Omega^1_{z,\underline{z},\omega_1, \omega_2,v}(u),\Omega^2_{z,\underline{z},\omega_1, \omega_2,v}(u),V_{z,\underline{z},\omega_1, \omega_2,v}(u))$ be the value at $u$ of the characteristic which is equal to $(s,y,v)=(z,\underline{z},\omega_1,\omega_2,v)$ at $u=z$. Dropping the indices $z$, $\underline{z}$, $\omega_1$, $\omega_2$ and $v$, we have $$\frac{d \underline{U}}{du}(u)= \frac{V^L(u)}{V^{\underline{L}}(u)}, \hspace{1.5cm} \frac{d \Omega^A}{du}(u) = \frac{V^A(u)}{2V^{\underline{L}}(u)} \hspace{1.5cm} \text{and} \hspace{1.5cm} \frac{d V^i}{du}(u) = \frac{V^{\mu}(u)}{2V^{\underline{L}}(u)} {F_{\mu}}^{ i}(u,\underline{U}(u),\Omega (u)).$$ Note that $u \mapsto \frac{1}{2}(u+\underline{U}(u))$ vanishes at a unique $z_0$ such that $ -\underline{z} \leq z_0 \leq z$, i.e. the characteristic reaches the hypersurface $\Sigma_0$ once and only once, at $u=z_0$. This can be noticed on the following picture, representing a possible trajectory of $(u,\underline{U}(u))$, which has to be in the backward light cone of $(z,\underline{z})$ by finite time of propagation,
\begin{tikzpicture} \draw [-{Straight Barb[angle'=60,scale=3.5]}] (0,-0.3)--(0,4); \fill[color=gray!35] (1,0)--(4,3)--(7,0)--(1,0); \node[align=center,font=\bfseries, yshift=-2em] (title)
at (current bounding box.south)
{\hspace{1cm} The trajectory of $\left (u,\underline{U} \left(u \right) \right)$ for $u \leq z$.}; \draw [-{Straight Barb[angle'=60,scale=3.5]}] (0,0)--(9,0) node[scale=1.5,right]{$\Sigma_0$}; \fill (4,3) circle[radius=2pt]; \fill (1,0) circle[radius=2pt]; \fill (7,0) circle[radius=2pt]; \draw (4.6,3) node[scale=1]{$(z,\underline{z})$}; \draw (1,-0.3) node[scale=1]{$(0,z)$}; \draw (7,-0.3) node[scale=1]{$(0,\underline{z})$}; \draw (0,-0.5) node[scale=1.5]{$r=0$}; \draw (-0.5,3.7) node[scale=1.5]{$t$}; \draw (8.7,-0.5) node[scale=1.5]{$r$}; \draw[scale=1,domain=sqrt(3)+1:4,smooth,variable=\x,black] plot (\x,{0.5*(\x-1)*(\x-1)-1.5}); \end{tikzpicture}
or by noticing that $$g(u):=u+\underline{U}(u) \hspace{12mm} \text{satisfies} \hspace{12mm} g'(u) \hspace{1mm} = \hspace{1mm} 1+ \frac{V^L\left( u \right)}{V^{\underline{L}}\left( u \right)} \hspace{1mm} \geq \hspace{1mm} 1$$ so that $g$ vanishes at $z_0$ such that $-\underline{z}=z-(z+\underline{z}) \leq z_0 \leq z$. Similarly, one can prove (or observe) that $\sup_{z_0 \leq u \leq z } \underline{U}(u) \leq \underline{z}$. It then follows that \begin{equation}\label{eq:varphi2}
|\varphi_2|(s,y,v) \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \int_{z_0}^z \frac{\log \left( 3+\underline{U} \left(u \right) \right)}{ \tau_-(u,\underline{U} \left( u \right))} du \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \log (3+ \underline{z} ) \int_{-\underline{z}}^z \frac{1}{1+|u|} d u \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \log^2(1+\underline{z}),
\end{equation}
which allows us to deduce that $|\Phi|(s,y,v) \lesssim \sqrt{\epsilon} \log^2 (3+s+|y|)$. We prove the other estimates by the continuity method. Let $0 < T_0 < T$ and $ \underline{u} > 0$ be the largest time and null ingoing coordinate such that \begin{equation}\label{bootPhi}
|\nabla_{t,x} \Phi| (t,x,v) \leq C \sqrt{\epsilon} \log^{\frac{3}{2}} (1+\tau_+) \hspace{8mm} \text{and} \hspace{8mm} \sum_{Y \in \mathbb{Y}_0} |Y \Phi| (t,x,v) \leq C \sqrt{\epsilon} \log^{\frac{7}{2}} (1+\tau_+) \end{equation} hold for all $(t,x,v) \in \underline{V}_{\underline{u}}(T_0) \times \mathbb{R}^3_v$ and where the constant $C>0$ will be specified below. The goal now is to improve the estimates of \eqref{bootPhi}. Using the commutation formula of Lemma \ref{basiccomuf} and the definition of $\Phi$, we have (in the case where $\Phi$ is not associated to the scaling vector field), for $\partial \in \mathbb{T}$, $$T_F \left( \partial \Phi \right) = - \mathcal{L}_{\partial}(F)(v,\nabla_v \Phi)-\partial \left( t\frac{v^{\mu}}{v^0}\mathcal{L}_{Z}(F)_{\mu k} \right).$$ With $\delta = \partial (t) \in \{0,1 \}$, one has $$ \partial \left( t\frac{v^{\mu}}{v^0}\mathcal{L}_{Z}(F)_{\mu k} \right) = \delta \frac{v^{\mu}}{v^0}\mathcal{L}_{Z}(F)_{\mu k}+t\frac{v^{\mu}}{v^0}\mathcal{L}_{\partial Z}(F)_{\mu k}.$$
Using successively the inequality \eqref{eq:zeta2}, the pointwise decay estimates\footnote{Note that we use the estimate $|\underline{\alpha}| \lesssim \sqrt{\epsilon} \tau_+^{-1}\tau_-^{-\frac{1}{2}}$ here in order to obtain a decay rate of $\tau_+^{-1}$ in the $t+r$ direction.} given by Remark \ref{lowderiv} and the inequalities $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$, $2ab \leq a^2+b^2$, we get \begin{eqnarray}
\nonumber t\frac{v^{\mu}}{v^0}\mathcal{L}_{\partial Z}(F)_{\mu k} & \lesssim & \tau_+ \left( |\alpha ( \mathcal{L}_{\partial Z}(F) ) |+|\rho (\mathcal{L}_{\partial Z}(F))|+| \sigma ( \mathcal{L}_{\partial Z}(F) ) |+ \sqrt{\frac{v^{\underline{L}}}{v^0}} |\underline{\alpha} ( \mathcal{L}_{\partial Z}(F) ) | \right) \\ \nonumber
& \lesssim & \frac{\tau_+}{\tau_-} \sqrt{v^0 v^{\underline{L}}} \sum_{|\beta|\leq 2} \Big(\frac{\tau_-}{\tau_+} \left| \mathcal{L}_{Z^{\beta}}(F) \right|+ |\alpha ( \mathcal{L}_{Z^{\beta}}(F) ) |+|\rho (\mathcal{L}_{ Z^{\beta} }(F))|+| \sigma ( \mathcal{L}_{Z^{\beta}}(F) ) | \\ \nonumber
& & + \sqrt{\frac{v^{\underline{L}}}{v^0}} |\underline{\alpha} ( \mathcal{L}_{ Z^{\beta}}(F) ) | \Big) \\ & \lesssim & \sqrt{v^0 v^{\underline{L}}} \frac{\sqrt{\epsilon} \log(3+t)}{\tau_+^{\frac{1}{2}} \tau_-^{\frac{3}{2}}}+ v^{\underline{L}} \frac{\tau_+}{\tau_-} \frac{\sqrt{\epsilon} }{\tau_+ \tau_-^{\frac{1}{2}}} \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \frac{ v^0 }{\tau_+} \log^{\frac{1}{2}}(3+t) + \sqrt{\epsilon} \frac{ v^{\underline{L}} }{\tau_-^{\frac{3}{2}}} \log^{\frac{3}{2}}(3+t). \label{bootPhiT1} \end{eqnarray} Similarly, \begin{eqnarray}
\nonumber \frac{v^{\mu}}{v^0}\mathcal{L}_{ Z}(F)_{\mu k} & \lesssim & \left( |\alpha ( \mathcal{L}_{ Z}(F) ) |+|\rho (\mathcal{L}_{ Z}(F))|+| \sigma ( \mathcal{L}_{ Z}(F) ) |+ \sqrt{\frac{v^{\underline{L}}}{v^0}} |\underline{\alpha} ( \mathcal{L}_{ Z}(F) ) | \right) \\ & \lesssim & \frac{\sqrt{\epsilon} \log(3+t)}{\tau_+^{\frac{3}{2}} \tau_-^{\frac{1}{2}}}+ v^{\underline{L}} \frac{\sqrt{\epsilon} }{\tau_+ \tau_-^{\frac{1}{2}}} \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \frac{ v^0 }{\tau_+^{\frac{5}{4}}} + \sqrt{\epsilon} \frac{ v^{\underline{L}} }{\tau_-^{\frac{5}{4}}} . \label{bootPhiT2} \end{eqnarray}
Expressing $\mathcal{L}_{\partial}(F)(v,\nabla_v \Phi)$ in null components, denoting by $(\alpha, \underline{\alpha}, \rho, \sigma)$ the null decomposition of $\mathcal{L}_{\partial}(F)$ and using the inequalities $|v^A| \lesssim \sqrt{v^0v^{\underline{L}}}$, $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$ (see Lemma \ref{weights1}), one has \begin{equation}\label{eq:referlater}
\left|\mathcal{L}_{\partial}(F)(v,\nabla_v \Phi) \right| \lesssim \sqrt{v^0v^{\underline{L}}}|\rho | \left| \left( \nabla_v \Phi \right)^r\right|+\left(\sqrt{v^0v^{\underline{L}}} |\alpha |+v^{\underline{L}}|\underline{\alpha} |+v^{\underline{L}} |\sigma | \right) \left| \nabla_v \Phi \right|.
\end{equation} Using Lemma \ref{vradial}, $v^0 \partial_{v^i}=Y_i-\Phi X-t\partial_i-x^i \partial_t$ and the bootstrap assumption on the $\Phi$ coefficients \eqref{bootPhi}, we obtain \begin{eqnarray}
\nonumber \left| \left( \nabla_v \Phi \right)^r\right| & \lesssim & \sum_{Y \in \mathbb{Y}_0} |Y \Phi |+ |\Phi| |X( \Phi ) |+ \tau_- |\nabla_{t,x} \Phi | \hspace{2mm} \lesssim \hspace{2mm} C \sqrt{\epsilon} \log^{\frac{7}{2}} (1+\tau_+)+C\sqrt{\epsilon} \tau_- \log^{\frac{3}{2}} (1+\tau_+) , \\ \nonumber
\left| \nabla_v \Phi \right| & \lesssim & \sum_{Y \in \mathbb{Y}_0} |Y \Phi |+ |\Phi| |X( \Phi ) |+ \tau_+ |\nabla_{t,x} \Phi | \hspace{2mm} \lesssim \hspace{2mm} C\sqrt{\epsilon} \log^{\frac{7}{2}} (1+\tau_+)+C\sqrt{\epsilon} \tau_+ \log^{\frac{3}{2}} (1+\tau_+).
\end{eqnarray} We then deduce, by \eqref{eq:zeta2} and the pointwise estimates given by Remark \ref{lowderiv}, \begin{eqnarray}
\nonumber \sqrt{v^0v^{\underline{L}}}|\rho | \left| \left( \nabla_v \Phi \right)^r\right|+\sqrt{v^0v^{\underline{L}}} |\alpha |\left| \nabla_v \Phi \right| & \lesssim & C\epsilon \frac{\sqrt{v^0 v^{\underline{L}}}}{\tau_+ \tau_- } \log^{\frac{5}{2}} (1+\tau_+) \hspace{2mm} \lesssim \hspace{2mm} C \epsilon \frac{v^0}{\tau_+^{\frac{3}{2}}}+ C \epsilon \frac{v^{\underline{L}}}{\tau_-^2}, \\ \nonumber
\left( v^{\underline{L}}|\underline{\alpha} |+v^{\underline{L}} |\sigma | \right) \left| \nabla_v \Phi \right| & \lesssim & C \epsilon \frac{v^{\underline{L}}}{\tau_-^{\frac{3}{2}}} \log^{\frac{3}{2}}(1+\tau_+). \end{eqnarray} Combining these two last estimates with \eqref{bootPhiT1} and \eqref{bootPhiT2}, we get
$$ \left| T_F \left( \partial \Phi \right) \right| \lesssim (\sqrt{\epsilon}+C\epsilon) \frac{v^0}{\tau_+} \log^{\frac{1}{2}}(1+\tau_+)+ (\sqrt{\epsilon}+C\epsilon) \frac{v^{\underline{L}}}{\tau_-^{\frac{5}{4}}} \log^{\frac{3}{2}}(1+\tau_+).$$ We then split $\partial \Phi$ into three functions $\widetilde{\psi}+\psi_1+\psi_2$ such that $\psi_1(0,.,.)=\psi_2(0,.,.)=0$, $\widetilde{\psi}(0,.,.)=\partial \Phi (0,.,.)$, $$ T_F(\psi_1)=(\sqrt{\epsilon}+C\epsilon) \frac{v^0}{\tau_+} \log^{\frac{1}{2}}(1+\tau_+), \hspace{10mm} T_F(\psi_2)=(\sqrt{\epsilon}+C\epsilon) \frac{v^{\underline{L}}}{\tau_-^{\frac{5}{4}}} \log^{\frac{3}{2}}(1+\tau_+) \hspace{10mm} \text{and} \hspace{10mm} T_F(\widetilde{\psi})=0 .$$
According to Proposition \ref{Phi0}, we have $\|\widetilde{\psi} \|_{L^{\infty}_{t,x,v}} = \| \partial \Phi (0,.,.) \|_{L^{\infty}_{x,v}} \lesssim \sqrt{\epsilon}$. Fix now $(s,y,v) \in \underline{V}_{\underline{u}}(T_0) \times \mathbb{R}^3_v$ and let $(z,\underline{z}, \omega_1, \omega_2)$ be the coordinates of $(s,y)$ in the null frame. Keeping the notations used previously in this proof, we have \begin{eqnarray}
\nonumber |\psi_1|(s,y,v) & \lesssim & (\sqrt{\epsilon}+C\epsilon) \int_0^s \frac{ \log^{\frac{1}{2}} (1+\tau_+(t,X(t)))}{\tau_+ (t,X(t))} dt \\ & \lesssim & (\sqrt{\epsilon}+C\epsilon) \int_0^{s} \frac{ \log^{\frac{1}{2}} (3+t)}{1+t} dt \hspace{2mm} \lesssim \hspace{2mm} (\sqrt{\epsilon}+C\epsilon) \log^{\frac{3}{2}} (3+s), \label{eq:repeat1} \\ \nonumber
|\psi_2|(s,y,v) & \lesssim & (\sqrt{\epsilon}+C\epsilon) \int_{z_0}^z \frac{\log^{\frac{3}{2}} \left( 1+\tau_+(u,\underline{U} \left(u \right)) \right)}{ \tau_-^{\frac{5}{4}}(u,\underline{U} \left( u \right))} du \\ & \lesssim & (\sqrt{\epsilon}+C\epsilon) \log^{\frac{3}{2}} (3+ \underline{z} ) \int_{-\underline{z}}^z \frac{1}{(1+|u|)^{\frac{5}{4}}} d u \hspace{2mm} \lesssim \hspace{2mm} (\sqrt{\epsilon}+C\epsilon) \log^{\frac{3}{2}}(3+\underline{z}). \label{eq:repeat2} \end{eqnarray} Thus, there exists $C_1 >0$ such that
$$ \forall \hspace{0.5mm} (s,y,v) \in \underline{V}_{\underline{u}}(T_0) \times \mathbb{R}^3_v, \hspace{1cm} |\nabla_{t,x} \Phi |(s,y,v) \leq C_1(\sqrt{\epsilon}+C \epsilon ) \log^{\frac{3}{2}}(1+\tau_+(s,y))$$ and we can then improve the bootstrap assumption on $\nabla_{t,x} \Phi$ if $C$ is chosen large enough and $\epsilon$ small enough. It remains to study $Y \Phi$ with $Y \in \mathbb{Y}_0$. Using Lemma \ref{Comufirst}, $T_F(Y \Phi)$ can be bounded by a linear combination of terms of the form
$$ \left| \frac{v^{\mu}}{v^0}\mathcal{L}_Z(F)_{\mu k} Y \Phi \right|, \hspace{9mm} \tau_+\left| \frac{v^{\mu}}{v^0}\mathcal{L}_Z(F)_{\mu k} \partial_{t,x} \Phi \right|, \hspace{9mm} \left| \Phi \mathcal{L}_{\partial}(F)(v, \nabla_v \Phi ) \right| \hspace{9mm} \text{and} \hspace{9mm} \left| Y \left( t \frac{v^{\mu}}{v^0} \mathcal{L}_{Z}(F)_{\mu k} \right) \right|.$$
Using the bootstrap assumption \eqref{bootPhi} in order to estimate $|Y \Phi |$ and reasoning as for \eqref{bootPhiT2}, one obtains
$$ \left| \frac{v^{\mu}}{v^0}\mathcal{L}_Z(F)_{\mu k} Y \Phi \right| \hspace{2mm} \lesssim \hspace{2mm} C\epsilon \frac{v^0}{\tau_+^{\frac{5}{4}}}+C\epsilon \frac{v^{\underline{L}}}{\tau_-^{\frac{5}{4}}} .$$
Bounding $|\partial_{t,x} \Phi|$ with the bootstrap assumption \eqref{bootPhi} and using the inequality \eqref{eq:firstphiesti}, it follows
$$\tau_+\left| \frac{v^{\mu}}{v^0}\mathcal{L}_Z(F)_{\mu k} \partial \Phi \right| \hspace{2mm} \lesssim \hspace{2mm} C\epsilon \frac{v^0}{\tau_+}\log^{\frac{5}{2}}(1+\tau_+)+C\epsilon \frac{v^{\underline{L}}}{\tau_-}\log^{\frac{5}{2}}(1+\tau_+).$$
As $|\Phi| \lesssim \sqrt{\epsilon} \log^2(1+\tau_+)$, we get, using the bound obtained on the left hand side of \eqref{eq:referlater}, $$\Phi \mathcal{L}_{\partial}(F)(v, \nabla_v \Phi ) \hspace{2mm} \lesssim \hspace{2mm} C\epsilon \frac{v^0}{\tau_+^{\frac{3}{2}}} \log^2(1+\tau_+)+C\epsilon \frac{v^{\underline{L}}}{\tau_-^{\frac{3}{2}}} \log^{\frac{7}{2}}(1+\tau_+).$$ For the remaining term, one has schematically, by the first equality of Lemma \ref{calculF},
$$ \left| Y \left( t \frac{v^{\mu}}{v^0} \mathcal{L}_{Z}(F)_{\mu k} \right) \right| \hspace{2mm} \lesssim \hspace{2mm} \left(\tau_++|\Phi| \right) \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z}(F)_{\mu \theta} \right|+\tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{ZZ}(F)_{\mu k} \right|+\tau_+ |\Phi|\left| \frac{v^{\mu}}{v^0} \mathcal{L}_{\partial Z}(F)_{\mu k} \right|. $$
Using $|\Phi| \lesssim \log^2(1+\tau_+) \leq \tau_+$ and following \eqref{eq:firstphiesti}, we get
$$ \left(\tau_++|\Phi| \right) \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z}(F)_{\mu \theta} \right|+\tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{ZZ}(F)_{\mu k} \right| \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \frac{v^0}{\tau_+} \log (1+\tau_+)+\sqrt{\epsilon} \frac{v^{\underline{L}}}{\tau_-} \log (1+\tau_+).$$
Combining \eqref{bootPhiT1} with $|\Phi| \lesssim \log^2(1+\tau_+)$, we obtain
$$ \tau_+ |\Phi|\left| \frac{v^{\mu}}{v^0} \mathcal{L}_{\partial Z}(F)_{\mu k} \right| \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \frac{v^0}{\tau_+} \log^{\frac{5}{2}} (1+\tau_+)+\sqrt{\epsilon} \frac{v^{\underline{L}}}{\tau_-^{\frac{3}{2}}} \log^{\frac{7}{2}} (1+\tau_+).$$ Consequently, one has
$$\left| T_F ( Y \Phi ) \right| \hspace{2mm} \lesssim \hspace{2mm} (\sqrt{\epsilon}+C \epsilon) \frac{v^0}{\tau_+} \log^{\frac{5}{2}} (1+\tau_+)+(\sqrt{\epsilon}+C \epsilon) \frac{v^{\underline{L}}}{\tau_-^{\frac{5}{4}}} \log^{\frac{7}{2}} (1+\tau_+)+(\sqrt{\epsilon}+C \epsilon) \frac{v^{\underline{L}}}{\tau_-} \log^{\frac{5}{2}} (1+\tau_+).$$
One can then split $Y \Phi$ into three functions $\widetilde{\varsigma}$, $\varsigma_1$ and $\varsigma_2$ defined as $\widetilde{\psi}$, $\psi_1$ and $\psi_2$ previously. We have $\| \widetilde{\varsigma} \|_{L^{\infty}_{t,x,v}} \lesssim \sqrt{\epsilon}$ since $\|Y \Phi \|_{L^{\infty}_{x,v}}(0) \lesssim \sqrt{\epsilon}$ (see Proposition \ref{Phi0}) and we can obtain $|\varsigma_1|+|\varsigma_2| \lesssim ( \sqrt{\epsilon}+C \epsilon ) \log^{\frac{7}{2}} (1+\tau_+)$ by computations similar to those of \eqref{eq:repeat1}, \eqref{eq:repeat2} and \eqref{eq:varphi2}. So, taking $C$ large enough and $\epsilon$ small enough, we can improve the bootstrap assumption on $Y \Phi$ and conclude the proof. \end{proof} For the higher order derivatives, we have the following result. \begin{Pro} For all $(Q_1,Q_2) \in \llbracket 0, N-4 \rrbracket^2$ satisfying $Q_2 \leq Q_1$, there exists $R(Q_1,Q_2) \in \mathbb{N}$ such that
$$\forall \hspace{0.5mm} |\beta| \leq N-4, \hspace{3mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{12mm} \left| Y^{\beta} \Phi \right|(t,x) \lesssim \sqrt{\epsilon} \log^{R(|\beta|,\beta_P)} (1+\tau_+ ).$$ Note that $R(Q_1,Q_2)$ is independent of $M$ if $Q_1 \leq N-6$. \end{Pro} \begin{proof}
The proof is similar to the previous one and we only sketch it. We proceed by induction on $Q_1$ and, for fixed $Q_1$, we make an induction on $Q_2$. Let $|\beta| \leq N-4$ and suppose that the result holds for all $ Q_1 \leq |\beta|$ and $Q_2 \leq \beta_P$ satisfying $Q_1 < |\beta|$ or $Q_2 < \beta_P$. Let $0 < T_0 < T$ and $\underline{u} >0$ be such that
$$ \forall \hspace{0.5mm} (t,x,v) \in \underline{V}_{\underline{u}}(T_0) \times \mathbb{R}^3_v, \hspace{1cm} |Y^{\beta} \Phi|(t,x,v) \leq C \sqrt{\epsilon} \log^{R(|\beta|,\beta_P)}(1+\tau_+),$$ with $C>0$ a constant sufficiently large. We now sketch the improvement of this bootstrap assumption, which will imply the desired result. The source terms of $T_F(Y^{\beta} \Phi)$, given by Propositions \ref{ComuVlasov} and \ref{sourcePhi}, can be gathered in two categories. \begin{itemize}
\item The ones where there is no $\Phi$ coefficient derived more than $|\beta| -1$ times, which can then be bounded by the induction hypothesis and give logarithmical growths, as in the proof of the previous Proposition. We then choose $R(|\beta|,\beta_P)$ sufficiently large to fit with these growths.
\item The ones where a $\Phi$ coefficient is derived $|\beta|$ times. Note then that they all come from Proposition \ref{ComuVlasov}, when $|\sigma| = |\beta|$ for the quantities of \eqref{eq:com1} and when $|\sigma|=|\beta|-1$ for the other ones. We then focus on the most problematic ones (with a $\tau_+$ or $\tau_-$ weight, which can come from a weight $z \in \mathbf{k}_1$ for the terms of \eqref{eq:com1}), leading us to integrate along the characteristics of $T_F$ the following expressions. \end{itemize} \begin{equation}\label{eq:Phi11}
\tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} Y^{\kappa} \Phi \right|, \hspace{5mm} \text{with} \hspace{5mm} |\gamma| \leq N-3, \hspace{5mm} |\kappa| = |\beta| \hspace{5mm} \text{and} \hspace{5mm} \kappa_P < \beta_P, \end{equation} \begin{equation}\label{eq:Phi22}
\left| \Phi^{p} \mathcal{L}_{\partial Z^{\gamma_0}} (F) \left( v, \Gamma^{\kappa} \Phi \right) \right|, \hspace{5mm} \text{with} \hspace{5mm} |\gamma_0| \leq N-4, \hspace{5mm} |\kappa| = |\beta|-1 \hspace{5mm} \text{and} \hspace{5mm} p+\kappa_P \leq \beta_P. \end{equation} To deal with \eqref{eq:Phi11}, use the induction hypothesis, as $\kappa_P < \beta_P$. For the other terms, recall from Lemma \ref{GammatoYLem} that we can schematically suppose that
$$\Gamma^{\kappa} \Phi = P_{q,n}(\Phi) Y^{\zeta} \Phi, \hspace{5mm} \text{with} \hspace{5mm} |q|+|\zeta| \leq |\beta|-1, \hspace{5mm} |q| \leq |\beta|-2 \hspace{5mm} \text{and} \hspace{5mm} n+q_P+\zeta_P = \kappa_P.$$ Expressing \eqref{eq:Phi22} in null coordinates and transforming the $v$ derivatives with Lemma \ref{vradial} or $v^0 \partial_{v^i}=Y_i-\Phi X-x^i \partial_t-t \partial_i$, we obtain the following bad terms,
$$ \left( \tau_- |\rho|+ \tau_+ |\alpha|+\tau_+\sqrt{\frac{v^{\underline{L}}}{v^0}}\left( |\sigma|+|\underline{\alpha}| \right) \right) \Phi^p \partial_{t,x} \left( P_{q,n}(\Phi) Y^{\zeta} \Phi \right).$$
Then, note that there are no derivatives of order $|\beta|$ in $\Phi^p \partial_{t,x} \left( P_{q,n}(\Phi) \right) Y^{\zeta} \Phi$ so that these terms can be handled using the induction hypothesis. It then remains to study the terms related to $ P_{q,n+p}(\Phi) \partial_{t,x} Y^{\zeta} \Phi$. If $\zeta_P < \beta_P$, we can treat them by again using the induction hypothesis. Otherwise $p+n=0$ and we can follow the treatment of \eqref{eq:referlater}. Finally, the fact that $R(|\beta|,\beta_P)$ is independent of $M$ if $|\beta| \leq N-6$ follows from Remark \ref{lowderiv} and from the fact that we merely need pointwise estimates on the derivatives of $F$ up to order $N-5$ in order to bound $Y^{\xi} \Phi$, with $|\xi| \leq N-6$. \end{proof} \begin{Rq}\label{estiPkp} There exist $(M_1,M_2) \in \mathbb{N}^2$, with $M_1$ independent of $M$, such that, for all $p \leq 3N$ and $(t,x,v) \in [0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3$,
$$ \sum_{|k| \leq N-6} |P_{k,p}(\Phi)|(t,x,v) \lesssim \log^{M_1}(1+\tau_+) \hspace{10mm} \text{and} \hspace{10mm} \sum_{|k| \leq N-4} |P_{k,p}(\Phi)|(t,x,v) \lesssim \log^{M_2}(1+\tau_+).$$ \end{Rq}
We are now able to apply the Klainerman-Sobolev inequalities of Proposition \ref{KS1} and Corollary \ref{KS2}. Combined with the bootstrap assumptions \eqref{bootf1}, \eqref{bootf3} and the estimates on the $\Phi$ coefficients, one immediately obtains that, for any $z \in \mathbf{k}_1$, $\max(|\xi|+|\beta|,|\xi|+1) \leq N-6$, $j \leq 2N-\xi_P-\beta_P$, \begin{equation}\label{decayf}
\forall \hspace{0.5mm} (t,x) \in [0,T[\times \mathbb{R}^3, \qquad \int_v |z^jP_{\xi}(\Phi)Y^{\beta} f|(t,x,v) dv \lesssim \epsilon \frac{\log^{(j+|\xi|+|\beta|+3)a}(3+t)}{\tau_+^2\tau_-}. \end{equation}
\section{Improvement of the bootstrap assumptions \eqref{bootf1}, \eqref{bootf2} and \eqref{bootf3} }\label{sec8}
As the improvements of all the energy bounds concerning $f$ are similar, we unify them as much as possible. Hence, let us consider \begin{itemize} \item $Q \in \{ N-3,N-1,N\}$, $n_{N-3}=4$, $n_{N-1}=0$ and $n_N=0$.
\item Multi-indices $\beta^0$, $\xi^0$ and $\xi^2$ such that $\max (|\xi^0|+|\beta^0|, 1+|\xi^0| ) \leq Q$ and $\max (|\xi^2|+|\beta^0|, 1+|\xi^2| ) \leq Q$. \item A weight $z_0 \in \mathbf{k}_1$ and $q \leq 2N-1+n_Q-\xi^0_P-\xi^2_P-\beta^0_P$. \end{itemize} According to the energy estimate of Proposition \ref{energyf}, Corollary \ref{coroinit} and since $\xi^0$ and $\xi^2$ play a symmetric role, we can improve \eqref{bootf1}-\eqref{bootf3}, for $\epsilon$ small enough, if we prove that \begin{eqnarray}\label{improvebootf}
\int_0^t \int_{\Sigma_s} \int_v \left| T_F \left( z^q_0 P_{\xi^0}(\Phi) Y^{\beta^0} f \right) P_{\xi^2}(\Phi) \right| \frac{dv}{v^0} dx ds & \lesssim & \epsilon^{\frac{3}{2}}(1+t)^{\eta} \log^{aq}(3+t) \hspace{3mm} \text{if} \hspace{3mm} Q =N, \\
& \lesssim & \epsilon^{\frac{3}{2}} \log^{(q+|\xi^0|+|\xi^2|+|\beta^0|)a}(3+t) \hspace{3mm} \text{otherwise}. \label{improvebootf2} \end{eqnarray}
For that purpose, we will bound the spacetime integral of the terms given by Proposition \ref{ComuPkp}, applied to $z^q_0 P_{\xi^0}(\Phi) Y^{\beta^0} f$. We start, in Subsection \ref{sec81}, by covering the term of \eqref{eq:cat0}. Subsection \ref{sec82} (respectively \ref{ref:L2elec}) is devoted to the study of the expressions of the other categories for which the electromagnetic field is derived less than $N-3$ times (respectively more than $N-2$ times). Finally, we treat the more critical terms in Subsection \ref{sec86}. In Subsection \ref{Ximpro}, we bound $\mathbb{E}_N^X[f]$, $ \mathbb{E}_{N-1}^X[f]$ and we improve the decay estimate of $\int_v (v^0)^{-2} |Y^{\beta} f|dv$ near the light cone.
\subsection{The terms of \eqref{eq:cat0}}\label{sec81}
The purpose of this Subsection is to prove the following proposition.
\begin{Pro}\label{M1}
Let $\xi^1$, $\xi^2$ and $\beta$ be such that $\max(1+|\xi^i|,|\xi^i|+|\beta|) \leq N$ for $i \in \{1,2 \}$. Consider also $z \in \mathbf{k}_1$, $r \in \mathbb{N}^*$, $0 \leq \kappa \leq \eta$, $0 < j \leq 2N+3-\xi^1_P-\xi^2_P-\beta_P$ and suppose that, $$\forall \hspace{0.5mm} t \in [0,T[, \hspace{8mm} \mathbb{E} \left[ z^j P_{\xi^1}(\Phi)P_{\xi^2}(\Phi) Y^{\beta} f \right](t)+\log^2(3+t)\mathbb{E} \left[ z^{j-1} P_{\xi^1}(\Phi)P_{\xi^2}(\Phi) Y^{\beta} f \right](t) \lesssim \epsilon (1+t)^{\kappa} \log^r(3+t).$$ Then,
$$ \int_0^t \int_{\Sigma_s} \int_v \left| F\left(v,\nabla_v z^j\right) P_{\xi^1}(\Phi)P_{\xi^2}(\Phi) Y^{\beta} f \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}(1+t)^{\kappa} \log^{r}(3+t).$$ \end{Pro} \begin{proof} To lighten the notations, we denote $P_{\xi^1}(\Phi)P_{\xi^2}(\Phi) Y^{\beta}f$ by $h$ and, for $d \in \{0,1 \}$, $\mathbb{E} \left[ z^{j-d} h \right]$ by $H_{j-d}$, so that
$$H_{j-d}(t) \hspace{1mm} = \hspace{1mm} \| z^{j-d} h \|_{L^1_{x,v}}(t)+\sup_{u \in \mathbb{R}} \int_{C_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0}|z^{j-d} h| dv dC_u(t) \hspace{1mm} \lesssim \hspace{1mm} \epsilon (1+t)^{\kappa} \log^{r-2d}(3+t).$$ Using Lemmas \ref{weights1} and \ref{vradial}, we have
$$ \left| \left( \nabla_v z^j \right)^L \right|, \hspace{1mm} \left| \left( \nabla_v z^j \right)^{\underline{L}} \right|, \hspace{1mm} \frac{|v^A|+v^{\underline{L}}}{v^0}\left| \left( \nabla_v z^j \right)^A \right| \lesssim \frac{\tau_-}{v^0}|z|^{j-1}+\frac{1}{v^0} \sum_{w \in \mathbf{k}_1} |w|^j.$$
Hence, the decomposition of $F\left(v,\nabla_v |z|^j\right)$ in our null frame brings us to control the integral, over $[0,T] \times \mathbb{R}^3_x \times \mathbb{R}^3_v$, of\footnote{The second term comes from $\alpha(F)_Av^L\left(\nabla_v |z|^j \right)^A$.}
$$\left( \tau_-|w|^{j-1}+|w|^j \right)(|\rho(F)|+|\alpha(F)|+|\sigma(F)|+|\underline{\alpha}(F)|)\frac{|h|}{v^0} \hspace{5mm} \text{and} \hspace{5mm} \left( \tau_+|w|^{j-1}+ |w|^j \right) |\alpha(F)|\frac{|h|}{v^0}.$$ According to Remark \ref{lowderiv} and using $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$ (see Lemma \ref{weights1}), we have
$$ \tau_-(|\rho(F)|+|\sigma(F)|+|\underline{\alpha}(F)|)+\tau_+|\alpha(F)| \lesssim \sqrt{\epsilon} \frac{\log(3+t)}{\tau_+}, \hspace{6mm} |\rho(F)|+|\sigma(F)|+|\underline{\alpha}(F)|+|\alpha(F)| \lesssim \sqrt{\epsilon} \frac{v^0}{\tau_+^{\frac{3}{2}}}+\sqrt{\epsilon}\frac{v^{\underline{L}}}{\tau_-^{\frac{3}{2}}}.$$ The result is then implied by the following two estimates, \begin{eqnarray}
\nonumber \int_0^t \int_{\Sigma_s} \int_v \sqrt{\epsilon} |h|\left( \frac{|w|^{j-1}}{1+s}\log(3+s)+\frac{|w|^j}{(1+s)^{\frac{3}{2}}} \right) dvdxds \hspace{-0.5mm} & \lesssim & \hspace{-0.5mm} \sqrt{\epsilon} \int_0^t \frac{\log(3+s)}{1+s} H_{j-1}(s)ds+\int_0^t \frac{H_{j}(s)}{(1+s)^{\frac{3}{2}}} ds \\ \nonumber & \lesssim & \hspace{-0.5mm} \epsilon^{\frac{3}{2}} \int_0^t \frac{\log^{r-1}(3+t)}{(1+s)^{1-\kappa}} +\frac{\log^{r}(3+t)}{(1+s)^{\frac{5}{4}-\kappa}} ds \\ \nonumber
& \lesssim & \epsilon^{\frac{3}{2}}(1+t)^{\kappa} \log^r(3+t), \\
\nonumber \int_0^t \int_{\Sigma_s} \frac{\sqrt{\epsilon}}{\tau_-^{\frac{3}{2}}} \int_v \frac{v^{\underline{L}}}{v^0} \left| w^j h \right| dvdxds \hspace{-0.5mm} & = & \hspace{-0.5mm} \int_{u=-\infty}^t \frac{\sqrt{\epsilon}}{\tau_-^{\frac{3}{2}}} \int_{C_u(t)} \int_v \frac{v^{\underline{L}}}{v^0} \left| w^j h \right| dv dC_u(t) du \\ \nonumber & \lesssim & \hspace{-0.5mm} \sqrt{\epsilon} H_j(t) \int_{u=-\infty}^{+ \infty} \frac{du}{\tau_-^{\frac{3}{2}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} (1+t)^{\kappa} \log^r (3+t). \end{eqnarray} \end{proof}
\subsection{Bounds on several spacetime integrals}\label{sec82}
We estimate in this subsection the spacetime integral of the source terms of \eqref{eq:cat1}-\eqref{eq:cat4} of $T_F(z_0^q P_{\xi^0}(\Phi) Y^{\beta^0}f )$, multiplied by $(v^0)^{-1} P_{\xi^2}(\Phi)$, where the electromagnetic field is derived less than $N-3$ times. We then fix, for the remainder of the subsection, \begin{itemize}
\item multi-indices $\gamma$, $\beta$ and $\xi^1$ such that $$|\gamma| \leq N-3, \hspace{3mm} |\xi^1|+ |\gamma| + |\beta| \leq Q+1, \hspace{3mm} |\beta| \leq |\beta^0|, \hspace{3mm} |\xi^1|+|\beta| \leq |\xi^0|+|\beta^0| \leq Q \hspace{3mm} \text{and} \hspace{3mm} |\xi^1| \leq Q-1.$$ \item $n \leq 2N$, \hspace{2mm} $z \in \mathbf{k}_1$ \hspace{2mm} and \hspace{2mm} $j \in \mathbb{N}$ \hspace{2mm} such that \hspace{2mm} $j \leq 2N-1+n_Q-\xi^1_P-\xi^2_P-\beta_P$.
\item We will make more restrictive hypotheses for the study of the terms of \eqref{eq:cat3} and \eqref{eq:cat4}. For instance, for the last ones, we will take $|\xi^1| < |\xi^0|$ and $j=q$. This has to do with their properties described in Proposition \ref{ComuPkp}. \end{itemize}
Note that $|\xi^2|+|\beta| \leq Q$. To lighten the notations, we introduce $$h := z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) Y^{\beta} f.$$ We start by treating the terms of \eqref{eq:cat1}. \begin{Pro}\label{M2} Under the bootstrap assumptions \eqref{bootf1}-\eqref{bootf3}, we have,
$$I_1:=\int_0^t \int_{\Sigma_s} \int_v |\Phi|^n \left( \left| \nabla_{Z^{\gamma}} F \right|+\frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right)\right|+\frac{\tau_+}{\tau_-}\sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \right) \left| h \right| \frac{dv}{v^0} dx ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}.$$ \end{Pro} \begin{proof} According to Propositions \ref{Phi1}, \ref{decayF} and $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$, we have \begin{eqnarray}
\nonumber \left| \Phi \right|^n \left| \nabla_{Z^{\gamma}} F \right|+\left| \Phi \right|^n\frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right)\right|+\left| \Phi \right|^n \frac{\tau_+}{\tau_-}\sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \hspace{-1.5mm} & \lesssim & \hspace{-1.5mm} \sqrt{\epsilon}\log^{4N+M}(3+t)\left( \frac{\sqrt{v^0 v^{\underline{L}}}}{\tau_+\tau_-}+ \frac{v^{\underline{L}}}{\tau_+^{\frac{1}{2}}\tau_-^{\frac{3}{2}}} \right) \\ \nonumber & \lesssim & \hspace{-1.5mm} \sqrt{\epsilon} \frac{v^0}{\tau_+^{\frac{5}{4}}}+\sqrt{\epsilon} \frac{v^{\underline{L}}}{\tau_+^{\frac{1}{4}}\tau_-^{\frac{3}{2}}}. \end{eqnarray} Then, \begin{eqnarray}
\nonumber I_1 & \lesssim & \int_0^t \int_{\Sigma_s} \frac{\sqrt{\epsilon}}{\tau_+^{\frac{5}{4}}} \int_v |h| dv dx ds + \int_0^t \int_{\Sigma_s} \frac{\sqrt{\epsilon}}{\tau_+^{\frac{1}{4}}\tau_-^{\frac{3}{2}}} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv dx ds \\ \nonumber
& \lesssim & \sqrt{\epsilon} \int_0^t \frac{\mathbb{E}[h](s)}{(1+s)^{\frac{5}{4}}}ds+\sqrt{\epsilon} \int_{u=-\infty}^t \int_{C_u(t)} \frac{1}{\tau_+^{\frac{1}{4}}\tau_-^{\frac{3}{2}}} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv d C_u(t) du. \end{eqnarray} Recall now the definition of $(t_i)_{i \in \mathbb{N}}$, $(T_i(t))_{i \in \mathbb{N}}$ and $C_u^i(t)$ from Subsection \ref{secsubsets}. By the bootstrap assumption \eqref{bootf3} and $2\eta < \frac{1}{8}$, we have
$$\mathbb{E}[h](s) \lesssim \epsilon(1+s)^{\frac{1}{8}} \hspace{5mm} \text{and} \hspace{5mm} \sup_{u \in \mathbb{R}} \int_{C_u^i(t)} \int_v v^0 v^{\underline{L}} |h| dv dC_u^i(t) \lesssim \epsilon (1+T_{i+1}(t))^{2 \eta} \lesssim \epsilon (1+t_{i+1})^{ \frac{1}{8}},$$ so that, using also\footnote{Note that the sum over $i$ is actually finite as $C^i_u(t) = \varnothing$ for $i \geq \log_2(1+t)$.} $1+t_{i+1} \leq 2(1+t_i) $ and Lemma \ref{foliationexpli}, \begin{eqnarray} \nonumber \sqrt{\epsilon} \int_0^t \frac{\mathbb{E}[h](s)}{(1+s)^{\frac{5}{4}}} & \lesssim & \epsilon^{\frac{3}{2}} \int_0^{+ \infty} \frac{ds}{(1+s)^{\frac{9}{8}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}, \\
\nonumber \sqrt{\epsilon} \int_{u=-\infty}^t \int_{C_u(t)} \frac{1}{\tau_+^{\frac{1}{4}}\tau_-^{\frac{3}{2}}} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv d C_u(t) du \hspace{-1mm} & = & \hspace{-1mm} \sqrt{\epsilon} \int_{u=-\infty}^t \sum_{i=0}^{+ \infty} \int_{C^i_u(t)} \frac{1}{\tau_+^{\frac{1}{4}}\tau_-^{\frac{3}{2}}} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv d C^i_u(t) du \\ \nonumber
& \lesssim & \sqrt{\epsilon} \int_{u=-\infty}^t \frac{1}{\tau_-^{\frac{3}{2}}} \sum_{i=0}^{+ \infty}\frac{1}{(1+t_i)^{\frac{1}{4}}} \int_{C^i_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv d C^i_u(t) du \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \int_{u=-\infty}^t \frac{du}{\tau_-^{\frac{3}{2}}} \sum_{i=0}^{+ \infty}\frac{(1+t_{i+1})^{\frac{1}{8}}}{(1+t_{i+1})^{\frac{1}{4}}} \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \int_{u = - \infty}^{+\infty} \frac{du}{\tau_-^{\frac{3}{2}}} \sum_{i=0}^{+ \infty} 2^{-\frac{i}{8}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray} \end{proof} We now start to bound the problematic terms. \begin{Pro}\label{M3} We study here the terms of \eqref{eq:cat3}. If, for $\kappa \geq 0$ and $r \in \mathbb{N}$, $$
\mathbb{E}[h](t)= \left\| h \right\|_{L^1_{x,v}}(t)+\sup_{u \in \mathbb{R}} \int_{C_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv dC_u(t) \lesssim \epsilon (1+t)^{\kappa} \log^r(3+t), \hspace{3mm} \text{then} $$
\begin{flalign*}
& \hspace{1cm} I^1_3:=\int_{0}^t \int_{\Sigma_s} \frac{\tau_+}{\tau_-} \left| \underline{\alpha} \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right| \int_v \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| h \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}(1+t)^{\kappa} \log^r(3+t) \hspace{3mm} \text{and} & \\
& \hspace{1cm} I^2_3:=\int_{0}^t \int_{\Sigma_s} \frac{\tau_+}{\tau_-} \left| \rho \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right|\int_v \left| h \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}(1+t)^{\kappa} \log^{r+a}(3+t) .& \end{flalign*} \end{Pro} \begin{Rq} The extra $\log^{a}(3+t)$-growth on $I^2_3$, compared to $I^1_3$, will not prevent us from closing the energy estimates in view of the hierarchies in the energy norms. Indeed, we have $j=q-1$ (in $I^2_3$) according to the properties of the terms of \eqref{eq:cat3} (in $I_3^1$, we merely have $j \leq q$). \end{Rq} \begin{proof}
Recall first from Lemma \ref{weights1} that $1+|v^A| \lesssim \sqrt{v^0 v^{\underline{L}}}$. Then, using Proposition \ref{decayF} and the inequality $2CD \leq C^2+D^2$, one obtains
$$ \sqrt{\frac{v^{\underline{L}}}{v^0}} \frac{\tau_+}{\tau_-}\left|\underline{\alpha} \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right| \lesssim \sqrt{\epsilon} \frac{v^{\underline{L}}}{\tau_-^{\frac{3}{2}}} \hspace{8mm} \text{and} \hspace{8mm} \frac{\tau_+}{\tau_-} \left| \rho \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \lesssim \sqrt{\epsilon} \log^M(3+t) \frac{v^0}{\tau_+}+\sqrt{\epsilon} \log^M(3+t) \frac{v^{\underline{L}} }{ \tau_-^3} .$$ We then have, as $a = M+1$, \begin{eqnarray}
\nonumber I^1_3 & \lesssim & \int_{u = -\infty}^t \frac{\sqrt{\epsilon}}{\tau_-^{\frac{3}{2}}} \int_{C_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv dC_u(t) du \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \, \mathbb{E}[h](t) \int_{u=-\infty}^{+\infty} \frac{du}{\tau_-^{\frac{3}{2}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} (1+t)^{\kappa} \log^r(3+t) , \\
\nonumber I^2_3 & \lesssim & \sqrt{\epsilon} \int_0^t \int_{\Sigma_s} \frac{\log^M(3+s)}{\tau_+} \int_v |h| dv dx ds +\sqrt{\epsilon} \log^M (3+t) \int_{u = -\infty}^t \frac{\sqrt{\epsilon}}{\tau_-^{\frac{3}{2}}} \int_{C_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv dC_u(t) du \\ \nonumber & \lesssim & \sqrt{\epsilon} \int_0^t \frac{\log^{r+M}(3+s)}{(1+s)^{1- \kappa}} ds+ \epsilon^{\frac{3}{2}} (1+t)^{\kappa} \log^{r+M}(3+t) \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}}(1+t)^{\kappa} \log^{r+M+1}(3+t) \hspace{2mm} = \hspace{2mm} \epsilon^{\frac{3}{2}}(1+t)^{\kappa} \log^{r+a}(3+t) . \end{eqnarray}
\end{proof} We finally end this subsection with the following estimate. \begin{Pro}\label{MM3}
We suppose here that $\max( |\xi^1|+|\beta|, |\xi^1|+1) \leq N-1$. Then, \begin{eqnarray}
\nonumber I_4 \hspace{2mm} := \hspace{2mm} \int_0^t \int_{\Sigma_s} \tau_+ \int_v \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| \left| h \right| \frac{dv}{v^0} dx ds & \lesssim & \epsilon^{\frac{3}{2}} \log^{(1+j+|\xi^1|+|\xi^2|+|\beta|)a}(3+t) \hspace{1cm} \text{if} \hspace{3mm} |\xi^2| \leq N-2, \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} (1+t)^{\frac{3}{4} \eta} \hspace{4cm} \text{otherwise}. \end{eqnarray} \end{Pro} \begin{Rq}\label{rq:i4}
To understand the extra hypothesis made in this proposition, recall from the properties of the terms of \eqref{eq:cat4} that we can assume $|\xi^1| < |\xi^0|$, $\beta=\beta^0$ and $j=q$. We then have
$$1+j+|\xi^1|+|\xi^2|+|\beta| \leq q+|\xi^0|+|\xi^2|+|\beta^0|.$$ \end{Rq} \begin{proof}
Let us denote by $(\alpha, \underline{\alpha}, \rho, \sigma)$ the null decomposition of $\mathcal{L}_{Z^{\gamma}}(F)$. Using $1+|v^A| \leq \sqrt{v^0 v^{\underline{L}}}$ and Proposition \ref{decayF}, we have \begin{eqnarray}
\nonumber \tau_+ \left| \frac{v^{\mu}}{(v^0)^2} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| \hspace{-1mm} & \lesssim & \hspace{-1mm} \tau_+ \sqrt{\frac{v^{\underline{L}}}{v^0}} \left( |\alpha|+|\rho|+|\sigma| \right)+\tau_+\frac{v^{\underline{L}}}{v^0}|\underline{\alpha}| \\ \nonumber & \lesssim & \hspace{-1mm} \sqrt{\epsilon} \sqrt{\frac{v^{\underline{L}}}{v^0}} \frac{\log^M (3+t)}{\sqrt{\tau_+ \tau_-}}+\sqrt{\epsilon} \frac{v^{\underline{L}}}{v^0} \frac{\log^M (3+t)}{\tau_-} \hspace{1.2mm} \lesssim \hspace{1.2mm} \sqrt{\epsilon} \frac{\log^M (3+t)}{\tau_+}+\sqrt{\epsilon} \frac{v^{\underline{L}}}{v^0} \frac{\log^M (3+t)}{\tau_-}. \end{eqnarray}
As $\tau_- \sim \tau_+$ away from the light cone (for, say\footnote{If $(s,y)$ is in one of these regions of $[0,t] \times \mathbb{R}^3$, we have $|y| \geq 2s$ or $|y| \leq \frac{s}{2}$.}, $u \leq -t$ and $ u \geq \frac{t}{2}$), we finally obtain that \begin{eqnarray}
\nonumber I_4 \hspace{-1mm} & = & \hspace{-1mm} \sqrt{\epsilon} \int_0^t \frac{\log^M(3+s)}{1+s} \int_{ \Sigma_s } \int_v |h| dv dx ds + \sqrt{\epsilon} \log^M (3+t) \int_{u=-t}^{\frac{t}{2}} \frac{1}{\tau_-} \int_{C_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0} |h| dv dC_u(t) du \\ \nonumber & \lesssim & \hspace{-1mm} \sqrt{\epsilon} \log^M(3+t) \sup_{[0,t]} \mathbb{E}[h] \int_0^t \hspace{-0.5mm} \frac{ds}{1+s}+\sqrt{\epsilon}\log^M(3+t) \mathbb{E}[h](t) \int_{u=-t}^t \hspace{-0.5mm} \frac{du}{\tau_-} \hspace{1.2mm} \lesssim \hspace{1.2mm} \sqrt{\epsilon} \log^{a}(3+t) \sup_{[0,t]} \mathbb{E}[h] . \end{eqnarray}
If $|\xi^2| \leq N-2$, the bootstrap assumption \eqref{bootf1} or \eqref{bootf2} gives
$$ \sup_{[0,t]} \mathbb{E}[h] \leq \epsilon \log^{(j+|\xi^1|+|\xi^2|+|\beta|)a}(3+t)$$
and we can conclude the proof in that case. If $|\xi^2|=N-1$, we have $j \leq 2N-1-\xi^1_P-\xi^2_P-\beta_P$ since this case appears only if $Q=N$. Let $(i_1,i_2) \in \mathbb{N}^2$ be such that $$i_1+i_2=2j, \hspace{1cm} i_1 \leq 2N-1-2 \xi^1_P-\beta_P \hspace{1cm} \text{and} \hspace{1cm} i_2 \leq 2N-1-2 \xi^2_P-\beta_P.$$ Using the bootstrap assumptions \eqref{bootf2} and \eqref{bootf3}, we have \begin{eqnarray}
\nonumber \mathbb{E}[h](t) & = & \int_{\Sigma_t} \int_v \left| z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) Y^{\beta} f \right| dv dx \\ \nonumber
& \lesssim & \left| \int_{\Sigma_t} \int_v \left| z^{i_1} P_{\xi^1}(\Phi)^2 Y^{\beta} f \right| dv dx \int_{\Sigma_t} \int_v \left| z^{i_2} P_{\xi^2}(\Phi)^2 Y^{\beta} f \right| dv dx \right|^{\frac{1}{2}} \\ \nonumber
& \lesssim & \left| \log^{(i_1+2|\xi^1|+|\beta|)a}(3+t) \mathbb{E}_{N-1}^0[f](t) \log^{a i_2}(3+t) \overline{\mathbb{E}}_N[f](t) \right|^{\frac{1}{2}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon (1+t)^{\frac{3}{4} \eta}, \end{eqnarray} which ends the proof. \end{proof} Note now that Propositions \ref{ComuPkp}, \ref{M1}, \ref{M2}, \ref{M3} and \ref{MM3} imply \eqref{improvebootf2} for $Q=N-3$, so that $\mathbb{E}^4_{N-3}[f] \leq 3 \epsilon$ on $[0,T[$. \subsection{Completion of the bounds on the spacetime integrals}\label{ref:L2elec}
In this subsection, we bound the spacetime integrals considered previously when the electromagnetic field is differentiated too many times to be estimated pointwise. For this, we make crucial use of the pointwise decay estimates on the velocity averages of $ \left| z^j P_{\zeta}(\Phi) Y^{\beta} f \right|$ which are given by \eqref{decayf}. The terms studied here appear only if $|\xi^0|+|\beta^0| \geq N-2$ since otherwise the electromagnetic field would be differentiated at most $N-3$ times. We then fix, for the remainder of the subsection, $Q \in \{N-1,N \}$, \begin{itemize}
\item multi-indices $\gamma$, $\beta$ and $\xi^1$ such that \hspace{1mm} $N-2 \leq |\gamma| \leq N$, $$ |\gamma|+|\xi^1| \leq Q, \hspace{3mm} |\xi^1|+ |\gamma| + |\beta| \leq Q+1, \hspace{3mm} |\beta| \leq |\beta^0|, \hspace{3mm} |\xi^1|+|\beta| \leq |\xi^0|+|\beta^0| \leq Q \hspace{3mm} \text{and} \hspace{3mm} |\xi^1| \leq Q-1.$$ \item $n \leq 2N$, \hspace{2mm} $z \in \mathbf{k}_1$ \hspace{2mm} and \hspace{2mm} $j \in \mathbb{N}$ \hspace{2mm} such that \hspace{2mm} $j \leq 2N-1-\xi^1_P-\xi^2_P-\beta_P$. \item Consistently with Proposition \ref{ComuPkp}, we will, in certain cases, make more assumptions on $\xi^1$ or $j$, such as $j \leq q$ for the terms of \eqref{eq:cat3}. \end{itemize}
Note that $|\xi^2|+|\beta| \leq Q$ and that there exist $i_1$ and $i_2$ such that $$i_1+i_2=2j, \hspace{5mm} i_1 \leq 2N-1-2\xi^1_P-\beta_P \hspace{5mm} \text{and} \hspace{5mm} i_2 \leq 2N-1-2\xi^2_P-\beta_P.$$ To lighten the notations, we introduce $$h := z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) Y^{\beta} f, \hspace{10mm} h_1 := z^{i_1} P_{\xi^1}(\Phi)^2 Y^{\beta} f \hspace{10mm} \text{and} \hspace{10mm} h_2 := z^{i_2} P_{\xi^2}(\Phi)^2 Y^{\beta} f,$$
so that $\left| h \right| = \sqrt{| h_1 h_2 |}$. As $|\gamma| \geq N-2$, we have $|\xi^1| \leq 2 \leq N-7$ and $2|\xi^1| + |\beta| \leq 5 \leq N-6$. Thus, by Lemma \ref{weights1} and \eqref{decayf}, we have, for all $(t,x) \in [0,T[ \times \mathbb{R}^3$, \begin{equation}\label{eq:h1}
\tau_+^3 \int_v |h_1| \frac{dv}{(v^0)^2}+\tau_+^2 \tau_- \int_v |h_1| dv \hspace{1mm} \lesssim \hspace{1mm} \int_v \left(\tau_+^3 \frac{v^{\underline{L}}}{v^0}+\tau_+^2 \tau_- \right) |h_1| dv \hspace{1mm} \lesssim \hspace{1mm} \epsilon \log^{(4+i_1+2|\xi^1|+|\beta|)a}(3+t). \end{equation} Using Remark \ref{rqweights1}, we have, \begin{equation}\label{eq:h11}
\forall \hspace{0.5mm} |x| \geq t, \hspace{1cm} \tau_+^3 \tau_- \int_v |h_1| \frac{dv}{(v^0)^2} \hspace{1mm} \lesssim \hspace{1mm} \tau_+^3 \tau_- \int_v \frac{v^{\underline{L}}}{v^0} |h_1| dv \hspace{1mm} \lesssim \hspace{1mm} \epsilon \log^{(4+i_1+2|\xi^1|+|\beta|)a}(3+t). \end{equation} \begin{Pro}\label{M21} The following estimates hold,
$$I^1_1 := \int_0^t \int_{\Sigma_s} \int_v |\Phi|^n\left| \nabla_{Z^{\gamma}} F \right| \left| h \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}, \hspace{6mm} I^2_1 := \int_0^t \int_{\Sigma_s} \int_v |\Phi|^n \frac{\tau_+}{\tau_-}\sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \left| h \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}$$
$$ \text{and} \hspace{8mm} I^3_1 := \int_0^t \int_{\Sigma_s} \int_v |\Phi|^n \frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \left| h \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}.$$ \end{Pro} \begin{proof}
Using the Cauchy-Schwarz inequality twice (in $x$ and then in $v$), $ \| \nabla_{Z^{\gamma}} F \|^2_{L^2(\Sigma_t)} \lesssim \mathcal{E}_N^0[F](t) \leq 4\epsilon $, $|\Phi| \lesssim \sqrt{\epsilon} \log^2(1+\tau_+)$, $\overline{\mathbb{E}}_N[f](t) \lesssim \epsilon (1+t)^{\eta}$ and \eqref{eq:h1}, we have \begin{eqnarray}
\nonumber I^1_1 & \lesssim & \int_0^t \| \nabla_{Z^{\gamma}} F \|_{L^2(\Sigma_s)} \left\| \int_v |\Phi|^{n} |h| \frac{dv}{v^0} \right\|_{L^2(\Sigma_s)}ds \\ \nonumber
& \lesssim & \sqrt{\epsilon} \int_0^t \left\| \log^{8N}(1+\tau_+) \int_v |h_1| \frac{dv}{(v^0)^2} \int_v |h_2| dv \right\|_{L^1(\Sigma_s)}^{\frac{1}{2}}ds \\ \nonumber
& \lesssim & \sqrt{\epsilon} \int_0^t \left\| \log^{8N}(1+\tau_+) \int_v |h_1| \frac{dv}{(v^0)^2} \right\|_{L^{\infty}(\Sigma_s)}^{\frac{1}{2}} \sqrt{\mathbb{E}[h_2](s)}ds \\ \nonumber & \lesssim & \epsilon \int_0^t \frac{\log^{4N+3Na}(3+s)}{(1+s)^{\frac{3}{2}}} \log^{ai_2}(3+s) \overline{\mathbb{E}}_N[f](s) ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray} For the second one, recall from the bootstrap assumptions \eqref{bootF1} and \eqref{bootf3} that for all $t \in [0,T[$ and $i \in \mathbb{N}$,
$$ \int_{C^i_u(t)} \hspace{-0.5mm} |\sigma|^2 dC^i_u(t) \leq \mathcal{E}_N^0[F](t_{i+1}(t)) \lesssim \epsilon \hspace{3.2mm} \text{and} \hspace{3.2mm} \sup_{u \in \mathbb{R}} \int_{C_u^i(t)} \int_v \frac{ v^{\underline{L}}}{v^0} \left| h_2 \right| dv dC^i_u(t) \lesssim \mathbb{E}[h_2](T_{i+1}(t)) \lesssim \epsilon (1+t_{i+1})^{2\eta}.$$ Hence, using this time a null foliation, one has \begin{eqnarray}
\nonumber I_1^2 & \lesssim & \sum_{i=0}^{+ \infty}\int_{u=-\infty}^t \frac{1}{\tau_-} \left| \int_{C^i_u(t)} |\sigma \left( \mathcal{L}_{Z^{\gamma}} ( F) \right)|^2 dC^i_u(t) \int_{C_u^i(t)} \tau_+^{2} \left| \int_v |\Phi|^n\sqrt{\frac{ v^{\underline{L}}}{v^0}} |h| \frac{dv}{v^0} \right|^2 dC_u^i(t) \right|^{\frac{1}{2}} du \\ \nonumber
& \lesssim & \sqrt{\epsilon} \sum_{i=0}^{+ \infty} \int_{u=-\infty}^t \frac{1}{\tau_-} \left| \int_{C_u^i(t)} \tau_+^{2} \log^{8N}(1+\tau_+) \int_v \left| h_1 \right| \frac{dv}{(v^0)^2} \int_v \frac{ v^{\underline{L}}}{v^0} \left| h_2 \right| dv dC_u^i(t) \right|^{\frac{1}{2}} du \\ \nonumber
& \lesssim & \sqrt{\epsilon} \sum_{i=0}^{+ \infty }\int_{u=-\infty}^t \frac{1}{\tau_-} \left| \int_{C_u^i(t)} \frac{1}{\tau_+^{\frac{3}{4}}} \int_v \frac{ v^{\underline{L}}}{v^0} \left| h_2 \right| dv dC_u^i(t) \right|^{\frac{1}{2}} du \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \int_{u=-\infty}^{+ \infty} \frac{du}{\tau_-^{\frac{9}{8}}} \sum_{i=0}^{+ \infty} \frac{(1+t_{i+1})^{\eta}}{(1+t_i)^{\frac{1}{4}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray} For the last one, use first that $F=\widetilde{F}+\overline{F}$ to get
$$I^3_1 = I_1^{\widetilde{F}}+I_1^{\overline{F}} := \int_0^t \int_{\Sigma_s} \int_v |\Phi|^n \frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(\widetilde{F}) \right) \right| \left| h \right| \frac{dv}{v^0} dx ds+\int_0^t \int_{\Sigma_s} \int_v |\Phi|^n \frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(\overline{F}) \right) \right| \left| h \right| \frac{dv}{v^0} dx ds.$$
By Proposition \ref{decayF}, we have $|\mathcal{L}_{Z^{\gamma}}(\overline{F})| \lesssim \epsilon \tau_+^{-2}$. Hence, using $|\Phi| \lesssim \log^2(1+\tau_+)$ and $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$, we have
$$ |\Phi|^n \frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(\overline{F}) \right) \right| \lesssim \frac{ \epsilon \sqrt{v^0v^{\underline{L}}} }{\sqrt{v^0}\tau_+^{\frac{3}{4}} \tau_-} \leq \epsilon \frac{v^0}{\tau_+^{\frac{5}{4}}} + \epsilon \frac{ v^{\underline{L}} }{\tau_+^{\frac{1}{4}} \tau_-^2}$$ and we can bound $I_1^{\overline{F}}$ by $\epsilon^{\frac{3}{2}}$ as $I_1$ in Proposition \ref{M2}. For $I_1^{\widetilde{F}}$, remark first that, by the bootstrap assumptions \eqref{bootext}, \eqref{bootF4} and since $F=\widetilde{F}$ in the interior of the light cone,
$$ \int_{C^i_u(t)} \tau_+\left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(\widetilde{F}) \right) \right|^2 dC_u^i(t) \lesssim \mathcal{E}_N[F](T_{i+1}(t))+\mathcal{E}^{Ext}_{N}[\widetilde{F}](T_{i+1}(t)) \lesssim \epsilon (1+t_{i+1})^{\eta}.$$
It then follows, using $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$, $16 \eta < 1$ and $\int_v |\Phi|^n |h_1| dv \lesssim \epsilon \tau_+^{-\frac{3}{2}}\tau_-^{-1}$, that \begin{eqnarray}
\nonumber I_1^{\widetilde{F}} & \lesssim & \sum_{i=0}^{+ \infty}\int_{u=-\infty}^t \frac{1}{\tau_-} \left| \int_{C^i_u(t)} \tau_+ \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right|^2 dC_u^i(t) \int_{C_u^i(t)} \tau_+ \left| \int_v |\Phi|^n \sqrt{\frac{ v^{\underline{L}}}{v^0}} |h| dv \right|^2 dC_u^i(t) \right|^{\frac{1}{2}} du \\ \nonumber
& \lesssim & \sqrt{\epsilon} \sum_{i=0}^{+ \infty} (1+t_{i+1})^{\eta}\int_{u=-\infty}^t \frac{1}{\tau_-} \left| \int_{C_u^i(t)} \tau_+ \int_v |\Phi|^n \left| h_1 \right| dv \int_v \frac{ v^{\underline{L}}}{v^0} \left| h_2 \right| dv dC_u^i(t) \right|^{\frac{1}{2}} du \\ \nonumber & \lesssim & \sqrt{\epsilon} \sum_{i=0}^{+ \infty }\frac{(1+t_{i+1})^{2\eta}}{(1+t_i)^{\frac{1}{4}}} \int_{u=-\infty}^{+\infty} \frac{du}{\tau_-^{\frac{3}{2}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} \sum_{i=0}^{+ \infty} 2^{-\frac{i}{4}(1-8 \eta)} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray} \end{proof} We now turn to the problematic terms. \begin{Pro}\label{M32}
If $|\xi_2| \leq N-2$, we have \begin{eqnarray}
\nonumber I^1_3 & = & \int_{0}^t \int_{\Sigma_s} \frac{\tau_+}{\tau_-} \left| \underline{\alpha} \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right| \int_v \sqrt{\frac{v^{\underline{L}}}{v^0}} |h| \frac{dv}{v^0} dx ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} \log^{(3+j+|\xi_1|+|\xi_2|+|\beta|)a} (3+t) \hspace{3mm} \text{and} \\ \nonumber I^2_3 & = & \int_{0}^t \int_{\Sigma_s} \frac{\tau_+}{\tau_-} \left| \rho \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right|\int_v |h| \frac{dv}{v^0} dx ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} \log^{(2+j+|\xi_1|+|\xi_2|+|\beta|)a} (3+t). \end{eqnarray}
Otherwise, $|\xi^2|=N-1$ and $I^1_3 +I^2_3 \lesssim \epsilon^{\frac{3}{2}} (1+t)^{\frac{3}{4} \eta}$. \end{Pro} \begin{Rq}\label{nopb} Note that these estimates are sufficient to improve the bootstrap assumptions \eqref{bootf2} and \eqref{bootf3}. Indeed, \begin{itemize}
\item the case $|\xi^2|=N-1$ concerns only the study of $\overline{\mathbb{E}}_N[f]$.
\item Even if the bound on $I^2_3+I^1_3$, when $|\xi^2| \leq N-2$, might seem to carry an extra factor of $\log^{3a}(3+t)$, one has to keep in mind that $|\gamma| \geq N-2$, so $|\xi^1|+|\beta| \leq 3$ and $|\xi^0|+|\beta^0| \geq N-2$. Moreover, by the properties of the terms of \eqref{eq:cat3}, $j \leq q$. We then have, as $N \geq 8$,
$$j+3+|\xi^1|+|\xi^2|+|\beta| \leq q+|\xi^0|+|\xi^2|+|\beta^0|.$$ \end{itemize} \end{Rq} \begin{proof} Throughout this proof, we will use \eqref{eq:h1} and the bootstrap assumption \eqref{bootF1}, which implies
$$ \left\| \underline{\alpha} \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right\|_{L^2 \left( \Sigma_t \right) } + \sup_{u \in \mathbb{R}} \left\| \rho \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right\|_{L^2 \left( C_u(t) \right) } \lesssim \sqrt{\mathcal{E}_{N}^0[F](t)} \lesssim \epsilon^{\frac{1}{2}}.$$ Applying the Cauchy-Schwarz inequality twice (in $(t,x)$ and then in $v$), we get \begin{eqnarray}
\nonumber I^1_3 & \lesssim & \left| \int_{0}^t \frac{\left\| \underline{\alpha} \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right\|_{L^2 \left( \Sigma_s \right) }}{1+s} ds \int_{u=- \infty}^t \int_{C_u(t)} \frac{\tau_+^3}{\tau_-^2} \left| \int_v \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| h\right| \frac{dv}{v^0} \right|^2 dC_u(t) du \right|^{\frac{1}{2}} \\ \nonumber
& \lesssim & \epsilon^{\frac{1}{2}}\log^{\frac{1}{2}}(1+t) \left| \int_{u=- \infty}^t \frac{1}{\tau_-^2} \int_{C_u(t)} \int_v \frac{v^{\underline{L}}}{v^0} \left| h_2 \right| dv dC_u(t) du \right|^{\frac{1}{2}} \sup_{u \in \mathbb{R}} \left\| \tau_+^3 \int_v |h_1| \frac{dv}{(v^0)^2} \right\|_{L^{\infty} \left( C_u(t) \right) }^{\frac{1}{2}} \\ \nonumber
& \lesssim & \epsilon \log^{\frac{1}{2}+\frac{a}{2} \left(4+i_1+ 2|\xi^1|+|\beta| \right)}(3+t) \sqrt{\mathbb{E}[h_2](t)}. \end{eqnarray} Using $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$ and the Cauchy-Schwarz inequality (this time in $(\underline{u},\omega_1,\omega_2)$ and then in $v$), we obtain \begin{eqnarray}
\nonumber I^2_3 & \lesssim & \int_{u=-\infty}^t \left\| \rho \left( \mathcal{L}_{Z^{\gamma}} ( F) \right) \right\|_{L^2 \left( C_u(t) \right) } \left| \int_{C_u(t)} \frac{\tau_+^2}{\tau_-^2} \left| \int_v \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| h \right| dv \right|^2 dC_u(t) \right|^{\frac{1}{2}} du \\ \nonumber
& \lesssim & \epsilon^{\frac{1}{2}} \int_{u = - \infty}^t \frac{1}{\tau_-^{\frac{3}{2}}} \left\| \tau_+^2 \tau_- \int_v |h_1| dv \right\|_{L^{\infty} \left( C_u(t) \right) }^{\frac{1}{2}} \left| \int_{C_u(t)} \int_v \frac{v^{\underline{L}}}{v^0} \left| h_2 \right| dv dC_u(t) \right|^{\frac{1}{2}} du \\ \nonumber
& \lesssim & \epsilon \log^{\frac{a}{2} \left(4+i_1 +2|\xi^1|+|\beta| \right)}(3+t) \sqrt{\mathbb{E}[h_2](t)}. \end{eqnarray} It then remains to remark that, by the bootstrap assumptions \eqref{bootf2} and \eqref{bootf3}, \begin{itemize}
\item $\mathbb{E}[h_2](t) \leq \log^{ (i_2+2|\xi_2|+|\beta|)a}(3+t) \mathbb{E}^0_{N-1}[f](t) \lesssim \epsilon \log^{ (i_2+2|\xi_2|+|\beta|)a}(3+t)$, if $|\xi_2| \leq N-2$, or
\item $\mathbb{E}[h_2](t) \leq \log^{a i_2}(3+t) \overline{\mathbb{E}}_{N}[f](t) \lesssim \epsilon (1+t)^{\eta} \log^{a i_2}(3+t)$, if $|\xi_2| = N-1$. \end{itemize} \end{proof}
Let us now move on to the expressions of \eqref{eq:cat4}. The ones where $|\gamma|=N$ are the most critical terms and will be treated later. \begin{Pro}\label{M41}
Suppose that $N-2 \leq |\gamma| \leq N-1$. Then, if $|\xi_2| \leq N-2$, \begin{flalign*}
& \hspace{1cm} I_4=\int_0^t \int_{\Sigma_s} \int_v \tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| \left| h \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}} \log^{(3+j+|\xi_1|+|\xi_2|+|\beta|)a}(3+t) & \end{flalign*} and $I_4 \lesssim \epsilon^{\frac{3}{2}} (1+t)^{\frac{3}{4} \eta}$ otherwise. \end{Pro} For reasons similar to those given in Remark \ref{nopb}, these bounds are sufficient to close the energy estimates on $\overline{\mathbb{E}}_N[f]$ and $\mathbb{E}^0_{N-1}[f]$.
\begin{proof}
Denoting by $(\alpha, \underline{\alpha}, \rho, \sigma )$ the null decomposition of $\mathcal{L}_{Z^{\gamma}}(\widetilde{F})$ and using $|v^A| \lesssim \sqrt{v^0v^{\underline{L}}}$, we have \begin{eqnarray}
\nonumber \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| & \lesssim & |\alpha (\mathcal{L}_{ Z^{\gamma}}(F))|+|\sigma (\mathcal{L}_{ Z^{\gamma}}(F)) |+|\rho (\mathcal{L}_{ Z^{\gamma}}(F)) |+\sqrt{\frac{v^{\underline{L}}}{v^0}}|\underline{\alpha} (\mathcal{L}_{ Z^{\gamma}}(F)) | \\ \nonumber
& \lesssim & |\alpha|+|\rho|+|\sigma|+\sqrt{\frac{ v^{\underline{L}}}{v^0}}|\underline{\alpha}|+\left| \mathcal{L}_{Z^{\gamma}}( \overline{F} ) \right|. \end{eqnarray} and we can then bound $I_4$ by $I_{\alpha,\sigma,\rho}+I_{\underline{\alpha}}+I_{\overline{F}}$ (these quantities will be clearly defined below). Note now that \begin{equation}\label{eq:alphasigma}
\left\| \sqrt{\tau_+} |\alpha| +\sqrt{\tau_+} |\rho|+\sqrt{\tau_+} |\sigma| \right\|^2_{L^2(\Sigma_s)}+\left\| \sqrt{\tau_-} |\underline{\alpha}| \right\|^2_{L^2(\Sigma_s)} \hspace{1mm} \lesssim \hspace{1mm} \mathcal{E}^{Ext}_{N}[\widetilde{F}](s)+ \mathcal{E}_{N-1}[F](s) \hspace{1mm} \lesssim \hspace{1mm} \epsilon \log^{2M}(3+s). \end{equation} Then, using the Cauchy-Schwarz inequality twice (in $(t,x)$ and then in $v$), the estimates \eqref{eq:h1} and \eqref{eq:h11} as well as $a = M+1$, we get \begin{eqnarray}
\nonumber I_{\underline{\alpha}} & := & \int_0^t \int_{\Sigma_s} \tau_+ |\underline{\alpha}| \int_v \sqrt{\frac{ v^{\underline{L}}}{v^0}} |h| \frac{dv}{v^0} dx ds \\ \nonumber
& \lesssim & \left| \int_0^t \frac{\|\sqrt{\tau_-} |\underline{\alpha}| \|^2_{L^2(\Sigma_s)}}{1+s}ds \int_{u=-\infty}^t \int_{C_u(t)} \frac{\tau_+^2(1+s)}{\tau_-} \left| \int_v \sqrt{ \frac{ v^{\underline{L}}}{v^0}} |h|\frac{ dv}{v^0} \right|^2 dC_u(t) du \right|^{\frac{1}{2}} \\ \nonumber
& \lesssim & \sqrt{\epsilon} \log^{M+\frac{1}{2}}(3+t) \left| \int_{u=-\infty}^t \frac{1}{\tau_-} \left\| \tau_+^2(1+s) \int_v |h_1| \frac{dv}{(v^0)^2} \right\|_{L^{\infty}(C_u(t))} \int_{C_u(t)} \int_v \frac{ v^{\underline{L}}}{v^0} |h_2| dv dC_u(t) du \right|^{\frac{1}{2}} \\ \nonumber
& \lesssim & \epsilon^{\frac{3}{2}} \log^{-\frac{1}{2}+\frac{a}{2}(5+i_1+2|\xi^1|+|\beta|)}(3+t) \sqrt{\mathbb{E}[h_2](t)} \left| \int_{u=-\infty}^0 \frac{du}{\tau_-^{\frac{3}{2}}}+\int_{u=0}^t \frac{du}{\tau_-} \right|^{\frac{1}{2}} \\ \nonumber
& \lesssim & \epsilon^{\frac{3}{2}} \log^{\frac{a}{2}(6+i_1+2|\xi^1|+|\beta|)}(3+t) \sqrt{\mathbb{E}[h_2](t)}. \end{eqnarray} Similarly, one has \begin{eqnarray}
\nonumber I_{\alpha, \rho, \sigma} & := & \int_0^t \int_{\Sigma_s} \tau_+ (|\alpha|+|\rho|+|\sigma|) \int_v |h| \frac{dv}{v^0} dx ds \\ \nonumber
& \lesssim & \int_0^t \| \sqrt{\tau_+}|\alpha|+\sqrt{\tau_+} |\rho|+\sqrt{\tau_+} |\sigma| \|_{L^2(\Sigma_s)} \left\| \sqrt{\tau_+} \int_v |h| \frac{dv}{v^0} \right\|_{L^2(\Sigma_s)} ds \\ \nonumber
& \lesssim & \int_0^t \sqrt{\epsilon} \log^M(3+s) \left\| \tau_+ \int_v |h_1|\frac{dv}{(v^0)^2} \right\|_{L^{\infty}(\Sigma_s)}^{\frac{1}{2}} \left\| \int_v |h_2| dv \right\|_{L^1(\Sigma_s)}^{\frac{1}{2}} ds \\ \nonumber
& \lesssim & \epsilon \log^{\frac{a}{2}(6+i_1+2|\xi^1|+|\beta|)} (3+t) \left\| \mathbb{E}[h_2] \right\|_{L^{\infty}([0,t])}^{\frac{1}{2}}. \end{eqnarray}
For the last integral, recall from Propositions \ref{propcharge} and \ref{decayF} that $\overline{F}(t,x)$ vanishes for all $t-|x| \geq -1$ and that $|\mathcal{L}_{Z^{\gamma}}(\overline{F})| \lesssim \epsilon \tau_+^{-2}$. We are then led to bound \begin{eqnarray}
\nonumber I_{\overline{F}} & := & \int_0^t \int_{|x| \geq s+1} \tau_+ |\mathcal{L}_{Z^{\gamma}}(\overline{F})|\int_v |h| \frac{dv}{v^0} dx ds \\ \nonumber
& \lesssim & \int_0^t \frac{\sqrt{\epsilon}}{1+s} \int_{\Sigma_s} \int_v \sqrt{ \left| h_1 h_2 \right| } dv dx ds \hspace{1mm} \lesssim \hspace{1mm} \int_0^t \frac{\sqrt{\epsilon}}{1+s} \left| \int_{\Sigma_s} \int_v \left| h_1 \right| dv dx \int_{\Sigma_s} \int_v \left| h_2 \right| dv dx \right|^{\frac{1}{2}} ds \\ \nonumber
& \lesssim & \sqrt{\epsilon} \log(3+t) \left\| \mathbb{E}[h_1] \right\|_{L^{\infty}([0,t])}^{\frac{1}{2}}\left\| \mathbb{E}[h_2] \right\|_{L^{\infty}([0,t])}^{\frac{1}{2}} . \end{eqnarray}
Thus, as $\left\| \mathbb{E}[h_1] \right\|_{L^{\infty}([0,t])} \lesssim \epsilon \log^{(i_1+2|\xi_1|+|\beta|)a}(3+t)$ and $i_1+i_2=2j$, we have \begin{itemize}
\item $I_4 \lesssim \epsilon^{\frac{3}{2}} (1+t)^{\frac{3}{4} \eta}$ if $|\xi_2|=N-1$, since $\mathbb{E}[h_2](t) \leq \log^{ai_2}(3+t) \overline{\mathbb{E}}_N[f](t) \leq \epsilon (1+t)^{\eta} \log^{ai_2}(3+t) $, and
\item $I_4 \lesssim \epsilon^{\frac{3}{2}} \log^{(3+j+|\xi_1|+|\xi_2|+|\beta|)a}(3+t)$ otherwise, as $\mathbb{E}[h_2] \leq \log^{(i_2+2|\xi^2|+|\beta|)a}(3+t) \mathbb{E}_{N-1}^0[f](t)$ . \end{itemize} \end{proof}
A better pointwise decay estimate on $\int_v |h_1|(v^0)^{-2}dv$ is required to bound $I_4$ sufficiently well when $|\gamma|=N$. We will then treat this case below, in the last part of this section. However, note that all the Propositions already proved in this section imply \eqref{improvebootf2}, for $Q=N-1$, and then $\mathbb{E}^0_{N-1}[f] \leq 3\epsilon$ on $[0,T[$.
\subsection{Estimates for $ \mathbb{E}^X_{N-1}[f]$, $ \mathbb{E}^X_{N}[f]$ and derivation of optimal decay near the lightcone for velocity averages}\label{Ximpro}
The purpose of this subsection is to establish that\footnote{Note that we cannot unify these norms because of a lack of weights $z \in \mathbf{k}_1$. As we will apply Proposition \ref{ComuPkp} with $N_0=2N-1$, we cannot propagate more than $2N-2$ weights and at the same time avoid the problematic terms.} $ \mathbb{E}^X_{N-1}[f]$, $ \mathbb{E}^X_{N}[f] \leq 3\epsilon$ on $[0,T[$ and then to deduce optimal pointwise decay estimates on the velocity averages of the particle density. Remark that, according to the energy estimate of Proposition \ref{energyf}, $ \mathbb{E}^X_{N}[f] \leq 3\epsilon$ follows, if $\epsilon$ is small enough, from
\begin{equation}\label{4:eq} \int_0^t \int_{\Sigma_s} \int_v \left| T_F \left( z^q P^X_{\xi} (\Phi ) Y^{\beta} f \right) \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}\log^{2q} (3+t), \end{equation} \begin{itemize}
\item for all multi-indices $\beta$ and $\xi$ such that $\max(|\beta|+|\xi|,|\xi|+1) \leq N$ and \item for all $z \in \mathbf{k}_1$ and $q \in \mathbb{N}$ such that $q \leq 2N-2-\xi_P-\beta_P$. \end{itemize} Most of the work has already been done. Indeed, the commutation formula of Proposition \ref{ComuPkpX} (applied with $N_0=2N-1$) leads us to bound only terms of \eqref{eq:cat0} and \eqref{eq:cat1} since $q \leq 2N-2-\xi_P-\beta_P$. Note that we control quantities of the form
$$ z^j P_{\xi^1}(\Phi) Y^{\beta^1} f, \hspace{5mm} \text{with} \hspace{5mm} |\xi^1|+|\beta^1| \leq N, \hspace{5mm} |\xi^1| \leq N-1 \hspace{5mm} \text{and} \hspace{5mm} j \leq 2N-1-\xi^1_P-\beta^1_P.$$ Consequently, \eqref{4:eq} ensues from Propositions \ref{M1}, \ref{M2} and \ref{M21}. $\mathbb{E}_{N-1}^X[f]$ can be estimated similarly since we also control quantities such as
$$ z^j P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) Y^{\kappa} f, \hspace{5mm} \text{with} \hspace{5mm} \max(|\xi^1|+|\kappa|,|\xi^2|+|\kappa|) \leq N-1 \hspace{5mm} \text{and} \hspace{5mm} j \leq 2N-1-\xi^1_P-\xi^2_P-\kappa_P.$$
Note that \eqref{4:eq} also provides us, through Theorem \ref{decayopti}, that, for all $\max ( |\xi|+|\beta|, 1 +|\xi| ) \leq N-3$,
$$ \forall \hspace{0.5mm} |x| \leq t < T, \hspace{3mm} z \in \mathbf{k}_1, \hspace{3mm} j \leq 2N-5-\xi_P-\beta_P, \hspace{5mm} \int_v \left|z^j P^X_{\xi} (\Phi) Y^{\beta} f \right| \frac{dv}{(v^0)^2} \lesssim \epsilon \frac{\log^{2j}(3+t)}{\tau_+^3}.$$
For the exterior region, use Proposition \ref{decayopti2} and $ \mathbb{E}^{X}_N[f] \leq 3\epsilon$ to derive, for all $\max ( |\xi|+|\beta|, |\xi|+1 ) \leq N-3$,
$$ \forall \hspace{0.5mm} (t,x) \in V_0(T), \hspace{3mm} z \in \mathbf{k}_1, \hspace{3mm} j \leq 2N-6-\xi_P-\beta_P, \hspace{5mm} \int_v \left|z^j P^X_{\xi} (\Phi) Y^{\beta} f \right| \frac{dv}{(v^0)^2} \lesssim \epsilon \frac{\log^{2(j+1)}(3+t)}{\tau_+^3\tau_-}.$$ We summarize all these results in the following proposition (the last estimate comes from Corollary \ref{KS2}). \begin{Pro}\label{Xdecay}
If $\epsilon$ is small enough, then $ \mathbb{E}^{X}_{N-1}[f] \leq 3 \epsilon$ and $ \mathbb{E}^{X}_{N}[f] \leq 3 \epsilon$ hold on $[0,T]$. Moreover, we have, for all $\max ( |\xi|+|\beta|, |\xi|+1 ) \leq N-3$, $z \in \mathbf{k}_1$ and $j \leq 2N-6 - \xi_P-\beta_P$, \begin{eqnarray}
\nonumber \forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{10mm} \int_v \left|z^j P^X_{\xi} (\Phi) Y^{\beta} f \right| \frac{dv}{(v^0)^2} & \lesssim & \epsilon \frac{\log^{2j}(3+t)}{\tau_+^3} \mathds{1}_{t \geq |x|}+ \epsilon\frac{\log^{2(j+1)}(3+t)}{\tau_+^3\tau_-} \mathds{1}_{|x| \geq t}, \\ \nonumber
\forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{10mm} \int_v \left|z^j P^X_{\xi} (\Phi) Y^{\beta} f \right| dv & \lesssim & \epsilon \frac{\log^{2j}(3+t)}{\tau_+^2 \tau_-}. \end{eqnarray} \end{Pro}
\subsection{The critical terms}\label{sec86}
We finally bound $I_4$, defined in Proposition \ref{M41}, when $|\gamma|=N$, which concerns only the improvement of the bound of the higher order energy norm $\overline{\mathbb{E}}_N[f]$. We keep the notations introduced in Subsection \ref{ref:L2elec} and we start by making them precise. Using the properties of the terms of \eqref{eq:cat4}, we remark that we necessarily have
$$P_{\xi^0}(\Phi)=Y^{\xi^0} \Phi, \hspace{5mm} |\xi^0|=N-1, \hspace{5mm} |\beta^0| \leq 1, \hspace{5mm} |\xi^1|=0, \hspace{5mm} \beta = \beta^0, \hspace{5mm} \gamma_T = \xi^0_T \hspace{5mm} \text{and} \hspace{5mm} j=q.$$ We are then led to prove
$$I_4 = \int_0^t \int_{\Sigma_s} \int_v \tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| \left| z^{q} P_{\xi^2}(\Phi) Y^{\beta^0} f \right| \frac{dv}{v^0} dx ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} (1+t)^{\eta} \log^{aq}(3+t).$$
If $\gamma_T=\xi^0_T \geq 1$, one can use inequality \eqref{eq:zeta2} of Proposition \ref{ExtradecayLie} and $|v^A| \lesssim \sqrt{v^0 v^{\underline{L}}}$ in order to obtain
$$ \tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| \lesssim \left(1+ \frac{\sqrt{ v^{\underline{L}}}\tau_+}{\sqrt{v^0}\tau_-} \right) \sum_{|\gamma_0| \leq N} \left| \nabla_{Z^{\gamma_0}} F \right|+ \frac{\tau_+}{\tau_-} \sum_{|\gamma_0| \leq N} \left| \alpha ( \mathcal{L}_{Z^{\gamma_0}}(F)) \right|+\left| \rho ( \mathcal{L}_{Z^{\gamma_0}}(F)) \right|$$ and then split $I_4$ in four parts and bound them by $\epsilon^{\frac{3}{2}}$ or $\epsilon^{\frac{3}{2}} (1+t)^{\frac{3}{4} \eta}$, as $I^1_1$, $I^3_1$, $I^1_3$ and $I^2_3$ in Propositions \ref{M21} and \ref{M32}. Otherwise, $\xi^0_P=N-1$ and $q \leq N-\xi^2_P-\beta^0_P$ so that we take $i_2 \leq 2N-1-2\xi^2_P-\beta^0_P$ and $i_1 \leq 1-\beta^0_P$. Then, we divide $[0,t] \times \mathbb{R}^3$ in two parts, $V_0(t)$ and its complement. Following the proof of Proposition \ref{M41}, one can prove, as $\mathcal{E}_{N}^{Ext}[\widetilde{F}] \lesssim \epsilon$ and $|\mathcal{L}_{Z^{\gamma}}(\overline{F})| \lesssim \epsilon \tau_+^{-2}$ on $[0,T[$, that
$$ \int_0^t \int_{\overline{\Sigma}^0_s} \int_v \tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| \left| z^{q} P_{\xi^2}(\Phi) Y^{\beta^0} f \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}} (1+t)^{\frac{3}{4} \eta}.$$
To lighten the notations, let us denote the null decomposition of $\mathcal{L}_{Z^{\gamma}}(F)$ by $(\alpha, \underline{\alpha}, \rho, \sigma)$. Recall from Lemma \ref{weights1} that $ \tau_+ |v^A| \lesssim v^0 \sum_{w \in \mathbf{k}_1} |w|$ and $\tau_+v^{\underline{L}} \lesssim \tau_-v^0+v^0\sum_{w \in \mathbf{k}_1} |w|$, so that \begin{eqnarray}
\nonumber \tau_+ \left| \frac{v^{\mu}}{v^0} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \right| & \lesssim & \tau_+\left(|\alpha|+|\rho|\right)+\tau_+\frac{v^{\underline{L}}}{v^0} |\underline{\alpha}|+ \tau_+ \frac{|v^A|}{v^0}\left( |\sigma|+|\underline{\alpha}| \right) \\ \nonumber
& \lesssim & \left(\tau_+|\alpha|+\tau_+|\rho|+\tau_-|\underline{\alpha}| \right)+\sum_{w \in \mathbf{k}_1} |w| \left( |\sigma|+|\underline{\alpha}| \right). \end{eqnarray}
We can then split the remaining part of $I_4$ in two integrals. The one associated to $\sum_{w \in \mathbf{k}_1} |w|(|\sigma|+|\underline{\alpha}|)$ can be bounded by $\epsilon^{\frac{3}{2}}$ as $I^1_1$ in Proposition \ref{M21} since $i_1+1 \leq 2N-1-\beta_P^0$. For the one associated to $\left(\tau_+|\alpha|+\tau_+|\rho|+\tau_-|\underline{\alpha}| \right)$, $\overline{I}_4$, we have \begin{eqnarray}
\nonumber \overline{I}_4 & := & \int_0^t \int_{\Sigma^0_s} \left(\tau_+|\alpha|+\tau_+|\rho|+\tau_-|\underline{\alpha}| \right) \int_v \left| z^{q} P_{\xi^2}(\Phi) Y^{\beta^0} f \right| \frac{dv}{v^0} dx ds \\ \nonumber
& \lesssim & \int_0^t \sqrt{\mathcal{E}_N[F](s)} \left\| \sqrt{\tau_+} \int_v \left| z^{q} P_{\xi^2}(\Phi) Y^{\beta^0} f \right| \frac{dv}{v^0} \right\|_{L^2(\Sigma^{0}_s)} ds \\ \nonumber
& \lesssim & \sqrt{\epsilon} \int_0^t \sqrt{\mathcal{E}_N[F](s)} \left\| \tau_+ \int_v \left| z^{i_1} Y^{\beta^0} f \right| \frac{dv}{(v^0)^2} \right\|^{\frac{1}{2}}_{L^{\infty}(\Sigma^0_s)} \left\| \int_v \left| z^{i_2} P_{\xi^2}(\Phi)^2 Y^{\beta^0} f \right| dv \right\|^{\frac{1}{2}}_{L^1(\Sigma^0_s)} ds . \end{eqnarray}
Using the bootstrap assumptions \eqref{bootf3}, \eqref{bootF4} and the pointwise decay estimate on $\int_v \left| z^{i_1} Y^{\beta^0} f \right| \frac{dv}{(v^0)^2}$ given in Proposition \ref{Xdecay}, we finally obtain $$\overline{I}_4 \hspace{2mm} \lesssim \hspace{2mm} \sqrt{\epsilon} \int_0^t (1+s)^{\frac{\eta}{2}} \frac{\sqrt{\epsilon} \log^{i_1}(3+s)}{1+s} \sqrt{\epsilon} (1+s)^{\frac{\eta}{2}} \log^{\frac{a}{2}i_2}(3+s) ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} (1+t)^{\eta} \log^{a q}(3+t),$$ which concludes the improvement of the bootstrap assumption \eqref{bootf3}. \begin{Rq} In view of the computations made to estimate $\overline{I}_4$, note that: \begin{itemize}
\item The use of Theorem \ref{decayopti}, instead of \eqref{decayf} combined with $1 \lesssim v^0 v^{\underline{L}}$ and Lemma \ref{weights1}, was necessary. Indeed, for the case $q=0$, a decay rate of $\log^2 (3+t) \tau_+^{-3}$ on $\int_v \left| Y^{\beta^0} f \right| \frac{dv}{(v^0)^2}$ would prevent us from closing energy estimates on $\mathcal{E}_N[F]$ and $\overline{\mathbb{E}}_N[f]$.
\item Similarly, it was crucial to have a better bound on $\mathcal{E}^{Ext}_{N}[G](t)$ than $\epsilon (1+t)^{\eta}$ as the decay rate given by Proposition \ref{Xdecay} on $\int_v \left| Y^{\beta^0} f \right| \frac{dv}{(v^0)^2}$ is weaker, in the $t+r$ direction, outside the light cone. \end{itemize} \end{Rq} Note that Propositions \ref{M2}, \ref{M3}, \ref{MM3}, \ref{M21}, \ref{M32} and \ref{M41} also prove that \begin{equation}\label{Auxenergy}
\mathbb{A}[f](t) := \sum_{i=1}^2 \sum_{\begin{subarray}{} |\xi^i|+|\beta| \leq N \\ |\xi^i| \leq N-2 \end{subarray}}\sum_{\begin{subarray}{} |\zeta^i|+|\beta| \leq N \\ |\zeta^i| \leq N-1 \end{subarray}} \mathbb{E} \left[ P_{\xi^1}(\Phi) P_{\xi^2}(\Phi) Y^{\beta} f \right] (t)+\mathbb{E} \left[ P_{\zeta^1}^X(\Phi) P_{\zeta^2}^X(\Phi) Y^{\beta} f \right] (t) \lesssim \epsilon (1+t)^{\frac{3}{4} \eta}. \end{equation}
Indeed, to estimate this energy norm, we do not have to deal with the critical terms of this subsection (as $|\xi^i| \leq N-2$ and according to Proposition \ref{ComuPkpX}).
\section{$L^2$ decay estimates for the velocity averages of the Vlasov field}\label{sec11}
In view of the commutation formula of Propositions \ref{ComuMaxN} and \ref{CommuFsimple}, we need to prove enough decay on quantities such as $\left\| \sqrt{\tau_-} \int_v | Y^{\beta} f | dv \right\|_{L^2_x}$, for all $|\beta| \leq N$. Applying Proposition \ref{Xdecay}, we are already able to obtain such estimates if $|\beta| \leq N-3$ (see Proposition \ref{estiL2} below). The aim of this section is then to treat the case of the higher order derivatives. For this, we follow the strategy used in \cite{FJS} (Section $4.5.7$). Before describing the procedure, let us rewrite the system. Let $I_1$, $I_2$ and $I^q_1$, for $N-5 \leq q \leq N$, be the sets defined as \begin{flalign*}
& \hspace{0.7cm} I_1 := \left\{ \beta \hspace{2mm} \text{multi-index} \hspace{2mm} / \hspace{2mm} N-5 \leq |\beta| \leq N \right\} = \{ \beta^1_{1}, \beta^1_{2}, ..., \beta^1_{|I_1|} \}, \hspace{1.3cm} I^q_1 := \left\{ \beta \in I_1 \hspace{2mm} / \hspace{2mm} |\beta| = q \right\}, & \\
& \hspace{0.7cm} I_2 := \left\{ \beta \hspace{2mm} \text{multi-index} \hspace{2mm} / \hspace{2mm} |\beta| \leq N-5 \right\} = \{ \beta^2_{1}, \beta^2_{2}, ..., \beta^2_{|I_2|} \}, & \end{flalign*}
and $R^1$ and $R^2$ be two vector valued fields, of respective length $|I_1|$ and $|I_2|$, such that $$ R^1_j= Y^{\beta^1_{j}}f \hspace{2cm} \text{and} \hspace{2cm} R^2_j= Y^{\beta^2_{j}}f. $$
We will sometimes abusively write $j \in I_i$ instead of $\beta^i_{j} \in I_i$ (and similarly for $j \in I^k_1$). The goal now is to prove $L^2$ estimates on $\int_v |R^1| dv$. Finally, we denote by $\mathbb{V}$ the module over the ring $C^0([0,T[ \times \mathbb{R}^3_x \times \mathbb{R}^3_v)$ generated by $( \partial_{v^l})_{1 \leq l \leq 3}$. In the following lemma, we apply the commutation formula of Proposition \ref{ComuVlasov} in order to express $T_F(R^1)$ in terms of $R^1$ and $R^2$ and we use Lemma \ref{GammatoYLem} for transforming the vector fields $\Gamma^{\sigma} \in \mathbb{G}^{|\sigma|}$. \begin{Lem}\label{bilanL2}
There exist two matrix functions $A :[0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3_v \rightarrow \mathfrak M_{|I_1|}(\mathbb{V})$ and $B :[0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3_v \rightarrow \mathfrak M_{|I_1|,|I_2|}(\mathbb{V})$ such that $T_F(R^1)+AR^1=B R^2$. Furthermore, if $1 \leq i \leq |I_1|$, $A$ and $B$ are such that $T_F(R^1_i)$ is a linear combination, with good coefficients $c(v)$, of the following terms, where $r \in \{1,2 \}$ and $\beta^r_j \in I_r$. \begin{itemize} \item \begin{equation}\label{eq:com11}
z^d P_{k,p}(\Phi) \frac{v^{\mu}}{v^0}\mathcal{L}_{Z^{\gamma}}(F)_{ \mu \nu} R^r_j, \tag{type 1}
\end{equation}
where \hspace{2mm} $z \in \mathbf{k}_1$, \hspace{2mm} $d \in \{ 0,1 \}$, \hspace{2mm} $\max ( |\gamma|, |k|+|\beta^r_j| ) \leq |\beta^1_i|$, \hspace{2mm} $|k| \leq |\beta^1_i|- 1$, \hspace{2mm} $|k|+|\gamma|+|\beta^r_j| \leq |\beta^1_i|+1$ \hspace{2mm} and \hspace{2mm} $p+k_P+(\beta^r_j)_P+d \leq (\beta^1_i)_P$. \item \begin{equation}\label{eq:com22} P_{k,p}(\Phi) \mathcal{L}_{X Z^{\gamma_0}}(F) \Big( v, \nabla_v \left( c(v) P_{q,s}(\Phi) R^r_j \right) \Big), \tag{type 2} \end{equation}
where \hspace{2mm} $|k|+|q|+|\gamma_0|+|\beta^r_j| \leq |\beta^1_i|-1$, \hspace{2mm} $|q| \leq |\beta^1_i|-2$,\hspace{2mm} $p+s+k_P+q_P+(\beta^r_j)_P \leq (\beta^1_i)_P$ \hspace{2mm} and \hspace{2mm} $p \geq 1$. \item \begin{equation}\label{eq:com44}
P_{k,p}(\Phi) \mathcal{L}_{ \partial Z^{\gamma_0}}(F) \Big( v, \nabla_v \left( c(v) P_{q,s}(\Phi) R^r_j \right) \Big), \tag{type 3}
\end{equation}
where \hspace{2mm} $|k|+|q|+|\gamma_0|+|\beta^r_j| \leq |\beta^1_i|-1$, \hspace{2mm} $|q| \leq |\beta^1_i|-2$, \hspace{2mm} $p+s+|\gamma_0| \leq |\beta^1_i|-1$ \hspace{2mm} and \hspace{2mm} $p+s+k_P+q_P+(\beta^r_j)_P \leq (\beta^1_i)_P$. \end{itemize}
We also impose that $|\beta^2_j| \leq N-6$ on the terms of \eqref{eq:com22}, \eqref{eq:com44} and that $|\beta^1_j| \geq N-4$ on the terms of \eqref{eq:com11}, which is possible since $ \beta \in I_1 \cap I_2$ if $|\beta| = N-5$. \end{Lem} \begin{Rq}\label{rqcondiH}
Note that if $\beta^1_i \in I^{N-5}_1$, then $A^q_i =0$ for all $q \in \llbracket 1, |I_1| \rrbracket$. If $1 \leq n \leq 5$ and $\beta^1_i \in I^{N-5+n}_1$, then the terms composing $A_i^q$ are such that $\max(|k|+1,|\gamma|) \leq n$ or $|k|+|q|+|\gamma_0| \leq n-1$. \end{Rq} Let us now write $R=H+G$, where $H$ and $G$ are the solutions to $$\left\{
\begin{array}{ll}
T_F(H)+AH=0 \hspace{2mm}, \hspace{2mm} H(0,.,.)=R(0,.,.),\\
T_F(G)+AG=BR^2 \hspace{2mm}, \hspace{2mm} G(0,.,.)=0.
\end{array} \right.$$
The goal now is to prove $L^2$ estimates on the velocity averages of $H$ and $G$. As the derivatives of $F$ and $\Phi$ composing the matrix $A$ are of low order, we will be able to commute the transport equation satisfied by $H$ and to bound the $L^1$ norm of its derivatives of order $3$ by estimating pointwise the electromagnetic field and the $\Phi$ coefficients, as we proceeded in Subsection \ref{sec82}. The required $L^2$ estimates will then follow from Klainerman-Sobolev inequalities. Even if we will be led to modify the form of the equation defining $G$, the idea is to find a matrix $K$ satisfying $G=KR^2$, such that $\mathbb{E}[|K|^2 |R^2|]$ does not grow too fast, and then to take advantage of the pointwise decay estimates on $\int_v |R^2|dv$ in order to obtain the expected decay rate on $\| \int_v |G| dv \|_{L^2_x}$. \begin{Rq}
As in \cite{massless}, we keep the $v$ derivatives in the construction of $H$ and $G$. It has the advantage of allowing us to use Lemma \ref{vradial}. If we had already transformed the $v$ derivatives, as in \cite{dim4}, we would have obtained terms such as $x^{\theta} \partial g$ from $\left( \nabla_v g \right)^r$. Indeed, Lemma \ref{vradial} would have led us to derive coefficients such as $\frac{x^k}{|x|}$ and then to deal, for instance, with factors such as $\frac{t^3}{|x|^3}$ (apply three boosts to $\frac{x^k}{|x|}$). We would then have to work with another commutation formula leading to terms such as $x^{\theta} \frac{v^{\mu}}{v^0}\partial(F)_{\mu \nu} H_j$ and would then need at least a decay rate of $\tau_+^{-\frac{3}{2}}$ on $\rho$, in the $t+r$ direction, in order to close the energy estimates on $H$. This could be obtained by assuming more decay on $F$ initially in order to use the Morawetz vector field $ \overline{K}_0$ or $\tau_-^{-b} \overline{K}_0$ as a multiplier.
However, this creates two technical difficulties compared to what we did in \cite{dim4}. The first one concerns $H$ and will lead us to consider a new hierarchy (see Subsection \ref{subsecH}). The other one concerns $G$ and we will circumvent it by modifying the source term of the transport equation defining it (see Subsection \ref{subsecG}). \end{Rq} \begin{Rq}
In Subsection \ref{subsecG}, we will consider a matrix $D$ such that $T_F(R^2)=DR^2$ and we will need to estimate pointwise and independently of $M$, in order to improve the bootstrap assumption on $\mathcal{E}_{N-1}[F]$, the derivatives of the electromagnetic field of its components. It explains, in view of Remark \ref{lowderiv}, why we take $I_2$ such as $|\beta^2_j| \leq N-5$. \end{Rq} \subsection{The homogeneous part}\label{subsecH}
The purpose of this subsection is to bound $L^1$ norms of components of $H$ and their derivatives. We will then be able to obtain the desired $L^2$ estimates through Klainerman-Sobolev inequalities. For that, we will make use of the hierarchy between the components of $H$ given by $(\beta^1_i)_P$. However, as, for $N-4 \leq q \leq N$ and $\beta_i^1 \in I^q_1$, we need information on $\| \widehat{Z}^{\kappa} H_j \|_{L^1_{x,v}}$, with $\beta^1_j \in I^{q-1}_1$ and $|\kappa|=4$, in order to close the energy estimate on $\widehat{Z}^{\xi} H_i$, with $|\xi|=3$, we will add a new hierarchy in our energy norms. This leads us to define, for $\delta \in \{ 0,1 \}$,
$$ \mathbb{E}_H^{\delta}(t) := \sum_{ z \in \mathbf{k}_1} \sum_{q=0}^5 \sum_{|\beta| \leq 3+q} \sum_{i \in I^{N-q}_1} \sum_{j=0}^{2N+2+\delta-\beta_P-\beta^1_P} \log^{-j(\delta a+2)}(3+t) \mathbb{E} \left[ z^j \widehat{Z}^{\beta} H_i \right](t).$$ \begin{Lem}\label{Comuhom2}
Let $\widetilde{N} \geq N+3$, $0 \leq q \leq 5$, $i \in I^{N-q}_1$, $|\beta| \leq 3+q$, $ z \in \mathbf{k}_1$ and $j \leq \widetilde{N}-\beta_P-(\beta^1_i)_P$. Then, $T_F( z^j \widehat{Z}^{\beta} H_i)$ can be bounded by a linear combination of the following terms, where $$p \leq 3N, \hspace{5mm} \max(|k|+1,|\gamma|) \leq 8, \hspace{5mm} |\kappa| \leq |\beta|+1, \hspace{5mm} |\beta^1_l| \leq |\beta^1_i| \hspace{5mm} \text{and} \hspace{5mm} |\kappa|+|\beta^1_l| \leq |\beta^1_i|.$$ \begin{itemize} \item \begin{equation}\label{eq:cat0H}
\left| F \left(v, \nabla_v \left( z^j \right) \right) Y^{\beta} H_i \right|. \tag{category $0-H$} \end{equation} \item \begin{equation}\label{eq:cat1H}
\left| P_{k,p}(\Phi) \right| \left| w^{r} Y^{\kappa} H_l \right| \left( \left| \nabla_{Z^{\gamma}} F \right| +\frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right|+\frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \right), \tag{category $1-H$} \end{equation} where \hspace{2mm} $w \in \mathbf{k}_1$ \hspace{2mm} and \hspace{2mm} $r \leq \widetilde{N} -k_P-\kappa_P-(\beta^1_l)_P$. \item \begin{equation}\label{eq:cat3H}
\hspace{-10mm} \frac{\tau_+}{\tau_-} |\rho \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) | \left|z^{j-1} Y^{\kappa} H_l \right| \hspace{8mm} \text{and} \hspace{8mm} \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}}\left| \underline{\alpha} \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right| \left| z^r Y^{\kappa} H_l \right|, \tag{category $2-H$} \end{equation} where \hspace{2mm} $j-1$, $r=\widetilde{N}-\kappa_P-(\beta^1_l)_P$ and $r \leq j$. \end{itemize} The terms of \eqref{eq:cat3H} can only appear if $j=\widetilde{N}-\beta_P-(\beta^1_i)_P$. \end{Lem} \begin{proof}
We merely sketch the proof as it is very similar to previous computations. One can express $T_F(\widehat{Z}^{\beta} H_i)$ using Lemma \ref{bilanL2} and following what we did in the proof of Proposition \ref{ComuVlasov}. It then remains to copy the proof of Proposition \ref{ComuPkp} with $|\zeta_0|=0$, which explains that we do not have terms of \eqref{eq:cat4}. Note that $\max(|k|+1,|\gamma|) \leq 8$ comes from Remark \ref{rqcondiH} and the fact that $|\kappa|$ can be equal to $|\beta|+1$ ensues from the transformation of the $v$ derivative in the terms obtained from those of \eqref{eq:com22} and \eqref{eq:com44}. \end{proof} \begin{Rq}\label{rqrqrq}
As $|\gamma| \leq 8 \leq N-3$, we have at our disposal pointwise decay estimates on the electromagnetic field (see Proposition \ref{decayF}). Similarly, as $|k| \leq 7 \leq N-4$, Remark \ref{estiPkp} gives us $|P_{k,p}(\Phi)| \lesssim \log^{M_2}(1+\tau_+)$. \end{Rq}
We are now ready to bound $\mathbb{E}^{\delta}_H$ and then to obtain estimates on $\int_v |z^j H_i|dv$. \begin{Pro}\label{estidecayH}
We have $\mathbb{E}^1_H +\mathbb{E}^0_H \lesssim \epsilon$ on $[0,T[$. Moreover, for $0 \leq q \leq 5$ and $|\beta| \leq q$,
$$\forall \hspace{0.5mm} (t,x) \in [0,T[ \times \mathbb{R}^3, \hspace{3mm} z \in \mathbf{k}_1, \hspace{3mm} i \in I^{N-q}_1, \hspace{3mm} j \leq 2N-1-\beta_P-(\beta^1_i)_P, \hspace{5mm} \int_v |z^j Y^{\beta} H_i| dv \lesssim \epsilon \frac{\log^{2j+M_1}(3+t)}{\tau_+^2 \tau_-}.$$ \end{Pro} \begin{proof}
In the same spirit as Corollary \ref{coroinit} and in view of the commutation formula of Lemma \ref{Comuhom2} (applied with $\widetilde{N} = 2N+3$) as well as the assumptions on $f_0$, there exists $C_H >0$ such that $\mathbb{E}^0_H(0) \leq \mathbb{E}^1_H(0) \leq C_H \epsilon$. We can prove that they both stay bounded by $3 C_H \epsilon$ by the continuity method. As it is very similar to what we did previously, we only sketch the proof. Consider $\delta \in \{ 0 , 1 \}$, $0 \leq r \leq 5$, $i \in I^{N-r}_1$, $|\beta| \leq 3+r$, $z \in \mathbf{k}_1$ and $j \leq 2N+2+\delta-\beta_P-(\beta^1_i)_P$. The goal is to prove that
$$ \int_0^t \int_{\Sigma_s} \int_v \left| T_F( z^j H_i ) \right| \frac{dv}{v^0}dxds \lesssim \epsilon^{\frac{3}{2}} \log^{j(\delta a+2)}(3+t).$$ According to Lemma \ref{Comuhom2} (still applied with $\widetilde{N}=2N+3$), it is sufficient to obtain, if $\delta=1$, that the integrals over $[0,t] \times \mathbb{R}^3_x \times \mathbb{R}^3_v$ of all terms of \eqref{eq:cat0H}-\eqref{eq:cat3H} are bounded by $\epsilon^{\frac{3}{2}} \log^{j( a+2)}(3+t)$. If $\delta=0$, we only have to deal with terms of \eqref{eq:cat0H} and \eqref{eq:cat1H} and to estimate their integrals by $\epsilon^{\frac{3}{2}} \log^{2j}(3+t)$. In view of Remark \ref{rqrqrq}, we only have to apply (or rather follow the computations of) Propositions \ref{M1}, \ref{M2} and \ref{M3}. The pointwise decay estimates then ensue from the Klainerman-Sobolev inequality of Corollary \ref{KS3}. \end{proof} \begin{Rq} A better decay rate, $\log^{2j}(3+t) \tau_+^{-2} \tau_-^{-1}$, could be proved in the previous proposition by controlling a norm analogous to $\mathbb{E}_N^{X}[f]$ but we do not need it to close the energy estimates on $F$. \end{Rq} \begin{Rq}\label{rqgainH} We could avoid any hypothesis on the derivatives of order $N+1$ and $N+2$ of $F^0$ (see Subsection $17.2$ of \cite{FJS3}). \end{Rq} \subsection{The inhomogeneous part}\label{subsecG}
As the matrix $B$ in $T_F(G)+AG=BR^2$ contains top order derivatives of the electromagnetic field, we cannot commute the equation and prove $L^1$ estimates on $\widehat{Z} G$. Let us explain schematically how we will obtain an $L^2$ estimate on $\int_v |G|dv$ by recalling how we proceeded in \cite{dim4}. We did not work with modified vector fields and the matrices $A$ and $B$ did not hide $v$ derivatives of $G$. Then we introduced $K$, the solution of $T_F(K)+AK+KD=B$, which initially vanishes and where $T_F(R^2)=DR^2$. Thus $G=KR^2$ and we proved $\mathbb{E}[|K|^2 |R^2|] \leq \epsilon$ so that the expected $L^2$ decay estimate followed from
$$\left\| \int_v |G|dv \right\|_{L^2_x} \lesssim \left\| \int_v |R^2| dv \right\|^{\frac{1}{2}}_{L^{\infty}_x} \mathbb{E}[|K|^2 |R^2|]^{\frac{1}{2}}.$$ The goal now is to adapt this process to our situation. There are two obstacles. \begin{itemize} \item The $v$ derivatives hidden in the matrix $A$ will then be problematic and we need first to transform them. \item The components of the (transformed) matrix $A$ have to decay sufficiently fast. We then need to consider a larger vector valued field than $G$ by including components such as $z^j G_i$ in order to take advantage of the hierarchies in the source terms already used before. \end{itemize} Recall from Definition \ref{orderk1} that we considered an ordering on $\mathbf{k}_1$ and that, if $\kappa$ is a multi-index, we have
$$z^{\kappa}= \prod_{i=1}^{|\kappa|} z_{\kappa_i} \hspace{3mm} \text{and} \hspace{3mm} |z^{\kappa}| \leq \sum_{w \in \mathbf{k}_1} |w|^{|\kappa|}.$$ In this section, we will sometimes have to work with quantities such as $z^{\kappa}$ rather than with $z^j$, where $j \in \mathbb{N}$. \begin{Def} Let $I$ and $I^q$, for $N-5 \leq q \leq N$, be the sets
$$ I := \{ (\kappa, \beta ) \hspace{1mm} / \hspace{1mm} N-5 \leq |\beta| \leq N \hspace{2mm} \text{and} \hspace{2mm} |\kappa| \leq N-\beta_P \} = \{ (\kappa_1,\beta_1),...,(\kappa_{|I|}, \beta_{|I|}) \}, \hspace{5mm} I^q := \{ (\kappa, \beta ) \in I \hspace{1mm} / \hspace{1mm} |\beta|=q \}.$$
Define now $L$, the vector valued field of length $|I|$, such that
$$L_i = z^{\kappa_i} G_j, \hspace{8mm} \text{with} \hspace{8mm} \beta_j^1 = \beta_i, \hspace{8mm} \text{and} \hspace{8mm} [i]_I:=|\kappa_i|.$$
Moreover, for $Y \in \mathbb{Y}$, $1 \leq j \leq |I_1|$ and $1 \leq i \leq |I|$, we define $j_Y$ and $i_Y$ the indices such that $$R^1_{j_{Y}}=Y Y^{\beta^1_j} f \hspace{8mm} \text{and} \hspace{8mm} L_{i_Y} = z^{\kappa_{i_Y}} G_{j_Y}.$$ \end{Def} The following result will be useful for transforming the $v$ derivatives. \begin{Lem}\label{deriG} Let $Y \in \mathbb{Y}$ and $ \beta^1_i \in I_1 \setminus I^{N}_1$. Then $$Y G_i = G_{i_Y}+H_{i_Y}-Y H_i.$$ \end{Lem} \begin{proof} Recall that $R=H+G$ and remark that $Y R^1_i = Y Y^{\beta^1_i} f= R^1_{i_Y}$. \end{proof} We now describe the source terms of the equations satisfied by the components of $L$. \begin{Pro}\label{bilanG}
There exist $N_1 \in \mathbb{N}^*$, a vector valued field $W$ and three matrix-valued functions $\overline{A} : [0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3 \rightarrow \mathfrak M_{|I|}(\mathbb{R})$, $\overline{B} : [0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3 \rightarrow \mathfrak M_{|I|,N_1}(\mathbb{R})$, $\overline{D} : [0,T[ \times \mathbb{R}^3 \times \mathbb{R}^3 \rightarrow \mathfrak M_{N_1}(\mathbb{R})$ such that
$$T_F(L)+\overline{A}L= \overline{B} W, \hspace{8mm} T_F(W)= \overline{D} W \hspace{8mm} \text{and} \hspace{8mm} \sum_{z \in \mathbf{k}_1} \int_v |z^{2} W| dv \lesssim \epsilon \frac{\log^{3N+M_1} (3+t)}{\tau_+^2 \tau_-}.$$
In order to depict these matrices, we use the quantity $[q]_W$, for $1 \leq q \leq N_1$, which will be defined during the construction of $W$ in the proof. $\overline{A}$ and $\overline{B}$ are such that $T_F(L_i)$ can be bounded, for $1 \leq i \leq |I|$, by a linear combination of the following terms, where \hspace{1mm} $|\gamma| \leq 5$, \hspace{1mm} $1 \leq j,q \leq |I|$ \hspace{1mm} and \hspace{1mm} $1 \leq r \leq N_1$.
\begin{equation}\label{eq:cat0G}
\big( \tau_- \left( |\rho (F) |+|\sigma(F) |+|\underline{\alpha}(F)| \right)+\tau_+ |\alpha(F)| \big) \left| L_j \right|, \hspace{5mm} \text{with} \hspace{5mm} [j]_I = [i]_I-1. \tag{category $0-\overline{A}$} \end{equation}
\begin{equation}\label{eq:cat1G}
\log^{M_1}(3+t) \left| L_j \right| \left( \left| \nabla_{Z^{\gamma}} F \right|+ \frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right|+ \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \right) .\tag{category $1-\overline{A}$} \end{equation}
\begin{equation}\label{eq:cat3G}
\frac{\tau_+}{\tau_-} |\rho \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) | \left| L_j \right| + \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}}\left| \underline{\alpha} \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right| \left| L_q \right|, \hspace{5mm} \text{with} \hspace{5mm} [j]_I+1, \hspace{1mm} [q]_I \leq [i]_I. \tag{category $2-\overline{A}$} \end{equation}
\begin{equation}\label{eq:cat1Y}
\left| P_{k,p}(\Phi) \right| \left| W_r \right| \left( \left| \nabla_{Z^{\zeta}} F \right|+ \frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\zeta}}(F) \right) \right|+ \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\zeta}}(F) \right) \right| \right), \tag{category $1-\overline{B}$} \end{equation}
where \hspace{1mm} $p \leq 2N$, \hspace{1mm} $|k| \leq N-1$ \hspace{1mm} and \hspace{1mm} $|k|+|\zeta| \leq N$. Moreover, if $|k| \geq 6$, there exists $\kappa$ and $\beta$ such that \hspace{1mm} $W_r = z^{\kappa} Y^{\beta} f$, \hspace{1mm} $|k|+|\beta| \leq N$ \hspace{1mm} and \hspace{1mm} $|\kappa| \leq N+1-k_P-\beta_P$.
The matrix $\overline{D}$ is such that, for $1 \leq i \leq N_1$, $T_F(W_i)$ is bounded by a linear combination of the following expressions, where \hspace{1mm} $|\gamma| \leq N-5$ \hspace{1mm} and \hspace{1mm} $1 \leq j,q \leq N_1$.
\begin{equation}\label{eq:cat0W}
\left( \tau_- \left( |\rho (F) |+|\sigma(F) |+|\underline{\alpha}(F)| \right)+\tau_+ |\alpha(F)| \right) \left| W_j \right|, \hspace{5mm} \text{with} \hspace{5mm} [j]_W = [i]_W-1. \tag{category $0-\overline{D}$} \end{equation} \begin{equation}\label{eq:cat1W}
\log^{M_1}(3+t) \left| W_j \right| \left( \left| \nabla_{Z^{\gamma}} F \right|+\frac{\tau_+}{\tau_-} \left| \alpha \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right|+ \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}} \left| \sigma \left( \mathcal{L}_{Z^{\gamma}}(F) \right) \right| \right). \tag{category $1-\overline{D}$} \end{equation} \begin{equation}\label{eq:cat3W}
\frac{\tau_+}{\tau_-} |\rho \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) | \left| W_j \right| + \frac{\tau_+}{\tau_-} \sqrt{\frac{v^{\underline{L}}}{v^0}}\left| \underline{\alpha} \left( \mathcal{L}_{ Z^{\gamma}}(F) \right) \right| \left| W_q \right|, \hspace{5mm} \text{with} \hspace{5mm} [j]_W+1, \hspace{1mm} [q]_W \leq [i]_W. \tag{category $2-\overline{D}$} \end{equation} \end{Pro} \begin{proof} The main idea is to transform the $v$ derivatives in $AG$, following the proof of Lemma \ref{nullG}, and then to apply Lemma \ref{deriG} in order to eliminate all derivatives of $G$ in the source term of the equations. We then define $W$ as the vector valued field, and $N_1$ as its length, containing all the following quantities \begin{itemize}
\item $z^j Y^{\beta} f $, \hspace{1mm} with \hspace{1mm} $z \in \mathbf{k}_1$, \hspace{1mm} $|\beta| \leq N-5$ \hspace{1mm} and \hspace{1mm} $j \leq N+1- \beta_P$, \item $z^j\left(H_{i_Y}-Y H_i \right)$, \hspace{1mm} with \hspace{1mm} $z \in \mathbf{k}_1$, \hspace{1mm} $Y \in \mathbb{Y}$, \hspace{1mm} $\beta_i^1 \in I_1 \setminus I_1^N$ \hspace{1mm} and \hspace{1mm} $j \leq N+3-\left(\beta^1_{i_Y} \right)_P$.
\item $z^j Y^{\beta} H_i$, \hspace{1mm} with \hspace{1mm} $z \in \mathbf{k}_1$, \hspace{1mm} $|\beta|+|\beta^1_i| \leq N$ \hspace{1mm} and \hspace{1mm} $ j \leq N+3-\beta_P-(\beta^1_i)_P$. \end{itemize} Let us make three remarks. \begin{itemize} \item If $1 \leq i \leq N_1$, we can define, in each of the three cases, $[i]_W:=j$. \item Including the terms $z^{N+1- \beta_P} Y^{\beta} f$ and $z^{N+1-\left(\beta^1_{i_Y} \right)_P}\left(H_{i_Y}-Y H_i \right)$ in $W$ allows us to avoid any term of category $2$ related to $\overline{B}$. \item The components such as $z^j Y^{\beta} H_i$ are here in order to obtain an equation of the form $T_F(W)= \overline{D} W$. \end{itemize}
The form of the matrix $\overline{D}$ then follows from Proposition \ref{ComuPkp} if $W_i = z^j Y^{\beta} f $ and from Lemma \ref{Comuhom2}, applied with $\widetilde{N}=N+3$, otherwise (we made an additional operation on the terms of category $0$ which will be more detailed for the matrix $\overline{A}$). Note that we use Remark \ref{estiPkp} to estimate all quantities such as $P_{k,p}(\Phi)$. The decay rate on $\int_v |z^2 W| dv$ follows from Propositions \ref{Xdecay} and \ref{estidecayH}.
We now turn to the construction of the matrices $\overline{A}$ and $\overline{B}$. Consider then $1 \leq i \leq |I|$ and $1 \leq q \leq |I_1|$ so that $L_i=z^{\kappa_i} G_q$ and $|\kappa_i| \leq N-(\beta^1_q)_P$. Observe that $$T_F(L_i)= T_F(z^{\kappa_i})G_q+z^{\kappa_i} T_F(G_q)= F \left( v, \nabla_v (z^{\kappa_i}) \right)G_q+z^{\kappa_i} T_F(G_q).$$ The first term on the right hand side gives terms of \eqref{eq:cat0G} and \eqref{eq:cat1G} as, following the computations of Proposition \ref{M1}, we have
$$\nabla_v \left( \prod_{r=1}^{|\kappa_i|} z_r \right) = \sum_{p=1}^{|\kappa_i|} \nabla_v(z_p) \prod_{r \neq p} z_r, \hspace{5mm} \left| F(v,\nabla_v z_p) \right| \lesssim \tau_- \left( |\rho (F) |+|\sigma(F) |+|\underline{\alpha}(F)| \right)+\tau_+ |\alpha(F)|+\sum_{w \in \mathbf{k}_1} |w F| .$$ The remaining quantity, $z^{\kappa_i} T_F(G_q)=-z^{\kappa_i} A_q^r G_r+z^{\kappa_i} B_q^r R^2_r$, is described in Lemma \ref{bilanL2}. Express the terms given by $z^{\kappa_i} A_q^r G_r$ in null components and transform the $v$ derivatives\footnote{Note that this is possible since $\partial_v G_r$ can only appear if $\beta^1_r \in I_1 \setminus I^N_1$.} of $G_r$ using Lemma \ref{deriG}, so that, schematically (see \eqref{equ:proof}), \begin{eqnarray} \nonumber v^0\left( \nabla_v G_r \right)^r & = & Y G_r+(t-r) \partial G_r = G_{r_Y}+H_{r_Y}-Y H_r+(t-r)(G_{r_{\partial}}+H_{r_{\partial}}-\partial H_r) \hspace{5mm} \text{and} \hspace{5mm} \\ \nonumber v^0 \partial_{v^b} G_r & = & Y_{0b} G_r+x \partial G_r = G_{r_{Y_{0b}}}+H_{r_{Y_{0b}}}-Y_{0b} H_r+x(G_{r_{\partial}}+H_{r_{\partial}}-\partial H_r) . \end{eqnarray}
By Remark \ref{rqcondiH}, the $\Phi$ coefficients and the electromagnetic field are both derived less than $5$ times. We then obtain, with similar operations as those made in proof of Proposition \ref{ComuPkp}, the matrix $\overline{A}$ and the columns of the matrix $\overline{B}$ hitting the component of $W$ of the form $z^j\left(H_{l_Y}-Y H_l \right)$. For $z^{\kappa_i} B_q^r R^2_r$, we refer to the proof of Proposition \ref{ComuPkp}, where we already treated such terms. \end{proof}
To lighten the notations and since there will be no ambiguity, we drop the index $I$ (respectively $W$) of $[i]_I$ for $1 \leq i \leq |I|$ (respectively $[j]_W$ for $1 \leq j \leq N_1$). Let us introduce $K$ the solution of $T_F(K)+\overline{A}K+K\overline{D}=\overline{B}$, such that $K(0,.,.)=0$. Then, $KW = L$ since they are solutions of the same system and they both initially vanish. The goal now is to control $\mathbb{E}[|K|^2|W|]$. As, for $ 1 \leq i \leq |I|$ and $1 \leq j,p \leq N_1$, \begin{equation}\label{eq:K}
T_F\left( |K^j_i|^2 W_p\right) = |K^j_i |^2\overline{D}^q_pW_q-2\left(\overline{A}^q_i K^j_q +K^q_i \overline{D}^j_q \right) K^j_i W_p+2\overline{B}^j_iK^j_iW_p, \end{equation}
we consider $\mathbb{E}_L$, the following hierarchized energy norm,
$$\mathbb{E}_L(t):= \sum_{ \begin{subarray}{l} 1 \leq j,p \leq N_1 \\ \hspace{1mm} 1 \leq i \leq |I| \end{subarray}} \log^{-4[i]-2[p]+4[j]}(3+t) \mathbb{E} \left[\left|K_i^j\right|^2 W_p \right](t). $$ The sign in front of $[j]$ is related to the fact that the hierarchy is inverted on the terms coming from $K \overline{D}$. It prevents us from expecting a better estimate than $\mathbb{E}_L(t) \lesssim \log^{4N+12}(3+t)$. \begin{Lem}\label{Inho1} We have, for $M_0 = 4N+12$ and if $\epsilon$ is small enough, $\mathbb{E}_L(t) \lesssim \epsilon \log^{M_0}(3+t)$ for all $t \in [0,T[$. \end{Lem} \begin{proof} We use again the continuity method. Let $T_0 \in [0,T[$ be the largest time such that $\mathbb{E}_L(t) \leq 2 \epsilon \log^{M_0}(3+t)$ for all $t \in [0,T_0[$ and let us prove that, if $\epsilon$ is small enough, \begin{equation}\label{thegoal} \forall \hspace{0.5mm} t \in [0,T_0[, \hspace{3mm} \mathbb{E}_L(t) \lesssim \epsilon^{\frac{3}{2}} \log^{M_0}(3+t). \end{equation}
As $T_0 >0$ by continuity ($K$ vanishes initially), we would deduce that $T_0=T$. We fix for the remainder of the proof $1 \leq i \leq |I|$ and $1 \leq j, p \leq N_1$. According to the energy estimate of Proposition \ref{energyf}, \eqref{thegoal} would follow if we prove that \begin{eqnarray}
\nonumber I_{\overline{A}, \overline{D}} & := & \int_0^t \int_{\Sigma_s} \int_v \left| |K^j_i |^2 \overline{D}^q_pW_q-2\left( \overline{A}^k_i K^j_k +K^r_i \overline{D}^j_r \right) K^j_i W_p \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}} \log^{M_0+4[i]+2[p]-4[j]}(3+t), \\ \nonumber I_{\overline{B}} & := & \int_0^t \int_{\Sigma_s} \int_v \left| B_i^j \right| \left| K^j_i W_p \right| \frac{dv}{v^0} dx ds \lesssim \epsilon^{\frac{3}{2}}. \end{eqnarray}
Let us start with $I_{\overline{A}, \overline{D}}$ and note that in all the terms given by Proposition \ref{bilanG}, the electromagnetic field is derived less than $N-5$ times so that we can use the pointwise decay estimates given by Remark \ref{lowderiv}. The terms of \eqref{eq:cat1G} and \eqref{eq:cat1W} can be easily handled (as in Proposition \ref{M2}). We then only treat the following cases, where $|\gamma| \leq N-5$ (the other terms are similar).
$$ \left| \overline{D}^j_r \right| = \tau_- \left( |\rho (F) |+|\sigma(F) |+|\underline{\alpha}(F)| \right)+\tau_+ |\alpha(F)|, \hspace{5mm} \text{with} \hspace{5mm} [j]=[r]-1,$$
$$ \left| \overline{A}^k_i \right| \lesssim \frac{\tau_+ \sqrt{v^{\underline{L}}}}{\tau_- \sqrt{v^0}} |\underline{\alpha} (\mathcal{L}_{Z^{\gamma}}(F))|, \hspace{3mm} \text{with} \hspace{3mm} [k] \leq [i], \hspace{5mm} \text{and} \hspace{5mm} \left| \overline{D}^q_p \right| \lesssim \frac{\tau_+}{\tau_-} |\rho (\mathcal{L}_{Z^{\gamma}}(F))|, \hspace{3mm} \text{with} \hspace{3mm} [q] < [p]. $$ Without any summation on the indices $r$, $k$ and $q$, we have, using Remark \ref{lowderiv}, $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$ and the Cauchy-Schwarz inequality several times, \begin{eqnarray}
\nonumber \int_0^t \int_{\Sigma_s} \int_v \left| K^r_i \overline{D}^j_r K^j_i W_p \right| \frac{dv}{v^0} dx ds & \lesssim & \sqrt{\epsilon} \int_0^t \frac{\log (3+s)}{1+s} \left| \mathbb{E} \left[ \left| K^r_i \right|^2 W_p \right] \hspace{-0.5mm} (s) \hspace{0.5mm} \mathbb{E} \left[ \left| K^j_i \right|^2 W_p \right] \hspace{-0.5mm} (s) \right|^{\frac{1}{2}} ds \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \log^{2+M_0+4[i]+2[p]-2[r]-2[j]}(3+t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} \log^{M_0+4[i]+2[p]-4[j]}(3+t), \\ \nonumber
\int_0^t \int_{\Sigma_s} \int_v \left|\overline{A}^k_i K^j_k K^j_i W_p\right| \frac{dv}{v^0} dx ds & \lesssim & \sqrt{\epsilon}\int_{u=-\infty}^t \frac{\tau_+}{\tau_+\tau_-^{\frac{3}{2}}} \int_{C_u(t)} \int_v \frac{v^{\underline{L}}}{v^0} \left| K^j_k \right| \left| W_p\right|^{\frac{1}{2}} \left| K^j_i \right| \left| W_p\right|^{\frac{1}{2}} dv dC_u(t) du \\ \nonumber
& \lesssim & \sqrt{\epsilon} \left| \mathbb{E} \left[ \left| K^j_k \right|^2 W_p \right] \hspace{-0.5mm} (t) \hspace{0.5mm} \mathbb{E} \left[ \left| K^j_i \right|^2 W_p \right] \hspace{-0.5mm} (t) \right|^{\frac{1}{2}} \int_{u=-\infty}^{+\infty} \frac{du}{\tau_-^{\frac{3}{2}}} \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \log^{M_0+2[k]+2[i]+2[p]-4[j]}(3+t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} \log^{M_0+4[i]+2[p]-4[j]}(3+t), \\ \nonumber
\nonumber \int_0^t \int_{\Sigma_s} \int_v \left| K^j_i \right|^2 \left| \overline{D}^q_pW_q \right| \frac{dv}{v^0} dx ds & \lesssim & \sqrt{\epsilon} \int_0^t \int_{\Sigma_s} \int_v \log (3+s) \frac{\sqrt{v^{\underline{L}}v^0}}{\tau_+^{\frac{1}{2}} \tau_-^{\frac{3}{2}}} \left| K^j_i \right|^2 \left| W_q \right| \frac{dv}{v^0} dx ds \\ \nonumber
& \lesssim & \sqrt{\epsilon} \left( \int_0^t \frac{\log(3+s)}{1+s} ds+\log(3+t) \int_{-\infty}^{+\infty} \frac{du}{\tau_-^3} \right) \sup_{[0,t]} \mathbb{E} \left[ \left| K^j_i \right|^2 W_q \right] \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \log^{2+M_0+4[i]+2[q]-4[j]}(3+t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}} \log^{M_0+4[i]+2[p]-4[j]}(3+t). \end{eqnarray} It remains to study $I_{\overline{B}}$. The form of $\overline{B}^j_i$ is given by Proposition \ref{bilanG} and the computations are close to the ones of Proposition \ref{M21}. We then only consider the following two cases,
$$ \left| \overline{B}^j_i K_i^j W_p \right| \lesssim \log^{M_1}(1+\tau_+)\frac{\tau_+ \sqrt{v^{\underline{L}}}}{\tau_-\sqrt{v^0}} \left| \sigma (\mathcal{L}_{Z^{\zeta}} (F) ) \right| \left|K_i^j \right| |W_p|, \hspace{5mm} \text{with} \hspace{5mm} |\zeta| \leq N \hspace{5mm} \text{and} \hspace{5mm} $$
$$ \left| \overline{B}^j_i K_i^j W_p \right| \lesssim \left|\Phi^r P_{\xi}(\Phi)\right| \left| \nabla_{ Z^{\gamma}} F \right| \left| K_i^j W_p \right|, \hspace{5mm} \text{with} \hspace{5mm} r \leq 2N, \hspace{6mm} |\xi|+|\gamma| \leq N \hspace{5mm} \text{and} \hspace{5mm} 6 \leq |\xi| \leq N-1.$$ In the first case, using the Cauchy-Schwarz inequality twice (in $(t,x)$ and then in $v$), we get \begin{eqnarray}
\nonumber I_{\overline{B}} & \lesssim & \int_{u=-\infty}^t \left\| \sigma (\mathcal{L}_{Z^{\zeta}} (F) ) \right\|_{L^2(C_u(t))} \left| \int_{C_u(t)} \log^{2M_1}(1+\tau_+)\frac{\tau_+^2}{\tau_-^2} \left| \int_v \sqrt{ \frac{v^{\underline{L}}}{v^0}} \left| K^j_i W_p \right| \frac{dv}{v^0} \right|^2 dC_u(t) \right|^{\frac{1}{2}} du \\ \nonumber
& \lesssim & \sqrt{\epsilon} \sum_{q=0}^{+ \infty} \int_{u=-\infty}^t \frac{1}{\tau_-^{\frac{5}{4}}} \left\| \tau_+^{\frac{11}{4}} \int_v \left| W_p \right| \frac{dv}{(v^0)^2} \right\|^{\frac{1}{2}}_{L^{\infty}(C^q_u(t))} \left\| \int_v \frac{v^{\underline{L}}}{v^0} \left| K^j_i \right|^2 \left| W_p \right| dv \right\|_{L^{1}(C^q_u(t))}^{\frac{1}{2}} du \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \int_{-\infty}^{+\infty} \frac{du}{\tau_-^{\frac{5}{4}}} \sum_{q=0}^{+\infty} \frac{\log^{\frac{M_0+4[i]+2[p]+3N+M_1}{2}}(3+t_{q+1})}{(1+t_{q})^{\frac{1}{8}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}, \end{eqnarray}
using the bootstrap assumption on $\mathbb{E}_L$ and $\int_v |W_p| \frac{dv}{(v^0)^2} \lesssim \int_v |W_p| \frac{v^{\underline{L}}}{v^0} dv \lesssim \epsilon \log^{3N+M_1}(3+t) \tau_+^{-3}$, which comes from Proposition \ref{bilanG} and Lemma \ref{weights}. For the remaining case, we have $|\gamma| \leq N-6$ and we can then use the pointwise decay estimates on the electromagnetic field given by Proposition \ref{decayF}. Moreover, by Proposition \ref{bilanG}, we have that
$$W_p = z^{\kappa} Y^{\beta} f , \hspace{5mm} \text{with} \hspace{5mm} |\xi|+|\beta| \leq N \hspace{5mm} \text{and} \hspace{5mm} |\kappa| \leq N+1-\beta_P-\xi_P.$$
Suppose first that $|\kappa| \leq 2N-1-\beta_P-2\xi_P$. Then, since $|\Phi|^r | \nabla_{Z^{\gamma}} F| \lesssim \sqrt{\epsilon} \tau_+^{-\frac{3}{4}}\tau_-^{-1}$ and $1 \lesssim \sqrt{v^0 v^{\underline{L}}}$, we get
$$\left| \overline{B}^j_i K^j_i W_p \right| \lesssim \sqrt{\epsilon} \left( \frac{v^0}{\tau_+^{\frac{5}{4}}}+\frac{v^{\underline{L}}}{\tau_+^{\frac{1}{4}} \tau_-^2} \right) \left( \left| z^{\kappa} P_{\xi}(\Phi)^2 Y^{\beta} f \right|+\left|K^j_i \right|^2 \left| W_p \right| \right).$$ Hence, we can obtain $I_{\overline{B}} \lesssim \epsilon^{\frac{3}{2}}$ by following the computations of Proposition \ref{M2}, as, by the bootstrap assumptions on $\overline{\mathbb{E}}_N[f]$ and $\mathbb{E}_L$,
$$\mathbb{E}[ z^{\kappa} P_{\xi}(\Phi)^2 Y^{\beta} f ](t)+\mathbb{E} \left[ \left| K^j_i \right|^2 W_p \right](t) \lesssim \epsilon (1+t)^{\frac{1}{8}}.$$
Otherwise, $|\kappa| = 2N-\beta_P-2\xi_P$ so that $\xi_P=N-1$, $|\beta| \leq 1$ and $|\kappa| = 2-\beta_P$. We can then write $z^{\kappa}=z z^{\kappa_0}$ and find $q \in \llbracket 1,N_1 \rrbracket$ such that $W_q = z^2 z^{\kappa_0} Y^{\beta}f$. It remains to follow the previous case after noticing that
$$\left| \overline{B}^j_i K^j_i W_p \right| \lesssim \sqrt{\epsilon} \left( \frac{v^0}{\tau_+^{\frac{5}{4}}}+\frac{v^{\underline{L}}}{\tau_+^{\frac{1}{4}} \tau_-^2} \right) \left( \left| z^{\kappa_0} P_{\xi}(\Phi)^2 Y^{\beta} f \right|+\left|K^j_i \right|^2 \left| W_q \right| \right) \hspace{5mm} \text{and} \hspace{5mm} |\kappa_0| \leq 2N-1-2\xi_P-\beta_P.$$ \end{proof}
\subsection{$L^2$ estimates on the velocity averages of $f$}
We finally end this section by proving several $L^2$ estimates. The first one is clearly not sharp but is sufficient for us to close the energy estimates for the electromagnetic field. \begin{Pro}\label{estiL2}
Let $z \in \mathbf{k}_1$, $p \leq 3N$, $|k| \leq N-1$ and $\beta$ such that $|k|+|\beta| \leq N$. Then, for all $t \in [0,T[$,
$$ \left\| \frac{1}{\sqrt{\tau_+}} \int_v \left| zP_{k,p}(\Phi) Y^{\beta}f \right| dv \right\|_{L^2 (\Sigma_t)} \lesssim \frac{1}{1+t} \left\| \sqrt{\tau_+} \int_v \left| zP_{k,p}(\Phi) Y^{\beta}f \right| dv \right\|_{L^2 (\Sigma_t)} \lesssim \frac{\epsilon}{(1+t)^{\frac{5}{4}}}$$ \end{Pro} \begin{proof}
The first inequality ensues from $1+t \leq \tau_+$ on $\Sigma_t$. For the other one, we start with the case $|\beta| \leq N-3$. Write $P_{k,p}(\Phi)=\Phi^n P_{\xi}(\Phi)$ and notice that $|\Phi|^n \lesssim \log^{2p}(1+\tau_+)$. Then, using the bootstrap assumption \eqref{bootf3} and Proposition \ref{Xdecay}, \begin{eqnarray}
\nonumber \left\| \sqrt{\tau_+} \int_v \left|z P_{k,p}(\Phi) Y^{\beta}f \right| dv \right\|^2_{L^2 (\Sigma_t)} & \lesssim & \left\| \tau_+ \log^{4p}(1+\tau_+) \int_v \left| P_{\xi}(\Phi)^2 Y^{\beta}f \right| dv \int_v \left|z^2 Y^{\beta}f \right| dv \right\|_{L^{1} (\Sigma_t)} \\ \nonumber
& \lesssim & \left\| \tau_+ \log^{4p}(1+\tau_+) \int_v \left|z^2 Y^{\beta}f \right| dv \right\|_{L^{\infty} (\Sigma_t)} \overline{\mathbb{E}}_N[f](t) \\ \nonumber & \lesssim & \epsilon \frac{\log^{4p+6}(3+t)}{1+t} (1+t)^{\eta} \hspace{2mm} \lesssim \hspace{2mm} \frac{\epsilon^2}{(1+t)^{\frac{3}{4}}}. \end{eqnarray}
Otherwise, $|\beta| \geq N-2$ so that $|k| \leq 2$ and, according to Remark \ref{estiPkp}, $P_{k,p}(\Phi) \lesssim \tau_+^{\frac{1}{8}}$. Moreover, as there exists $i \in \llbracket 1, |I_1| \rrbracket$ such that $\beta=\beta^1_i$, we obtain
$$ \left\|\tau_+^{\frac{1}{2}} \int_v \left| zP_{k,p}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2 (\Sigma_t)} \lesssim \left\| \tau_+^{\frac{5}{8}} \int_v \left| z H_i \right| dv \right\|_{L^2 (\Sigma_t)}+\left\| \tau_+^{\frac{5}{8}} \int_v \left| z G_i \right| dv \right\|_{L^2 (\Sigma_t)}.$$ Applying Proposition \ref{estidecayH}, one has
$$\left\| \tau_+^{\frac{5}{8}} \int_v \left| z H_i \right| dv \right\|^2_{L^2 (\Sigma_t)} \lesssim \left\| \tau_+^{\frac{5}{4}} \int_v \left| z^2 H_i \right| dv \right\|_{L^{\infty} (\Sigma_t)}\left\| \int_v \left| H_i \right| dv \right\|_{L^1 (\Sigma_t)} \lesssim \frac{\epsilon^2}{(1+t)^{\frac{1}{2}}}.$$
As there exists $q \in \llbracket 1, |I| \rrbracket$ such that $ G_i=L_q=K_q^j W_j$, we have, using this time Lemma \ref{Inho1} and the decay estimate on $\int_v |z^2 W| dv$ given in Proposition \ref{bilanG}, \begin{eqnarray}
\nonumber \left\| \tau_+^{\frac{5}{8}} \int_v \left| z G_i \right| dv \right\|^2_{L^2 (\Sigma_t)} & = & \left\| \tau_+^{\frac{5}{8}} \int_v \left| z K_q^j W_j \right| dv \right\|^2_{L^2 (\Sigma_t)} \\ \nonumber
& \lesssim & \sum_{j=0}^{N_1} \left\| \tau_+^{\frac{5}{4}} \int_v \left| z^2 W_j \right| dv \right\|_{L^{\infty} (\Sigma_t)} \left\| \int_v \left| K_q^j \right|^2 \left| W_j \right| dv \right\|_{L^1 (\Sigma_t)} \\ \nonumber & \lesssim & \epsilon \frac{\log^{3N+M_1}(3+t)}{(1+t)^{\frac{3}{4}}}\log^{4[q]}(3+t) \mathbb{E}_L(t) \hspace{2mm} \lesssim \hspace{2mm} \frac{\epsilon^2}{(1+t)^{\frac{1}{2}}}. \end{eqnarray} \end{proof} This proposition allows us to improve the bootstrap assumption \eqref{bootL2} if $\epsilon$ is small enough. More precisely, the following result holds. \begin{Cor}
For all $t \in [0,T[$, we have $\sum_{|\beta| \leq N-2} \left\| r^{\frac{3}{2}} \int_v \frac{v^A}{v^0} \widehat{Z}^{\beta} f dv \right\|_{L^2(\Sigma_t)} \lesssim \epsilon$. \end{Cor} \begin{proof}
Let $t \in [0,T[$. Using $\tau_+|v^A| \lesssim v^0 \sum_{z \in \mathbf{k}_1} |z|$ and rewriting $\widehat{Z}^{\beta}$ in terms of modified vector fields through the identity \eqref{lifttomodif}, one has
$$ \sum_{|\beta| \leq N-2} \left\| r^{\frac{3}{2}} \int_v \frac{v^A}{v^0} \widehat{Z}^{\beta} f dv \right\|_{L^2(\Sigma_t)} \hspace{2mm} \lesssim \hspace{2mm} \sum_{z \in \mathbf{k}_1} \sum_{p \leq N-2} \sum_{\begin{subarray}{l} |q|+|\kappa| \leq N-2 \\ \hspace{2mm} |q| \leq N-3 \end{subarray}} \left\| \sqrt{r} \int_v \left| P_{q,p}(\Phi) Y^{\kappa} f \right| dv \right\|_{L^2(\Sigma_t)}.$$ It then only remains to apply the previous proposition. \end{proof} The following two estimates are crucial as a weaker decay rate would prevent us from improving the bootstrap assumptions.
\begin{Pro}\label{crucial1}
Let $\beta$ and $\xi$ be such that $|\xi|+|\beta| \leq N-1$. Then, for all $t \in [0,T[$, \begin{eqnarray}
\nonumber \left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma_t)} & \lesssim & \epsilon \frac{1}{1+t} \hspace{5mm} \text{if} \hspace{5mm} |\beta| \leq N-3 \\ \nonumber & \lesssim & \epsilon \frac{\log^M(3+t)}{1+t} \hspace{5mm} \text{otherwise}. \end{eqnarray} \end{Pro} \begin{proof}
Suppose first that $|\beta| \leq N-3$. Then, by Proposition \ref{Xdecay}, \begin{eqnarray}
\nonumber \left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) Y^{\beta} f \right| dv \right\|^2_{L^2(\Sigma_t)} & \lesssim & \left\| \tau_- \int_v \left| Y^{\beta} f \right| dv \right\|_{L^{\infty}(\Sigma_t)} \left\| \int_v \left| P^X_{\xi}(\Phi)^2 Y^{\beta} f \right| dv \right\|_{L^1(\Sigma_t)} \\ \nonumber
& \lesssim & \frac{\epsilon}{(1+t)^2} \mathbb{E}^X_{N-1}[f](t) \hspace{2mm} \lesssim \hspace{2mm} \left| \frac{\epsilon}{1+t} \right|^2 . \end{eqnarray} Otherwise, \begin{itemize}
\item $|\beta| \geq N-2$, so $|\xi| \leq 1$ and then $|P^X_{\xi}(\Phi)| \lesssim \log^{\frac{3}{2}} (1+\tau_+)$ by Proposition \ref{Phi1}.
\item There exists $i \in \llbracket 1, |I_1| \rrbracket$ and $q \in \llbracket 1, |I| \rrbracket$ such that $Y^{\beta} f = H_i+G_i=H_i+L_q$. \end{itemize} Using Proposition \ref{estidecayH} (for the first estimate) and Proposition \ref{bilanG} and Lemma \ref{Inho1} (for the second one), we obtain \begin{eqnarray}
\nonumber \nonumber \left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) H_i \right| dv \right\|^2_{L^2(\Sigma_t)} & \lesssim & \left\| \tau_- \log^{3}(1+\tau_+) \int_v \left| H_i \right| dv \right\|_{L^{\infty}(\Sigma_t)} \left\| \int_v \left| H_i \right| dv \right\|_{L^1(\Sigma_t)} \\ \nonumber
& \lesssim & \left\| \epsilon \frac{\tau_- \log^{3+M_1}(1+\tau_+)}{\tau_+^2 \tau_-} \right\|_{L^{\infty}(\Sigma_t)} \mathbb{E}[H_i](t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^2 \frac{\log^{3+M_1}(3+t)}{(1+t)^2}, \\ \nonumber
\left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) L_q \right| dv \right\|^2_{L^2(\Sigma_t)} & = & \left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) K_q^j W_j \right| dv \right\|^2_{L^2(\Sigma_t)} \\ \nonumber
& \lesssim & \sum_{j=0}^{N_1} \left\| \tau_- \log^{3}(1+\tau_+) \int_v \left| W_j \right| dv \right\|_{L^{\infty}(\Sigma_t)} \left\| \int_v \left|K_q^j \right|^2 |W_j| dv \right\|_{L^1(\Sigma_t)} \\ \nonumber & \lesssim & \epsilon \frac{\log^{3+3N+M_1}(3+t)}{(1+t)^2} \epsilon \log^{M_0+4[q]}(3+t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^2 \frac{\log^{M_0+M_1+3N+3}(3+t)}{(1+t)^2}, \end{eqnarray} since $[q]=0$. This concludes the proof if $M$ is chosen such that\footnote{Recall from Remark \ref{estiPkp} that $M_1$ is independent of $M$.} $2M \geq M_0+M_1+3N+3$. \end{proof} The following estimates will be needed for the top order energy norm. As it will be used in combination with Proposition \ref{CommuFsimple}, the quantity $P_{q,p}(\Phi)$ will contain $\mathbb{Y}_X$ derivatives of $\Phi$. \begin{Pro}\label{crucial2}
Let $\beta$, $q$ and $p$ be such that $|q|+|\beta| \leq N$, $|q| \leq N-1$ and $p \leq q_X+\beta_T$. Then, for all $t \in [0,T[$,
$$\left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma^0_t)} \lesssim \frac{\epsilon}{(1+t)^{1-\frac{\eta}{2}}}.$$ \end{Pro} \begin{proof}
We consider various cases and, except for the last one, the estimates are clearly not sharp. Let us suppose first that $|\beta| \geq N-2$. Then $|q| \leq 2$ and $|P_{q,p}(\Phi)| \lesssim \log^{M_1}(3+t)$ on $\Sigma^0_t$ by Remark \ref{estiPkp}, so that, using Proposition \ref{crucial1},
$$ \left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma^0_t)} \hspace{1mm} \lesssim \hspace{1mm} \log^{M_1}(3+t) \left\| \sqrt{\tau_-} \int_v \left| Y^{\beta} f \right| dv \right\|_{L^2(\Sigma^0_t)} \hspace{1mm} \lesssim \hspace{1mm} \epsilon \frac{\log^{M+M_1}(3+t)}{1+t}.$$
Let us write $P_{q,p}(\Phi)=\Phi^r P_{\xi}(\Phi)$ with $r \leq p$ and $(\xi_T,\xi_P,\xi_X)=(q_T,q_P,q_X)$. If $|\beta| \leq N-3$ and $|q| \leq N-2$, then by the Cauchy-Schwarz inequality (in $v$), \eqref{Auxenergy} as well as Propositions \ref{Phi1} and \ref{Xdecay}, \begin{eqnarray}
\nonumber \left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|^2_{L^2(\Sigma_t)} & \lesssim & \left\| \tau_- \int_v \left| \Phi^{2r} Y^{\beta} f \right| dv \right\|_{L^{\infty}(\Sigma_t)} \left\| \int_v \left| P_{\xi}(\Phi)^2 Y^{\beta} f \right| dv \right\|_{L^1(\Sigma_t)} \\ \nonumber
& \lesssim & \left\|\tau_-\frac{\epsilon \log^{4r}(1+\tau_+)}{\tau_+^2\tau_-} \right\|_{L^{\infty}(\Sigma_t)}\mathbb{A}[f](t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^2 \frac{\log^{8N}(3+t)}{(1+t)^{2-\frac{3}{4}\eta}}. \end{eqnarray}
The remaining case is the one where $|q|=N-1$ and $|\beta| \leq 1$. Hence, $p \leq q_X+1$. \begin{itemize}
\item If $p \geq 2$, we have $q_X \geq 1$ and then, schematically, $P_{\xi}(\Phi)=P^X_{\xi^1}(\Phi) P_{\xi^2}(\Phi)$, with $|\xi^1| \geq 1$ and $|\xi^1|+|\xi^2| = N-1$. If $|\xi^2| \geq 1$, we have $\min(|\xi^1|, |\xi^2|) \leq \frac{N-1}{2} \leq N-6$ and one of the two factors can be estimated pointwise, which puts us in the context of the case $|q| \leq N-2$ and $|\beta| \leq N-3$. Otherwise, $P_{q,p}(\Phi)= \Phi^r P^X_{\xi^1}(\Phi)$ and, using again \eqref{Auxenergy}, \begin{eqnarray}
\nonumber \left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|^2_{L^2(\Sigma_t)} & \lesssim & \left\| \tau_- \int_v \left| \Phi^{2r} Y^{\beta} f \right| dv \right\|_{L^{\infty}(\Sigma_t)} \left\| \int_v \left| P^X_{\xi^1}(\Phi)^2 Y^{\beta} f \right| dv \right\|_{L^1(\Sigma_t)} \\ \nonumber
& \lesssim & \left\|\tau_-\frac{\epsilon \log^{4r}(1+\tau_+)}{\tau_+^2\tau_-} \right\|_{L^{\infty}(\Sigma_t)} \mathbb{A}[f](t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^2 \frac{\log^{8N}(3+t)}{(1+t)^{2-\frac{3}{4} \eta} }. \end{eqnarray} \item If $p=1$, we have $P_{q,p}(\Phi)=Y^{\kappa} \Phi$ and, using $\overline{\mathbb{E}}_N[f](t) \leq 4 \epsilon (1+t)^{\eta}$, \begin{eqnarray}
\nonumber \left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|^2_{L^2(\Sigma_t)} & \lesssim & \left\| \tau_- \int_v \left| Y^{\beta} f \right| dv \right\|_{L^{\infty}(\Sigma_t)} \left\| \int_v \left| Y^{\kappa} \Phi \right|^2 \left| Y^{\beta} f \right| dv \right\|_{L^1(\Sigma_t)} \\ \nonumber
& \lesssim & \left\|\tau_-\frac{\epsilon}{\tau_+^2\tau_-} \right\|_{L^{\infty}(\Sigma_t)} \mathbb{E} \left[ \left| Y^{\kappa} \Phi \right|^2 Y^{\beta} f \right](t) \hspace{2mm} \lesssim \hspace{2mm} \epsilon^2 \frac{(1+t)^{\eta}}{(1+t)^2}. \end{eqnarray} \end{itemize} \end{proof}
\section{Improvement of the energy estimates of the electromagnetic field}\label{sec12} In order to take advantage of the null structure of the system, we start this section with a preparatory lemma. \begin{Lem}\label{calculGFener}
Let $G$ be a $2$-form and $g$ a function, both sufficiently regular and recall that $J(g)^{\nu}= \int_v \frac{v^{\nu}}{v^0} g dv$, $\left| \overline{S}^{L} \right| \lesssim \tau_+$ and $\left| \overline{S}^{\underline{L}} \right| \lesssim \tau_-$. Then, using several times Lemma \ref{weights1} and Remark \ref{rqweights1}, \begin{eqnarray}
\nonumber \left| G_{ 0 \nu} J(g)^{\nu} \right| & \lesssim & |\rho|\int_v |g|dv+(|\alpha_A|+|\underline{\alpha}_A| )\int_v \frac{|v^A|}{v^0}|g|dv \hspace{1.5mm} \lesssim \hspace{1.5mm} |\rho|\int_v |g|dv+\frac{1}{\tau_+}\sum_{w \in \mathbf{k}_1}(|\alpha|+|\underline{\alpha}| )\int_v |wg|dv, \\
\nonumber \left|\overline{S}^{\mu} G_{ \mu \nu} J(g)^{\nu} \right| & \lesssim & \tau_+ |\rho| \int_v \frac{v^{\underline{L}}}{v^0} |g|dv+\tau_- |\rho| \int_v \frac{v^L}{v^0} |g|dv+\tau_+|\alpha| \int_v \frac{|v^A|}{v^0} |g| dv+\tau_-|\underline{\alpha}| \int_v \frac{|v^A|}{v^0}|g| dv \\ \nonumber
& \lesssim & \left( |\alpha|+|\rho|+ \frac{\tau_-}{\tau_+}|\underline{\alpha}| \right) \sum_{z \in \mathbf{k}_1} \int_v |z g | dv \hspace{1cm} \text{if $|x| \geq t$},\\ \nonumber
& \lesssim & |\rho| \int_v \left( \tau_-+\sum_{z \in \mathbf{k}_1} |z| \right) |g| dv+\left( |\alpha|+ \frac{\tau_-}{\tau_+}|\underline{\alpha}| \right) \int_v \sum_{z \in \mathbf{k}_1} |z g | dv \hspace{1cm} \text{otherwise}. \end{eqnarray} \end{Lem} We are now ready to improve the bootstrap assumptions concerning the electromagnetic field. \subsection{For $\mathcal{E}^0_N[F]$} Using Proposition \ref{energyMax1} and commutation formula of Proposition \ref{CommuFsimple}, we have, for all $t \in [0,T]$, \begin{equation}\label{28:eq}
\mathcal{E}^0_N[F](t)-2\mathcal{E}^0_N[F](0) \lesssim \sum_{|\gamma| \leq N} \sum_{\begin{subarray}{l} p \leq |k|+|\beta| \leq N \\ \hspace{3mm} |k| \leq N-1 \end{subarray}} \int_0^t \int_{\Sigma_s} |\mathcal{L}_{Z^{\gamma}}(F)_{\mu 0} J(P_{k,p}(\Phi) Y^{\beta} f)^{\mu} | dx ds. \end{equation}
We fix $|k|+|\beta| \leq N$, $p \leq N$ and $|\gamma| \leq N$. Denoting the null decomposition of $\mathcal{L}_{Z^{\gamma}}(F)$ by $(\alpha, \underline{\alpha}, \rho, \sigma)$, $P_{k,p}(\Phi) Y^{\beta} f$ by $g$ and applying Lemma \ref{calculGFener}, one has
$$ \int_0^t \int_{\Sigma_s} |\mathcal{L}_{Z^{\gamma}}(F)_{\mu 0} J(P_{k,p}(\Phi) Y^{\beta} f)^{\mu} | dx ds \lesssim \int_0^t \int_{\Sigma_s} |\rho| \int_v |g| dv +\left( |\alpha|+|\underline{\alpha}| \right) \sum_{w \in \mathbf{k}_1} \frac{1}{\tau_+} \int_v \left|w g \right|dv dx ds.$$ On the one hand, using Proposition \ref{estiL2}, \begin{eqnarray}
\nonumber \sum_{w \in \mathbf{k}_1} \int_0^t \int_{\Sigma_s} \left( |\alpha|+|\underline{\alpha}| \right) \int_v \frac{1}{\tau_+} \left|w g \right|dv dx ds & \lesssim & \sum_{w \in \mathbf{k}_1} \int_0^t \sqrt{\mathcal{E}^0_N[F](s)} \left\| \frac{1}{\tau_+} \int_v |wg| dv \right\|_{L^2(\Sigma_s)} ds \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}}. \end{eqnarray} On the other hand, as $\rho = \rho (\mathcal{L}_{Z^{\gamma}}(\widetilde{F})) +\rho ( \mathcal{L}_{Z^{\gamma}}(\overline{F}))$ and $\rho ( \mathcal{L}_{Z^{\gamma}}(\overline{F})) \lesssim \epsilon \tau_+^{-2}$ , we have, using Proposition \ref{estiL2} and the bootstrap assumptions \eqref{bootf3}, \eqref{bootext} and \eqref{bootF4}, \begin{eqnarray}
\nonumber \int_0^t \int_{\Sigma_s} |\rho| \int_v |g| dv dx ds & \lesssim & \int_0^t \left\| \sqrt{\tau_+} \rho ( \mathcal{L}_{Z^{\gamma}}(\widetilde{F})) \right\|_{L^2(\Sigma_s)} \left\| \frac{1}{\sqrt{\tau_+}} \int_v |g| dv \right\|_{L^2(\Sigma_s)}+\int_{\Sigma_s} \rho ( \mathcal{L}_{Z^{\gamma}}(\overline{F})) \int_v |g| dv dx ds \\ \nonumber & \lesssim & \int_0^t \sqrt{\mathcal{E}^{Ext}_N[\widetilde{F}](s)+\mathcal{E}_N[F](s)} \frac{\epsilon }{(1+s)^{\frac{5}{4}}}ds+ \int_0^t \frac{\epsilon}{(1+s)^2} \mathbb{E}[g](s) ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray} The right-hand side of \eqref{28:eq} is then bounded by $\epsilon^{\frac{3}{2}}$, implying that $\mathcal{E}^0_N[F] \leq 3\epsilon$ on $[0,T[$ if $\epsilon$ is small enough.
\subsection{The weighted norm for the exterior region}
Applying Proposition \ref{energyMax1} and using $\mathcal{E}^{Ext}_N[\widetilde{F}](0) \leq \epsilon$ as well as $\widetilde{F}=F-\overline{F}$, we have, for all $t \in [0,T[$,
$$ \mathcal{E}_N^{Ext}[\widetilde{F}](t) \leq 6\epsilon+\sum_{|\gamma| \leq N} \int_0^t \int_{\overline{\Sigma}^0_s} \left| \overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} \nabla^{\lambda} {\mathcal{L}_{Z^{\gamma}}(F)_{\lambda} }^{ \nu} \right| dx ds+\int_0^t \int_{\overline{\Sigma}^0_s} \left| \overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} \nabla^{\lambda} {\mathcal{L}_{Z^{\gamma}}(\overline{F})_{\lambda} }^{ \nu} \right| dx ds .$$
Let us fix $|\gamma| \leq N$ and denote the null decomposition of $\mathcal{L}_{Z^{\gamma}}(\widetilde{F})$ by $(\alpha, \underline{\alpha}, \rho , \sigma)$. As previously, using Proposition \ref{CommuFsimple},
$$\int_0^t \int_{\overline{\Sigma}^0_s} \left| \overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} \nabla^{\lambda} {\mathcal{L}_{Z^{\gamma}}(F)_{\lambda} }^{ \nu} \right| dx ds \lesssim \sum_{\begin{subarray}{l} p \leq |k|+|\beta| \leq |\gamma| \\ \hspace{3mm} |k| \leq |\gamma|-1 \end{subarray}} \int_0^t \int_{\overline{\Sigma}^0_s} |\overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} J(P_{k,p}(\Phi) Y^{\beta} f)^{\nu} | dx ds.$$
We fix $|k|+|\beta| \leq N$, $p \leq N$ and $|\gamma| \leq N$ and we denote again $P_{k,p}(\Phi) Y^{\beta} f$ by $g$. Using successively Lemma \ref{calculGFener}, the Cauchy-Schwarz inequality, the bootstrap assumption \eqref{bootext} and Proposition \ref{estiL2}, we obtain \begin{eqnarray}
\nonumber \int_0^t \int_{\overline{\Sigma}^0_s} |\overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} J(P_{k,p}(\Phi) Y^{\beta} f)^{\nu} | dx ds & \lesssim & \int_0^t \int_{\Sigma_s} \left(|\rho|+ |\alpha|+\frac{\sqrt{\tau_-}}{\sqrt{\tau_+}}|\underline{\alpha}| \right) \sum_{w \in \mathbf{k}_1} \int_v \left|w g \right|dv dx ds. \\ \nonumber
& \lesssim & \sum_{w \in \mathbf{k}_1} \int_0^t \sqrt{\mathcal{E}_N^{Ext}[\widetilde{F}](s)} \left\| \frac{1}{\sqrt{\tau_+}} \int_v \left|w g \right|dv \right\|_{L^2(\Sigma_s)} ds \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \int_0^{+\infty} \frac{ds}{(1+s)^{\frac{5}{4}}} \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray}
Using Proposition \ref{propcharge} and iterating the commutation formula of Proposition \ref{basiccom}, we have,
$$\tau_+^2\left| \nabla^{\mu} {\mathcal{L}_{Z^{\gamma}}(\overline{F})_{\mu} }^{ L} \right|(t,x) +\tau_+^4\left| \nabla^{\mu} {\mathcal{L}_{Z^{\gamma}}(\overline{F})_{\mu} }^{ \underline{L}} \right|(t,x) + \tau_+^3 \left| \nabla^{\mu} {\mathcal{L}_{Z^{\gamma}}(\overline{F})_{\mu} }^{ A} \right|(t,x) \lesssim |Q(F)| \mathds{1}_{-2 \leq t-|x| \leq -1}(t,x).$$
Consequently, as $|Q(F)| \leq \| f_0 \|_{L^1_{x,v}} \leq \epsilon$, $\left| \overline{S}^L \right| \lesssim \tau_+$ and $\left| \overline{S}^{\underline{L}} \right| \lesssim \tau_-$,
$$|\overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} \nabla^{\lambda} {\mathcal{L}_{Z^{\gamma}}(\overline{F})_{\lambda} }^{\nu}| \lesssim \left( \tau_+ |\rho |\frac{\epsilon}{\tau_+^4}+\tau_- |\rho |\frac{\epsilon}{\tau_+^2}+\tau_+ |\alpha |\frac{\epsilon}{\tau_+^3}+\tau_- |\underline{\alpha} | \frac{\epsilon}{\tau_+^3} \right) \mathds{1}_{-2 \leq t-|x| \leq -1}(t,x).$$
Note now that $\tau_- \mathds{1}_{-2 \leq t-|x| \leq -1} \leq \sqrt{5}$, so that, using the bootstrap assumption \eqref{bootext} and the Cauchy-Schwarz inequality, \begin{eqnarray}
\nonumber \int_0^t \int_{\overline{\Sigma}^0_s} \left| \overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(\widetilde{F})_{\mu \nu} \nabla^{\lambda} {\mathcal{L}_{Z^{\gamma}}(\overline{F})_{\lambda} }^{ \nu} \right| dx ds & \lesssim & \int_0^t \frac{\epsilon}{(1+s)^{\frac{5}{2}}} \int_{s+1 \leq |x| \leq s+2} \sqrt{\tau_+}|\rho|+\sqrt{\tau_+}|\alpha|+|\underline{\alpha}| dx ds \\ \nonumber & \lesssim & \int_0^t \frac{\epsilon}{(1+s)^{\frac{5}{2}}} \sqrt{\mathcal{E}_N^{Ext}[\widetilde{F}](s)} \sqrt{s^2+1} ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray} Thus, if $\epsilon$ is small enough, we obtain $\mathcal{E}^{Ext}_N[\widetilde{F}] \leq 7 \epsilon$ on $[0,T[$ which improves the bootstrap assumption \eqref{bootext}.
\subsection{The weighted norms for the interior region}
Recall from Proposition \ref{energyMax1} that we have, for $Q \in \{ N-3, N-1, N \}$ and $t \in [0,T[$, \begin{equation}\label{ensource}
\mathcal{E}_Q[F](t) \hspace{2mm} \leq \hspace{2mm} 24\epsilon +\sum_{|\gamma| \leq Q} \int_0^t \int_{\Sigma^0_s} \left| \overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \nabla^{\lambda} {\mathcal{L}_{Z^{\gamma}}(F)_{\lambda} }^{ \nu} \right| dx ds, \end{equation} since $\mathcal{E}^{Ext}_{N}[\widetilde{F}] \leq 8\epsilon$ on $[0,T[$ by the bootstrap assumption \eqref{bootext}. The remainder of this subsection is divided into two parts. We consider first $Q \in \{ N-3, N-1 \}$ and we end with $Q=N$ as we need to use in that case a worse commutation formula in order to avoid derivatives of $\Phi$ of order $N$, which is the reason for the stronger loss on the top order energy norm.
\subsubsection{The lower order energy norms} Let $Q \in \{ N-3, N-1 \}$. According to the commutation formula of Proposition \ref{ComuMaxN}, we can bound the last term of \eqref{ensource} by a linear combination of the following ones. \begin{eqnarray}
\hspace{-4mm} \mathcal{I}_1 \hspace{-1mm} & := & \hspace{-1mm} \int_0^t \int_{|x| \leq s} \left| \overline{S}^{\mu} \mathcal{L}_{ Z^{\gamma}}(F)_{\mu \nu} \int_v \frac{v^{\nu}}{v^0} P^X_{\xi}(\Phi) Y^{\beta} f dv \right| dx ds, \hspace{5mm} \text{with} \hspace{5mm} |\gamma| , \hspace{1mm} |\xi|+|\beta| \leq Q, \label{pevar} \\
\hspace{-4mm} \mathcal{I}_2 \hspace{-1mm} & := & \hspace{-1mm} \int_0^t \int_{|x| \leq s} \left| \overline{S}^{\mu} \mathcal{L}_{ Z^{\gamma}}(F)_{\mu \nu} \int_v \frac{z}{\tau_+} P_{k,p}( \Phi) Y^{\beta} f dv \right| dxds, \hspace{5mm} \text{with} \hspace{5mm} |\gamma| , \hspace{1mm} |k|+|\beta| \leq Q, \hspace{3mm} z \in \mathbf{k}_1, \label{pevarbis} \end{eqnarray}
$0 \leq \nu \leq 3$ and $ p \leq 3N$. Fix $|\gamma| \leq Q$ and denote the null decomposition of $\mathcal{L}_{Z^{\gamma}}(F)$ by $(\alpha, \underline{\alpha}, \rho, \sigma)$. We start with \eqref{pevarbis}, which can be estimated independently of $Q$. Recall that $\left| \overline{S}^{L} \right| \lesssim \tau_+$ and $\left| \overline{S}^{\underline{L}} \right| \lesssim \tau_-$, so that, using Proposition \ref{estiL2} and the bootstrap assumption \eqref{bootF3}, \begin{eqnarray}
\nonumber \mathcal{I}_2 & \lesssim & \int_0^t \int_{\Sigma^0_s} \left( \tau_+ |\rho|+\tau_+ |\alpha|+\tau_- |\underline{\alpha}| \right) \int_v \left| \frac{z}{\tau_+} P_{k,p}( \Phi) Y^{\beta} f \right| dvdxds \\ \nonumber
& \lesssim & \int_0^t \sqrt{\mathcal{E}_{N-1}[F](s)} \left\| \frac{1}{\sqrt{\tau_+}} \int_v \left| z P_{k,p}( \Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma_s)} ds \\ \nonumber & \lesssim & \epsilon^{\frac{3}{2}} \int_0^t \frac{\log^M(3+s)}{(1+s)^{\frac{5}{4}}} ds \hspace{2mm} \lesssim \hspace{2mm} \epsilon^{\frac{3}{2}}. \end{eqnarray}
We now turn to \eqref{pevar} and then consider $|\xi|+|\beta| \leq Q$. Start by noticing that, by Lemma \ref{calculGFener},
$$ \left| \overline{S}^{\mu} \mathcal{L}_{ Z^{\gamma}}(F)_{\mu \nu} \hspace{-0.3mm} \int_v \frac{v^{\nu}}{v^0} P^X_{\xi}(\Phi) Y^{\beta} f dv \right| \lesssim \tau_-|\rho| \int_v \left| P^X_{\xi}(\Phi) Y^{\beta} f \right| dv+\left(|\rho|+ |\alpha|+\frac{\tau_-}{\tau_+} |\underline{\alpha}| \right) \hspace{-1mm} \sum_{w \in \mathbf{k}_1} \hspace{-0.5mm} \int_v \left| w P^X_{\xi}(\Phi) Y^{\beta} f \right| dv .$$ Consequently, by the bootstrap assumption \eqref{bootF3} and Proposition \ref{estiL2}, \begin{eqnarray}
\nonumber \mathcal{I}_1 & \lesssim & \int_0^t \sqrt{\mathcal{E}_{Q}[F](s)} \left( \left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma_s)}+ \sum_{w \in \mathbf{k}_1} \left\| \frac{1}{\sqrt{\tau_+}} \int_v \left|w P^X_{\xi}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma_s)} \right) ds \\ \nonumber
& \lesssim & \epsilon^{\frac{3}{2}} + \int_0^t \sqrt{\mathcal{E}_{Q}[F](s)} \left\| \sqrt{\tau_-} \int_v \left| P^X_{\xi}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma_s)}ds. \end{eqnarray} The last integral to estimate is the source of the small growth of $\mathcal{E}_Q[F]$. We can bound it, using again the bootstrap assumptions \eqref{bootF2}, \eqref{bootF3} and Proposition \ref{crucial1}, by \begin{itemize} \item $\epsilon^{\frac{3}{2}} \log^2(3+t)$ if $Q=N-3$ and \item $\epsilon^{\frac{3}{2}} \log^{2M}(3+t)$ otherwise. \end{itemize} Hence, combining this with \eqref{ensource} we obtain, for $\epsilon$ small enough, that \begin{itemize} \item $\mathcal{E}_{N-3}[F](t) \leq 25 \epsilon \log^2(3+t)$ for all $t \in [0,T[$ and \item $\mathcal{E}_{N-1}[F](t) \leq 25 \epsilon \log^{2M}(3+t)$ for all $t \in [0,T[$. \end{itemize}
\subsubsection{The top order energy norm} We consider here the case $Q=N$ and we then apply this time the commutation formula of Proposition \ref{CommuFsimple}, so that the last term of \eqref{ensource} can be bounded by a linear combination of terms of the form
$$ \mathcal{I}:= \int_0^t \int_{\Sigma^{0}_s} \left| \overline{S}^{\mu} \mathcal{L}_{Z^{\gamma}}(F)_{\mu \nu} \int_v \frac{v^{\nu}}{v^0} P_{q,p}(\Phi) Y^{\beta} f dv \right| dx ds,$$
with $|\gamma| \leq N$, $|q|+|\beta| \leq N$, $|q| \leq N-1$ and $ p \leq q_X+\beta_T$. Let us fix such parameters. Following the computations made previously to estimate $\mathcal{I}_1$ and using $\mathcal{E}_{N}[F](s) \lesssim \sqrt{\epsilon} (1+s)^{\eta} \lesssim \sqrt{\epsilon} (1+s)^{\frac{1}{8}}$, we get \begin{eqnarray}
\nonumber \mathcal{I} & \lesssim & \int_0^t \sqrt{\mathcal{E}_{N}[F](s)} \left( \left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma^0_s)}+ \sum_{w \in \mathbf{k}_1} \left\| \frac{1}{\sqrt{\tau_+}} \int_v \left| w P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma_s)} \right) ds \\
& \lesssim & \epsilon^{\frac{3}{2}} + \sqrt{\epsilon} \int_0^t (1+s)^{\frac{\eta}{2}} \left\| \sqrt{\tau_-} \int_v \left| P_{q,p}(\Phi) Y^{\beta} f \right| dv \right\|_{L^2(\Sigma^0_s)}ds. \label{lafin} \end{eqnarray} Applying now Proposition \ref{crucial2}, we can bound \eqref{lafin} by $\epsilon^{\frac{3}{2}}(1+t)^{\eta}$. Thus, if $\epsilon$ is small enough, we obtain $\mathcal{E}_N[F](t) \leq 25 \epsilon (1+t)^{\eta}$ for all $t \in [0,T[$, which concludes the improvement of the bootstrap assumption \eqref{bootF4} and then the proof.
\end{document} |
\begin{document}
\pagestyle{headings}
\title{Art Gallery Localization\thanks{This work was supported by NSERC.}}
\titlerunning{Art Gallery Localization}
\author{Prosenjit Bose\inst{1} \and Jean-Lou De Carufel\inst{2} \and Alina Shaikhet\inst{1} \and Michiel Smid\inst{1}}
\authorrunning{P. Bose, J.-L. De Carufel, A. Shaikhet and M. Smid}
\institute{School of Computer Science, Carleton University, Ottawa, Canada,\\ \email{\{jit, michiel\}@scs.carleton.ca, [email protected]},\\ \and School of Electrical Engineering and Computer Science, U. of Ottawa, Canada,\\ \email{[email protected]}}
\maketitle
\begin{abstract} We study the problem of placing a set $T$ of broadcast towers in a simple polygon $P$ in order for any point to locate itself in the interior of $P$. Let $V(p)$ denote the \emph{visibility polygon} of a point $p$, as the set of all points $q \in P$ that are visible to $p$. For any point $p \in P$: for each tower $t \in T \cap V(p)$ the point $p$ receives the coordinates of $t$ and the exact Euclidean distance between $t$ and $p$. From this information $p$ can determine its coordinates. We show a tower-positioning algorithm that computes such a set $T$ of size at most $\lfloor 2n/3\rfloor$, where $n$ is the size of $P$. This improves the previous upper bound of $\lfloor 8n/9\rfloor$ towers~\cite{DBLP:conf/cccg/DippelS15}. We also show that $\lfloor 2n/3\rfloor$ towers are sometimes necessary. \keywords{art gallery, trilateration, GPS, polygon partition, localization} \end{abstract}
\section{Introduction} \label{sec:introduction} \pdfbookmark[1]{Introduction}{sec:introduction}
The art gallery problem was introduced in 1973 when Victor Klee asked how many guards are sufficient to \emph{guard} the interior of a simple polygon having $n$ vertices. Although it has been shown by Chv{\'a}tal that $\lfloor n/3\rfloor$ guards are always sufficient and sometimes necessary~\cite{Chvatal197539}, and such a set of guards can be computed easily~\cite{Fisk1978374}, such solutions are usually far from optimal in terms of minimizing the number of guards for a particular input polygon. Moreover, it was shown that determining an optimal number of guards is NP-hard, even for simple polygons~\cite{Lee:1986:CCA:13643.13657}. Determining the actual locations of those guards is even harder~\cite{DBLP:conf/compgeom/AbrahamsenAM17}.
\emph{Trilateration} is the process of determining absolute or relative locations of points by measurement of distances, using the geometry of the environment. In addition to its interest as a geometric problem, trilateration has practical applications in surveying and navigation, including global positioning systems (GPS). Every GPS satellite transmits information about its position and the current time at regular intervals. These signals are intercepted by a GPS receiver, which calculates how far away each satellite is based on how long it took for the messages to arrive. GPS receivers take this information and use trilateration to calculate the user's location.
In our research we combine the art gallery problem with trilateration. We address the problem of placing broadcast \emph{towers} in a simple polygon $P$ in order for a point in $P$ (let us call it an \emph{agent}) to locate itself. Towers can be defined as points, which can transmit their coordinates together with a time stamp to other points in their visibility region. The agent receives messages from all the towers that belong to its visibility region. Given a message from the tower $t$, the agent can determine its distance to $t$. In our context, \emph{trilateration} is the process during which the agent can determine its absolute coordinates from the messages the agent receives. Receiving a message from one tower only will not be sufficient for the agent to locate itself (unless the agent and the tower are at the same location). In the Euclidean plane, two distinct circles intersect in at most two points. If a point lies on two circles, then the circle centers and the two radii provide sufficient information to narrow the possible locations down to two. Additional information may narrow the possibilities down to one unique location.
In relation to GPS systems, towers can be viewed as GPS satellites, while agents (query points interior to the polygon) can be compared to GPS receivers. Naturally, we would like to minimize the number of towers.
Let $P$ be a simple polygon in general position (no three vertices are collinear) having a total of $n$ vertices on its boundary (denoted by $\partial P$). Note that $\partial P \subset P$. Two points $u, v \in P$ are \emph{visible to each other} if the segment $\overline{uv}$ is contained in $P$. We also say that $u$ \emph{sees} $v$. Note that $\overline{uv}$ may touch $\partial P$ in one or more points. For $u \in P$, we let $V(u)$ denote the \emph{visibility polygon} of $u$, as the set of all points $q \in P$ that are visible to $u$. Notice that $V(u)$ is a star-shaped polygon contained in $P$ and $u$ belongs to its \emph{kernel} (the set of points from which all of $V(u)$ is visible).
\textbf{Problem Definition:} Let $T$ be a set of points (called \emph{towers}) in $P$ satisfying the following properties. For any point $p \in P$: for each $t \in T \cap V(p)$, the point $p$ receives the coordinates of $t$ and can compute the Euclidean distance between $t$ and $p$, denoted $d(t,p)$. From this information, $p$ can determine its coordinates. We consider the following problems: \begin{enumerate} \item Design an algorithm that, on any input polygon $P$ in general position, computes a ``small'' set $T$ of towers. \item Design a localization algorithm. \end{enumerate}
We show how to compute such a set $T$ of size at most $\lfloor 2n/3\rfloor$ by using the polygon partition method introduced by T{\'o}th~\cite{Toth2000121}. T{\'o}th showed that any simple polygon with $n$ vertices can be guarded by $\lfloor n/3\rfloor$ point guards whose range of vision is $180^\circ$. T{\'o}th partitions a polygon into subpolygons, on which he then can apply induction. He cuts along diagonals whenever it is possible, otherwise he cuts along a continuation of some edge of $P$; along a two-line segment made of an extension of two edges of $P$ that intersect inside $P$; or along the bisector of a reflex vertex of $P$. Notice that the three latter types of cuts may introduce new vertices that are not necessarily in general position with the given set of vertices. Succeeding partitions of the subpolygons may create polygons that are not simple. However, T{\'o}th assumed that his partition method creates subpolygons whose vertices are in general position (refer to~\cite{Toth2000121} Section $2$). We lift this assumption and show how to adapt his method to a wider range of polygons, which we define in Section~\ref{sec:partition}. Under the new conditions the partition of $P$ may contain star-shaped polygons whose kernel is a single point. It does not pose an obstacle to T{\'o}th's problem, but it is a severe complication to our problem, because we require a pair of distinct towers in the kernel of each polygon of the partition. We modify T{\'o}th's partition method and show how to use it with respect to our problem. It is important to notice that we assume that the input polygon is in general position, while non-general position may occur for subpolygons of the partition (refer to Definition~\ref{def:polygon}). We show that after the modification each $180^\circ$-guard $g$ can be replaced with a pair of towers \textbf{close} to $g$. We embed the orientation of the $180^\circ$-guard into the coordinates of the towers. 
That is, we specify to which side of the line $L$ through the pair of towers their primary localization region resides. We do it by positioning the towers at a distance that is an exact rational number. The parity of the numerator of the rational number (in the reduced form) defines which one of the two half-planes (defined by $L$) the pair of towers are responsible for. We call it the \emph{parity trick}. For example, if we want a pair $t_1$, $t_2$ of towers to be responsible for the half-plane to the left of the line through $t_1$ and $t_2$, then we position the towers at a distance, which is a reduced rational number whose numerator is even. The localization algorithm is allowed to use this information.
Our interest in this problem started with the paper by Dippel and Sundaram~\cite{DBLP:conf/cccg/DippelS15}.
They provide the first non-trivial bounds on agent localization in simple polygons, by showing that $\lfloor 8n/9\rfloor$ towers suffice for any non-degenerate polygon of $n$ vertices, and present an $O(n \log n)$ algorithm for the corresponding placement. Their approach is to decompose the polygon into at most $\lfloor n/3\rfloor$ fans. A polygon $P'$ is a \emph{fan} if there exists a vertex $u$ such that for every other vertex $v$ not adjacent to $u$, $\overline{u v}$ is a diagonal of $P'$; the vertex $u$ is called the \emph{center} of the fan. In each fan with fewer than $4$ triangles Dippel and Sundaram position a pair of towers on an edge of the fan; every fan with $4$ or more triangles receives a triple of towers in its kernel. In a classical trilateration, the algorithm for locating an agent knows the coordinates of the towers that can see $p$ together with distances between $p$ and the corresponding towers. However, the localization algorithm presented in~\cite{DBLP:conf/cccg/DippelS15} requires a lot of additional information, such as complete information about the polygon, its decomposition into fans and the coordinates of \textbf{all} towers.
Our localization algorithm has no information about $P$. It receives as input only the coordinates of the towers that can see $p$ together with their distances to $p$. In addition our algorithm is empowered by the knowledge of the parity trick. When only a pair $t_1$, $t_2$ of towers can see $p$ then the coordinates of the towers together with the distances $d(t_1,p)$ and $d(t_2,p)$ provide sufficient information to narrow the possible locations of $p$ down to two. Refer to Figures~\ref{fig:example2},\ref{fig:example3}. Those two locations are reflections of each other over the line through $t_1$ and $t_2$. In this situation our localization algorithm uses the parity trick. It calculates the distance between the two towers and judging by the parity of this number decides which of the two possible locations is the correct position of $p$.
We show how to position at most $\lfloor 2n/3\rfloor$ towers inside $P$, which is an improvement over the previous upper bound in~\cite{DBLP:conf/cccg/DippelS15}. We also show that $\lfloor 2n/3\rfloor$ towers are sometimes necessary. The comb polygon from the original art gallery problem can be used to show a lower bound. Refer to Fig.~\ref{fig:comb}. No point in the comb can see two different comb spikes. Thus we need at least two towers per spike to localize all of the points in its interior. In addition we need to know the parity trick. Or, alternatively, we need to know $P$, its exact location and orientation. We show in Theorem~\ref{thm:no_map} that without any additional information (such as the parity trick or the complete knowledge about $P$ including its partition) it is \textbf{not} possible to localize an agent in a simple $n$-gon (where $n = 3k + q$, for integer $k \geq 1$ and $q = 0$, $1$ or $2$) with less than $n - q$ towers.
In Section~\ref{sec:preliminaries} we give basic definitions and present some properties and observations. Section~\ref{sec:partition} shows some of our modifications of the polygon partition given by T{\'o}th~\cite{Toth2000121} and its adaptation to our problem. In Section~\ref{sec:localization} we present a localization algorithm.
\section{Preliminaries} \label{sec:preliminaries} \pdfbookmark[1]{Preliminaries}{sec:preliminaries}
Consider a point (an agent) $p$ in the interior of $P$, whose location is unknown. Let $C(x,r)$ denote the circle centered at a point $x$ with radius $r$. If only one tower $t$ can see $p$ then $p$ can be anywhere on $C(t,d(p,t)) \cap V(t)$ (refer to Fig.~\ref{fig:example1}), which may not be enough to identify the location of~$p$. By the \emph{map} of $P$ we mean the complete information about $P$ including the coordinates of all the vertices of $P$ and the vertex adjacency list. Notice that one must know the map of $P$ to calculate $V(t)$. If two towers $t_1$ and $t_2$ can see $p$ then the location of $p$ can be narrowed down to at most two points $C(t_1,d(p,t_1)) \cap C(t_2,d(p,t_2)) \cap V(t_1) \cap V(t_2)$. Refer to Fig.~\ref{fig:example2}. The two points are reflections of each other over the line through $t_1$ and $t_2$. We call this situation the \emph{ambiguity along the line}, because without any additional information we do not know to which one of the two locations $p$ belongs. To avoid this uncertainty we can place both towers on the same edge of $P$. Consider for example Fig.~\ref{fig:example3} where two towers are placed on the line segment $kernel(P) \cap \partial P$. In this example, if the map of $P$ is known (and thus we know $V(t_1)$ and $V(t_2)$) then the intersection $C(t_1,d(p,t_1)) \cap C(t_2,d(p,t_2)) \cap V(t_1) \cap V(t_2)$ is a single point (highlighted in red). Alternatively, if the map of $P$ is unknown, we can place a triple of non-collinear towers in the kernel of $P$ (highlighted in cyan on Fig.~\ref{fig:example3}) to localize any point interior to $P$.
\begin{figure}
\caption{Trilateration example. \textbf{(a)} The point $p$ can be anywhere on $C(t,d(p,t)) \cap V(t)$, which is highlighted in red. \textbf{(b)} Ambiguity along the line $L(t_1, t_2)$. \textbf{(c)} If the map of $P$ is known then the location of $p$ can be identified precisely. The kernel of $P$ is highlighted in cyan.}
\label{fig:example1}
\label{fig:example2}
\label{fig:example3}
\label{fig:example}
\end{figure}
For a simple polygon $P$ in general position, we can partition it into star-shaped polygons $P_1, P_2, \ldots P_l$ such that $kernel(P_i)$, for every $1 \leq i \leq l$, does not degenerate into a single point. In every $P_i$ ($1 \leq i \leq l$) we can position a pair of towers on a line segment in $kernel(P_i) \cap \partial P_i$ (such that the towers belong to the same edge of $P_i$) or a triple of towers in $kernel(P_i)$ if $kernel(P_i) \cap \partial P_i$ is empty or contains a single point. Notice that a pair of towers positioned on the edge of $P_i$ will not necessarily be on the boundary of $P$. Thus, to localize an agent, it is not enough to know the map of $P$. We need to know more, for example, if in addition to the map of $P$ we know the partition of $P$ into star-shaped polygons and which pair of towers is responsible for which subpolygon then the agent can be localized.
In our solution we do not use this extra information or the map of $P$. Moreover, to get a tight bound of $\lfloor 2n/3\rfloor$ towers, we abstain from placing a triple of towers per subpolygon, since some polygons cannot be partitioned into less than $\lfloor n/3\rfloor$ star-shaped subpolygons. The idea is to use a \emph{parity trick}.
\textbf{Parity trick:} Let $L(u, v)$ be the line through points $u$ and $v$. Let $L(u, v)^+$ denote the half plane to the left of $L(u,v)$ (or above, if $L$ is horizontal). Similarly, $L(u, v)^-$ denotes the half plane to the right (or below) of $L$. We embed information about the primary orientation of the pair of towers into their coordinates. If we want a pair $t_1$, $t_2$ of towers to be responsible for $L(t_1, t_2 )^+$ (respectively $L(t_1, t_2 )^-$), then we position the towers at a distance which is a reduced rational number whose numerator is even (respectively odd). In this way, we specify on which side of $L(t_1 , t_2 )$ the primary localization region of $t_1$ and $t_2$ resides. Refer to Sect~\ref{subsec:PartitionAlg} where the parity trick is explained in greater detail.
To achieve localization with at most $\lfloor 2n/3\rfloor$ towers we should partition $P$ into at most $\lfloor n/3\rfloor$ star-shaped polygons $P_1,\ldots, P_l$ such that there exists a line segment $\overline{uv} \subseteq kernel(P_i) \cap \partial P_i$ such that $P_i \subseteq L(u, v )^+$ or $P_i \subseteq L(u, v )^-$ for every $1 \leq i \leq l$ (we assume that $u$ and $v$ are distinct points).
\begin{theorem}[Chv{\'a}tal's Theorem~\cite{Chvatal197539}] \label{thm:chvatal} Every triangulation of a polygon with $n$ vertices can be partitioned into $m$ fans where $m \leq \lfloor n/3\rfloor$. \end{theorem}
The statement of the following lemma may seem trivial, still we provide its proof for completeness.
\begin{lemma} \label{lem:pentagon} Any simple polygon $P$ with $3$, $4$ or $5$ sides is star-shaped and its kernel contains a boundary segment that is not a single point. \end{lemma} \begin{proof} Let $n$ be the number of vertices of $P = v_1 v_2 \ldots v_n$. By Theorem~\ref{thm:chvatal}, $P$ can be partitioned into $\lfloor n/3\rfloor = 1$ fans (since $n = 3$, $4$ or $5$). Notice that a fan is star-shaped by definition, hence $P$ is star-shaped. The kernel of $P$ is an intersection of $n$ half-planes defined by the edges of $P$. Let $r$ be the number of reflex angles of $P$. There are three cases to consider: \begin{enumerate} \item $r = 0$: $P$ is a convex polygon and thus $kernel(P) = P$, implying $\partial P \subseteq kernel(P)$. \item $r = 1$: Let $v_1$ be the vertex of $P$ at the reflex angle. Refer to Fig.~\ref{fig:KernelBoundary1} and~\ref{fig:KernelBoundary2} for possible polygons. The angles at $v_2$ and $v_n$ are smaller than $\pi$ by the simplicity of $P$. The claim of the lemma then is implied by inspection. Consider the edge $\overline{v_3 v_4}$. It contains a line segment that belongs to $kernel(P)$. \begin{figure}
\caption{The kernel of $P$ is highlighted in cyan.}
\label{fig:KernelBoundary1}
\label{fig:KernelBoundary2}
\label{fig:KernelBoundary3}
\label{fig:KernelBoundary4}
\label{fig:KernelBoundary}
\end{figure} \item $r = 2$: In this case $n = 5$. Let $v_1$ be the vertex of $P$ at one of the reflex angles. Refer to Fig.~\ref{fig:KernelBoundary3} and~\ref{fig:KernelBoundary4} for possible polygons. The vertex at the other reflex angle is either adjacent to $v_1$ or not. Consider first the case where the vertices at the reflex angles of $P$ are not adjacent. Without loss of generality let $v_4$ be a vertex of $P$ at a reflex angle (refer to Fig.~\ref{fig:KernelBoundary3}). The angles at $v_2$, $v_3$ and $v_5$ are smaller than $\pi$ by the simplicity of $P$. It follows that the edge $\overline{v_2 v_3}$ must contain a line segment that belongs to $kernel(P)$. Consider now the case where the vertices at the reflex angles of $P$ are adjacent. Without loss of generality let $v_5$ be a vertex of $P$ at a reflex angle (refer to Fig.~\ref{fig:KernelBoundary4}). The claim of the lemma holds similarly to the discussion presented in the case for $r = 1$, Fig.~\ref{fig:KernelBoundary1}. \end{enumerate} \qed \end{proof}
The problem we study is twofold: \begin{enumerate} \item We are given a simple polygon $P$ of size $n$. Our goal is to position at most $\lfloor 2n/3\rfloor$ towers inside $P$ such that every point $p \in P$ can be localized. \item We want to design a localization algorithm which does not know $P$, but knows that the locations of the towers were computed using the parity trick. For any point $p \in P$, its location can be found by using the coordinates of the towers that \textbf{see} $p$ and the distances from those towers to $p$. \end{enumerate}
It may seem counter-intuitive, but the knowledge of the parity trick is \textbf{stronger} than the knowledge of the map of $P$. Some towers (while still on the boundary of some subpolygon of the partition) may end up in the interior of $P$. This is not a problem when the parity trick is used but may lead to ambiguities when only the map of $P$ is known (refer for example to Fig.~\ref{fig:example2}).
The following theorem shows that additional information like the parity trick or the map of $P$ (including its partition) is necessary to achieve localization with the use of less than $n$ towers.
\begin{theorem} \label{thm:no_map}
Let $P$ be a simple polygon with $n$ vertices. An agent cannot localize itself inside $P$ when less than $n-(n\bmod 3)$ towers are used if the {\em only} information available to the localization algorithm are the coordinates of the towers and the distances of the agent to the towers visible to it. \end{theorem} \begin{proof} Let $P$ be an arbitrary simple polygon with $n=3$ vertices. Assume to the contrary, that $P$ can be trilaterated with $2$ towers. Given the coordinates of the two towers $t_1$ and $t_2$ together with the distances to a query point $p$ one can deduce that $p$ is in one of the two possible locations $C(t_1,d(p,t_1)) \cap C(t_2,d(p,t_2)) = \{p_1, p_2\}$. But without additional information it is impossible to choose one location over another. Refer to Fig.~\ref{fig:no_map}. Similarly, any quadrilateral or pentagon requires at least $3$ towers to trilaterate it.
\begin{figure}
\caption{How to locate $p$ if the map of the polygon is unavailable? \textbf{(a)} Ambiguity along $L(t_1, t_2)$. We know that $p_1$ and/or $p_2$ belongs to $P$. \textbf{(b)} We need additional information to tell if $P$ (and $p$) is above or below $L(t_1, t_2)$.}
\label{fig:no_map1}
\label{fig:no_map2}
\label{fig:no_map}
\end{figure}
Let $P$ be a comb polygon with $n = 3k + q$ vertices (for integer $k \geq 1$ and $q = 0$, $1$ or $2$) such that one of its $k$ spikes contains $q$ extra vertices. Refer to Fig.~\ref{fig:comb_full}. No point of $P$ can see the complete interior of two different spikes. Assume to the contrary that $P$ can be trilaterated with fewer than $n - q$ towers. In other words, assume that $P$ can be trilaterated with $3k - 1$ towers. It follows that one of the spikes contains fewer than $3$ towers. We showed for smaller polygons (with $n = 3$, $4$ or $5$) that two towers cannot trilaterate them. Even if we know that the two towers are positioned on the same edge of $P$, the polygon can be mirrored along this edge to avoid unique trilateration. Observe that no polygon or spike can be trilaterated with one or no towers.
\begin{figure}
\caption{Comb polygon with $n = 3k + q$ vertices. \textbf{(a)} One of the $k$ spikes $S$ is shown in cyan. \textbf{(b)} One of the spikes when $q = 1$. \textbf{(c)} One of the spikes when $q = 2$.}
\label{fig:comb}
\label{fig:comb1}
\label{fig:comb2}
\label{fig:comb_full}
\end{figure} \qed \end{proof}
\section{T{\'o}th's Partition} \label{sec:partition} \pdfbookmark[1]{T{\'o}th's Partition}{sec:partition}
A simple polygonal chain $c$ is a \emph{cut} of a polygon $P$ if the two endpoints of $c$ belong to $\partial P$ and all interior points of $c$ are interior points of $P$. A cut $c$ decomposes $P$ into two polygons $P_1$ and $P_2$ such that $P = P_1 \cup P_2$ and $c = P_1 \cap P_2$.
\begin{definition}[Diagonal] A \emph{diagonal} of $P$ is a line segment that connects non-adjacent vertices of $P$ and is contained in or on the boundary of $P$. If $c$ is a cut and a diagonal of $P$ then it is called a \emph{diagonal cut}. \end{definition}
Notice that the above definition of the diagonal is non-standard. We define a diagonal as a line segment that can contain boundary points in its interior, while a diagonal in a standard definition is a line segment whose intersection with the boundary is precisely its two endpoints.
T{\'o}th showed in~\cite{Toth2000121} that any simple polygon in general position of size $n$ can be guarded by $\lfloor n/3\rfloor$ point guards whose range of vision is $180^\circ$ (let us call this result \textbf{T{\'o}th's Theorem}). His approach is to decompose $P$ into subpolygons via \emph{cuts} and to specify the locations of the guards. The cuts are composed of one or two line segments and are not restricted to be diagonal cuts. He uses a constructive induction to show his main result. Let $n_1$ (respectively $n_2$) denote the size of $P_1$ (respectively $P_2$). When $P$ contains a cut that satisfies $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$, the proof of T{\'o}th's Theorem can be completed by applying induction to both $P_1$ and $P_2$.
T{\'o}th's method heavily relies upon the partitioning of a polygon into subpolygons (on which he can apply induction). He performs diagonal cuts whenever it is possible, otherwise he cuts along a continuation of some edge of $P$; along a two-line segment made of an extension of two edges of $P$ that intersect inside $P$; or along the bisector of a reflex vertex of $P$. Notice that the three latter types of cuts may introduce new vertices that are not necessarily in general position with the given set of vertices.
However, T{\'o}th assumes that every cut produces polygons in general position, which is a very strong assumption. We strengthen the work~\cite{Toth2000121} by lifting this assumption and reproving T{\'o}th's result. We assume that the input polygon is in general position, while non-general position may occur for subpolygons of the partition. Moreover, we found and fixed several mistakes in~\cite{Toth2000121}.
\begin{definition}[Good Cut] Let $n > 5$. A cut is called a \emph{good cut} if it satisfies the following: $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$. If a good cut is a diagonal then it is called a \emph{good diagonal cut}. \end{definition}
\begin{definition}[Polygon*] \label{def:polygon} \emph{Polygon*} is a weakly simple polygon (as defined in~\cite{DBLP:conf/soda/ChangEX15}) whose vertices are distinct and whose interior angles are strictly bigger than $0$ and strictly smaller than $2\pi$. Notice that polygon* includes simple polygons. \end{definition}
We assume that every polygon of the partition (i.e. subpolygon of $P$) is polygon*. To avoid confusion, let $P'$ refer to a polygon* to which we apply the inductive partitioning. In other words, $P'$ can refer to the input polygon $P$ before any partition was applied as well as to any subpolygon of $P$ obtained during the partitioning of $P$. Recall that $P$ is in general position, while $P'$ may not be in general position.
\begin{definition}[Triangulation] A decomposition of a polygon into triangles by a maximal set of non-intersecting (but possibly overlapping) diagonals is called a \emph{triangulation} of the polygon. Notice that a triangulation of polygon* may contain triangles whose three vertices are collinear. \end{definition}
Notice that the above definition of triangulation is different from the classical one in that it allows overlapping diagonals and thus permits triangles with three collinear vertices.
If the polygon is not in general position then its triangulation may include triangles whose three vertices are collinear. We call such triangles \emph{degenerate triangles}. Refer to Fig.~\ref{fig:case01}, showing an example of a degenerate triangle $\triangle v_1 v_2 v_3$. The diagonal $\overline{v_1 v_3}$ cannot be a good diagonal cut even if it partitions $P'$ into $P_1$ and $P_2$ such that $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$, because interior points of a cut cannot contain points of $\partial P'$.
To extend T{\'o}th's partition to polygons* we need to extend the definition of a cut. A simple polygonal chain $c'$ is a \emph{dissection} of $P'$ if the two endpoints of $c'$ belong to $\partial P'$ and all interior points of $c'$ are either interior points of $P'$ or belong to $\partial P'$. A dissection $c'$ decomposes $P'$ into two polygons* $P_1$ and $P_2$ (that are not necessarily in general position) such that $P' = P_1 \cup P_2$ and $c' = P_1 \cap P_2$. If $c'$ is a dissection and a diagonal of $P'$ then it is called a \emph{diagonal dissection}.
\begin{definition}[Good Dissection] Let $n > 5$. A dissection is called a \emph{good dissection} if $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$. \end{definition}
We extend the results in~\cite{Toth2000121} by removing the assumption that the partitioning produces subpolygons in general position and by thus strengthening the result. In this paper, we need to refer to many Lemmas, Propositions and Claims from~\cite{Toth2000121}. Indeed, we apply some of these results, we fill the gaps in some proofs of these results and we generalize some others. In order for this paper to be self-contained, we write the statements of all these results with their original numbering~\cite{Toth2000121}.
\noindent \textbf{Simplification Step:} If $P'$ has consecutive vertices $v_1$, $v_2$ and $v_3$ along $\partial P'$ that are collinear then: \begin{enumerate} \item If the angle of $P'$ at $v_2$ is $\pi$ then replace $v_2$ together with its adjacent edges by the edge $\overline{v_1 v_3}$. \item If the angle of $P'$ at $v_2$ is $0$ or $2\pi$ then delete $v_2$ together with its adjacent edges and add the edge $\overline{v_1 v_3}$. Assume w.l.o.g. that the distance between $v_3$ and $v_2$ is smaller than the distance between $v_1$ and $v_2$. The line segment $\overline{v_2 v_3}$ will be guarded even though it was removed from $P'$. Refer to the following subsections for examples. \end{enumerate} In both cases we denote the updated polygon by $P'$; its number of vertices decreased by $1$.
Assume for simplicity that $P'$ does not contain consecutive collinear vertices along $\partial P'$. Let $n > 5$ be the number of vertices of $P'$. Let $k$ be a positive integer and $q \in \{0, 1, 2\}$ such that $n = 3k +q$. Notice that a diagonal dissects $P'$ into $P_1$ (of size $n_1$) and $P_2$ (of size $n_2$) such that $n_1 + n_2 = n+2$.
The proofs of the following three propositions are identical to those in T{\'o}th's paper.
\begin{mydefP} \label{TothProp1} For $P'$ with $n = 3k$ any diagonal is a good dissection. \end{mydefP}
\begin{mydefP} \label{TothProp2} For $P'$ with $n = 3k+1$ there exists a diagonal that represents a good dissection. \end{mydefP}
\begin{mydefP} \label{TothProp3} For $P'$ with $n = 3k+2$ a dissection is a good dissection if it decomposes $P'$ into polygons* $P_1$ and $P_2$ (not necessarily simple polygons) such that $n_1 = 3k_1 + 2$ and $n_2 = 3k_2 + 2$ (for $k_1 +k_2 = k$). \end{mydefP}
\subsection{Case Study} \label{subsec:CaseStudy} \pdfbookmark[2]{Case Study}{subsec:CaseStudy}
\begin{wrapfigure}{r}{0.35\textwidth}
\centering \includegraphics[width=0.3\textwidth]{case01.pdf} \caption{The diagonal $\overline{v_1 v_3}$ contains a vertex $v_2$ of $P'$. $P_2 = P_a \cup P_b$. Notice that $v_2 \notin P_1$.} \label{fig:case01}
\end{wrapfigure} In this subsection we study how Propositions~\ref{TothProp1},~\ref{TothProp2} and~\ref{TothProp3} can be applied to polygons*. Let $\triangle v_1 v_2 v_3$ be a minimum degenerate triangle in $P'$, i.e. it does not contain any vertex of $P'$ other than $v_1$, $v_2$ or $v_3$. Consider the example depicted in Fig.~\ref{fig:case01}. The diagonal $\overline{v_1 v_3}$ partitions $P'$ into two polygons: $P_1$ and $P_2$ ($P_2$ can be further viewed as a union of two subpolygons $P_a$ and $P_b$). Notice that $v_2 \notin P_1$. There is a possibility for $P'$ to have a vertex $v' \notin \{v_1, v_2, v_3\}$ such that $v' \in L_{v_1 v_3}$. However $v'$ cannot belong to the line segment $\overline{v_1 v_3}$, otherwise, the triangle $\triangle v_1 v_2 v_3$ is not minimum, and we can choose $\triangle v_1 v_2 v'$ or $\triangle v' v_2 v_3$ instead. It follows that no vertex of $P_1$ (respectively $P_a$, $P_b$) is located on the edge $\overline{v_1 v_3}$ (respectively $\overline{v_1 v_2}$, $\overline{v_2 v_3}$). Notice that $P'$ may contain an edge $e'$ that contains $\overline{v_1 v_3}$; $P_1$ inherits it (since $v_2 \notin P_1$), but it won't cause any trouble because of the ``simplification'' step described in the beginning of this section. Notice also, that two vertices of $P'$ cannot be at the same location.
Assume that $\overline{v_1 v_3}$ is a good dissection, i.e. it decomposes $P'$ into $P_1$ and $P_2$ such that $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$. Assume also that $n>5$. Let $n_a$ (respectively $n_b$) be the number of vertices of $P_a$ (respectively $P_b$). Notice that $n_a + n_b = n_2 + 1$ because $v_2$ was counted twice. When it is possible, we prefer to avoid cutting along the diagonal $\overline{v_1 v_3}$. However, when necessary, it can be done in the following way. We consider three cases (refer to Fig.~\ref{fig:case01}):
{\color{red} \textbf{Case 1:}} $n = 3k$. By T{\'o}th's Proposition~\ref{TothProp1} every diagonal is a good dissection. If $P_a$ is \textbf{not} composed of the line segment $\overline{v_1 v_2}$ only, then $\overline{v_1 v_2}$ is a good diagonal dissection, that partitions $P'$ into two polygons $P_a$ and $P_1 \cup P_b$. Otherwise, $\overline{v_2 v_3}$ is a good diagonal dissection, that partitions $P'$ into two polygons $P_b$ and $P_1 \cup P_a$. Notice that $\overline{v_1 v_2}$ and $\overline{v_2 v_3}$ cannot be both edges of $P'$ because of the simplification step we applied to $P'$.
Alternatively, we can dissect $P'$ along $\overline{v_1 v_3}$. The polygon $P_2$ has an edge $\overline{v_1 v_3}$ that contains vertex $v_2$. If a $180^\circ$-guard is located at $v_2$ for any subpolygon $P_2'$ of $P_2$, then we position the towers close to $v_2$ on a line segment that contains $v_2$ (but not necessarily in $kernel(P_2') \cap \partial P_2'$) only if $v_2$ is not a vertex of the original polygon $P$. However, if $v_2 \in P$ then we position our towers in close proximity to $v_2$ but in the interior of $P_1$, oriented towards $P_2$.
{\color{red} \textbf{Case 2:}} $n = 3k+1$. Since $\overline{v_1 v_3}$ decomposes $P'$ into $P_1$ and $P_2$ such that $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$ then either $n_1 = 3k_1 + 1$ and $n_2 = 3k_2 + 2$, or $n_1 = 3k_1 + 2$ and $n_2 = 3k_2 + 1$ for $k_1 +k_2 = k$. If $\overline{v_1 v_2}$ is an edge of $P'$ (meaning that $P_a$ consists of a line segment $\overline{v_1 v_2}$ only) then we can dissect $P'$ either along $\overline{v_2 v_3}$ or along $\overline{v_1 v_3}$. In the former case $P'$ will be decomposed into $P_1 \cup \{v_2\}$ and $P_b$ ($v_2$ will be deleted from $P_1 \cup \{v_2\}$ during the simplification step to obtain $P_1$). In the latter case $P'$ will be decomposed into $P_1$ and $P_b \cup \{v_1\}$ ($v_1$ will be removed from $P_b \cup \{v_1\}$ during the simplification step to obtain $P_b$). In either case we do not have to specifically guard the line segment $\overline{v_1 v_2}$. If $P_1$ is guarded then so is $\overline{v_1 v_2}$. Similarly treat the case where $\overline{v_2 v_3}$ is an edge of $P'$.
Assume that $n_a, n_b >2$. Recall, that $n_b = n_2 - n_a + 1$. Several cases are possible: \begin{itemize} \item[$\blacktriangleright$] $n_a = 3k_a + 1$. Then we dissect $P'$ along $\overline{v_1 v_2}$. This partition creates the polygons $P_a$ of size $n_a = 3k_a + 1$ and $P_1 \cup P_b$ of size $3(k_1 + k_2 - k_a) +2$. \item[$\blacktriangleright$] $n_a = 3k_a + 2$. Then we dissect $P'$ along $\overline{v_1 v_2}$. This partition creates the polygons $P_a$ of size $n_a = 3k_a + 2$ and $P_1 \cup P_b$ of size $3(k_1 + k_2 - k_a) +1$. \item[$\blacktriangleright$] $n_a = 3k_a$. Then: \begin{itemize} \item $n_1 = 3k_1 + 2$ and $n_2 = 3k_2 + 1$. Then we dissect $P'$ along $\overline{v_2 v_3}$. This partition creates the polygons $P_b$ of size $n_b = 3(k_2 - k_a)+2$ and $P_1 \cup P_a$ of size $3(k_1 +k_a) +1$. \item $n_1 = 3k_1 + 1$ and $n_2 = 3k_2 + 2$. Then both $\overline{v_1 v_2}$ and $\overline{v_2 v_3}$ are \textbf{not} good diagonal cuts. We cannot avoid dissecting along $\overline{v_1 v_3}$. If a $180^\circ$-guard eventually needs to be positioned at $v_2$ (for any subpolygon of $P_2$) and $v_2$ is a vertex of $P$, then we position our towers close to $v_2$ but in the interior of $P_1$, oriented towards~$P_2$. \end{itemize} \end{itemize}
{\color{red} \textbf{Case 3:}} $n = 3k+2$. In this case $n_1 = 3k_1+2$ and $n_2 = 3k_2+2$. If $n_a = 2$ then either dissect $P'$ along $\overline{v_2 v_3}$ (and later delete $v_2$ from $P_1 \cup v_2$ during the simplification step), or dissect $P'$ along $\overline{v_1 v_3}$ (delete $v_1$ from $P_b \cup v_1$ during the simplification step). Notice that in both cases $\overline{v_1 v_2}$ will be guarded. The case where $n_b = 2$ is similar.
If $n_a, n_b >2$ then we have no choice but to dissect along $\overline{v_1 v_3}$. This creates a polygon $P_2$ whose kernel degenerates into a single point $v_2$. It is not a problem for $180^\circ$-guards but it is a serious obstacle for our problem. If $v_2$ is a vertex of $P$ and a $180^\circ$-guard is located at $v_2$ for any subpolygon of $P_2$ then we position our towers close to $v_2$ but in the interior of $P_1$, and orient those towers towards $P_2$. We consider the general approach to this problem in the following subsection.
\subsection{Point-Kernel Problem} \label{subsec:PointKernel} \pdfbookmark[2]{Point-Kernel Problem.}{subsec:PointKernel}
In Section~\ref{subsec:CaseStudy} we studied simple cases where a diagonal dissection is applied to polygons* (or polygons in non-general position). In this subsection, we show how to circumvent some difficulties that arise when adapting T{\'o}th's partitioning to our problem.
The dissection of $P$ may create subpolygons that are polygons*. This means that the partition of $P$ may contain star-shaped polygons whose kernels degenerate into a single point. While this is not a problem for $180^\circ$-guards, it is a serious obstacle for tower positioning. Indeed, we need at least two distinct points in the kernel of each part of the partition to trilaterate~$P$.
\begin{wrapfigure}{r}{0.42\textwidth} \centering \includegraphics[width=0.38\textwidth]{PointKernel1.pdf} \caption{$\overline{v_1 v_4}$ is a good diagonal dissection that contains two vertices of $P$: $v_2$ and $v_3$. $P_1 = P_a\cup P_b$ and $P_2 = P_c\cup P_d$. Notice that $v_2 \notin P_a$ and $v_3 \notin P_c$.} \label{fig:PointKernel1}
\end{wrapfigure}
Let $\overline{v_1 v_4}$ be a good dissection, i.e. it decomposes $P'$ into $P_1$ and $P_2$ such that $\left \lfloor \frac{n_1}{3} \right \rfloor + \left \lfloor \frac{n_2}{3} \right \rfloor \leq \left \lfloor \frac{n}{3} \right \rfloor$. Assume also that $n>5$. Assume that $\overline{v_1 v_4}$ contains at least $2$ more vertices of $P'$ in its interior. Let $v_2, v_3 \in \overline{v_1 v_4}$. Refer to Fig.~\ref{fig:PointKernel1}. Notice that $v_2$ and $v_3$ belong to subpolygons of $P'$ on the opposite sides of $L(v_1, v_4)$. Recall that the original polygon $P$ is given in general position. Thus at most two vertices of $P$ belong to the same line. If $v_2$ and $v_3$ are not both vertices of $P$ then there is no problem for tower positioning, and the dissection via $\overline{v_1 v_4}$ can be applied directly. We then follow T{\'o}th's partition and replace every $180^\circ$-guard with a pair of towers in the guard's vicinity. However, if $v_2$ and $v_3$ are vertices of $P$ then the visibility of towers can be obstructed. Consider the example of Fig.~\ref{fig:PointKernel1}. Assume that a $180^\circ$-guard is positioned at $v_2$ to observe a subpolygon of $P_c \cup P_d$ (say, pentagon $v_1 v_4 v_c v_2 v_d$ for $v_c \in P_c$ and $v_d \in P_d$). We cannot replace the $180^\circ$-guard with a pair of towers because the kernel of the pentagon degenerates into a single point $v_2$. Our attempt to position towers in the vicinity of $v_2$ but interior to $P_a$ is not successful either, because $v_3$, as a vertex of $P$, blocks visibility with respect to $P_c$. We have to find another dissection. In general, we are looking for a dissection that destroys the diagonal $\overline{v_1 v_4}$.
Recall that in this subsection $P'$ is a subpolygon of $P$ with collinear vertices $v_1$, $v_2$, $v_3$ and $v_4$. Assume that $v_2$ and $v_3$ are vertices of $P$. It follows that there are no vertices of $P$ on $L(v_1, v_4)$ other than $v_2$ and $v_3$. Assume that $\overline{v_1 v_4}$ is a good dissection of $P'$. If one of $\overline{v_1v_3}$, $\overline{v_1v_2}$, $\overline{v_3v_4}$, $\overline{v_2v_4}$ or $\overline{v_2v_3}$ is a good dissection then we cut along it. Only the cut along $\overline{v_2v_3}$ destroys the diagonal $\overline{v_1v_4}$ and eliminates the problem of having towers in the vicinity of $v_2$ (respectively $v_3$) with visibility obstructed by $v_3$ (respectively $v_2$). The cut along $\overline{v_1v_3}$, $\overline{v_1v_2}$, $\overline{v_3v_4}$ or $\overline{v_2v_4}$ only reduces $P'$. So assume that none of the diagonals $\overline{v_1v_3}$, $\overline{v_1v_2}$, $\overline{v_3v_4}$, $\overline{v_2v_4}$ or $\overline{v_2v_3}$ represent a good dissection. It follows from T{\'o}th's Proposition~\ref{TothProp1} that $n \neq 3k$. Consider the following cases:
{\color{red} \textbf{Case 1:}} $n = 3k+1$. By T{\'o}th's Proposition~\ref{TothProp2} either $n_1 = 3k_1 + 2$ and $n_2 = 3k_2 + 1$, or $n_1 = 3k_1 + 1$ and $n_2 = 3k_2 + 2$ for $k_1 +k_2 = k$, where $P_1 = P_a\cup P_b$ and $P_2 = P_c\cup P_d$. Assume w.l.o.g. that $n_1 = 3k_1 + 2$ and $n_2 = 3k_2 + 1$. We assumed that $\overline{v_2v_3}$ is not a good dissection and thus the size of $P_c \cup P_b$ is a multiple of $3$ and the size of $P_d \cup P_a$ is a multiple of $3$. Consider the following three subcases:
{\color{blue} \textbf{Case 1.1:}} $|P_d| = 3k_d$; thus $|P_c| = 3k_c+2$ or $|P_c| = 2$. Since the size of $P_c \cup P_b$ is a multiple of $3$, we have $|P_b| = 3k_b+2$ or $|P_b| = 2$. Notice that $P_b$ and $P_c$ cannot be both of size $2$, otherwise $v_4$ would be deleted during the simplification step. Both diagonals $\overline{v_3v_4}$ and $\overline{v_2v_4}$ represent a good dissection. Thus, if $|P_c| \neq 2$ then dissect along $\overline{v_2v_4}$; if $|P_b| \neq 2$ then dissect along $\overline{v_3v_4}$. The polygon $P_a \cup P_d \cup \triangle v_2 v_3 v_4$ will be simplified and it will lose $\overline{v_3 v_4}$ (which is guarded by $P_b$ and/or $P_c$).
{\color{blue} \textbf{Case 1.2:}} $|P_d| = 3k_d+1$ or $|P_d| = 3k_d+2$. Then $\overline{v_1v_2}$ is a good diagonal dissection.
{\color{blue} \textbf{Case 1.3:}} $|P_d| = 2$. Then $|P_a| = 3k_a +2$ and thus $\overline{v_1v_3}$ is a good diagonal dissection, which creates the polygons $P_a$ and $P_b \cup P_c \cup \{v_1\}$. The vertex $v_1$ will be deleted from $P_b \cup P_c \cup \{v_1\}$ during the simplification step. Notice that the line segment $\overline{v_1v_2}$ will be guarded by $P_a$.
{\color{red} \textbf{Case 2:}} $n = 3k+2$. Since $\overline{v_1 v_4}$ is a good dissection, it partitions $P'$ into $P_1 = P_a\cup P_b$ of size $n_1 = 3k_1 +2$ and $P_2 = P_c\cup P_d$ of size $n_2 = 3k_2 +2$.
{\color{blue} \textbf{Case 2.1:}} $|P_b| = 3k_b+1$; thus $|P_a|=3k_a +2$ or $|P_a| = 2$. We assumed that $\overline{v_1v_3}$ is not a good dissection and thus \begin{wrapfigure}{r}{0.42\textwidth}
\centering \includegraphics[width=0.4\textwidth]{PointKernel2.pdf}
\caption{$\overline{v_1 v_4}$ is a good diagonal dissection that contains two vertices of $P$: $v_2$ and $v_3$. $P_a = \{v_1, v_3\}$, $|P_b| = 3k_b+1$, $|P_c| = 3k_c$ and $|P_d| = 3k_d$.} \label{fig:PointKernel2}
\end{wrapfigure}
$|P_a| \neq 3k_a + 2$. It follows that $P_a$ is a line segment $\overline{v_1v_3}$. Symmetrically, if $|P_d| = 3k_d+1$ then $|P_c| = 2$. If $|P_b| = 3k_b+1$ and $|P_d| = 3k_d+1$ then we have a good dissection $\overline{v_2 v_3}$, which is a contradiction.
The case where $|P_b| = 3k_b+1$ and $|P_c| = 3k_c+1$ is not possible because in this case $P_a = \overline{v_1v_3}$ and $P_d = \overline{v_1v_2}$ and thus $v_1$ would not survive the simplification step.
Thus, if $|P_b| = 3k_b+1$ then $|P_c| = 3k_c$ and $|P_d| = 3k_d$. Refer to Fig.~\ref{fig:PointKernel2}. Let $v_2'$ be an immediate neighbour of $v_2$ such that $v_2'$ is a vertex of $P_c$ and $v_2' \neq v_4$. If $v_2'$ and $v_3$ see each other, then $\overline{v_2' v_3}$ is a diagonal of $P'$. It dissects $P'$ into two subpolygons $P_d \cup \triangle v_2v_3v_2'$ of size $3k_d+2$ and $(P_b \cup P_c) \setminus \triangle v_2v_3v_2'$ of size $3k_b+3k_c-1 = 3(k_b+k_c-1)+2$. Assume now that $v_2'$ and $v_3$ do not see each other. Let us give numerical labels to the vertices of $P_c$ as follows: $v_2$ gets label $1$, $v_2'$ gets $2$, and so on, $v_4$ will be labelled $3k_c$. If $v_3$ can see a vertex of $P_c$ with label whose value modulo $3$ equals $2$ then dissect $P'$ along the diagonal that connects $v_3$ and that vertex. If no such vertex can be seen from $v_3$, we do the following. Let $v_3'$ be an immediate neighbour of $v_3$ such that $v_3'$ is a vertex of $P_b$ and $v_3' \neq v_4$. Let $v_3''$ be the point on $\partial P_c$ where the line supporting $\overline{v_3' v_3}$ first hits $\partial P_c$ (refer to Fig.~\ref{fig:PointKernel2}). If $v_3''$ belongs to an edge with vertices whose labels modulo $3$ equal $1$ and $2$ then dissect $P'$ along $\overline{v_3 v_3''}$. This dissection creates polygons $P_d \cup \{1,2, \ldots, 3k'+1, v_3'', v_3\}$ of size $3(k_d+k')+2$ and $(\{v_3'', 3k'+2, \ldots 3k_c\} \cup P_b) \setminus \{v_3\}$ of size $3(k_c-k'+k_b-1)+2$.
\begin{wrapfigure}{r}{0.42\textwidth}
\centering \includegraphics[width=0.36\textwidth]{PointKernel3.pdf}
\caption{$v_2$ and $v_3$ are vertices of $P$. $|P_b| = 3k_b+1$, $|P_c| = 3k_c$ and $|P_d| = 3k_d$. } \label{fig:PointKernel3}
\end{wrapfigure} We are still in case $2.1$ and all its assumptions are still applicable. If the above scenario (about dissecting $P_c$ via a line that contains $v_3$) has not worked, we consider the case that we tried to avoid all along: the dissection of $P'$ along $\overline{v_1 v_4}$ such that the successive partitioning of $P_2$ created a subpolygon with a single-point kernel at $v_2$ whose edge contains $v_3$ in its interior. In other words, we assume that $P_2 = P_c \cup P_d$ has undergone some partitioning that resulted in the creation of the pentagon $v_1' v_4' v_c v_2 v_d$ for $v_c, v_4' \in P_c$, $v_d, v_1' \in P_d$ such that $v_1', v_4' \in L(v_2, v_3)$ and $v_4' \in \overline{v_3 v_4}$ (refer to Fig.~\ref{fig:PointKernel3}). It is impossible to localize an agent in the pentagon $v_1' v_4' v_c v_2 v_d$ with a pair of towers only. However, if $v_4' \in \overline{v_2 v_3}$ or $v_4' = v_3$ then a pair of towers in the vicinity of $v_2$ can oversee the pentagon, because $v_3$ is not causing an obstruction in this case. Despite that $P_a$ is a line segment, the original polygon $P$ has a non-empty interior in the vicinity of $v_2$ to the right of the ray $\overrightarrow{v_3 v_2}$. Notice that $v_1'$ may be equal to $v_1$; $v_4'$ may be equal to $v_4$. Moreover, $v_c$ and $v_4'$ may not be vertices of $P_1$ but were created during the partition. We also assume that $P_1 = P_b \cup \{v_1\}$ was not partitioned yet. Let $P_c'$ be a subpolygon of $P_c$ that inherits the partition of $P_c$ to the side of the dissection $\overline{v_c v_4'}$ that contains $v_4$ (refer Fig.~\ref{fig:PointKernel45}). Notice that $v_c$ and $v_3$ can see each other. If $\overline{v_c v_4'}$ is a diagonal dissection then instead use $\overline{v_c v_3}$. In this case $v_4'$ can be deleted and $v_3$ is added to $P_c'$. The size of $P_c'$ does not change and the number of guards required to guard $P_c'$ does not increase. 
A pair of towers in the vicinity of $v_2$ can oversee the pentagon $v_1' v_3 v_c v_2 v_d$.
If the dissection $\overline{v_c v_4'}$ is a continuation of an edge of $P_c$ (let $v$ be a vertex that is adjacent to this edge) then two cases are possible. In the first case, the edge $\overline{v_c v}$ that produced the dissection is contained in $\overline{v_c v_4'}$ (refer Fig.~\ref{fig:PointKernel4}). In this case, instead of dissecting along $\overline{v_c v_4'}$ use $\overline{v v_3}$. The size of $P_c'$ does not change and the hexagon $v_1' v_3 v v_c v_2 v_d$ can be guarded by a pair of towers in the vicinity of $v_2$ (because $v_2 v_c v v_3$ is convex). In the second case the edge $\overline{v_c v}$ that produced the dissection $\overline{v_c v_4'}$ is not contained in $\overline{v_c v_4'}$ (refer Fig.~\ref{fig:PointKernel5}). In the worst case the size of $P_c'$ is $3k_{c'}+2$. If instead of dissecting along $\overline{v_c v_4'}$ we use $\overline{v_c v_3}$ then the size of $P_c'$ increases by $1$, which results in an increased number of guards. But, if we dissect along $\overline{v_c v_3}$ then the line segment $\overline{v_1 v_3}$ can be removed from $P_1$ (because it is guarded by the guard of $v_1' v_3 v_c v_2 v_d$) and $P_b$ can be joined with the updated $P_c'$. The size of $(P_b \cup P_c' \cup\{v_c\}) \setminus\{v_4'\}$ is $3(k_b+k_{c'})+2$, so the number of guards of $P'$ does not increase as a result of adjusting the partition. However, the current partition of $P_c'$ may no longer be relevant and may require repartition as part of the polygon $(P_b \cup P_c' \cup\{v_c\}) \setminus\{v_4'\}$.
\begin{figure}
\caption{We avoid dissecting along $\overline{v_c v_4'}$.}
\label{fig:PointKernel4}
\label{fig:PointKernel5}
\label{fig:PointKernel45}
\end{figure}
{\color{blue} \textbf{Case 2.2:}} $|P_b| = 3k_b$. Then $|P_a| = 3k_a$. If $|P_c| = 3k_c$ and thus $|P_d| = 3k_d$ then $\overline{v_2 v_3}$ is a good diagonal dissection, which is a contradiction. Thus either $|P_c| = 3k_c+1$ and $|P_d| = 2$ or $|P_d| = 3k_d+1$ and $|P_c| = 2$. Those cases are symmetrical to Case 2.1.
\subsection{$n = 3k+2$ and $P'$ has no good diagonal dissection} \label{subsec:Partition} \pdfbookmark[2]{$n = 3k+2$ and $P'$ has no good diagonal dissection}{subsec:Partition}
In this subsection we assume that $n = 3k+2$ and every diagonal of $P'$ decomposes it into polygons of size $n_1 = 3k_1$ and $n_2 = 3k_2+1$, where $k_1 + k_2 = k + 1$. Let $\mathcal{T}$ be a fixed triangulation of $P'$, and let $G(\mathcal{T})$ be the dual graph of $\mathcal{T}$. Notice that $G(\mathcal{T})$ has $n-2$ nodes. (Later, during the algorithm, we may change this triangulation.)
The proofs of the following Propositions~\ref{TothProp4} and~\ref{TothProp5} are identical to those in~\cite{Toth2000121}. We prove the following Lemmas~\ref{TothLem1},~\ref{TothLem2} and~\ref{TothLem3} in this paper.
\begin{mydefP} \label{TothProp4} $G(\mathcal{T})$ has exactly $k+1$ leaves. \end{mydefP}
\begin{mydefL} \label{TothLem1} If $P'$ has $n = 3k+2$ vertices and has no good dissection then $P'$ has at most $k$ reflex angles. \end{mydefL}
\begin{mydefP} \label{TothProp5} If $P'$ has at most $k$ reflex angles, then $k$ $180^\circ$-guards can monitor $P'$. \end{mydefP}
\begin{mydefL} \label{TothLem2} If $P'$ has size $n = 3k+2$ then at least one of the following two statements is true: \begin{enumerate} \item $P'$ has a good dissection. \item For every triangle $\triangle A B C$ in $\mathcal{T}$ that corresponds to a leaf of $G(\mathcal{T})$ with $AC$ being a diagonal of $P'$, either $\angle A < \pi$ or $\angle C < \pi$ in $P'$. \end{enumerate} \end{mydefL}
\begin{mydefL} \label{TothLem3} If a convex angle of $P'$ is associated to two leaves in $G(\mathcal{T})$, then $\left \lfloor \frac{n}{3} \right \rfloor$ $180^\circ$-guards can monitor~$P'$. \end{mydefL}
\subsubsection{Adaptation of T{\'o}th's Lemma~\ref{TothLem3} to our problem} \label{subsubsec:Lemma3} \pdfbookmark[3]{Adaptation of T{\'o}th's Lemma~\ref{TothLem3} to our problem.}{subsubsec:Lemma3}
\\ \\ Let $v_1$ be a vertex at a convex angle of $P'$ associated to two leaves in $G(\mathcal{T})$ (refer to Fig.~\ref{fig:TothLemma3}). Let $P_1$ be the polygon formed by all triangles of $\mathcal{T}$ adjacent to $v_1$. Notice that $P_1$ is a fan; $v_1$ is a center of the fan; and the dual of the triangulation of $P_1$, inherited from $P'$, is a path of nodes. Recall that the center of a fan $P_f$ is a vertex of $P_f$ that can see all other vertices of $P_f$.
\begin{wrapfigure}{r}{0.6\textwidth}
\centering \includegraphics[width=0.58\textwidth]{TothLemma3.pdf} \caption{$P'$ has size $n = 20 = 3k + 2$ for $k = 6$, and does not have good diagonal dissection. $G(\mathcal{T})$ is drawn on top of $\mathcal{T}$. $P_1$ is a fan with dominant point $v_1$ and it is highlighted in cyan.} \label{fig:TothLemma3}
\end{wrapfigure}
The original proof of T{\'o}th's Lemma~\ref{TothLem3} still holds for polygons*. However, this is one of the places where a $180^\circ$-guard is explicitly positioned at $v_1$ to monitor all the triangles of $\mathcal{T}$ adjacent to $v_1$. If $kernel(P_1)$ is not a single point, then the positioning can be reused for our problem. We can put a pair of towers on the same edge of $P'$ in $kernel (P_1) \cap \partial P_1$ to locate an agent in $P_1$. However, since there can be degenerate triangles among those adjacent to $v_1$, the kernel of $P_1$ can degenerate into a single point: $kernel (P_1) = v_1$. In this case, the location of an agent in $P_1$ cannot be determined with a pair of towers only. To overcome this problem we prove the following lemma.
\begin{lemma} \label{lem:TL3} There exists a triangulation of $P'$ such that its part, inherited by $P_1$ (i.e. triangles incident to $v_1$), does not contain any degenerate triangle. \end{lemma} \begin{proof} Let us first observe that a triangle of $\mathcal{T}$, corresponding to a leaf in $G(\mathcal{T})$, cannot be degenerate due to the simplification step performed on $P'$. There are two cases to consider:
{\color{red} \textbf{Case 1:}} Suppose that the triangulation/fan of $P_1$ contains two or more degenerate triangles adjacent to each other. Let $v_1, u_1, u_2, \ldots, u_i$ for some $i > 2$ be the vertices of the degenerate triangles sorted according to their distance from $v_1$. Notice that $v_1, u_1, u_2, \ldots, u_i$ belong to the same line. Those degenerate triangles must be enclosed in $P_1$ between a pair of non-degenerate triangles, let us call them $\triangle_1$ and $\triangle_2$. Since $P'$ does not have angles of size $2\pi$, the diagonal $\overline{v_1 u_1}$ must be shared with one of $\triangle_1$ or $\triangle_2$. Assume, without loss of generality, that $u_1$ is a vertex of $\triangle_1$. Let $u_j$ be a vertex of $\triangle_2$, for some $1 < j \leq i$. It is possible to re-triangulate $P'$ such that $P_1$ will contain only one degenerate triangle $\triangle v_1 u_1 u_j$ between $\triangle_1$ and $\triangle_2$ and other triangles of $P_1$ (that are not between $\triangle_1$ and $\triangle_2$) will not be affected. The shape of $P_1$ may or may not change but its size will decrease.
{\color{red} \textbf{Case 2:}} Suppose that the triangulation/fan of $P_1$ contains one degenerate triangle $\triangle v_1 u_1 u_j$ enclosed between a pair of non-degenerate triangles $\triangle_1$ and $\triangle_2$. Assume, without loss of generality, that $\triangle_2$ shares a diagonal $\overline{v_1 u_j}$ with $\triangle v_1 u_1 u_j$. Let $w$ be the third vertex of $\triangle_2$, so $\triangle_2 = \triangle v_1 u_j w$. Notice that $u_1$ and $w$ see each other, because $u_1$ belongs to the line segment $\overline{v_1 u_j}$. We can flip the diagonal $\overline{v_1 u_j}$ into $\overline{u_1 w}$ in $\mathcal{T}$. As a result, $P_1$ will now contain $\triangle v_1 u_1 w$ instead of $\triangle v_1 u_1 u_j$ and $\triangle_2$. Notice that $\triangle v_1 u_1 w$ is non-degenerate.
We showed that we can obtain a triangulation of $P'$ in which all the triangles incident to $v_1$ are non-degenerate. Thus, $P_1$ will have no degenerate triangles and still contain vertex $v_1$ together with two leaves of $G(\mathcal{T})$ associated with $v_1$. \qed \end{proof}
\subsubsection{Proof of T{\'o}th's Lemma~\ref{TothLem2} and its adaptation to our problem} \label{subsubsec:Lemma2} \pdfbookmark[3]{Proof of T{\'o}th's Lemma~\ref{TothLem2} and its adaptation to our problem.}{subsubsec:Lemma2}
\\ \\ Section~\ref{subsubsec:Lemma2} is devoted to the proof of T{\'o}th's Lemma~\ref{TothLem2}.
T{\'o}th defines two types of elements of $G(\mathcal{T})$. A leaf of $G(\mathcal{T})$ is called a \emph{short leaf} if it is adjacent to a node of degree $3$. If a leaf of $G(\mathcal{T})$ is adjacent to a node of degree $2$ then this leaf is called a \emph{long leaf}. Since we are under the assumption that $n = 3k+2$ and $P'$ has no good diagonal dissection then the node of $G(\mathcal{T})$ adjacent to a long leaf is also adjacent to a node of degree~$3$.
\begin{wrapfigure}{r}{0.32\textwidth}
\centering \includegraphics[width=0.3\textwidth]{TothShortLeaf_1.pdf} \caption{$\triangle ABC$ corresponds to a short leaf in $G(\mathcal{T})$. $\triangle ACD$ can be degenerate.} \label{fig:TothShortLeaf_1}
\end{wrapfigure} In this subsection we keep T{\'o}th's original names and notations to simplify cross-reading.
The angle $\angle ABC$ is the angle that the ray $\overrightarrow{BA}$ makes while rotating counter-clockwise towards the ray $\overrightarrow{BC}$. For example, the angle of $P'$ at $B$ is $\angle CBA$ (refer to Fig.~\ref{fig:TothShortLeaf_1}).
Let $\triangle ABC$ correspond to a short leaf in $G(\mathcal{T})$, where $\overline{AC}$ is a diagonal of $P'$, and let $\triangle ACD$ correspond to a node in $G(\mathcal{T})$ adjacent to the leaf $\triangle ABC$. Refer to Fig.~\ref{fig:TothShortLeaf_1}. Notice that $\triangle ABC$ cannot be degenerate, otherwise the vertex $B$ would be deleted during the simplification step. However, $\triangle ACD$ can be degenerate. The diagonals $\overline{AD}$ and $\overline{CD}$ decompose $P'$ into polygons $P_a$, $ABCD$ and $P_c$, where $A \in P_a$ and $C \in P_c$. Let $n_a$ be the size of $P_a$ and $n_c$ be the size of $P_c$.
\begin{mydefC} \label{claimT1} $n_a = 3 k_a + 1$ and $n_c = 3 k_c + 1$. \end{mydefC}
\begin{mydefC} \label{claimT2} $ABCD$ is a non-convex quadrilateral, i.e. it has a reflex vertex at $A$ or $C$. \end{mydefC}
The original proof of both claims (refer to~\cite{Toth2000121}) can be reused for polygons*, so we do not present their proofs here. However, T{\'o}th's Claim~\ref{claimT3} (presented below) requires a slightly different proof to hold true for polygons*. In addition, there was a mistake in the original proof of T{\'o}th's Claim~\ref{claimT3} that we fix here. But first we would like to clarify the framework we work in and to explain the tools and assumptions we use.
Assume, without loss of generality, that the reflex vertex of $ABCD$ is at $C$ (which exists by T{\'o}th's Claim~\ref{claimT2}). Assume that $\mathcal{T}$ is the triangulation of $P'$ in which $n_a$ is \textbf{minimal}. This assumption together with the fact that $P'$ does not have a good diagonal dissection implies that there does not exist a vertex of $P_a$ in the interior of a line segment $\overline{DA}$. By T{\'o}th's Claim~\ref{claimT1}, $n_a \geq 4$ (notice that $n_a \neq 1$ because $A$, $D \in P_a$). Let $A_0$ and $D_0$ be vertices of $P_a$ adjacent to $A$ and $D$ respectively (refer to Fig.~\ref{fig:TothShortLeaf_1}). By $\overrightarrow{uv}$ we denote the ray that starts at $u$ and passes through $v$. Let $A'$ be a point such that $A' \in \partial P' \cap \overrightarrow{BA}$ and there exists a point $A'' \in \partial P'$ strictly to the \textbf{right} of $\overrightarrow{BA}$ such that $A'$ and $A''$ belong to the same edge of $P'$ and both visible to $B$ and $A$. Let $D'$ be a point such that $D' \in \partial P' \cap \overrightarrow{CD}$ and there exist a point $D'' \in \partial P'$ strictly to the \textbf{left} of $\overrightarrow{CD}$ such that $D'$ and $D''$ belong to the same edge of $P'$ and both visible to $C$ and $D$. Let $C'$ be a point defined similarly to $D'$ but with respect to the ray $\overrightarrow{BC}$. Notice that if $A=A'$ then $\angle A = \angle B A A_0 < \pi$ in $P'$ and thus the second condition of T{\'o}th's Lemma~\ref{TothLem2} holds. Therefore, assume that $A \neq A'$.
\begin{mydefC} \label{claimT3} The points $A'$ and $D'$ belong to the same edge of $P'$. \end{mydefC} \begin{proof} Let us rotate the ray $\overrightarrow{CA'}$ around $C$ in the direction of $D'$. Notice that in the original proof, T{\'o}th uses $\overrightarrow{CA}$. However, there are polygons (even in general position) for which T{\'o}th's proof does not hold. For example, when the ray $\overrightarrow{CA}$ hits $A_0$, then T{\'o}th claims that $\angle BAA_0 < \pi$. Figure~\ref{fig:TothShortLeaf_1} can serve as a counterexample to this claim, because $A_0$ is indeed the first point hit by the rotating ray $\overrightarrow{CA}$, however $\angle BAA_0 > \pi$.
The structure of the original proof can be used with respect to $\overrightarrow{CA'}$, assuming that $A'$ is visible to $C$. Thus, assume first that $C$ and $A'$ can see each other. We rotate $\overrightarrow{CA'}$ around $C$ in the direction of $D'$. Let $O$ be the first vertex of $P'$ visible from $C$ that was hit by the ray (in case there are several collinear such vertices of $P_a$, then let $O$ be the one that is closest to $C$).
If $O = D$ then $A'$ and $D'$ belong to the same edge of $P'$ (notice that it is possible that $D' = D$), so the claim holds.
If $O = A_0$ then $A=A'$, $\angle A = \angle B A A_0 < \pi$ and thus the second condition of T{\'o}th's Lemma~\ref{TothLem2} holds.
If $O \neq D$ and $O \neq A_0$, then $\overline{AO}$ and $\overline{CO}$ are diagonals of $P'$. Refer to Fig.~\ref{fig:TothShortLeaf_2}. There exists a triangulation of $P'$ that contains $\triangle A C O$ and has $\triangle A B C$ as a short leaf. Consider the quadrilateral $ABCO$. It is non-convex by T{\'o}th's Claim~\ref{claimT2}. By construction, $O$ is to the right of $\overrightarrow{BA}$, thus $\angle B A O < \pi$. It follows that the only possible reflex angle in $ABCO$ is $\angle O C B$, which is a contradiction to the minimality of $P_a$ (recall that we assumed that we consider the triangulation of $P'$ in which $n_a$ is minimal), and thus such a vertex $O$ does not exist.
\begin{figure}
\caption{$\triangle ABC$ corresponds to a short leaf in $G(\mathcal{T})$; $\triangle ACD$ can be degenerate. $\angle D_0 D C > \pi$.}
\label{fig:TothShortLeaf_2}
\end{figure}
Assume now that $A'$ is \textbf{not} visible to $C$. It follows that some part of $\partial P'$ belongs to the interior of $\triangle A A'C$. Recall that $A'$ is defined as a point such that $A' \in \partial P' \cap \overrightarrow{BA}$ and there exists a point $A'' \in \partial P'$ strictly to the \textbf{right} of $\overrightarrow{BA}$ such that $A'$ and $A''$ belong to the same edge of $P'$ and are both visible to $B$ and $A$. Thus, there is no intersection between the part of $\partial P'$ that belongs to the interior of $\triangle A A'C$ and $\overline{AA'}$. It follows that $\partial P'$ crosses $\overline{CA'}$ at least twice, and thus there must be a vertex of $P_a$ interior to $\triangle A A'C$. Among all the vertices of $P_a$ that lie in $\triangle A A'C$, let $O'$ be the vertex that is closest to the line passing through $\overline{CA}$. Notice that $O'$ is visible to $B$ (because $\triangle B A' C$ contains $\triangle A A'C$). Notice also that $\overline{AO'}$ and $\overline{CO'}$ are diagonals of $P'$. One of $\overline{AO'}$, $\overline{BO'}$ or $\overline{CO'}$ is a good diagonal dissection, which is a contradiction to the main assumption of Section~\ref{subsec:Partition} that $P'$ has no good diagonal dissection. Thus, $A'$ is visible to $C$. \qed \end{proof}
It follows from T{\'o}th's Claim~\ref{claimT3} that the quadrilateral $A'BCD'$ has no common points with $\partial P'$ in its interior, but on the boundary only.
We have derived several properties satisfied by $P'$ and now we are ready to show the existence of good (non-diagonal) dissections that consist of one or two line segments. We discuss the following three cases, which span Claims $4$ and $5$ in~\cite{Toth2000121}.
{\color{red} \textbf{Case 1:}} $\angle D_0 D C < \pi$. In this case $D = D'$. It follows from T{\'o}th's Claim~\ref{claimT3} that $A', C' \in \overline{D D_0}$. Refer to Fig.~\ref{fig:TothShortLeaf_3}.
Line segment $\overline{CC'}$ represents a good dissection that splits $P'$ into two polygons: $P_c \cup \triangle CC'D$ of size $n_c + 1 = 3 k_c + 2$ and $(P' \setminus P_c) \setminus \triangle CC'D$ of size $n_a + 1 = 3 k_a + 2$. Notice that $\overline{BC'}$ is an edge of the subpolygon of $P'$ to the left of $\overrightarrow{BC}$ and thus $C$ is not a vertex of this subpolygon. If $\triangle ACD$ is degenerate (in which case $C$ is between $A$ and $D$), then $\overline{CC'}$ is still a good dissection. However, if $D_0$ is also collinear with $D$, $C$ and $A$, then $D_0$ is visible to $C$ and $\overline{CD_0}$ represents a good diagonal dissection.
\begin{figure}
\caption{$\triangle ABC$ corresponds to a short leaf in $G(\mathcal{T})$; $\triangle ACD$ can be degenerate. $\angle D_0 D C < \pi$; $\overline{CC'}$ is a good dissection.}
\label{fig:TothShortLeaf_3}
\end{figure}
{\color{red} \textbf{Case 2:}} $\angle D_0 D C = \pi$. In this case $D_0 = D'$ and $\overline{CD}$ is a good diagonal dissection that splits $P'$ into $P_c$ of size $n_c = 3 k_c + 1$ and $P_a \cup ABCD$ of size $n_a + 1= 3 k_a + 2$. Notice that $\overline{CD_0}$ is an edge in $P_a \cup ABCD$ and thus $D$ is not a vertex in $P_a \cup ABCD$.
{\color{red} \textbf{Case 3:}} $\angle D_0 D C > \pi$. Let $D_0'$ be the point closest to $D$ where the ray $\overrightarrow{D_0 D}$ reaches $\partial P'$. If the line segments $\overline{CC'}$ and $\overline{DD_0'}$ intersect inside the quadrilateral $C A A' D'$ at $Q$ (refer to Fig.~\ref{fig:TothShortLeaf_4}), then $\overline{DQ} \cup \overline{QC}$ is a good dissection, that splits $P'$ into polygon $P_c \cup \triangle CDQ$ of size $n_c + 1 = 3 k_c + 2$ and polygon $P_a \cup DQBA$ of size $n_a + 1 = 3 k_a + 2$. However, if $C A A' D'$ degenerates into a line segment (which happens when $\triangle ACD$ is degenerate and there exists an edge $\overline{IJ}$ of $P_a$ that contains $\overline{AD}$) then $Q$ cannot be defined. Refer to Fig.~\ref{fig:TothShortLeaf_5}. \begin{figure}
\caption{$\triangle ABC$ corresponds to a short leaf in $G(\mathcal{T})$; $\angle D_0 D C > \pi$. \textbf{(a)} $\overline{DQ} \cup \overline{QC}$ is a good dissection of $P'$. \textbf{(b)} $\triangle ACD$ is degenerate; the edge $\overline{IJ}$ of $P_a$ contains $\overline{DA}$. $P_a$ is highlighted in pink. Notice that $C$ is not a vertex of $P_a$. $P_a'$ is a subpolygon of $P_a$ that contains $D_0$ and $\overline{ID}$ is its edge.}
\label{fig:TothShortLeaf_4}
\label{fig:TothShortLeaf_5}
\label{fig:TothShortLeaf_45}
\end{figure} In this case we show that $P'$ has a good diagonal dissection. Notice that $\overline{ID}$ is a diagonal of $P_a$; it splits $P_a$ into two subpolygons. Let $P_a'$ be a subpolygon of $P_a$ that contains $D_0$. Let $n_a'$ be the size of $P_a'$. We consider three cases: \begin{itemize} \item[$\blacktriangleright$] $n_a' = 3k_a'$: In this case $\overline{IA}$ is a good diagonal dissection. The size of $P_a' \cup P_c \cup \triangle ABC$ is $3k_a' + 3k_c + 1 + 3 - 2 = 3(k_a' + k_c) +2$. Notice that the ``$-2$'' in the previous formula stands for vertices $D$ and $C$ that were counted twice. \item[$\blacktriangleright$] $n_a' = 3k_a' + 1$: In this case $\overline{CJ}$ is a good diagonal dissection. The size of $P_a' \cup P_c \cup IDCJ$ is $3k_a' + 1 + 3k_c + 1 + 1 - 1 = 3(k_a' + k_c) +2$. \item[$\blacktriangleright$] $n_a' = 3k_a' + 2$: In this case $\overline{ID}$ is a good diagonal dissection. \end{itemize}
In this subsection we assumed that $P'$ has no good diagonal dissection, thus we deduce that $C A A' D'$ cannot degenerate into a line segment. Notice that $Q$ exists even when $\triangle ACD$ is degenerate.
\begin{wrapfigure}{r}{0.42\textwidth}
\centering \includegraphics[width=0.36\textwidth]{TothShortLeaf_6.pdf} \caption{$\triangle ABC$ corresponds to a short leaf in $G(\mathcal{T})$; $\angle D_0 D C > \pi$; $\triangle ACD$ can be degenerate. One of $\overline{DD_0'}$, $\overline{CC'}$ and $\overline{AA'}$ is a good dissection of $P'$.} \label{fig:TothShortLeaf_6}
\end{wrapfigure} If $\overline{CC'}$ and $\overline{DD_0'}$ do not intersect inside $C A A' D'$, then $D_0'$ belongs to the line segment $C'D'$. Refer to Fig.~\ref{fig:TothShortLeaf_6}. T{\'o}th shows in Claim $5$ in~\cite{Toth2000121} that one of the line segments $\overline{DD_0'}$, $\overline{CC'}$ and $\overline{AA'}$ is a good dissection.
So far we have shown that if $\triangle ABC$ is a short leaf in $G(\mathcal{T})$, then either $P'$ has a good dissection or the angle at vertex $A$ or $C$ in $P'$ is convex. It remains to prove that T{\'o}th's Lemma~\ref{TothLem2} is true for long leaves. Notice that if $\triangle ABC$ is a long leaf in $G(\mathcal{T})$ but there exists a triangulation of $P'$ where $\triangle ABC$ is a short leaf then T{\'o}th's Lemma~\ref{TothLem2} is true for $\triangle ABC$.
Let $\triangle ABC$ be a long leaf of $G(\mathcal{T})$ such that there does not exist a triangulation of $P'$ where $\triangle ABC$ is a short leaf. Recall that in this subsection, we assumed that $P'$ has no good diagonal dissection. Thus, the node of $G(\mathcal{T})$ adjacent to a long leaf is also adjacent to a node of degree $3$. We also concluded that $\triangle ABC$ cannot be degenerate.
\stepcounter{mydefC} \stepcounter{mydefC} \begin{mydefC} \label{claimT6} If $\triangle ABC$ is a long leaf of $G(\mathcal{T})$ for every triangulation $\mathcal{T}$ of $P'$ then the node of $G(\mathcal{T})$ adjacent to the node $\triangle ABC$ corresponds to the same triangle for every~$\mathcal{T}$. \end{mydefC}
T{\'o}th's Claim~\ref{claimT6} is true for a triangulation $\mathcal{T}$ that contains degenerate triangles and thus it is true for the $P'$ defined in this paper.
Let $\triangle ACD$ be a triangle adjacent to $\triangle ABC$ in $\mathcal{T}$. The ray $\overrightarrow{CA}$ (respectively $\overrightarrow{CD}$, $\overrightarrow{BC}$, $\overrightarrow{BA}$) reaches $\partial P'$ at $A'$ (respectively $D'$, $C'$, $B'$). Notice that $A'$ is defined differently than in the case with short leaves. Refer to Fig.~\ref{fig:TothShortLeaf_7}. By T{\'o}th's Claim~\ref{claimT6}, $\triangle ACD$ is unique. Notice that $\triangle ACD$ can be degenerate, but because it is unique, it does not contain any other vertex of $P'$. Moreover, there does not exist an edge $\overline{IJ}$ of $P'$ that contains $\triangle ACD$, otherwise $P'$ has a good diagonal dissection since $A$, $C$, $D$, $I$ and $J$ can see each other. T{\'o}th's Claim~\ref{claimT7}, that follows this discussion, is thus true for polygons whose triangulation may contain degenerate triangles.
\begin{figure}
\caption{$\triangle ABC$ corresponds to a long leaf in $G(\mathcal{T})$. $\overline{IJ}$ is an edge of $P'$; it contains $A'$, $B'$, $C'$ and $D'$. One of $\overline{DD'}$, $\overline{CC'}$ or $\overline{AB'}$ is a good dissection of $P'$. $P_1$ is highlighted in cyan.}
\label{fig:TothShortLeaf_7}
\end{figure}
\begin{mydefC} \label{claimT7} The points $A'$ and $D'$ belong to the same edge of $P'$ or the angle of $P'$ at $A$ is convex and thus the second condition of T{\'o}th's Lemma~\ref{TothLem2} holds for $\triangle ABC$. Refer to Fig.~\ref{fig:TothShortLeaf_7}. \end{mydefC}
It follows that $C'$ and $B'$ belong to the same edge as $A'$ and $D'$.
Assume that the angles of $P'$ at $A$ and $C$ are reflex, otherwise the second condition of T{\'o}th's Lemma~\ref{TothLem2} holds for $\triangle ABC$ and our proof is complete.
Consider the angle of $P'$ at $D$. It can be either convex or reflex. Notice that none of the angles of $P'$ equals $\pi$ or $0$ because of the simplification step. We discuss the following two cases, which span Claims $8$ and $9$ in~\cite{Toth2000121}, and show that in either case $P'$ has a good dissection.
{\color{red} \textbf{Case 1:}} $\angle D_0 D C > \pi$. One of the line segments $\overline{DD'}$, $\overline{CC'}$ or $\overline{AB'}$ is a good dissection of $P'$. Refer to Fig.~\ref{fig:TothShortLeaf_7}. $\overline{DD'}$ partitions $P'$ into two subpolygons. Let $P_1$ be one of them that contains $D_0$ (it is highlighted in cyan on Fig.~\ref{fig:TothShortLeaf_7}). The size of $P_1$ is $n_1 = 3k_1 + q_1$; the size of $P_1 \cup CDD'C'$ is $n_1 + 1$; the size of $P_1 \cup CDD'C' \cup ACC'B'$ is $n_1 + 2$. If $q_1 = 2$ (respectively $q_1 = 1$, $q_1 = 0$) then $\overline{DD'}$ (respectively $\overline{CC'}$, $\overline{AB'}$) is a good dissection of $P'$.
{\color{red} \textbf{Case 2:}} $\angle D_0 D C < \pi$. It follows that $D'=D$, $A' \in \overline{DD_0}$ and $A' \neq D_0$, otherwise $\overline{AD_0}$ is a good diagonal dissection. Refer to Fig.~\ref{fig:TothShortLeaf_89}. Let $A_0'$ be the point where $\overrightarrow{A_0A}$ reaches $\partial P'$. T{\'o}th's Claim~\ref{claimT7} implies that $A_0' \in \overline{CD}$ or $A_0' \in \overline{DB'}$. If $A_0' \in \overline{DB'}$ (refer to Fig.~\ref{fig:TothShortLeaf_8}) then $\overline{AA_0'}$ is a good dissection of $P'$. It creates a pentagon $ABCDA_0'$ and a polygon $P' \setminus ABCDA_0'$ whose size is $n-3 = 3(k-1)-2$ (notice that $\overline{AA_0'}$ is an edge of $P' \setminus ABCDA_0'$ and thus $A \notin P' \setminus ABCDA_0'$). Notice that if $A_0' = D$ then $A_0$ can see $D$ and thus $\overline{A_0D}$ is a good diagonal dissection~of~$P'$.
\begin{figure}
\caption{$\triangle ABC$ corresponds to a long leaf in $G(\mathcal{T})$; $\angle D_0 D C < \pi$. \textbf{(a)} Case where $A_0' \in \overline{DB'}$. $\overline{AA_0'}$ is a good dissection of $P'$. \textbf{(b)} Case where $A_0' \in \overline{CD}$.}
\label{fig:TothShortLeaf_8}
\label{fig:TothShortLeaf_9}
\label{fig:TothShortLeaf_89}
\end{figure}
Assume that $A_0' \in \overline{CD}$. Refer to Fig.~\ref{fig:TothShortLeaf_9}. Let us assign labels to the vertices of $P'$ according to their order around $\partial P'$ as follows: $v_0 = A$, $v_1 = A_0$, $v_2$, $\ldots$, $v_{n-4} = D_0$, $v_{n-3} = D$, $v_{n-2} = C$, $v_{n-1} = B$. $\overline{DA}$ is a diagonal of $P'$ and thus at least some interval of $\overline{AA_0}$ is visible to $D$. Let us rotate $\overrightarrow{DA}$ towards $A_0$. The ray hits $v_i$ for $1 < i \leq n-5$ (notice that the ray cannot hit $A_0$, otherwise $\overline{DA_0}$ is a good diagonal dissection). Observe that the angle of $P'$ at $v_i$ must be reflex. Let $X_1$ (respectively $X_2$) be a point where $\overrightarrow{v_{i+1} v_i}$ (respectively $\overrightarrow{v_{i-1}v_i}$) reaches $\partial P'$. By construction, $X_1 \in \overline{AA_0}$ or $X_1 \in \overline{CD}$. The same is true for $X_2$. If $i$ is not a multiple of $3$ then one of $\overline{v_i X_1}$ or $\overline{v_i X_2}$ is a good dissection. \begin{itemize} \item[$\blacktriangleright$] $i \equiv 1$ mod $3$: \begin{itemize}
\item $X_1 \in \overline{AA_0}$. Then the subpolygon $v_1 v_2 \ldots v_i X_1$ has size $3k'+2$, and the subpolygon $A X_1 v_{i+1} v_{i+2} \ldots v_{n-1}$ has size $3k''+2$ for some $k' +k'' = k$ (recall that $n = 3k+2$).
\item $X_1 \in \overline{CD}$. Then the polygon $C B A v_1 v_2 \ldots v_i X_1$ has size $3k'+2$, and the subpolygon $X_1 v_{i+1} v_{i+2} \ldots v_{n-3}$ has size $3k''+2$ for some $k' +k'' = k$.
\end{itemize}
\item[$\blacktriangleright$] $i \equiv 2$ mod $3$: \begin{itemize}
\item $X_2 \in \overline{AA_0}$. Then the polygon $v_1 v_2 \ldots v_{i-1} X_2$ has size $3k'+2$, and the subpolygon $A X_2 v_{i} v_{i+1} \ldots v_{n-1}$ has size $3k''+2$ for some $k' +k'' = k$.
\item $X_2 \in \overline{CD}$. Then the polygon $C B A v_1 v_2 \ldots v_{i-1} X_2$ has size $3k'+2$, and the subpolygon $X_2 v_{i} v_{i+1} \ldots v_{n-3}$ has size $3k''+2$ for some $k' +k'' = k$. \end{itemize} \end{itemize}
If $i$ is a multiple of $3$ then we repeat the above procedure and rotate the ray $\overrightarrow{Dv_i}$ towards $v_{i+1}$. Notice that if the ray hits $v_{i+1} \neq D_0$ then $\overline{D v_{i+1}}$ is a good diagonal dissection (which is a contradiction to our main assumption). Thus the ray hits $v_j$ for $i+1 < j \leq n-5$. If $j$ is not a multiple of $3$ then one of the rays $\overrightarrow{v_{j+1} v_j}$ or $\overrightarrow{v_{j-1} v_j}$ contains a good dissection. Observe that those rays reach $\partial P'$ at $\overline{CD}$, $\overline{AA_0}$ or $\overline{v_i v_{i+1}}$. Since we perform counting modulo $3$, those edges are considered to be identical in terms of vertices' indices. It means that we do not have to know where exactly $\overrightarrow{v_{j+1} v_j}$ or $\overrightarrow{v_{j-1} v_j}$ reach $\partial P'$ to decide which dissection to apply. That is, if $j \equiv 1$ mod $3$ then we use $\overrightarrow{v_{j+1} v_j}$; if $j \equiv 2$ mod $3$ then we use $\overrightarrow{v_{j-1} v_j}$.
If $j$ is a multiple of $3$ then the procedure is repeated again. Eventually, the ray spinning around $D$ must hit $D_0$. Recall that $D_0 = v_{n-4}$; $n-4 = 3k+2-4 = 3(k-1)+1$, which is not a multiple of $3$. At this point T{\'o}th comes to a contradiction and states that the angle of $P'$ at $D$ cannot be convex (refer to Claim $9$ in~\cite{Toth2000121}). However, there is no contradiction. We show that the situation is possible and discuss how to find a good dissection in this case.
Let $v_z$ be the last vertex hit by the ray spinning around $D$ before it hits $D_0$. Notice that $z$ is a multiple of $3$. Two cases are possible: \begin{enumerate} \item $v_{z+1} \neq D_0$: notice that $D_0$ can see $v_z$. The size of the subpolygon $v_z v_{z+1} \ldots v_{n-4}$ is $3k' + 2$ for some integer $k'>1$ ($k'$ is strictly bigger than $1$ because $v_{z+1} \neq D_0$). Therefore, the diagonal $\overline{v_z D_0}$ is a good diagonal dissection, meaning that this case is not possible. \item $v_{z+1} = D_0$: in this case $z = n-5$ and the angle of $P'$ at $D_0$ is convex. Refer to Fig.~\ref{fig:TothShortLeaf_1011}. \begin{itemize} \item[$\blacktriangleright$] If $v_z$ can see $A$ then dissect $P'$ along $\overline{v_z A}$. This dissection creates two subpolygons: $Av_zD_0DCB$ of size $6$ and $Av_1 v_2 \ldots v_z$ of size $n-4 = 3(k-1)+1$. Notice that the hexagon $Av_zD_0DCB$ has a non-empty kernel whose intersection with the boundary of $Av_zD_0DCB$ is $\overline{C'B'}$. One $180^\circ$-guard on $\overline{C'B'}$ can monitor $Av_zD_0DCB$. Similarly, for our problem, two distinct towers on $\overline{C'B'}$ can localise an agent in $Av_zD_0DCB$ (notice that $C' \neq B'$ and thus $\overline{C'B'}$ contains at least two distinct points). \item[$\blacktriangleright$] If $v_z$ cannot see $A$ then consider the ray $\overrightarrow{D_0v_z}$. Let $Z$ be a point where $\overrightarrow{D_0v_z}$ reaches $\partial P'$ or $\overline{AA'}$ (whichever happens first). \begin{itemize} \item If $Z \in \overline{AA'}$ (refer to Fig.~\ref{fig:TothShortLeaf_10}) then dissect $P'$ along the two line segments $\overline{AZ} \cup \overline{Zv_z}$. $P'$ falls into two subpolygons: $AZv_zD_0DCB$ of size $7$ (which is technically a hexagon since $Z$, $v_z$ and $D_0$ are collinear) and $ZAv_1 v_2 \ldots v_z$ of size $n-3 = 3(k-1)+2$. 
Similarly to the hexagon from the previous case, the heptagon $AZv_zD_0DCB$ can be guarded by one $180^\circ$-guard on $\overline{C'B'}$ and our agent can be localised by a pair of distinct towers positioned on $\overline{C'B'}$. \item If $Z \notin \overline{AA'}$ (refer to Fig.~\ref{fig:TothShortLeaf_11}) then there must be a vertex $v_x$ to the left of $\overrightarrow{AA'}$ visible to $D_0$ and to $D$. Recall that for every vertex $v_x$ visible from $D$, the index $x$ is a multiple of $3$.
Thus $\overline{v_xD_0}$ is a good diagonal dissection. It means that this case is not possible. \end{itemize} \end{itemize} \end{enumerate}
\begin{figure}
\caption{$\triangle ABC$ corresponds to a long leaf in $G(\mathcal{T})$; $\angle D_0 D C < \pi$; $A_0' \in \overline{CD}$. \textbf{(a)} The two line segments $\overline{AZ} \cup \overline{Zv_z}$ form a good dissection. \textbf{(b)} Impossible case, because $\overline{v_xD_0}$ is a good diagonal dissection.}
\label{fig:TothShortLeaf_10}
\label{fig:TothShortLeaf_11}
\label{fig:TothShortLeaf_1011}
\end{figure}
This completes the proof of T{\'o}th's Lemma~\ref{TothLem2}.
\subsection{Partition algorithm} \label{subsec:PartitionAlg} \pdfbookmark[2]{Partition algorithm}{subsec:PartitionAlg}
We are given a simple polygon $P$ in general position of size $n = 3k + q$ where $k$ is a positive integer and $q \in \{0, 1, 2\}$. If $P$ has at most $k$ reflex angles then $P$ can be partitioned simply by bisecting $k-1$ of the reflex angles. This creates $k$ star-shaped subpolygons of $P$ that can be watched by $2k$ towers. If the number of reflex angles of $P$ is bigger than $k$ then we look for a good diagonal dissection. In Section~\ref{subsec:PointKernel} we modified T{\'o}th's partition~\cite{Toth2000121} to deal with subpolygons of $P$ (polygons*) that are not in general position. The important difference to notice is that we avoided dissecting along diagonals of $P'$ that contain vertices of $P$ in their interior. If the dissection is unavoidable, we showed how to position towers and, in the worst case, repartition subpolygons of $P'$.
If $P'$ has no good diagonal dissection then its size is $n=3k+2$. In this case we look for a good dissection via the \emph{short} or \emph{long leaf} approach discussed in Section~\ref{subsubsec:Lemma2}. If no good cut is found then by T{\'o}th's Lemma~\ref{TothLem2} every leaf in $G(\mathcal{T})$ is associated to two convex angles of $P'$. It follows by T{\'o}th's Lemma~\ref{TothLem3} that $P'$ can be monitored by $\left \lfloor \frac{n}{3} \right \rfloor$ $180^\circ$-guards. Refer to Section~\ref{subsubsec:Lemma3} on how to adapt T{\'o}th's Lemma~\ref{TothLem3} for tower positioning. We refer to T{\'o}th's Lemma~\ref{TothLem1} to show the coherence of the algorithm. T{\'o}th's Lemma~\ref{TothLem1} states that if $P'$ has $n = 3k+2$ vertices and has no good diagonal or other dissection then $P'$ has at most $k$ reflex angles and thus $P'$ would be treated during the first step of the algorithm.
The obtained partition together with the locations of $180^\circ$-guards is reused for tower positioning. Every $180^\circ$-guard that guards subpolygon $P'$ is positioned on the boundary of $P'$ (either on an edge or a convex vertex of $P'$) and oriented in such a way that $P'$ completely belongs to the half-plane $H_l$ monitored by the guard. In our problem every $180^\circ$-guard is replaced by a pair of towers $t_1$ and $t_2$ on the same edge of $P'$ in $\partial P' \cap kernel(P')$ and close to the $180^\circ$-guard. The orientation of the $180^\circ$-guard is embedded into the tower coordinates via the \textbf{parity trick}. Specifically, given the partition of $P$ we can calculate a line segment $\overline{ab}$ suitable for tower positioning for each polygon $P'$ of the partition. Towers can be anywhere on $\overline{ab}$ as long as the distance between them is a rational number. Notice that the coordinates of the endpoints of $\overline{ab}$ can be irrational numbers because $a$ and $b$ can be vertices of $P$ or $P'$ or intersection points between lines that support edges of $P$ or $P'$. However, we will show that it is always possible to find the desired positions for the two towers: We place the tower $t_1$ at the point $a$. The tower $t_2$ will be placed at the point $c = a + \lambda (b-a)$ for an appropriate choice of $\lambda$. Let $s \geq 1$ be the smallest integer such that $1/3^s \leq d(a,b)$. If we take $\lambda = \frac{1}{3^{s+1} d(a,b)}$, then $d(a,c) = d(t_1,t_2) = \frac{1}{3^{s+1}}$, which is a reduced rational number whose numerator is odd. On the other hand, if we take $\lambda = \frac{2}{3^{s+1} d(a,b)}$, then $d(a,c) = d(t_1,t_2) = \frac{2}{3^{s+1}}$, which is a reduced rational number whose numerator is even.
If we want the pair of towers $t_1$ and $t_2$ to be responsible for the half-plane $L(t_1, t_2)^+$ (respectively $L(t_1, t_2)^-$) we position the towers at $\frac{2}{3^{s+1}}$ (respectively $\frac{1}{3^{s+1}}$) distance from each other.
Notice that $L(t_1, t_2)$ is not always parallel to the line that supports $H_l$. If the $180^\circ$-guard is positioned at a convex vertex $v$ of $P'$ then only one tower can be positioned at $v$. Another tower is placed on the edge adjacent to $v$ in $\partial P' \cap kernel(P')$. If $kernel(P')$ is a single point then we position our towers outside of $P'$ and close to $kernel(P')$, such that $L(t_1, t_2)$ is parallel to the line that supports $H_l$. If $L(t_1, t_2)^+$ (respectively $L(t_1, t_2)^-$) contains $H_l$ then we position the towers at a distance, which is a rational number whose numerator is even (respectively odd).
Algorithm~\ref{alg_Partition} partitions $P$ and positions at most $\left \lfloor \frac{2n}{3} \right \rfloor$ towers that can localize an agent anywhere in $P$. The localization algorithm (Algorithm~\ref{alg_Locate}) can be found in Section~\ref{sec:localization}.
\begin{algorithm} \caption{Polygon Partition; Tower Positioning}\label{alg_Partition} \KwIn{$P'$ of size $n = 3k +q$, for positive integer $k$ and $q \in \{0, 1, 2\}$} \KwOut{Set of at most $\left \lfloor \frac{2n}{3} \right \rfloor$ towers in $P'$} \BlankLine If $P'$ has at most $k$ reflex angles then position $2k$ towers by bisecting reflex angles. Halt\; Simplify $P'$\; \eIf{there exists a good diagonal dissection that contains at most one vertex of $P$ in its interior}{apply it and run this algorithm on $P_1$ and $P_2$} (\tcp*[f]{$n = 3k +2$}) { \eIf{there exist a good dissection via a continuation of an edge}{apply it; run this algorithm on $P_1$ and $P_2$}{ \eIf{there exists a good diagonal dissection} {this dissection contains two vertices of $P$ in its interior\; apply it and run this algorithm on $P_2$\; \eIf{pentagon with a pair of vertices of $P$ in the interior of the same edge is created}{Repartition $P_2$ as described in Section~\ref{subsec:PointKernel}}{run this algorithm on $P_1$} } (\tcp*[f]{$n = 3k +2$ and $P'$ has no good diagonal dissection}) { {\If{$P'$ has a good dissection via \emph{short} or \emph{long leaf} approach (refer to~\cite{Toth2000121} and Section~\ref{subsubsec:Lemma2})}{use it; repeat algorithm on $P_1$ and $P_2$}
}
} } } \end{algorithm}
The running time of Algorithm~\ref{alg_Partition} is $O(n^3)$ because of the cases where repartitioning of already partitioned subpolygons is required.
In Section~\ref{sec:partition} we showed how to use the polygon partition method introduced by T{\'o}th~\cite{Toth2000121} for a wider range of polygons by lifting his assumption that the partition method creates subpolygons whose vertices are in general position. We reproved T{\'o}th's results and showed how to use his partition method for the localization problem. We showed how to compute a set $T$ of at most $\lfloor 2n/3\rfloor$ towers that can localize an agent anywhere in $P$. The results of Section~\ref{sec:partition} are summarized in the following theorem.
\begin{theorem} \label{thm:main_result} Let $P$ be a simple polygon in general position having a total of $n$ vertices. Algorithm~\ref{alg_Partition} computes a set $T$ of broadcast towers of cardinality at most $\lfloor 2n/3\rfloor$, such that an agent at any point interior to $P$ can localize itself. \end{theorem}
\subsection{Counterexample to T{\'o}th's conjecture} \label{sec:counterexample} \pdfbookmark[2]{Counterexample to T{\'o}th's conjecture}{sec:counterexample}
We refute the conjecture given by T{\'o}th in~\cite{Toth2000121}.
\textbf{Conjecture:} Any simple polygon of $n$ sides can be guarded by $\lfloor n/3\rfloor$ $180^\circ$-guards that are located exclusively on the boundary of the polygon.
Figure~\ref{fig:counterexample} shows a simple polygon $P$ with $n=8$ that can be guarded by $2$ general $180^\circ$-guards but requires at least $3$ $180^\circ$-guards that must reside on the boundary of $P$. Observe that $P$ is not a star-shaped polygon. Figure~\ref{fig:counterexample11} shows a possible partition of $P$ into two star-shaped polygons: $v_1 v_2 v_3 v_4 v_5$ and $v_1 v_5 v_6 v_7 v_8$. \begin{figure}
\caption{Counterexample on $n=8$ vertices. Guards are highlighted in red. \textbf{(b)} $P$ can be guarded by $2$ general $180^\circ$-guards. \textbf{(c)} $P$ requires at least $3$ $180^\circ$-guards that must reside on the boundary of $P$.}
\label{fig:counterexample1}
\label{fig:counterexample11}
\label{fig:counterexample12}
\label{fig:counterexample}
\end{figure} The polygon $v_1 v_2 v_3 v_4 v_5$ can be guarded by the $180^\circ$-guard $g_2$ located on $\overline{v_1 v_5}$ and oriented upwards (i.e. $g_2$ observes $L(v_1, v_5)^+$). The second $180^\circ$-guard $g_1$ is located on $\overline{v_1 v_8}$ in $kernel(v_1 v_5 v_6 v_7 v_8)$. It is oriented to the right of $L(v_1, v_8)$ (i.e. $g_1$ observes $L(v_1, v_8)^-$) and thus guards $v_1 v_5 v_6 v_7 v_8$. Consider Figure~\ref{fig:counterexample12}. The visibility region, from where the complete interior of $v_1 v_2 v_3 v_4 v_5$ can be seen, is highlighted in magenta. We want to assign a single $180^\circ$-guard that can see both vertices $v_2$ and $v_4$ and be located on $\partial P$. Notice that the intersection of this visibility region with $\partial P$ contains a single point $v_3$. However, the angle of $P$ at $v_3$ is reflex and the guards have a restricted $180^\circ$ field of vision. Thus it is impossible to guard $v_1 v_2 v_3 v_4 v_5$ with a single $180^\circ$-guard located on $\partial P$. Notice that the visibility region of the vertex $v_7$ does not intersect with the visibility region of $v_2$ and the visibility region of $v_4$. Thus it requires an additional guard. It follows that $P$ requires at least $3$ $180^\circ$-guards located on $\partial P$. Notice that $\lfloor n/3\rfloor = \lfloor 8/3\rfloor = 2$. This is a contradiction to the above conjecture.
In general, consider the polygon shown in Fig.~\ref{fig:counterexample2}. It has $n = 5s + 2$ vertices, where $s$ is the number of \emph{double-spikes}. Each spike requires its own guard on the boundary of $P$ (two guards per double-spike), resulting in $2s$ boundary guards in total. This number is strictly bigger than $\lfloor n/3\rfloor = \lfloor (5s + 2)/3\rfloor$ for $s \geq 3$.
\begin{figure}
\caption{Counterexample to T{\'o}th's conjecture. The polygon $P$ is in general position.}
\label{fig:counterexample2}
\end{figure}
\section{Localization Algorithm} \label{sec:localization} \pdfbookmark[1]{Localization Algorithm}{sec:localization}
In Section~\ref{sec:partition} we showed how to position at most $\left \lfloor \frac{2n}{3} \right \rfloor$ towers in a given polygon. We used a modification of T{\'o}th's partition method that dissects a polygon into at most $\lfloor n/3\rfloor$ star-shaped polygons each of which can be monitored by a pair of towers. In this section we show how we can localize an agent $p$ in the polygon. Our localization algorithm receives as input only the coordinates of the towers that can see $p$ together with their distances to $p$. In this sense, our algorithm uses the classical trilateration input. In addition, our algorithm knows that the parity trick was used to position the towers. Based on this information alone, and without any additional information about $P$, the agent can be localized. When only a pair of towers $t_1$ and $t_2$ can see the point $p \in P$ then the coordinates of the towers together with the distances $d(t_1,p)$ and $d(t_2,p)$ provide sufficient information to narrow the possible locations of $p$ down to two. Those two locations are reflections of each other over the line through $t_1$ and $t_2$. In this situation our localization algorithm uses the parity trick. It calculates the distance between the two towers and judging by the parity of the numerator of this rational number decides which of the two possible locations is the correct one. Refer to Algorithm~\ref{alg_Locate}.
\begin{algorithm}[h] \caption{Compute the coordinates of point $p$.}\label{alg_Locate} \KwIn{\begin{list}{}{} \item $t_1$, \ldots, $t_{\ell}$ -- coordinates of the towers that see $p$. \item $d_1$, \ldots, $d_{\ell}$ -- distances between the corresponding towers and $p$. \end{list}} \KwOut{coordinates of $p$.} \BlankLine \eIf{$\ell \geq 3$}{
$p = C(t_1, d_1) \cap C(t_2, d_2) \cap C(t_3, d_3)$\; }(\tcp*[f]{$\ell = 2$}) {
\eIf{the numerator of $d(t_1,t_2)$ is even}{
$p = C(t_1, d_1) \cap C(t_2, d_2) \cap L(t_1, t_2)^+$\;
}
{
$p = C(t_1, d_1) \cap C(t_2, d_2) \cap L(t_1, t_2)^-$\;
} } Return $p$\; \end{algorithm}
\section{Concluding Remarks} \label{sec:conclusion} \pdfbookmark[1]{Concluding Remarks}{sec:conclusion}
We presented a tower-positioning algorithm that computes a set of size at most $\lfloor 2n/3\rfloor$ towers, which improves the previous upper bound of $\lfloor 8n/9\rfloor$~\cite{DBLP:conf/cccg/DippelS15}. We strengthened the work~\cite{Toth2000121} by lifting the assumption that the polygon partition produces polygons whose vertices are in general position. We reproved T{\'o}th's result. We found and fixed mistakes in claims $2$ and $7$ in~\cite{Toth2000121}.
We believe it is possible to avoid the repartition step (described in Section~\ref{subsec:PointKernel}) and as a consequence bring the running time of Algorithm~\ref{alg_Partition} to $O(n^2)$ instead of $O(n^3)$.
As a topic for future research we would like to show that determining an optimal number of towers for polygon trilateration is NP-hard.
\end{document} |
\begin{document}
\title{Complete polynomial vector fields on ${\mathbb C}^2$, {\sc Part I}}
\author{Julio C. Rebelo}
\date{}
\maketitle \thispagestyle{empty} \def-30pt{0 pt} \def0 pt{0 pt} \def0 pt{0 pt} \defPublished in modified form:{Published in modified form:} \def\SBIMSMark#1#2#3{
\font\SBF=cmss10 at 10 true pt
\font\SBI=cmssi10 at 10 true pt
\setbox0=\hbox{\SBF \hbox to 0 pt{\relax}
Stony Brook IMS Preprint \##1}
\setbox2=\hbox to \wd0{\hfil \SBI #2}
\setbox4=\hbox to \wd0{\hfil \SBI #3}
\setbox6=\hbox to \wd0{\hss
\vbox{\hsize=\wd0 \parskip=0pt \baselineskip=10 true pt
\copy0 \break
\copy2 \break
\copy4 \break}}
\dimen0=\ht6 \advance\dimen0 by \vsize \advance\dimen0 by 8 true pt
\advance\dimen0 by -\pagetotal
\advance\dimen0 by -30pt
\dimen2=\hsize \advance\dimen2 by .25 true in
\advance\dimen2 by 0 pt
\openin2=publishd.tex
\ifeof2\setbox0=\hbox to 0pt{}
\else
\setbox0=\hbox to 3.1 true in{
\vbox to \ht6{\hsize=3 true in \parskip=0pt \noindent
{\SBI Published in modified form:}\hfil\break
\input publishd.tex
}}
\fi
\closein2
\ht0=0pt \dp0=0pt
\ht6=0pt \dp6=0pt
\setbox8=\vbox to \dimen0{
\hbox to \dimen2{\copy0 \hss \copy6}}
\ht8=0pt \dp8=0pt \wd8=0pt
\copy8
\message{*** Stony Brook IMS Preprint #1, #2. #3 ***} }
\def-30pt{-30pt}
\SBIMSMark{2002/03}{October 2002}{}
\begin{abstract} In this work, under a mild assumption, we give the classification of the complete polynomial vector fields in two variables up to algebraic automorphisms of ${\mathbb C}^2$. The general problem is also reduced to the study of the combinatorics of certain resolutions of singularities. Whereas we deal with ${\mathbb C}$-complete vector fields, our results also apply to ${\mathbb R}$-complete ones thanks to a theorem of Forstneric \cite{forst}. \end{abstract}
\noindent \hspace{0.9cm} {\small Key-words: line at infinity - vector fields - singular foliations}
\noindent \hspace{0.9cm} {\small AMS-Classification 34A20, 32M25, 32J15}
\section{Introduction}
\hspace{0.4cm} Recall that a {\it holomorphic flow} on ${\mathbb C}^2$ is a holomorphic mapping $ \Phi \, : \; {\mathbb C} \times {\mathbb C}^2 \longrightarrow {\mathbb C}^2 $ satisfying the two conditions below:
\noindent $\bullet$ $\Phi (0, p) = p$ for every $p \in {\mathbb C}^2$;
\noindent $\bullet$ $\Phi (T_1 + T_2 ,p ) = \Phi (T_1 , \Phi (T_2, p))$.
A holomorphic flow $\Phi$ on ${\mathbb C}^2$ induces a {\it holomorphic vector field} $X$ on ${\mathbb C}^2$ by the equation $$
X (p) = \left. \frac{d \Phi (T,p)}{dT} \right|_{T=0} \, . $$ Conversely a holomorphic vector field $X$ on ${\mathbb C}^2$ is said to be {\it complete} if it is associated to a holomorphic flow $\Phi$. Since every polynomial vector field of degree~$1$ is complete, we assume that $X$ has degree~$2$ or greater. A polynomial vector field $X$ can be considered as a meromorphic vector field on ${\mathbb C} {\mathbb P} (2)$ therefore inducing a singular holomorphic foliation ${\mathcal F}_X$ on ${\mathbb C} {\mathbb P} (2)$. The singularities of ${\mathcal F}_X$ lying in the ``line at infinity'' $\Delta$ will be denoted by $p_1 ,\ldots ,p_k$. A singularity $p_i$ as above is called {\it dicritical} if there are infinitely many analytic curves invariant by ${\mathcal F}_X$ and passing through $p_i$. The first result of this paper is the following:
\noindent {\bf Theorem A} {\sl Let $X$ be a complete polynomial vector field on ${\mathbb C}^2$ with degree~$2$ or greater and let ${\mathcal F}_X$ be the singular foliation induced by $X$ on ${\mathbb C} {\mathbb P} (2)$. Assume that ${\mathcal F}_X$ has a dicritical singularity in $\Delta$. Then $X$ is conjugate by a polynomial automorphism to one of the following vector fields: \begin{enumerate}
\item $P(y) x^{\epsilon} \partial /\partial x$, where $P(y)$ is a polynomial in $y$ and $\epsilon=0,\, 1$.
\item $x^ny^m(mx {\partial /\partial x} - ny {\partial /\partial y})$, where ${\rm g.c.d} (m,n) =1$ and $m,n \in {\mathbb N}$;
\end{enumerate} }
In view of Theorem~A, we just need to consider vector fields all of whose singularities belonging to $\Delta$ are not dicritical. Again let $X$ be such a vector field and let ${\mathcal F}_X$ be its associated foliation. Consider a singularity $p_i$ of ${\mathcal F}_X$ in the line at infinity $\Delta$ and a vector field $\widetilde{X}$ obtained through a finite sequence of blowing-ups of $X$ beginning at $p_i$. Denote by ${\mathcal E}$ the corresponding exceptional divisor and by $D_i$, $i=1, \ldots ,l$, its irreducible components which are all rational curves. We say that $X$ has adapted poles at $p_i$ if, for every sequence of blow-ups as above and every irreducible component $D_i$ of the corresponding exceptional divisor ${\mathcal E}$, either $X$ vanishes identically on $D_i$ or $D_i$ consists of poles of $\widetilde{X}$ (in other words $\widetilde{X}$ is not regular on $D_i$). Clearly the great majority of polynomial vector fields have adapted poles at their singularities at infinity. We then have:
\noindent {\bf Theorem B} {\sl Let $X$ be a complete polynomial vector field on ${\mathbb C}^2$ and denote by $p_i$, $i=1, \ldots ,k$ the singularities of the associated foliation ${\mathcal F}_X$ belonging to the line at infinity $\Delta$. Suppose that $X$ has adapted poles at each $p_i$. Then ${\mathcal F}_X$ possesses a dicritical singularity in the line at infinity $\Delta$.}
There is a good amount of literature devoted to complete vector fields on ${\mathbb C}^2$, in particular our results are complementary to the recent results obtained by Cerveau and Scardua in \cite{cesc}. Note however that the points of view adopted in both papers are almost disjoint. For more information on complete polynomial vector fields the reader can consult the references at the end as well as references in \cite{cesc}.
After Theorems~A and~B, in order to classify all complete polynomial vector fields on ${\mathbb C}^2$ we just have to consider vector fields which do not have adapted poles at one of the singularities $p_1, \ldots , p_k$. In particular we can always assume that none of these singularities is dicritical.
Let us close this Introduction by giving a brief description of the structure of the paper. First we observe that the method developed here may be pushed forward to deal with vector fields which do not have adapted poles. Indeed the assumption that $X$ has adapted poles in $\Delta$ is used only in Section~6. More precisely, from Section~2 to Section~5, the classification of complete polynomial vector fields is reduced to a problem of understanding the possible configurations of rational curves arising from blow-ups of the singularities of ${\mathcal F}_X$ in the line at infinity $\Delta$. The role of our main assumption is to make the ``combinatorics'' of these configurations simpler so as to allow for the complete description given in Section~6. It is reasonable to expect that a more detailed study of these configurations will lead to the general classification of complete polynomial vector fields.
Another feature of our method is its local nature. Indeed most of our results are local and therefore have potential to be applied in other situations (especially to other Stein surfaces). We mention in particular the results of Sections~3 and~5 (cf. Theorem~(\ref{selano}), Proposition~(\ref{prop4.2})). Also the local vector fields $Z_{1,11}, \, Z_{0,12}, \, Z_{1,00}$ introduced in Proposition~(\ref{prop4.2}) might have additional interest.
This paper is also a sequel to \cite{re3} where it was observed, in particular, that the problem of understanding complete polynomial vector fields on ${\mathbb C}^2$ can be unified with classical problems in Complex Geometry through the notion of {\it meromorphic semi-complete vector fields}. The method employed in the proof of our main result relies heavily on this connection. Indeed an important part of the proof of the preceding theorems is a discussion of semi-complete singularities of meromorphic vector fields. The study of these singularities was initiated in \cite{re3} but the present discussion is based on a different setting.
\noindent {\bf Acknowledgements}: I am grateful to D. Cerveau and B. Scardua who raised my interest in this problem by sending me their preprint \cite{cesc}.
\section{Basic notions and results}
\hspace{0.4cm} The local orbits of a polynomial vector field $X$ induce a singular holomorphic foliation ${\mathcal F}_X$ on ${\mathbb C}^2$. Besides, considering ${\mathbb C} {\mathbb P} (2)$ as the natural compactification of ${\mathbb C}^2$ obtained by adding the line at infinity $\Delta$, the foliation ${\mathcal F}_X$ extends to a holomorphic foliation, still denoted by ${\mathcal F}_X$, on the whole of ${\mathbb C} {\mathbb P} (2)$. This extension may or may not leave the line at infinity $\Delta$ invariant. On the other hand, the vector field $X$ possesses a {\it meromorphic} extension to ${\mathbb C} {\mathbb P} (2)$, also denoted by $X$, whose {\it pole divisor} coincides with $\Delta$. Note that the meromorphic extension of $X$ to ${\mathbb C} {\mathbb P} (2)$ happens to be holomorphic if and only if the degree of $X$ is $1$ or if $X$ has degree $2$ and the line at infinity $\Delta$ is not invariant by ${\mathcal F}_X$ (for further details cf. below). Let us make these notions more precise.
Recall that a {\it meromorphic}\, vector field $Y$ on a neighborhood $U$ of the origin $(0, \ldots , 0) \in {\mathbb C}^n$ is by definition a vector field of the form $$ Y = F_1 \frac{\partial}{\partial z_1} + \cdots + F_n \frac{\partial}{\partial z_n} \, , $$ where the $F_i$'s are meromorphic functions on $U$ (i.e. $F_i = f_i /g_i$ with $f_i ,g_i$ holomorphic on $U$). Note that $Y$ may not be defined on the whole $U$ even though we consider $\infty$ as a value since $F_i$ may have indeterminacy points. We denote by $D_Y$ the union of the sets $\{ g_i =0 \}$. Of course $D_Y$ is a divisor consisting of poles and indeterminacy points of $Y$ which is called the {\it pole divisor} of $Y$.
\begin{defnc} The meromorphic vector field $Y$ is said to be semi-complete on $U$ if and only if there exists a meromorphic map $\Phi_{sg} : \Omega \subseteq {\mathbb C} \times U \rightarrow U$, where $\Omega$ is an open set of ${\mathbb C} \times U$, satisfying the conditions below. \begin{enumerate}
\item $$
\left. \frac{d \Phi_{sg} (T, x)}{dT} \right|_{T=0} = Y(x) \; \, \mbox{for all $x \in U \setminus D_Y$;} $$
\item $\Phi_{sg} (T_1 + T_2, x) = \Phi_{sg} (T_1, \Phi_{sg} (T_2 ,x))$ provided that both sides are defined;
\item If $(T_i ,x)$ is a sequence of points in $\Omega$ converging to a point $(\hat{T} ,x)$ in the boundary of $\Omega$, then $\Phi_{sg} (T_i ,x)$ converges to the boundary of $U \setminus D_Y$ in the sense that the sequence leaves every compact subset of $U \setminus D_Y$. \end{enumerate} \end{defnc}
The map ${\Phi_{sg}}$ is called the meromorphic semi-global flow associated to $Y$ (or induced by $Y$).
Assume we are given a meromorphic vector field $Y$ defined on a neighborhood of $(0,0) \in {\mathbb C}^2$. It is easy to see that $Y$ has the form $Y = f Z/g$ where $f,g$ are holomorphic functions and $Z$ is a holomorphic vector field having at most an isolated singularity at the origin. Naturally we suppose that $f,g$ do not have a (non-trivial) common factor, so that $f,g$ and $Z$ are unique (up to a trivial, i.e. inversible, factor). Next, let ${\mathcal F}$ denote the local singular foliation defined by the orbits of $Z$. We call ${\mathcal F}$ the foliation associated to $Y$ and note that either ${\mathcal F}$ is regular at the origin or the origin is an isolated singularity of ${\mathcal F}$. An analytic curve ${\mathcal S}$ passing through the origin and invariant by ${\mathcal F}$ is said to be a {\it separatrix} of ${\mathcal F}$ (or of $Y, Z$).
The rest of this section is devoted to establishing some preliminary results concerning both theorems in the Introduction. Particular attention will be paid to meromorphic semi-complete vector fields which appear when we restrict $X$ to a neighborhood of the line at infinity $\Delta$. As it will be seen, a large amount of information on $X$ arises from a detailed study of this restriction. To begin with, let us recall the notion of {\it time-form} $dT$ of a meromorphic vector field and the basic lemma about integrals of $dT$ over curves. For other general facts about meromorphic semi-complete vector fields the reader is referred to Section~2 of \cite{re3}.
Let $Y$ be a meromorphic vector field defined on an open set $U$ and let ${\mathcal F}$ denote its associated foliation. The regular leaves of ${\mathcal F}$ (after excluding possible punctures corresponding to {\it zeros} or poles of $Y$) are naturally equipped with a {\it foliated} holomorphic $1$-form $dT$ defined by imposing $dT . Y = 1$. As a piece of terminology, whenever the $1$-form $dT$ is involved, the expression ``regular leaf of ${\mathcal F}$'' should be understood as a regular leaf $L$ (in the sense of the foliation ${\mathcal F}$) from which the intersection with the set of {\it zeros} or poles of $Y$ was deleted. Hence the restriction of $dT$ to a regular leaf $L$ is, by this convention, always holomorphic. Note also that $dT$ is ``foliated'' in the sense that it is defined only on the tangent spaces of the leaves. We call $dT$ the {\it time-form} associated to (or induced by) $Y$. Lemma~(\ref{timeform}) below is the most fundamental result about semi-complete vector fields (cf. \cite{re1}, \cite{re3}).
\begin{lema} \label{timeform} Let $Y$, $U$, ${\mathcal F}$ and $dT$ be as above. Consider a regular leaf $L$ of ${\mathcal F}$ and an embedded (open) curve $c: [0,1] \rightarrow L$. If $Y$ is semi-complete on $U$ then the integral of $dT$ over $c$ does not vanish.
\fbox{} \end{lema}
Let us now go back to a complete polynomial vector field $X$ on ${\mathbb C}^2$ whose degree is $d \in {\mathbb N}$. Set \begin{equation} X = X_0 + X_1 + \cdots + X_d \label{equa1} \end{equation} where $X_i$, $i = 0, \ldots, d$, stands for the homogeneous component of degree $i$ of $X$. With this notation the vector fields whose associated foliations ${\mathcal F}_X$ do not leave the line at infinity $\Delta$ invariant admit an elementary characterization namely: ${\mathcal F}_X$ does not leave $\Delta$ invariant if and only if $X_d$ has the form $F(x,y) (x {\partial /\partial x} \, + \, y {\partial /\partial y})$, where $F$ is a homogeneous polynomial of degree $d-1$. Furthermore, viewing $X$ as a meromorphic vector field on ${\mathbb C} {\mathbb P} (2)$, a direct inspection shows that the order of the pole divisor $\Delta$ is $d-1$ provided that $\Delta$ is invariant under ${\mathcal F}_X$. If $\Delta$ is not invariant under ${\mathcal F}_X$ then this order is $d-2$. On the other hand, given a point $p \in \Delta$ and a neighborhood $U \subset {\mathbb C} {\mathbb P} (2)$ of $p$, it is clear that $X$ defines a meromorphic semi-complete vector field on $U$.
Our first lemma shows that we can suppose that the line at infinity is invariant by the associated foliation ${\mathcal F}_X$. Whereas the proof is elementary, we give a detailed account since some basic ideas will often be used later on.
\begin{lema} \label{lema2.1} Consider a complete polynomial vector field $X$ on ${\mathbb C}^2$ and denote by ${\mathcal F}_X$ the foliation induced by $X$ on ${\mathbb C} {\mathbb P} (2)$. Assume that the line at infinity $\Delta$ is not invariant under ${\mathcal F}_X$. Then the degree of $X$ is at most $1$. \end{lema}
\noindent {\it Proof}\,: First we set $X = F Z$ where $F$ is a polynomial of degree $0 \leq n \leq d$ and $Z$ is a polynomial vector field of degree $d-n$ with isolated {\it zeros}. In other words, we have $Z = P {\partial /\partial x} + Q {\partial /\partial y}$ where $P, Q$ are polynomials without non-trivial common factors.
First we suppose for a contradiction that $d$ is strictly greater than $2$. In view of the preceding discussion, the line at infinity $\Delta$ is the polar divisor of $X$ and has order $d-2 \geq 1$. Let ${\mathcal C} \subset {\mathbb C} {\mathbb P} (2)$ be the algebraic curve induced in affine coordinates by $F=0$. Finally consider a ``generic'' point $p \in \Delta$ and a neighborhood $U$ of $p$ such that $U \cap {\mathcal C} = \emptyset$.
Let ${\mathcal F}_X$ be the singular foliation induced by $X$ on ${\mathbb C} {\mathbb P} (2)$ and notice that $p$ is a regular point of ${\mathcal F}_X$. Besides the leaf $L$ containing $p$ is transverse at $p$ to $\Delta$. Thus we can introduce coordinates $(u,v)$ on $U$, identifying $p$ with $(0,0) \in {\mathbb C}^2$, in which $X$ becomes $$ X(u,v) = u^{2-d}. f. \frac{\partial}{\partial u} $$ where $f$ is a holomorphic function such that $f(0,0) \neq 0$ and $\{ u = 0 \} \subset \Delta$ (here we use the fact that $U \cap {\mathcal C} = \emptyset$). The axis $\{ v=0 \}$ is obviously invariant under ${\mathcal F}$ and the time-form $dT_{\{v = 0\}}$ induced on $\{ v =0 \}$ by $X$ is given by $dT_{\{v = 0\}} = u^{d-2} du / f(u,0)$. Since $d \geq 3$ and $f(0,0) \neq 0$, it easily follows the existence of an embedded curve $c:[0,1] \rightarrow \{ v=0 \} \setminus \{ (0,0) \}$ on which the integral of $dT_{\{v = 0 \}}$ vanishes (cf. Remark~(\ref{obsidiota})). The resulting contradiction shows that $d \leq 2$.
It only remains to check the case $d=2$. Modulo a linear change of coordinates, we have $X= X_0 + X_1 + x( x {\partial /\partial x} \, + \, y {\partial /\partial y})$. The above calculation shows that the natural extension of $X$ to ${\mathbb C} {\mathbb P} (2)$ is, in fact, holomorphic. Therefore $X$ is complete on ${\mathbb C} {\mathbb P} (2)$ since ${\mathbb C} {\mathbb P} (2)$ is compact. Besides a generic point $p$ of $\Delta$ is regular for $X$ and has its (local) orbit transverse to $\Delta$. It follows that points in the orbit of $p$ reach $\Delta$ in finite time. Thus the restriction of $X$ to the affine ${\mathbb C}^2$ cannot be complete as its flow goes off to infinity in finite time.
\fbox{}
In view of Lemma~(\ref{lema2.1}) all complete polynomial vector fields considered from now on will be such that the associated foliation ${\mathcal F}_X$ leaves the line at infinity $\Delta$ invariant. In particular, in the sequel, the extension of $X$ to ${\mathbb C} {\mathbb P} (2)$ is strictly meromorphic. Furthermore the pole divisor is constituted by the line at infinity $\Delta$ and has order $d-1$ (where $d$ is the degree of $X$).
\begin{lema} \label{lema2.2} Let $X$ be as above and let $X_d$ be its top-degree homogeneous component (as in (\ref{equa1})). Then $X_d$ is semi-complete on the entire ${\mathbb C}^2$. \end{lema}
\noindent {\it Proof}\,: For each integer $k \in {\mathbb N}$, we consider the homothety $\Lambda_k (x,y) = (k x , k y)$ of ${\mathbb C}^2$. The vector fields defined by $\Lambda_k^{\ast} X$ are obviously complete, and therefore semi-complete, on ${\mathbb C}^2$. Next we set $X^k = k^{1-d} \Lambda_k^{\ast} X$. Since $X^k$ and $\Lambda_k^{\ast} X$ differ by a multiplicative constant, it is clear that $X^k$ is complete on ${\mathbb C}^2$. Finally, when $k$ goes to infinity, $X^k$ converges uniformly on ${\mathbb C}^2$ towards $X_d$. Since the space of semi-complete vector fields is closed under uniform convergence (cf. \cite{ghre}), it results that $X_d$ is semi-complete on ${\mathbb C}^2$. The lemma is proved.
\fbox{}
The lemma above has an immediate application. In fact, a homogeneous polynomial vector field has a $1$-parameter group of symmetries consisting of homotheties. Hence these vector fields can essentially be integrated. In other words, it is possible to describe all homogeneous polynomial vector fields which are semi-complete on ${\mathbb C}^2$. This classification was carried out in \cite{ghre} and, combined with Lemma~(\ref{lema2.2}), yields:
\begin{coro} \label{lema2.3} Let $X$ and $X_d$ be as in the statement of Lemma~(\ref{lema2.2}). Then, up to a linear change of coordinates of ${\mathbb C}^2$, $X_d$ has one of the following normal forms: \begin{enumerate}
\item $X_d = y^a f(x,y) {\partial /\partial x}$ where $f$ has degree strictly less than~$3$ and $a \in {\mathbb N}$.
\item $X_d = x(x {\partial /\partial x} + ny {\partial /\partial y})$, $n \in {\mathbb N}$, $n\neq 1$.
\item $X_d = x^i y^j (mx {\partial /\partial x} - n y {\partial /\partial y})$ where $m,n \in {\mathbb N}^{\ast}$ and $ni-mj = -1, \, 0, \, 1$. We also may have $X_d = (xy)^a (x {\partial /\partial x} - y {\partial /\partial y})$, $a \in {\mathbb N}$.
\item $X_d = x^2 {\partial /\partial x} - y(nx-(n+1)y) {\partial /\partial y}$, $n \in {\mathbb N}$.
\item $X_d = [xy(x-y)]^a [x(x-2y) {\partial /\partial x} + y(y-2x) {\partial /\partial y}]$, $a \in {\mathbb N}$.
\item $X_d = [xy(x-y)^2]^a [x(x-3y) {\partial /\partial x} + y(y-3x) {\partial /\partial y}]$, $a \in {\mathbb N}$.
\item $X_d = [xy^2(x-y)^3]^a [x(2x-5y) {\partial /\partial x} + y(y-4x) {\partial /\partial y}]$, $a \in {\mathbb N}$.
\fbox{} \end{enumerate} \end{coro} As an application of Corollary~(\ref{lema2.3}), we shall prove Lemma~(\ref{lema2.4}) below. This lemma estimates the number of singularities that the foliation ${\mathcal F}_X$ induced by $X$ on ${\mathbb C} {\mathbb P} (2)$ may have in the line at infinity. This estimate will be useful in Section~7. Also recall that the line at infinity is supposed to be invariant by ${\mathcal F}_X$ (cf. Lemma~(\ref{lema2.1})).
\begin{lema} \label{lema2.4} Let $X$ be a complete polynomial vector field on ${\mathbb C}^2$ and denote by ${\mathcal F}_X$ the foliation induced by $X$ on ${\mathbb C} {\mathbb P} (2)$. Then the line at infinity contains at most $3$ singularities of ${\mathcal F}_X$. \end{lema}
\noindent {\it Proof}\,: We consider the change of coordinates $u=1/x$ and $v = y/x$ and note that in the coordinates $(u,v)$ the line at infinity $\Delta$ is represented by $\{ u=0\}$.
First we set $X_d = F. Y_d$ where $F$ is a polynomial of degree $k$ and $Y_d$ is a polynomial vector field of degree $d-k$. Next let us consider the algebraic curve ${\mathcal C} \subset {\mathbb C} {\mathbb P} (2)$ induced on ${\mathbb C} {\mathbb P} (2)$ by the affine equation $F=0$. We also consider the foliation ${\mathcal F}_d$ induced on ${\mathbb C} {\mathbb P} (2)$ by $Y_d$. Finally denote by $\Delta \cap {\rm Sing} ({\mathcal F}_X)$ (resp. $\Delta \cap {\rm Sing} ({\mathcal F}_d)$) the set of singularities of ${\mathcal F}_X$ (resp. ${\mathcal F}_d$) belonging to $\Delta$. An elementary calculation with the coordinates $(u,v)$ shows that $$ \Delta \cap {\rm Sing} ({\mathcal F}_X) \subseteq (\Delta \cap {\rm Sing} ({\mathcal F}_d)) \cup ({\mathcal C} \cap \Delta) \, . $$ Now a direct inspection in the list of Corollary~(\ref{lema2.3}) implies that the set $(\Delta \cap {\rm Sing} ({\mathcal F}_d)) \cup ({\mathcal C} \cap \Delta)$ consists of at most~$3$ points. The proof of the lemma is over.
\fbox{}
\section{Simple semi-complete singularities}
\hspace{0.4cm} In this section we shall begin the study of a certain class of semi-complete singularities. The results obtained here will largely be used in the remaining sections. In the sequel $Y$ stands for a meromorphic vector field defined on a neighborhood of $(0,0) \in {\mathbb C}^2$. We always set $Y = fZ/g$ where $f,g$ are holomorphic functions without common factors and $Z$ is a holomorphic vector field for which the origin is either a regular point or an isolated singularity. Also ${\mathcal F}$ will stand for the foliation associated to $Y$ (or to $Z$). We point out that the decomposition $Y = fZ/g$ is unique up to an inversible factor.
If $Z$ is singular at $(0,0)$, we can consider its eigenvalues at this singularity. Three cases can occur:
\noindent {\bf a-} Both eigenvalues, $\lambda_1 , \lambda_2$ of $Z$ vanish at $(0,0)$.
\noindent {\bf b-} Exactly one eigenvalue, $\lambda_2$, vanishes at $(0,0)$.
\noindent {\bf c-} None of the eigenvalues $\lambda_1 , \lambda_2$ vanishes at $(0,0)$.
\noindent Although $Z$ is defined only up to an inversible factor, all the cases {\bf a}, {\bf b} and {\bf c} are well-defined. In the case {\bf c}, the precise values of $\lambda_1 , \lambda_2$ are not well-defined, but their ratio $\lambda_1 / \lambda_2$ is. Following a usual abuse of notation, in the case {\bf c}, we shall say that the foliation ${\mathcal F}$ associated to $Z$ has eigenvalues $\lambda_1 , \lambda_2$ different from {\it zero}. In other words, given a singular holomorphic foliation ${\mathcal F}$, we say that ${\mathcal F}$ has eigenvalues $\lambda_1 ,\lambda_2$ if there exists $Z$ as before having $\lambda_1 ,\lambda_2$ as its eigenvalues at $(0,0)$. The reader will easily check that all the relevant notions discussed depend only on the ratio $\lambda_1 / \lambda_2$. A singularity is said to be {\it simple} if it has at least one eigenvalue different from {\it zero}. A simple singularity possessing exactly one eigenvalue different from {\it zero} is called a {\it saddle-node}.
More generally the {\it order} of ${\mathcal F}$ at $(0,0)$ is defined as the order of $Z$ at $(0,0)$, namely it is the degree of the first non-vanishing homogeneous component of the Taylor series of $Z$ based at $(0,0)$. It is obvious that the order of ${\mathcal F}$ does not depend on the vector field with isolated singularities $Z$ chosen.
\begin{obs} \label{obsidiota} {\rm A useful fact is the non-existence, in dimension~$1$, of {\it strictly meromorphic} semi-complete vector fields. In other words, if $Y = f(x) \partial /\partial x$ with $f$ meromorphic, then $Y$ is not semi-complete on a neighborhood of $0 \in {\mathbb C}$. In fact, given a neighborhood $U$ of $0 \in {\mathbb C}$, we have $f(x) = x^{-n}. h(x)$ where $n \geq 1$, $h$ is holomorphic and $h (0) \neq 0$. Thus the corresponding time-form is $dT = x^n dx /h$. It easily follows the existence of an embedded curve $c : [0,1] \rightarrow U \setminus \{ 0 \}$ on which the integral of $dT$ vanishes. Similarly we can also prove that $Y$ is not semi-complete provided that $0 \in {\mathbb C}$ is an essential singularity of $f$.
Summarizing the preceding discussion, the fact that $Y$ is semi-complete implies that $f$ is holomorphic at $0 \in {\mathbb C}$. Consider now that $f$ is holomorphic but $f(0) = f'(0) = f''(0) =0$. An elementary estimate (cf. \cite{re1}) shows that in this case $Y$ is not semi-complete. Finally when $Y$ is semi-complete but $f(0) = f'(0) =0$ it is easy to see that $Y$ is conjugate to $x^2 {\partial /\partial x}$ (cf. \cite{ghre}). These elementary results give a complete description of semi-complete singularities in dimension~$1$.} \end{obs}
Let us say that $P = P_{\alpha} /P_{\beta}$ is a {\it homogeneous rational function} if $P_{\alpha}$ and $P_{\beta}$ are homogeneous polynomials (possibly with different degrees). The next lemma is borrowed from \cite{re3}.
\begin{lema} \label{le3.1} Consider the linear vector field $Z = x \partial /\partial x + \lambda y \partial /\partial y$. Suppose that $P = P_{\alpha} /P_{\beta}$ is a (non-constant) homogeneous rational function and that $\lambda \not\in {\mathbb R}_{+}$. Suppose also that $P Z$ is semi-complete. Then one has
\noindent 1. $\lambda$ is rational, i.e. $\lambda =-n/m$ for appropriate coprime positive integers $m,n$.
\noindent 2. $P$ has one of the forms below:
\noindent {\bf 2i} $P = x^c y^d$ where $mc-nd=0$ or $\pm 1$.
\noindent {\bf 2ii} If $\lambda =-1$, then $P$ is $x^c y^d$ ($mc-nd=0$ or $\pm 1$) or $P = (x-y)(xy)^a$ for $a \in {\mathbb Z}$.
\fbox{} \end{lema}
\begin{obs} \label{ipc} {\rm Consider a holomorphic vector field of the form $$ x^a y^b h(x,y) [ x(1 + {\rm h.o.t.}) {\partial /\partial x} \; - \; y(1 + {\rm h.o.t.}) {\partial /\partial y} ] \, , $$ where $a,b \in {\mathbb Z}$ and $h$ is holomorphic with $h(0,0) =0$. Of course we suppose that $h$ is not divisible by $x,y$. Next we assume that $X$ is semi-complete on a neighborhood of $(0,0)$. Denote by $h^k$ the homogeneous component of the first non-trivial jet of $h$ at $(0,0)$. The same argument employed in the proof of Lemma~(\ref{lema2.2}), modulo replacing $k$ by $1/k$, shows that the vector field $x^a y^b h^k (x {\partial /\partial x} - y {\partial /\partial y})$ is semi-complete. From the preceding lemma it then follows that $h^k = x-y$ and $a=b$. However a much stronger conclusion holds: $X$ admits the normal form $$ (xy)^a (x-y) g(x {\partial /\partial x} - y {\partial /\partial y}) \, , $$ where $g$ is a holomorphic function satisfying $g(0,0) \neq 0$. In fact, in order to deduce the normal form above, we just need to check that $x(1 + {\rm h.o.t.}) {\partial /\partial x} \; - \; y(1 + {\rm h.o.t.}) {\partial /\partial y}$ is linearizable. After \cite{mamo}, this amounts to proving that the local holonomy of their separatrizes is the identity. However the integral of the time-form on a curve $c$ projecting onto a loop around $0$ in $\{ y=0\}$ is clearly equal to {\it zero}. Since $X$ is semi-complete, such a curve must be closed which means that the holonomy in question is trivial.} \end{obs}
Of course the next step is to discuss the case $\lambda >0$. However, at this point, we do not want to consider only linear vector fields. This discussion will naturally lead us to consider singularities having an infinite number of separatrizes. Recall that a singularity $p$ of a holomorphic foliation ${\mathcal F}$ is said to be {\it dicritical} if ${\mathcal F}$ possesses infinitely many separatrizes at $p$. Sometimes we also say that a vector field $Y$ defined on a neighborhood of $(0,0) \in {\mathbb C}^2$ is dicritical, meaning that $(0,0)$ is a dicritical singularity of the foliation associated to $Y$. Let us begin with the following:
\begin{lema} \label{le3.2} Consider a semi-complete meromorphic vector field $Y$ defined on a neighborhood of $(0,0) \in {\mathbb C}^2$ and having the form $$ Y = \frac{f}{g} Z $$ where $f,g$ are holomorphic functions with $f(0,0)g(0,0)=0$ and $Z$ is a holomorphic vector field with an isolated singularity at $(0,0) \in {\mathbb C}^2$ whose eigenvalues are $1$ and $\lambda$. Assume that $\lambda >0$ but neither $\lambda$ nor $1/\lambda$ belongs to ${\mathbb N}$. Then $Z,Y$ are dicritical vector fields. \end{lema}
\noindent {\it Proof}\,: Note that Poincar\'e's linearization theorem \cite{ar} ensures that $Z$ is linearizable. Therefore, in appropriate coordinates, we have $Y =f(x {\partial /\partial x} + \lambda y {\partial /\partial y}) / g$. If $\lambda$ is rational equal to $n/m$, then $Z$ admits the meromorphic first integral $x^n y^{-m}$ and therefore admits an infinite number of separatrizes.
In order to prove the lemma it is now sufficient to check that $\lambda$ is rational provided that $Y$ is semi-complete. Let $P_{\alpha}$ (resp. $P_{\beta}$) be the first non-vanishing homogeneous component of the Taylor series of $f$ (resp. $g$) at $(0,0) \in {\mathbb C}^2$. The same argument carried out in the proof of Lemma~(\ref{lema2.2}), modulo replacing $k$ by $1/k$, shows that the vector field $Y^{\rm ho} = P_{\alpha} (x {\partial /\partial x} + \lambda y {\partial /\partial y}) /P_{\beta}$ is semi-complete on ${\mathbb C}^2$. We are going to see that this implies that $\lambda$ is rational.
Suppose for a contradiction that $\lambda$ is not rational. In this case the only separatrizes of $Y^{\rm ho}$ are the axes $\{ x=0 \}$, $\{ y=0\}$. Since in dimension~$1$ there is no meromorphic semi-complete vector field, it follows that the {\it zero set} of $P_{\beta}$ has to be invariant under the foliation ${\mathcal F}_{Y^{\rm ho}}$ associated to $Y^{\rm ho}$. Thus $P_{\beta}$ must have the form $x^a y^b$ for some $a,b \in {\mathbb N}$. Therefore we can write $P_{\alpha}$ as $x^cy^d Q(x,y)$ where $Q$ is a homogeneous polynomial.
Observe that the orbit $L$ of $Y^{\rm ho}$ (or ${\mathcal F}_{Y^{\rm ho}}$) passing through the point $(x_1 ,y_1)$ ($x_1 y_1 \neq 0$) is parametrized by $\textsf{A} : \, T \mapsto (x_1 e^T , y_1 e^{\lambda T})$. The restriction to $L$ of the vector field $P_{\alpha} Z$ is given in the coordinate $T$ by $P_{\alpha}(x_1 e^T , y_1 e^{\lambda T}) \partial /\partial T$. Because $\lambda$ is not rational, the parametrization $\textsf{A}$ is a one-to-one map from ${\mathbb C}$ to $L$. It follows that the one-dimensional vector field $x_1^cy_1^d e^{(c+\lambda d)T} Q(x_1e^T , y_1 e^{\lambda T}) \partial /\partial T$ is semi-complete on the entire ${\mathbb C}$. On the other hand the function $T \mapsto e^{(c+\lambda d)T} Q(x_1e^T , y_1 e^{\lambda T})$ is defined on the whole of ${\mathbb C}$. Since $\lambda$ is not rational and $Q$ is a polynomial, we conclude that this function has an essential singularity at infinity. This contradicts the fact that this function corresponds to a semi-complete vector field (cf. Remark~(\ref{obsidiota})). The lemma is proved.
\fbox{}
Let us now consider the case $\lambda \in {\mathbb N}$ since the case $1/\lambda \in {\mathbb N}$ is analogous. Thus we denote by $1$ and $n \in {\mathbb N}^{\ast}$ the eigenvalues of $Z$ at $(0,0)$. Such $Z$ is either linearizable or conjugate to its Poincar\'e-Dulac normal form \cite{ar} \begin{equation} (nx + y^n) {\partial /\partial x} \; + \; y {\partial /\partial y} \, . \label{awui} \end{equation} When $Z$ is linearizable, it has infinitely many separatrizes. Thus we are, in fact, interested in the case in which $Z$ is conjugate to the Poincar\'e-Dulac normal form~(\ref{awui}). In particular $\{ y =0 \}$ is the unique separatrix of $Z$ (or $Y$).
\begin{lema} \label{le3.3} Let $Y$ be $Y = fZ/g$ where $Z$ is a vector field as in~(\ref{awui}) and $f,g$ are holomorphic functions satisfying $f(0,0) g (0,0) =0$. Assume that $Y$ is semi-complete and that the regular orbits of $Y$ can contain at most one singular point whose order is necessarily~$1$. Then $n=1$. Furthermore, up to an inversible factor, $g(x,y) = y$ and $\{ f= 0\}$ defines a smooth analytic curve which is not tangent to $\{ y=0\}$. \end{lema}
\noindent {\it Proof}\,: Since the divisor of poles of $Y$ is contained in the union of the separatrizes, it follows that $Y$ has the form $$ Y = y^{k} F(x,y) [(nx + y^n) {\partial /\partial x} \; + \; y {\partial /\partial y}] \, , $$ where $k \in {\mathbb Z}$ and $F$ is a holomorphic function which is not divisible by $y$. Clearly $k < 0$, otherwise the first homogeneous component of $Y$ would not be semi-complete (cf. Corollary~(\ref{lema2.3}) and Remark~(\ref{obsidiota})).
Let us first deal with the case $n=1$. Note that we are going to strongly use Theorem~(\ref{selano}) which is the next result to be proved. This theorem is concerned with the so-called {\it saddle-node} singularities which are those having exactly one eigenvalue different from {\it zero}. Blowing-up the vector field $Y$ we obtain a new vector field $\widetilde{Y}$ defined and semi-complete on a neighborhood of the exceptional divisor $\pi^{-1} (0)$ (where $\pi$ stands for the blow-up map). Denote by $\widetilde{\mathcal F}$ the foliation associated to $\widetilde{Y}$ and note that $\widetilde{\mathcal F}$ has a unique singularity $p \in \pi^{-1} (0)$. More precisely, $\widetilde{Y}$ on a neighborhood of $p$ is given in standard coordinates $(x,t)$ ($\pi (x,t) = (x, tx)$), by $$ H (x,t) [x(1+t) {\partial /\partial x} \; - \; t^2 \partial /\partial t] \, , $$ where $H$ is a meromorphic function. Because of Theorem~(\ref{selano}), we know that the restriction of $\widetilde{Y}$ to the exceptional divisor $\{ x=0\}$ has to be regular, i.e. $H$ is not divisible by $x$ or $x^{-1}$. This implies that the order of $F$ at $(0,0) \in {\mathbb C}^2$ is $k$ and, in particular, $F(0,0) =0$.
On the other hand, $H$ has the form $t^k h (x,t)$ where $h$ is holomorphic on a neighborhood of $x=t=0$ and not divisible by $t$ or $t^{-1}$. Again Theorem~(\ref{selano}) shows that $h$ has to be an inversible factor, i.e. $h(0,0) \neq 0$. In other words, the proper transform of the (non-trivial) analytic curve $F=0$ intersects $\pi^{-1} (0)$ at points different from $x=t=0$.
The restriction of $\widetilde{Y}$ to $\pi^{-1} (0)$ is a holomorphic vector field which has a singularity at $\{x=t=0\}$ whose order is $2-k$. In particular $k \in \{ 0, 1,2\}$. The other singularities correspond to the intersection of $\pi^{-1} (0)$ with the proper transform of $\{ F= 0\}$. The statement follows since the regular orbits of $Y$ can contain only one singular point whose order is~$1$.
\fbox{}
In the rest of this section we briefly discuss the case of singularities as in {\bf b}, that is, those singularities having exactly one eigenvalue different from {\it zero}. As mentioned they are called {\it saddle-nodes} and were classified in \cite{mara}. A consequence of this classification is the existence of a large moduli space. The subclass of saddle-nodes consisting of those associated to semi-complete holomorphic vector fields was characterized in \cite{re4}. In the sequel we summarize and adapt these results to meromorphic semi-complete vector fields.
To begin with, let $\omega$ be a singular holomorphic $1$-form defining a saddle-node ${\mathcal F}$. According to Dulac \cite{dulac}, $\omega$ admits the normal form $$ \omega (x,y) = [x(1+ \lambda y^p) + yR(x,y)] \, dy \; - \; y^{p+1} \, dx \; , $$ where $\lambda \in {\mathbb C}$, $p \in {\mathbb N}^{\ast}$ and $R(x,0) = o (\mid x \mid^p)$. In particular ${\mathcal F}$ has a (smooth) separatrix given in the above coordinates by $\{ y=0\}$. This separatrix is often referred to as the {\it strong invariant manifold} of ${\mathcal F}$. Furthermore there is also a {\it formal} change of coordinates $(x,y) \mapsto (\varphi (x,y) ,y)$ which brings $\omega$ to the form \begin{equation} \omega (x,y) = x(1 +\lambda y^{p}) \, dy \; - y^{p+1} \, dx \; . \label{fnf} \end{equation} The expression in (\ref{fnf}) is said to be the {\it formal normal form} of ${\mathcal F}$. In these formal coordinates the axis $\{ x=0\}$ is invariant by ${\mathcal F}$ and called the {\it weak invariant manifold} of ${\mathcal F}$. Note however that the weak invariant manifold of ${\mathcal F}$ does not necessarily correspond to an actual separatrix of ${\mathcal F}$ since the change of coordinates $(x,y) \mapsto (\varphi (x,y) ,y)$ does not converge in general. Finally it is also known that a saddle-node ${\mathcal F}$ possesses at least one and at most two separatrizes (which are necessarily smooth) depending on whether or not the weak invariant manifold of ${\mathcal F}$ is convergent.
A general remark about saddle-nodes is the following one: denoting by $\pi_2$ the projection $\pi_2 (x,y) = y$, Dulac's normal form implies that the fibers of $\pi_2$, namely the vertical lines, are transverse to the leaves of ${\mathcal F}$ away from $\{ y=0\}$. This allows us to define the monodromy of ${\mathcal F}$ as being the ``first return map'' to a fixed fiber.
As to semi-complete vector fields whose associated foliation ${\mathcal F}$ is a saddle-node, one has:
\begin{teo} \label{selano} Suppose that $Y$ is a meromorphic semi-complete vector field defined around $(0,0) \in {\mathbb C}^2$. Suppose that the foliation ${\mathcal F}$ associated to $Y$ is a saddle-node. Then, up to an inversible factor, $Y$ has one of the following normal forms: \begin{enumerate} \item $Y = x(1+ \lambda y) {\partial /\partial x} + y^2 {\partial /\partial y}$, $\lambda \in {\mathbb Z}$.
\item $Y = y^{-p} [(x(1+ \lambda y^p) + yR(x,y)) {\partial /\partial x} + y^{p+1} {\partial /\partial y}]$.
\item $Y = y^{1-p} [(x(1+ \lambda y^p) + yR(x,y)) {\partial /\partial x} + y^{p+1} {\partial /\partial y}]$ and the monodromy induced by ${\mathcal F}$ is trivial (in particular $\lambda \in {\mathbb Z}$ and the weak invariant manifold of ${\mathcal F}$ is convergent). \end{enumerate} \end{teo}
\noindent {\it Proof}\,: The proof of this theorem relies heavily on the methods introduced in Section~4 of~\cite{re2} and Section~4 of~\cite{re4}. For convenience of the reader we summarize the argument below.
First we set $Y=fZ/g$ where $Z$ is a holomorphic vector field with an isolated singularity at $(0,0)$. Note that when $f(0,0)\, g(0,0) \neq 0$ (i.e. when $Y$ is holomorphic with an isolated singularity at $(0,0)$), then $Y$ has the normal form~1. Indeed this is precisely the content of Theorem~4.1 in Section~4 of \cite{re4}.
Next we observe that the pole divisor $\{ g=0\}$ is contained in the strong invariant manifold of ${\mathcal F}$. To verify this assertion we first notice that $\{ g=0\}$ must be invariant under ${\mathcal F}$ as a consequence of the general fact that there is no one-dimensional meromorphic semi-complete vector field (cf. Remark~(\ref{obsidiota})). Thus $\{ g=0\}$ is contained in the union of the separatrizes of ${\mathcal F}$. Next we suppose for a contradiction that the weak invariant manifold of ${\mathcal F}$ is convergent (i.e. defines a separatrix) and part of the pole divisor of $Y$. In this case the technique used in the proof of Proposition~4.2 of \cite{re2} applies word-by-word to show that the resulting vector field $Y$ is not semi-complete. This contradiction implies that $\{ g=0 \}$ must be contained in the strong invariant manifold of ${\mathcal F}$ as desired.
Combining the information above with Dulac's normal form, we conclude that $Y$ possesses the form $$ Y = \frac{f}{y^k} \left[ (x(1+ \lambda y^p) + yR(x,y)) \partx + y^{p+1} \party \right] \, . $$ Now we are going to prove that $f(0,0) \neq 0$ i.e. $f$ is an inversible factor. Hence we assume for a contradiction that $f(0,0) =0$ but $f$ is not divisible by $y$. Still according to the terminology of Section~4 of \cite{re2}, we see that the ``asymptotic order of the divided time-form'' induced by $Y$ on $\{ y=0\}$ is at least $2$ since this form is $dx /(xf(x,0))$ (this is also a consequence of the fact that the index of the strong invariant manifold of a saddle-node is {\it zero}, cf. Section~5). However this order cannot be greater than~$2$ since $Y$ is semi-complete. Furthermore when this order happens to be~$2$, the local holonomy of the separatrix in question must be the identity provided that $Y$ is semi-complete. Nonetheless the local holonomy of the strong invariant manifold of a saddle-node is never the identity. In fact, using Dulac's normal form, an elementary calculation shows that this holonomy has the form $H(z) = z + z^p + \cdots$. We then conclude that $f(0,0) \neq 0$.
Therefore we have so far $$ Y = y^{-k} H(x,y) \left[ (x(1+ \lambda y^p) + yR(x,y)) \partx + y^{p+1} \party \right] \, , $$ where $H$ is holomorphic and satisfies $H(0,0) \neq 0$.
Recall that $\pi_2$ denotes the projection $\pi_2 (x,y) =y$ whose fibers are transverse to the leaves of ${\mathcal F}$ away from $\{ y=0\}$. Let $L$ be a regular leaf of ${\mathcal F}$ and consider an embedded curve $c: [0,1] \rightarrow L$. If $dT_L$ stands for the time-form induced on $L$ by $Y$, we clearly have $$ \int_c \, dT_L \; = \; \int_{\pi_2 (c)} \, h(y) y^{p-k -1} dy \, , $$ where $h (y) = H(0,y)$ so that $h (0) \neq 0$. Since the integral on the left hand side is never {\it zero}, it follows that $p-k -1 =0$ or~$1$. The case $k=p-1$ does not require further comments. On the other hand, if $k=p-2$, then the integral of $dT_L$ over $c$ is {\it zero} provided that $\pi_2 (c)$ is a loop around the origin $0 \in \{ x=0\}$. This implies that $c$ must be closed itself. In other words the monodromy of ${\mathcal F}$ with respect to the fibration induced by $\pi_2$ is trivial. Conversely it is easy to check that the normal forms 1, 2 and 3 in the statement are, in fact, semi-complete. This finishes the proof of the theorem.
\fbox{}
Before closing the section, it is interesting to translate the condition in the item~3 of Theorem~(\ref{selano}) in terms of the classifying space of Martinet-Ramis \cite{mara}. Note however that this translation will not be needed for the rest of the paper.
Fix $p \in {\mathbb N}^{\ast}$ and consider the foliation ${\mathcal F}_{p, \lambda}$ whose leaves are ``graphs'' (over the $y$-axis) of the form $$ x = {\rm const} \cdot y^{\lambda} \exp (-1/(py^p)) \, . $$ Given $\lambda \in {\mathbb C}$, the moduli space of saddle-nodes ${\mathcal F}$ having $p, \lambda$ fixed is obtained from the foliation above through the following data: \begin{itemize}
\item $p$ translations $z \mapsto z+ c_i$, $z, c_i \in {\mathbb C}$ denoted by $g_1^+ , \ldots , g_p^+$.
\item $p$ local diffeomorphisms $z \mapsto z + \cdots$, $z \in {\mathbb C}$, tangent to the identity denoted by $g_1^- , \ldots , g_p^-$. \end{itemize}
These diffeomorphisms induce a permutation of (part of) the leaves of ${\mathcal F}_{p, \lambda}$. More precisely the total permutation (after one tour around $0 \in {\mathbb C}$) is given by the composition $$ g_p^- \circ g_p^+ \circ \ldots \circ g_1^- \circ g_1^+ \, . $$ However recall that our saddle-node has trivial monodromy. One easily checks that this cannot happen in the presence of the ramification $y^{\lambda}$. Thus we conclude that $\lambda$ belongs to ${\mathbb Z}$ and, in particular, the model ${\mathcal F}_{p, \lambda}$ introduced above has itself trivial monodromy. Hence the monodromy of ${\mathcal F}$ itself is nothing but $g_p^- \circ g_p^+ \circ \ldots \circ g_1^- \circ g_1^+$. In other words the condition is $$ g_p^- \circ g_p^+ \circ \ldots \circ g_1^- \circ g_1^+ = Id \, . $$ In particular note that, if $p =1$, the above equation implies that $g_1^- = g_1^+ = Id$. This explains why in item~1 of Theorem~(\ref{selano}) the saddle-node in question is analytically conjugate to its formal normal form.
\section{Polynomial vector fields and first integrals}
\hspace{0.4cm} Here we want to specifically consider complete polynomial vector fields whose associated foliation ${\mathcal F}$ has a singularity in the line at infinity which admits infinitely many separatrizes. Recall that such a singularity is said to be dicritical. The main result of the section is Proposition~(\ref{jouano}) below.
\begin{prop} \label{jouano} Let $X$ be a complete polynomial vector field on ${\mathbb C}^2$ and let ${\mathcal F}_X$ denote the foliation induced by $X$ on ${\mathbb C} {\mathbb P} (2)$. Assume that ${\mathcal F}_X$ possesses a dicritical singularity $p$, belonging to the line at infinity $\Delta$. Then ${\mathcal F}_X$ has a meromorphic first integral on ${\mathbb C} {\mathbb P} (2)$. Furthermore, modulo a normalization, the closure of the regular leaves of ${\mathcal F}_X$ is isomorphic to ${\mathbb C} {\mathbb P} (1)$. \end{prop}
We begin with a weakened version of this proposition which is the following lemma.
\begin{lema} \label{prejoa} Let $X ,\; {\mathcal F}_X$ be as in the statement of Proposition~(\ref{jouano}) and denote by $p \in \Delta$ a dicritical singularity of $X$. Then $X$ possesses a meromorphic first integral on ${\mathbb C}^2$. Moreover, if this first integral is not algebraic, then the set of leaves of ${\mathcal F}_X$ that pass through $p$ only once contains an open set. \end{lema}
\noindent {\it Proof}\,: Suppose that ${\mathcal F}_X$ as above possesses a singularity $p \in \Delta$ with infinitely many separatrizes. We consider coordinates $(u,v)$ around $p$ such that $\{ u=0 \} \subset \Delta$. Given a small neighborhood $V$ of $p$, we consider the restriction $X_{\mid_V}$ of $X$ to $V$. Clearly $X_{\mid_V}$ defines a meromorphic semi-complete vector field on $V$.
Obviously only a finite number of separatrizes of ${\mathcal F}_X$ going through $p$ may be contained in the divisor of poles or zeros of $X$. Thus there are (infinitely many) separatrizes ${\mathcal S}$ of ${\mathcal F}_X$ at $p$ which are (local) regular orbits of $X$. We fix one of these separatrizes ${\mathcal S}$. Recall that ${\mathcal S}$ has a Puiseux parametrization $\textsf{A}(t) = (a(t), b(t))$, $\textsf{A}(0) = (0,0)$, defined on a neighborhood $W$ of $0 \in {\mathbb C}$. Furthermore $\textsf{A}$ is injective on $W$ and a diffeomorphism from $W \setminus \{ 0 \}$ onto ${\mathcal S} \setminus \{ (0,0) \}$. Since ${\mathcal S}$ is invariant under $X$, the restriction to ${\mathcal S} \setminus \{ (0,0) \}$ of $X$ can be pulled-back by $\textsf{A}$ to give a meromorphic vector field $Z$ on $W$, i.e. $Z(t) = \textsf{A}^{\ast} \left( X\! \! \mid_{{\mathcal S}} \right)$ where $t \in W \setminus \{ 0 \}$ for a sufficiently small neighborhood $W$ of $0 \in {\mathbb C}$. We also have that $Z$ is semi-complete on $W$ since $X_{\mid_V}$ is semi-complete on $V$ and $\textsf{A}$ is injective on $W$. It follows from Remark~(\ref{obsidiota}) that $Z$ admits a holomorphic extension to $0 \in {\mathbb C}$ which is still denoted by $Z$. Moreover, letting $Z(t) = h(t) \partial /\partial t$, we cannot have $h(0) = h'(0) = h'' (0) =0$. However we must have at least $h(0) =0$. Otherwise the semi-global flow of $Z$ would reach the origin $0 \in {\mathbb C}$ in finite time. Since $\textsf{A} (0)$ lies in the line at infinity $\Delta$, it would follow that points in the orbit of $X$ containing ${\mathcal S}$ reach $\Delta$ in finite time. This is impossible since $X$ is complete on ${\mathbb C}^2$.
Therefore we have only two cases left, namely $h(0) =0$ but $h'(0) \neq 0$ and $h(0) = h'(0) =0$ but $h''(0) \neq 0$. Let us discuss them separately. First suppose that $h(0) = h'(0) =0$ but $h'' (0) \neq 0$. Modulo a normalization we can suppose that ${\mathcal S}$ is smooth at $p$. Again we denote by $L$ the global orbit of $X$ containing ${\mathcal S}$. By virtue of the preceding $L$ is a Riemann surface endowed with a complete holomorphic vector field $X_{\mid_L}$ which has a singularity of order~$2$ (where $X_{\mid_L}$ stands for the restriction of $X$ to $L$). It immediately results that $L$ has to be compactified into ${\mathbb C} {\mathbb P} (1)$. In other words the closure of $L$ is a rational curve and, in particular, an algebraic invariant curve of ${\mathcal F}_X$. Since there are infinitely many such curves, Jouanolou's theorem \cite{joa} ensures that ${\mathcal F}_X$ has a meromorphic first integral (alternatively we can also apply Borel-Nishino's theorem, cf. \cite{la}). Furthermore the level curves of this first integral are necessarily rational curves (up to normalization) as it follows from the discussion above.
Suppose now that $h(0) =0$ but $h'(0) \neq 0$. In this case the vector field $Z$ has a non-vanishing residue at $0 \in {\mathbb C}$. We then conclude that ${\mathcal S}$ possesses a {\it period} i.e. there exists a loop $c:[0,1] \rightarrow {\mathcal S}$ ($c(0) = c(1)$) on which the integral of the corresponding time-form is different from {\it zero}. If $L$ is the global orbit of $X$ containing ${\mathcal S}$ the preceding implies that $L$ is isomorphic to ${\mathbb C}^{\ast}$.
On the other hand the characterization of singularities with infinitely many separatrizes obtained through Seidenberg's theorem \cite{sei} (cf. Section~7 for further details) ensures that the set of orbits $L$ as above has positive logarithmic capacity. In fact it contains an open set. Thus Suzuki's results in \cite{suzu1}, \cite{suzu2} apply to provide the existence of a non-constant meromorphic first integral for $X$. If this first integral is not algebraic, then a ``generic'' orbit passes through $p$ once but not twice. Otherwise the leaf would contain two singularities of $X$ and therefore it would be a rational curve (up to normalization). In this case the mentioned Jouanolou's theorem would provide an algebraic first integral for ${\mathcal F}_X$. The proof of the lemma is complete.
\fbox{}
The preceding lemma suggests a natural strategy to establish Proposition~(\ref{jouano}). Namely we assume for a contradiction that ${\mathcal F}_X$ does not have an algebraic first integral. Then we have to show that a generic leaf passing through a dicritical singularity $p$ must return and cross $\Delta$ once again (maybe through another dicritical singularity). The resulting contradiction will then complete our proof.
Using Seidenberg's theorem, we reduce the singularities of ${\mathcal F}_X$ in $\Delta$ so that they all will have at least one eigenvalue different from {\it zero}. In particular we obtain a normal crossing divisor ${\mathcal E}$ whose irreducible components are rational curves, one of them being the proper transform of $\Delta$ (which will still be denoted by $\Delta$). The other components were introduced by the punctual blow-ups performed and are denoted by $D_i$, $i =1, \ldots ,s$. The fact that $p$ is a dicritical singularity implies that one of the following assertions necessarily holds: \begin{enumerate}
\item There is a component $D_{i_0}$ of ${\mathcal E}$ which is not invariant by $\widetilde{\mathcal F}_X$ (where $\widetilde{\mathcal F}_X$ stands for the proper transform of ${\mathcal F}_X$).
\item There is a singularity $p_0$ of $\widetilde{\mathcal F}$ in ${\mathcal E}$ which is dicritical and has two eigenvalues different from {\it zero}. \end{enumerate}
We fix a local cross section $\Sigma$ through a point $q$ of $\Delta$ which is regular for $\widetilde{\mathcal F}_X$. Note that a regular leaf $L$ of $\widetilde{\mathcal F}_X$ necessarily meets $\Sigma$ infinitely many times unless $L$ is algebraic. Indeed first we observe that $L$ is properly embedded in the affine part ${\mathbb C}^2$ since ${\mathcal F}_X$ possesses a meromorphic first integral on ${\mathbb C}^2$. Thus all the accumulation points of $L$ are contained in $\Delta$. Obviously if $L$ accumulates at a regular point of $\Delta$ then $L$ intersects $\Sigma$ infinitely many times as required. On the other hand, if $L$ accumulates only at points of $\Delta$ which are singularities of ${\mathcal F}_X$, then the Remmert-Stein theorem shows that the closure of $L$ is algebraic. Summarizing, using Jouanolou's or Borel-Nishino's theorem, we can suppose without loss of generality that all the leaves of $\widetilde{\mathcal F}_X$ intersect $\Sigma$ an infinite number of times (and in fact these intersection points approximate the point $q = \Sigma \cap \Delta$).
To prove that ${\mathcal F}_X$ has infinitely many leaves cutting the exceptional divisor ${\mathcal E}$ more than once, we fix a neighborhood $\mathcal U$ of ${\mathcal E}$. Proposition~(\ref{jouano}) is now a consequence of the next proposition.
\begin{prop} \label{dutrans} Under the above assumptions, there is an open neighborhood $V \subset \Sigma$ of $q$ in $\Sigma$ and an open subset $W \subset V$ of $V$ with the following property: any leaf $L$ passing through a point of $W$ intersects the exceptional divisor ${\mathcal E}$ before leaving the neighborhood $\mathcal U$. \end{prop}
In fact, since the set of leaves meeting ${\mathcal E}$ contains an open set and all of them (with the possible exception of a finite number) cross $\Sigma$ and accumulate on $\Delta$, Proposition~(\ref{dutrans}) clearly shows the existence of infinitely many leaves (orbits of $X$) intersecting ${\mathcal E}$ more than one time, thus providing the desired contradiction.
In order to prove Proposition~(\ref{dutrans}) we keep the preceding setting and notations. We are naturally led to discuss the behavior of the leaves of ${\mathcal F}_X$ on a neighborhood of the point $p_{ij}$ of intersection of the irreducible components $D_i , D_j$ belonging to ${\mathcal E}$.
Now let us fix coordinates $(x,y)$ around $p_{ij}$ ($p_{ij} \simeq (0,0)$) such that $\{ y= 0\} \subseteq D_i$ and $\{ x=0 \} \subseteq D_j$. Without loss of generality we can suppose that the domain of definition of the $(x,y)$-coordinates contains the bidisc of radius~$2$. Next we fix a segment of vertical line $\Sigma_x$ (resp. horizontal line $\Sigma_y$) passing through the point $(1,0)$ (resp. $(0,1)$). We assume that $D_i$ is invariant under $\widetilde{\mathcal F}_X$ but $D_j$ may or may not be invariant under $\widetilde{\mathcal F}_X$. Let us also make the following assumptions: \begin{description}
\item[{\bf A)}] $p_{ij} \simeq (0,0)$ is not a dicritical singularity of $\widetilde{\mathcal F}$.
\item[{\bf B)}] $\widetilde{\mathcal F}_X$ has at least one eigenvalue different from {\it zero} at $p_{ij} \simeq (0,0)$.
\item[{\bf C)}] The vector field $X$ whose associated foliation is $\widetilde{\mathcal F}$ is meromorphic semi-complete in the domain of the coordinates~$(x,y)$. \end{description}
We are going to discuss a variant of the so-called {\it Dulac's transform}, namely if leaves intersecting $\Sigma_x$ necessarily cut $\Sigma_y$. Precisely we fix a neighborhood ${\bf U}$ of $\{ x=0 \} \cup \{ y=0 \}$, we then have:
\begin{lema} \label{passara} Under the preceding assumptions, there is an open neighborhood $V_x \subset \Sigma_x$ of $(1,0)$ in $\Sigma_x$ and an open set $W_x \subset V_x$ with the following property: any leaf $L$ of ${\mathcal F}$ passing through a point of $W_x$ meets $\Sigma_y$ before leaving $U$. In particular, if $D_j$ is not invariant by ${\mathcal F}$, then the leaves of ${\mathcal F}$ passing through points of $W_x$ cross the axis~$\{ x=0 \}$ before leaving ${\bf U}$. In addition, by choosing $V_x$ very small, the ratio between the area of $W_x$ and the area of $V_x$ becomes arbitrarily close to~$1$. \end{lema}
Before proving this lemma, let us deduce the proof of Proposition~(\ref{dutrans}).
\noindent {\it Proof of Proposition~(\ref{dutrans})}\,: Recall that $\widetilde{\mathcal F}_X$, the proper transform of ${\mathcal F}_X$, has only simple singularities in ${\mathcal E}$. In particular, if ${\bf p} \in {\mathcal E}$ is a dicritical singularity of $\widetilde{\mathcal F}_X$, then $\widetilde{\mathcal F}_X$ has~$2$ eigenvalues different from {\it zero} at ${\bf p}$. Hence $\widetilde{\mathcal F}_X$ is linearizable around ${\bf p}$ and, as a consequence, there is a small neighborhood $U_{\bf p}$ of ${\bf p}$ such that any regular leaf $L$ of $\widetilde{\mathcal F}_X$ entering $U_{\bf p}$ must cross ${\mathcal E}$ before leaving $U_{\bf p}$. This applies in particular if ${\bf p}$ coincides with the intersection of two irreducible components $D_i, D_j$ of ${\mathcal E}$.
On the other hand $\Delta$ is invariant by $\widetilde{\mathcal F}_X$ and all but a finite number of leaves of $\widetilde{\mathcal F}_X$ intersect $\Sigma$ in arbitrarily small neighborhoods of $q= \Sigma \cap \Delta$. In particular if $\widetilde{\mathcal F}_X$ has a dicritical singularity on $\Delta$, then the statement follows from the argument above.
Suppose now that $\widetilde{\mathcal F}_X$ does not have a dicritical singularity on $\Delta$. Let $D_1$ be another irreducible component of ${\mathcal E}$ which intersects $\Delta$ at $p_{01}$. Note that $p_{01}$ is not a dicritical singularity of $\widetilde{\mathcal F}_X$ by the preceding discussion. If $D_1$ is not invariant by $\widetilde{\mathcal F}_X$, then Lemma~(\ref{passara}) allows us to find infinitely many leaves of $\widetilde{\mathcal F}_X$ intersecting ${\mathcal E}$. Thus the proposition would follow. On the other hand, if $D_1$ is invariant by $\widetilde{\mathcal F}_X$, then Lemma~(\ref{passara}) still allows us to find a local transverse section $\Sigma_1$ through a regular point $q_1$ of $D_1$ with the desired property, namely: except for a set of leaves whose volume can be made arbitrarily small (modulo choosing $V_x$ sufficiently small), all leaves of $\widetilde{\mathcal F}_X$ meet $\Sigma_1$ in arbitrarily small neighborhoods of $q_1 = \Sigma_1 \cap D_1$. We then continue the procedure replacing $\Delta$ by $D_1$. Since we eventually will find an irreducible component of ${\mathcal E}$ which is not invariant by $\widetilde{\mathcal F}_X$ or contains a dicritical singularity, the proposition is proved. This also concludes the proof of Proposition~(\ref{jouano}).
\fbox{}
The rest of the section is devoted to the proof of Lemma~(\ref{passara}). Let us begin with the easier case in which $D_j$ is not invariant by $\widetilde{\mathcal F}_X$.
\begin{lema} \label{10.fim1} The vector field $\widetilde{X}$ vanishes with order~$1$ on $D_j$. It also has poles of order~$1$ on $D_i$ and the origin $(0,0)$ is a LJ-singularity of $\widetilde{\mathcal F}_X$. \end{lema}
\noindent {\it Proof}\,: By assumption $D_j$ is contained in ${\mathcal E}$ and is not invariant by $\widetilde{\mathcal F}_X$. In particular $\widetilde{X}$ cannot have poles on $D_j$ since there is no strictly meromorphic semi-complete vector field in dimension~$1$. Neither can $\widetilde{X}$ be regular on $D_j$, otherwise certain points of ${\mathbb C}^2$ would reach infinity in finite time, thus contradicting the fact that $X$ is complete. Finally the order of $\widetilde{X}$ on $D_j$ cannot be greater than~$2$, otherwise $\widetilde{X}$ would not be semi-complete. Besides, if this order is~$2$, then infinitely many orbits of $X$ will be compactified into rational curves. This would imply that ${\mathcal F}_X$ has an algebraic first integral which is impossible. This shows that $\widetilde{X}$ vanishes with order~$1$ on $D_j$.
Because $\widetilde{X}$ vanishes on $D_j$ and $D_j$ is not invariant by the associated foliation $\widetilde{\mathcal F}_X$, it follows from Section~4 that either $(0,0)$ is a LJ-singularity of $\widetilde{\mathcal F}_X$ or $\widetilde{\mathcal F}_X$ is linearizable with eigenvalues~$1,-1$ at $(0,0)$. In the latter case the conclusions of Lemma~(\ref{passara}) are obvious. Thus we can suppose that $(0,0)$ is a LJ-singularity. It follows from Lemma~(\ref{le3.3}) that $\widetilde{X}$ must have a pole divisor of order~$1$ on $D_i$.
\fbox{}
\noindent {\it Proof of Lemma~(\ref{passara}) when $D_j$ is not invariant by $\widetilde{\mathcal F}_X$}\,: Modulo blowing-up $(0,0)$, the problem is immediately reduced to the discussion of the Dulac's transform between the strong and the weak invariant manifolds of the saddle-node determined by $$ x(1+y) \, dy \; \, - \; \, y^2 \, dx \; . $$ Thinking of this foliation as a differential equation, we obtain the solution $$ x(T) = \frac{x_0 e^T}{1-y_0 T} \; \; \, {\rm and} \; \; \, y(T) = \frac{y_0}{1-y_0 T} \, . $$ Let us fix $x_0 =1$. Given $y_0$, we search for $T_0$ so that $y(T_0) =1$. Furthermore we also require that the norm of $x(T)$ stays ``small'' during the procedure. This is clearly possible if $y_0$ is real negative (sufficiently close to {\it zero}). Actually we set $T_0 =(1-y_0) /y_0 \in {\mathbb R}_-$ so that $T$ can be chosen real negative during the procedure. Thus the norm of $x(T)$ will remain controlled by that of $y_0$.
On the other hand, let $y_0$ be in the transverse section $\Sigma_x$ and suppose that $y_0$ is not real positive. Then the orbit of $y_0$ under the local holonomy of the strong invariant manifold converges to {\it zero} and is asymptotic to ${\mathbb R}_-$. Indeed this local holonomy is represented by a local diffeomorphism of the form $z \mapsto z + z^2 + {\rm h.o.t.}$ and the local topological dynamics of these diffeomorphisms is simple and well-understood (known as a ``flower'', in the present case this dynamics is also called the parabolic bifurcation). Hence for a sufficiently large iterate of $y_0$ (without leaving $V_x$), the above ``Dulac's transform'' is well-defined. Thus the statement of Lemma~(\ref{passara}) is verified as long as we take $W_x = V_x \setminus {\mathbb R}_-$ in the above coordinates.
\fbox{}
From now on we can suppose that $D_j$ is invariant under ${\mathcal F}_X$. We have three cases to check:
\noindent 1) ${\mathcal F}_X$ has two eigenvalues different from {\it zero} at $(0,0)$ and is locally linearizable.
\noindent 2) ${\mathcal F}_X$ has two eigenvalues different from {\it zero} at $(0,0)$ but is not locally linearizable. In this case the quotient of the eigenvalues is real negative.
\noindent 3) ${\mathcal F}_X$ defines a saddle-node at $(0,0)$.
In the Case~$1$ the verification is automatic and left to the reader. Case~2 follows from \cite{mamo} (note that our convention of signs is opposite to the convention of \cite{mamo}). So we just need to consider the case of saddle-nodes. Of course all the possible saddle-nodes necessarily have a convergent weak invariant manifold. Without loss of generality we can suppose that $D_i$ is the strong invariant manifold so that $D_j$ is the weak invariant manifold (the other possibility is analogous). All the background material about saddle-nodes used in what follows can be found in \cite{mara}.
Thanks to Lemma~(\ref{selano}), we can find coordinates $(x,y)$ as above where the vector field $\widetilde{X}$ becomes $$ \widetilde{X} = y^{-k} [ (x(1+ \lambda y^p) +yR) {\partial /\partial x} \; + \; y^{p+1} {\partial /\partial y} ] \, . $$ Since our problem depends only on the foliation associated to $\widetilde{X}$, we drop the factor $y^{-k}$ in the sequel. We also notice that $\widetilde{X}$ is regular on $D_j$. The argument which is going to be employed here is a generalization of the one employed to deal with the saddle-node appearing after blowing-up the LJ-singularity in the previous case.
Following \cite{mara} we consider open sets $V_i \subset {\mathbb C}$, $i = 0, \ldots ,2p-1$, defined by $V_i = \{ z \in {\mathbb C} \; ; \; (2i+1)\pi /2p - \pi /p < \arg z < (2i+1)\pi /2p + \pi /p \}$. The $V_i$'s, $i=1, \ldots ,2p-1$, define a covering of ${\mathbb C}^{\ast}$ and, besides, each $V_i$ intersects only $V_{i-1}$ and $V_{i+1}$ (where $V_{-1} = V_{2p-1}$). We also let $W_i^+$ (resp. $W_i^-$) be defined by $$ W_i^+ = \left\{ \frac{(4i-1) \pi}{2p} < \arg z < \frac{(4i+1) \pi}{2p} \right\} \, \; \; {\rm and} \, \; \; W_i^- = \left\{ \frac{(4i+1) \pi}{2p} < \arg z < \frac{(4i+3) \pi}{2p} \right\} \, . $$ for $i=0 ,\ldots ,2p-1$. We point out that ${\rm Re} \, (y^p) < 0$ (resp. ${\rm Re} \, (y^p) > 0$) provided that $y \in W_i^-$ (resp. $y \in W_i^+$). In addition we have $W_i^+ = V_{2i} \cap V_{2i+1}$ and $W_i^- = V_{2i+1} \cap V_{2i+2}$ (unless $p=1$ where $V_0 \cap V_1 = W_0^+ \cup W_0^-$). Given $\varepsilon >0$, we set $$
U_{i, V} = \{ (x,y) \in {\mathbb C}^2 \; ; \; \| x \| < \varepsilon
\, , \; \, \| y \| < \varepsilon \; \; {\rm and} \; \; y \in V_i \} \, . $$ According to Hukuhara-Kimura-Matuda, there is a bounded holomorphic mapping $\phi_{U_{i, V}} (x,y) = (\varphi_{U_{i,V}} (x,y) , y)$ defined on $U_{i, V}$ which brings the vector field $\widetilde{X}$ to the form \begin{equation} x(1+ \lambda y^p) {\partial /\partial x} \; + \; y^{p+1} {\partial /\partial y} \, . \label{quasela} \end{equation} The vector field in~(\ref{quasela}) can be integrated to give \begin{equation} x (T) = \frac{x_0 e^T}{\sqrt[p]{(1-py_0^pT)^{\lambda}}} \; \; {\rm and} \; \; y(T) = \frac{y_0}{\sqrt[p]{1-p y_0^p T}} \, . \label{intee} \end{equation}
\noindent {\it Proof of Lemma~(\ref{passara})}\,: We keep the preceding notations. On $U_{i,V}$ we consider a normalizing mapping $\phi_{U_{i, V}}$ such that $\widetilde{X}$ is as in~(\ref{quasela}). In this coordinate we fix a vertical line $\Sigma_x$ as before and let $\Sigma_{y,i}$ denote the intersection of the horizontal line through $(0,1)$, $\Sigma_y$, with the sector $V_i$. Again we want to know which leaves of $\widetilde{\mathcal F}$ passing through a point of $\Sigma_x$ will intersect $\Sigma_y$ as well. Thus starting with $x_0 =1$, we search for $y (T_0) =1$. Thanks to equations~(\ref{intee}), it is enough to choose $T_0=(1-y_0^p) /py_0^p$. In particular $T_0 \in {\mathbb R}_-$ provided that $y_0^p \in {\mathbb R}_-$. The formula for $x(T)$ in~(\ref{intee}) shows that $x(T)$ remains in the fixed neighborhood ${\bf U}$ of $\{ x=0 \} \cup \{ y=0 \}$ provided that we keep $T \in {\mathbb R}_-$ during the procedure and choose $y_0$ sufficiently small. More generally if the real part ${\rm Re} \, (y_0^p)$ is negative and the quotient between imaginary and real parts is bounded, then the same argument applies. In other words, if $y_0$ belongs to a compact subsector of $W_{[i+1/2]-1}^-$, the set $W_j^-$ contained in $V_i$, then the Dulac's transform in question is well-defined modulo choosing $y_0$ uniformly small.
The local holonomy associated to $D_i$ (the strong invariant manifold) has the form $\textsf{h} (z) = z + z^{p+1} + {\rm h.o.t.}$ The dynamical picture corresponding to this diffeomorphism is still a ``flower''. However, in general, it is not true that the orbit of a ``generic'' point will intersect a fixed $W_{[i+1/2]-1}^-$ since $\textsf{h}$ may have invariant sectors. However, the above argument can be applied separately for each $i$. Clearly to each fixed $i$ we have a different $\Sigma_y \cap V_i$ associated. Nonetheless they are all equivalent since the weak invariant manifold of the foliation is convergent. It follows that apart from a finite number of curves whose union has empty interior, the leaf through a point of $\Sigma_x$ meets $\Sigma_y$ before leaving the neighborhood ${\bf U}$. As mentioned the case where $D_i$ is the (convergent) weak invariant manifold and $D_j$ is the strong invariant manifold is analogous. The statement follows.
\fbox{}
\section{Arrangements of simple singularities}
\hspace{0.4cm} Now we are going to study the possible arrangements of simple semi-complete singularities over a rational curve of self-intersection~$-1$ which are obtained by blowing-up a semi-complete vector field on a neighborhood of $(0,0) \in {\mathbb C}^2$.
We shall make a number of assumptions which are always satisfied in our cases. We denote by $\widetilde{\mathbb C}^2$ the blow-up of ${\mathbb C}^2$ at the origin and by $\pi : \widetilde{\mathbb C}^2 \rightarrow {\mathbb C}^2$ the corresponding blow-up map. Given a vector field $Y$ (resp. foliation ${\mathcal F}$) defined on a neighborhood $U$ of the origin, $\pi$ naturally induces a vector field $\widetilde{Y}$ (resp. foliation $\widetilde{\mathcal F}$) defined on $\pi^{-1} (U)$. Furthermore $Y$ is semi-complete on $U$ if and only if $\widetilde{Y}$ is semi-complete on $\pi^{-1} (U)$.
Given a meromorphic vector field $Y = fZ/g$ with $f,g, Z$ holomorphic, we call the vector field $fZ$ the {\it holomorphic part}\, of $Y$. In this section we discuss the nature of a meromorphic semi-complete vector field $Y$ defined on a neighborhood of the origin $(0,0) \in {\mathbb C}^2$ which satisfies the following assumptions: \begin{enumerate} \item $Y = x^{k_1} y^{k_2} f_1 Z $ where $Z$ is a holomorphic vector field having an isolated singularity at $(0,0) \in {\mathbb C}^2$, $f_1$ is a holomorphic function and $k_1 , k_2 \in {\mathbb Z}$.
\item The regular orbits of $Y$ contain at most $1$ singular point. Furthermore the order of $Y$ at this singular point is one.
\item The regular orbits of $Y$ contain at most~$1$ period (i.e. there is only one homology class containing loops on which the integral of the time-form is different from zero).
\item The foliation ${\mathcal F}$ associated to $Y$ (or to $Z$) has both eigenvalues equal to {\it zero} at $(0,0) \in {\mathbb C}^2$.
\item The blow-up $\widetilde{\mathcal F}$ of ${\mathcal F}$ is such that every singularity $\tilde{p} \in \pi^{-1} (0)$ of $\widetilde{\mathcal F}$ is simple.
\item ${\mathcal F}$ is not dicritical at $(0,0)$.
\end{enumerate}
Before continuing let us introduce two basic definitions. Assume that ${\mathcal F}$ is a singular holomorphic foliation defined on a neighborhood of a singular point $p$. Let ${\mathcal S}$ be a smooth separatrix of ${\mathcal F}$ at $p$. We want to define the {\it order }\, of ${\mathcal F}$ with respect to ${\mathcal S}$ at $p$, ${\rm ord}_{{\mathcal S}} ({\mathcal F} ,p)$ (also called the multiplicity of ${\mathcal F}$ along ${\mathcal S}$), and the {\it index}\, of ${\mathcal S}$ w.r.t. ${\mathcal F}$ at $p$, ${\rm Ind}_{p} ({\mathcal F} , {\mathcal S})$ (cf. \cite{cama}). In order to do that, we consider coordinates $(x,y)$ where ${\mathcal S}$ is given by $\{ y=0 \}$ and a holomorphic $1$-form $\omega = F(x,y) \, dy - G (x,y)\, dx$ defining ${\mathcal F}$ and having an isolated singularity at $p$. Then we let \begin{eqnarray} {\rm ord}_{{\mathcal S}} ({\mathcal F} , p) & = & {\rm ord} \, (F(x,0)) \; \; \, {\rm at} \; \; \, 0 \in {\mathbb C} \label{ordem1} \; \; \, {\rm and} \\ {\rm Ind}_p ({\mathcal F} ,{\mathcal S} ) & = & {\rm Res} \, \frac{\partial}{\partial y} \left( \frac{G}{F} \right) (x,0) \, dx \; . \label{index1} \end{eqnarray} In the above formulas ${\rm ord} \, (F(x,0))$ stands for the order of the function $x \mapsto F(x,0)$ at $0 \in {\mathbb C}$ and ${\rm Res}$ for the residue of the $1$-form in question.
Let $p_1 ,\ldots ,p_r$ denote the singularities of $\widetilde{\mathcal F}$ belonging to $\pi^{-1} (0)$. Since $\pi^{-1} (0)$ naturally defines a separatrix for every $p_i$, we can consider both ${\rm ord}_{\pi^{-1} (0)} (\widetilde{\mathcal F} , p_i)$ and ${\rm Ind}_{p_i} (\widetilde{\mathcal F} ,\pi^{-1} (0))$. Easy calculations and the Residue Theorem then provide (cf. \cite{mamo} , \cite{cama}): \begin{eqnarray} & & {\rm ord}_{(0,0)} ({\mathcal F}) + 1 = \sum_{i=1}^r {\rm ord}_{\pi^{-1} (0)} (\widetilde{\mathcal F} , p_i) \; , \label{ordem2} \\ & & \sum_{i=1}^r {\rm Ind}_{p_i} (\widetilde{\mathcal F}, \pi^{-1} (0)) = -1 \; . \label{index2} \end{eqnarray}
On the other hand the order of $\pi^{-1} (0)$ as a divisor of zeros or poles of $\widetilde{Y}$ is \begin{equation} {\rm ord}_{\pi^{-1} (0)} \widetilde{Y} = {\rm ord}_{(0,0)} (f) + {\rm ord}_{(0,0)} ({\mathcal F}) -{\rm ord}_{(0,0)} (g) -1 \, . \label{equa2} \end{equation} In particular if this order is {\it zero}, then $\widetilde{Y}$ is regular on $\pi^{-1} (0)$.
To abridge notations, the local singular foliation induced by the linear vector field $$ (x+y) {\partial /\partial x} + y {\partial /\partial y} $$ will be called a LJ-singularity (where LJ stands for linear and in the Jordan form).
To simplify the statement of the main result of this section, namely Proposition~(\ref{prop4.2}), we first introduce~$3$ types, or models, of vector fields. Let us keep the preceding notation. \begin{description}
\item[{\bf Model} $Z_{1,11}$:] Let ${\mathcal F}_{1,11}$ be the foliation associated to $Z_{1,11}$ and $\widetilde{\mathcal F}_{1,11}$ its blow-up. Then $\widetilde{\mathcal F}_{1,11}$ contains~$3$ singularities $p_1 ,p_2 ,p_3$ on $\pi^{-1} (0)$ whose eigenvalues are respectively $1,1$, $-1,1$ and $-1,1$. The singularity $p_1$ is a LJ-singularity and the blow-up $\widetilde{Z}_{1,11}$ has a pole of order~$1$ on $\pi^{-1} (0)$. The separatrix of $p_2$ (resp. $p_3$) transverse $\pi^{-1} (0)$ is a pole divisor of $\widetilde{Z}_{1,11}$ of order~$1$ as well. Finally $\widetilde{Z}_{1,11}$ has a curve of zeros passing through $p_1$ which is not invariant under $\widetilde{\mathcal F}_{1,11}$.
\item[{\bf Model} $Z_{0, 12}$:] With similar notations, $\widetilde{\mathcal F} _{0,12}$ has~$3$ singularities $p_1, p_2 ,p_3$ on $\pi^{-1} (0)$ of eigenvalues equal to $1,0$, $-1,2$ and $-1, 2$. The singularity $p_1$ is a saddle-node with strong invariant manifold contained in $\pi^{-1} (0)$. The separatrix of $p_2$ (resp. $p_3$) transverse to $\pi^{-1} (0)$ is a pole of order~$d\neq 0$ of $\widetilde{Z}_{0,12}$. The exceptional divisor $\pi^{-1} (0)$ is a pole of order~$2d-1$ of $\widetilde{Z}_{0,12}$. There is no other component of the divisor of zeros or poles of $\widetilde{Z}_{0,12}$.
\item[{\bf Model} $Z_{1, 00}$:] $\widetilde{\mathcal F}_{1,00}$ still has~$3$ singularities $p_1, p_2 ,p_3$ whose eigenvalues are $-1,1$, $1,0$ and~$1,0$. The singularities $p_2, p_3$ are saddle-nodes with strong invariant manifolds contained in $\pi^{-1} (0)$. The separatrix of $p_1$ transverse to $\pi^{-1} (0)$ is a pole of $\widetilde{Z}_{1,00}$ of order~$d\neq 0$. The exceptional divisor $\pi^{-1} (0)$ is a pole of order~$d-1$ and there is no other component of the divisor of zeros or poles of $\widetilde{Z}_{1,00}$.
\end{description}
Note in particular that Formula~(\ref{ordem2}) implies that ${\mathcal F}_{1,11}$ (resp. ${\mathcal F}_{0,12}$, ${\mathcal F}_{1,00}$) has a singularity of order~$2$ at the origin.
\begin{prop} \label{prop4.2} Let $Y$, $\widetilde{Y}$ be as above. Assume that the order of $\widetilde{Y}$ on $\pi^{-1} (0)$ is different from {\it zero}. Then the structure of the singularities of $\widetilde{\mathcal F}$ on $\pi^{-1} (0)$ is equal to that of one of the models $Z_{1,11}$, $Z_{0,12}$ or $Z_{1,00}$. \end{prop}
\begin{lema} \label{lema4.3} Denote by $\lambda_1^i , \lambda_2^i$ the eigenvalues of $\widetilde{\mathcal F}$ at $p_i$ ($i=1 ,\ldots ,r$). Then one of the following possibilities holds: \begin{description}
\item[($\imath$)] $\lambda_1^i /\lambda_2^i = -n/m$ where $n,m$ belong to ${\mathbb N}^{\ast}$.
\item [($\imath \imath$)] $p_i$ is a saddle-node (i.e. the eigenvalues are $1$ and {\it zero}) whose strong invariant manifold coincides with $\pi^{-1} (0)$.
\item[($\imath \imath \imath $)] $p_i$ is a LJ-singularity. \end{description} Furthermore there may exist at most one LJ-singularity and, when such singularity does exist, all the remaining singularities are as in ($\imath$). \end{lema}
\noindent {\it Proof}\,: First let us suppose that one of the eigenvalues $\lambda_1^i ,\lambda_2^i$ vanishes. In this case $\widetilde{\mathcal F}$ defines a saddle-node at $p_i$. Moreover $p_i$ belongs to the divisor of zeros or poles of $\widetilde{Y}$, so that Theorem~(\ref{selano}) shows that the strong invariant manifold of $\widetilde{\mathcal F}$ at $p_i$ coincides with $\pi^{-1} (0)$ as required.
On the other hand, if both $\lambda_1^i ,\lambda_2^i$ are different from {\it zero}, then they satisfy condition ($\imath$) as a consequence of Lemma~(\ref{le3.1}). Finally it remains only to consider the case where $p_1$ is a LJ-singularity. Thus in local coordinates $(x,t)$, $\{ x=0 \} \subset \pi^{-1} (0)$, around $p_1$, $\widetilde{Y}$ is given by $$ x^{-1} h [ (t+x) \partial /\partial t + x {\partial /\partial x}] \, . $$ Hence the regular orbits of $\widetilde{Y}$ contain a {\it zero} of $\widetilde{Y}$ corresponding to their intersection with $h=0$. Because of condition~2, this implies that only one of the $p_i$'s can be a LJ-singularity. Furthermore, by the same reason, the holonomy of $\pi^{-1} (0) \setminus \{ p_1 ,\ldots ,p_r \}$ has to be trivial. In particular none of the remaining singularities can be a saddle-node.
\fbox{}
Combining the information contained in the preceding lemma with Formula~(\ref{ordem2}) we obtain:
\begin{coro} \label{lema4.1} The order of ${\mathcal F}$ at $(0,0) \in {\mathbb C}^2$ equals $r-1$, i.e. ${\rm ord}_{(0,0)} ({\mathcal F}) = r-1$.
\fbox{} \end{coro}
The case where $p_1$ is a LJ-singularity is indeed easy to analyse. After the preceding lemma and the fact that the holonomy of $\pi^{-1} (0) \setminus \{ p_1 ,\ldots ,p_r \}$ is trivial, we conclude that all the remaining singularities $p_2 ,\ldots ,p_r$ have eigenvalues $1$ and $-1$. Now using formulas~(\ref{ordem2}) and~(\ref{index2}) we conclude that $\widetilde{Y}$ is as in the model~$Z_{1,11}$.
Hereafter we suppose without loss of generality that none of the $p_i$'s is a LJ-singularity. For $s \leq r$, we denote by $p_1 ,\ldots ,p_s$ the singularities of $\widetilde{\mathcal F}$ where $\widetilde{\mathcal F}$ has two non-vanishing eigenvalues (whose quotient has the form $-n/m$, $m,n \in {\mathbb N}$). The remaining $p_{s+1} , \ldots ,p_r$ singularities are therefore saddle-nodes. Recall that the strong invariant manifolds of these saddle-nodes coincide with $\pi^{-1} (0)$ thanks to Theorem~(\ref{selano}). Next we have:
\begin{lema} \label{lema4.2} At least one of the $p_i$'s is a saddle-node (i.e. $s$ is strictly less than $r$). \end{lema}
\noindent {\it Proof}\,: The proof relies on Section~4 of \cite{re3}. Suppose for a contradiction that none of the $p_i$'s is a saddle-node. Given that there is no $LJ$-singularity, it follows that the quotient $\lambda_1^i /\lambda_2^i$ is negative rational for every $i=1, \ldots ,r$. Hence the hypotheses of Proposition~4.2 of \cite{re3} are verified. It results that $X$ has one of the normal forms indicated in that proposition. As is easily seen, all those vector fields have orbits with~$2$ distinct periods which is impossible in our case. The lemma is proved.
\fbox{}
To complete the proof of Proposition~(\ref{prop4.2}) we proceed as follows. For $i \in \{ 1, \ldots ,s\}$, we consider local coordinates $(x_i ,t_i)$, $\{ x_i =0 \} \subset \pi^{-1} (0)$, around $p_i$. In these coordinates $\widetilde{Y}$ has the form \begin{equation} \widetilde{Y} = x_i^{({\rm ord}_{\pi^{-1} (0)} (\widetilde{Y}))} t_i^{d_i} h_i [ m_i x_i (1 + {\rm h.o.t.}) \partial /\partial x_i - n_i t_i (1 + {\rm h.o.t}) \partial /\partial t_i] \label{forca1} \end{equation} where $d_i \in {\mathbb Z}$, $m_i ,n_i \in {\mathbb N}$ and $h_i$ is holomorphic but not divisible by either $x_i, t_i$. Similarly, around the saddle-nodes singularities $p_{s+1} ,\ldots ,p_r$, we have \begin{equation} \widetilde{Y} = x_i^{({\rm ord}_{\pi^{-1} (0)} (\widetilde{Y}))} h_i[x_i^{p_i+1} \partial /\partial x_i + t_i(1 + {\rm h.o.t}) \partial /\partial t_i] \, . \label{forca2} \end{equation}
We claim that $h_i (0,0) \neq 0$. This is clear in equation~(\ref{forca2}) thanks to Theorem~(\ref{selano}). As to equation~(\ref{forca1}), let us suppose for a contradiction that $h_i (0,0) =0$. Hence the regular leaves of $\widetilde{Y}$ have a zero corresponding to the intersection of these leaves with $\{ h_i =0 \}$. Given condition~2, it results that only one of the $h_i$'s may verify $h_i (0,0) =0$. Without loss of generality we suppose that $h_1 (0,0) =0$. From Lemma~(\ref{le3.1}) and Remark~(\ref{ipc}), it follows that $(x_i ,t_i)$ can be chosen so as to have $\widetilde{Y} = (x_i t_i)^a (x_i-t_i) (x_i \partial /\partial x_i - t_i \partial /\partial t_i)$. Formula~(\ref{index2}) then shows that all the remaining singularities have to be saddle-nodes since the sum of the indices is~$-1$. Nonetheless, condition~2 again implies that the holonomy of $\pi^{-1} (0) \setminus \{ p_1 ,\ldots , p_r \}$ is trivial. Thus no saddle-node can appear on $\pi^{-1} (0)$. In other words $r$ must be equal to~$1$ which is impossible.
\noindent {\it Proof of Proposition(\ref{prop4.2})}\,: Considering the normal forms~(\ref{forca1}) and~(\ref{forca2}), we can suppose that $h_i (0,0) \neq 0$. Set $\epsilon_i = ({\rm ord_{\pi^{-1} (0)}} (\widetilde{Y})) m_i - n_i d_i$ so that $\epsilon_i \in \{ -1, 0 ,1\}$ (cf. Lemma~(\ref{le3.1})). Alternatively we let $d_i= ({\rm ord_{\pi^{-1} (0)}} (\widetilde{Y})) m_i/n_i - \epsilon_i /n_i$.
On the other hand, Formula~(\ref{index2}), in the present context, becomes $$ \sum_{i=1}^r m_i /n_i = 1 \, , $$ where $m_i=0$ if and only if $p_i$ is a saddle-node and $n_i \neq 0$. Since all $m_i ,n_i$ are non-negative, only one of the $n_i$'s can be equal to~$1$ provided that $m_i \neq 0$. In this case, we must have $m_i=1$ as well and the remaining singularities are saddle-nodes. We claim that this implies that $h_i (0,0)$ in~(\ref{forca1}) is always different from {\it zero}. Indeed if, say $h_1 (0,0) =0$, then $m_1 =n_1 =1$ and the remaining singularities are saddle-nodes. The fact that the holonomy associated to the strong invariant manifold of a saddle-node has infinite order implies that this case cannot occur. The resulting contradiction establishes the claim.
Now the fact that $h_i (0,0) \neq 0$ shows that $\sum_{i=1}^r d_i = {\rm ord}_{(0,0)} (f) - {\rm ord}_{(0,0)} (g)$. Therefore \begin{eqnarray*} {\rm ord}_{(0,0)} (f) - {\rm ord}_{(0,0)} (g) & = & \sum_{i=1}^r d_i = ({\rm ord_{\pi^{-1} (0)}} (\widetilde{Y})) \sum_{i=1}^r m_i /n_i - \sum_{i=1}^r \epsilon_i /n_i \\
& = & - \sum_{i=1}^r \epsilon_i /n_i + {\rm ord}_{(0,0)} (f) + r-1 -{\rm ord}_{(0,0)} (g) -1 \, . \end{eqnarray*} In other words, one has \begin{equation} \sum_{i=1}^s (1- \epsilon_i /n_i) = 2 + s -r <2\, .\label{2sr=2} \end{equation} As mentioned, only one of the $n_i$'s may be equal to~$1$. In this case the remaining singularities are saddle-nodes and we obtain the model~$Z_{1,00}$.
Next assume that all the $n_i$'s are strictly greater than~$1$. In particular $1-\epsilon_i /n_i \geq 1/2$. The only new possibility is to have $n_1=n_2=2$ and $r-s=1$. Thus we obtain the model~$Z_{0,12}$ completing the proof of our proposition.
\fbox{}
\begin{obs} \label{locallinear} {\rm To complement the description of the Models $Z_{1,11}$, $Z_{0,12}$ and $Z_{1,00}$, we want to point out that except for the saddle-nodes, all the singularities appearing in the exceptional divisor after blowing-up are linearizable. Indeed this results from the finiteness of the local holonomies associated to their separatrizes. To check that these holonomies are finite we just have to use an argument analogous to the one employed in Remark~(\ref{ipc}).
As a consequence of the above fact, we conclude that the two saddle-nodes appearing as singularities of $\widetilde{\mathcal F}_{1,00}$ are identical. In particular either both have convergent weak invariant manifold or both have divergent weak invariant manifold. } \end{obs}
\section{The combinatorics of the reduction of singularities}
\hspace{0.4cm} In this last section we are going to prove our main results. Since we are going to work in local coordinates, we can consider a meromorphic semi-complete vector field $Y$ defined around $(0,0) \in {\mathbb C}^2$. As usual let ${\mathcal F}$ be the foliation associated to $Y$. In view of Seidenberg's theorem \cite{sei}, there exists a sequence of punctual blow-ups $\pi_j$ together with singular foliations $\widetilde{\mathcal F}^j$, \begin{equation} {\mathcal F} = \widetilde{\mathcal F}^0 \stackrel{\pi_1}{\longleftarrow} \widetilde{\mathcal F}^1 \stackrel{\pi_2}{\longleftarrow} \cdots \stackrel{\pi_r}{\longleftarrow} \widetilde{\mathcal F}^r \, , \label{resotree} \end{equation} where $\widetilde{\mathcal F}^j$ is the blow-up of $\widetilde{\mathcal F}^{j-1}$, such that all singularities of $\widetilde{\mathcal F}^r$ are simple. Furthermore each $\pi_j$ is centered at a singular point where $\widetilde{\mathcal F}^{j-1}$ has vanishing eigenvalues. The sequence $(\widetilde{\mathcal F}^j , \pi_j)$ is said to be the {\it resolution tree} of ${\mathcal F}$. Fixed $j \in \{ 1, \ldots ,r \}$, we denote by ${\mathcal E}^j$ the total exceptional divisor $(\pi_1 \circ \cdots \circ \pi_j)^{-1} (0,0)$ and by $D^j$ the irreducible component of ${\mathcal E}^j$ introduced by $\pi_j$. Note that $D^j$ is a rational curve given as $\pi_j^{-1} (\tilde{p}^{j-1})$ where $\tilde{p}^{j-1}$ is a singularity of $\widetilde{\mathcal F}^{j-1}$. Finally we identify curves and their proper transforms in the obvious way. Also $\widetilde{Y}^j$ will stand for the corresponding blow-up of $Y$.
Throughout this section $Y, {\mathcal F}$ are supposed to verify the following assumptions:
\noindent {\bf A}. $Y = y^{-k} fZ$ where $k \geq 2$, $Z$ is a holomorphic vector field having an isolated singularity at $(0,0) \in {\mathbb C}^2$ and $f$ is a holomorphic function.
\noindent {\bf B}. Assumptions~2 and~3 of Section~4.
\noindent {\bf C}. The origin is not a dicritical singularity of ${\mathcal F}$.
\noindent It immediately results from the above assumptions that the axis $\{ y=0\}$ is a smooth separatrix of ${\mathcal F}$. Letting $Z= f{\partial /\partial x} + g {\partial /\partial y}$, recall that the multiplicity of ${\mathcal F}$ along $\{ y=0\}$ (or the order of ${\mathcal F}$ w.r.t. $\{y=0\}$ and $(0,0)$) is by definition the order at $0 \in {\mathbb C}$ of the function $x \mapsto f(x,0)$.
The main result of this section is Theorem~(\ref{aque11}) below.
\begin{teo} \label{aque11} Let $Y, {\mathcal F}$ be as above. Suppose that the divisor of zeros/poles of $\widetilde{Y}^r$ contains ${\mathcal E}^r$ (i.e. there is no component $D^j$ of ${\mathcal E}^r$ where $\widetilde{Y}^r$ is regular). Then the multiplicity of ${\mathcal F}$ along $\{ y=0\}$ is at most~$2$ (in particular the order of ${\mathcal F}$ at $(0,0)$ is not greater than~$2$). \end{teo}
As a by-product of our proof, the cases in which the multiplicity of ${\mathcal F}$ along $\{ y=0\}$ is~$2$ are going to be characterized as well. Also note that assumption~{\bf C} ensures that all the components $D^j$ are invariant by ${\mathcal F}^r$. Moreover none of the singularities of $\widetilde{\mathcal F}^r$ is dicritical. In what follows we shall obtain the proof of Theorem~(\ref{aque11}) through a systematic analysis of the structure of the resolution tree of ${\mathcal F}$.
\begin{obs} \label{obs11} {\rm Let ${\mathcal F}$ be a foliation defined on a neighborhood of $(0,0) \in {\mathbb C}^2$ and consider a separatrix ${\mathcal S}$ of ${\mathcal F}$. Denote by $\widetilde{\mathcal F}$ the blow-up of ${\mathcal F}$ and by $\widetilde{{\mathcal S}}$ the proper transform of ${\mathcal S}$. Naturally $\widetilde{{\mathcal S}}$ constitutes a separatrix for some singularity $p$ of $\widetilde{\mathcal F}$. In the sequel the elementary relation \begin{equation} {\rm Ind}_{p} (\widetilde{\mathcal F} , \widetilde{{\mathcal S}}) = {\rm Ind}_{(0,0)} ({\mathcal F} ,{\mathcal S}) -1 \, \label{baixa1} \end{equation} will often be used.} \end{obs}
Consider the resolution tree~(\ref{resotree}) of ${\mathcal F}$. By assumption ${\mathcal E}^r$ contains a rational curve $D^r$ of self-intersection~$-1$. Blowing-down (collapsing) this curve yields a foliation $\widetilde{\mathcal F}^{r_1}$ together with the total exceptional divisor ${\mathcal E}^{r_1}$. If ${\mathcal E}^{r_1}$ contains a rational curve with self-intersection~$-1$ where all the singularities of $\widetilde{\mathcal F}^{r_1}$ are simple, we then continue by blowing-down this curve. Proceeding recurrently in this way, we eventually arrive at a foliation $\widetilde{\mathcal F}^{r_1}$, $1 \leq r_1 < r$, together with an exceptional divisor ${\mathcal E}^{r_1}$ such that every irreducible component of ${\mathcal E}^{r_1}$ with self-intersection~$-1$ contains a singularity of $\widetilde{\mathcal F}^{r_1}$ with vanishing eigenvalues. Let $\widetilde{Y}^{r_1}$ be the vector field corresponding to $\widetilde{\mathcal F}^{r_1}, \; {\mathcal E}^{r_1}$. Using Proposition~(\ref{prop4.2}), we conclude the following:
\begin{lema} \label{sequence1} ${\mathcal E}^{r_1}$ contains (at least) one rational curve $D^{r_1}$ of self-intersection~$-1$. Moreover if $p$ is a singularity of $\widetilde{\mathcal F}^{r_1}$ belonging to $D^{r_1}$ then either $p$ is simple for $\widetilde{\mathcal F}^{r_1}$ or $\widetilde{Y}^{r_1}$ has one of the normal forms $Z_{1,11},\; Z_{0,12}, \; Z_{1,00}$ around $p$. Finally there is at least one such singularity $p_1$ which is not simple for $\widetilde{\mathcal F}^{r_1}$.
\fbox{} \end{lema}
The next step is to consider the following description of the models $Z_{1,11}, \; Z_{0,12}, \; Z_{1,00}$ (and their respective associated foliations ${\mathcal F}_{1,11} , \; {\mathcal F}_{0,12}, \; {\mathcal F}_{1,00}$) which results at once from the definition of these models given in Section~5. While this description is slightly less precise than the previous one, it emphasizes the properties more often used in the sequel.
\noindent $\bullet$ $Z_{1,11}, \; {\mathcal F}_{1,11}$\,: ${\mathcal F}_{1,11}$ has exactly 2 separatrizes ${\mathcal S}_1 ,{\mathcal S}_2$ which are smooth, transverse and of index {\it zero}. ${\mathcal F}_{1,11}$ has order~$2$ at the origin. The multiplicity of ${\mathcal F}_{1,11}$ along ${\mathcal S}_1 , \; {\mathcal S}_2$ is~$2$. The vector field $Z_{1,11}$ has poles of order~$1$ on each of the separatrizes ${\mathcal S}_1$, ${\mathcal S}_2$.
\noindent $\bullet$ $Z_{0,12}, \; {\mathcal F}_{0,12}$\,: ${\mathcal F}_{0,12}$ has order~$2$ at the origin and 2 smooth, transverse separatrizes coming from the separatrizes associated to the singularities of eigenvalues~$-1,2$ of $\widetilde{\mathcal F}_{0,12}$ (they are denoted ${\mathcal S}_1 ,{\mathcal S}_2$ and called strong separatrizes of ${\mathcal F}_{0,12}$). The multiplicity of ${\mathcal F}_{0,12}$ along ${\mathcal S}_1 , \; {\mathcal S}_2$ is~$2$ and the corresponding indices are both~$-1$. ${\mathcal F}_{0,12}$ has still a formal third separatrix ${\mathcal S}_3$, referred to as the weak separatrix of ${\mathcal F}_{0,12}$, which may or may not be convergent. The vector field $Z_{0,12}$ has poles of order~$d \in {\mathbb N}^{\ast}$ on both ${\mathcal S}_1 ,{\mathcal S}_2$.
\noindent $\bullet$ $Z_{1,00}, \; {\mathcal F}_{1,00}$\,: ${\mathcal F}_{1,00}$ has order~$2$ and 1 smooth separatrix ${\mathcal S}_1$ coming from the singularity of $\widetilde{\mathcal F}_{1,00}$ whose eigenvalues are~$-1,1$ which will be called the strong separatrix of ${\mathcal F}_{1,00}$. Note that the multiplicity of ${\mathcal F}_{1,00}$ along ${\mathcal S}_1$ is~$2$ and the index of ${\mathcal S}_1$ is {\it zero}. $\widetilde{\mathcal F}_{1,00}$ also has 2 additional formal separatrizes ${\mathcal S}_2 ,{\mathcal S}_3$ coming from the weak invariant manifold of the saddle-nodes singularities of $\widetilde{\mathcal F}_{1,00}$ and, accordingly, called the weak separatrizes of ${\mathcal F}_{1,00}$. Naturally the weak separatrizes of ${\mathcal F}_{1,00}$ may or may not converge. Finally the vector field $Z_{1,00}$ has poles of order~$d \in {\mathbb N}^{\ast}$ on ${\mathcal S}_1$.
\begin{obs} \label{combin} {\rm The content of this remark will not be proved in these notes and therefore will not be formally used either. Nonetheless it greatly clarifies the structure of the combinatorial discussion that follows. Consider a meromorphic vector field $X$ having a smooth separatrix ${\mathcal S}$. Using appropriate coordinates $(x,y)$ we can identify ${\mathcal S}$ with $\{ y=0 \}$ and write $X$ as $y^d (f{\partial /\partial x} + yg {\partial /\partial y})$ where $d \in {\mathbb Z}$ and $f(x,0)$ is a non-trivial meromorphic function. We define the {\it asymptotic order} of $X$ at ${\mathcal S}$ (at $(0,0)$), ${\rm ord\,asy}_{(0,0)} (X ,{\mathcal S})$, by means of the formula \begin{equation} {\rm ord\,asy}_{(0,0)} (X, {\mathcal S}) = {\rm ord}_0 f(x,0) + d. {\rm Ind}_{(0,0)} ({\mathcal F} ,{\mathcal S}) \, \label{defioras} \end{equation} where ${\mathcal F}$ is the foliation associated to $X$. It can be proved that $0 \leq {\rm ord\,asy}_{(0,0)} (X ,{\mathcal S}) \leq 2$ provided that $X$ is semi-complete. Besides, if ${\mathcal S}$ is induced by a global rational curve (still denoted by ${\mathcal S}$) and $p_1 ,\ldots, p_s$ are the singularities of ${\mathcal F}$ on ${\mathcal S}$, then we have \begin{equation} \sum_{i=1}^s {\rm ord\,asy}_{p_i} (X, {\mathcal S}) = 2 \label{somou2} \end{equation} provided that $X$ is semi-complete on a neighborhood of ${\mathcal S}$. Note that the above formula indeed generalizes formula~(\ref{2sr=2}).} \end{obs}
Now we go back to the vector field $\widetilde{Y}^{r_1}$ on a neighborhood of $D^{r_1}$. Again we denote by $p_1 ,\ldots ,p_s$ the singularities of $\widetilde{\mathcal F}^{r_1}$ on $D^{r_1}$ and, without loss of generality, assume that $\widetilde{Y}^{r_1}$ admits one of the normal forms $Z_{1,11},\; Z_{0,12}, \; Z_{1,00}$ around $p_1$.
\begin{lema} \label{sequence2} All the singularities $p_2 ,\ldots ,p_s$ are simple for $\widetilde{\mathcal F}^{r_1}$. \end{lema}
\noindent {\it Proof}\,: We have to prove that there cannot exist two singularities with one of the normal forms $Z_{1,11},\; Z_{0,12}, \; Z_{1,00}$ on $D^{r_1}$. Clearly we cannot have two singularities of type $Z_{1,11}$ otherwise a ``generic'' regular orbit of $\widetilde{Y}^{r_1}$ would contain two singular points which contradicts assumption~{\bf B}. Indeed the fact that a ``generic'' regular orbit of $\widetilde{Y}^{r_1}$ effectively intersects the divisor of zeros of both singularities results from the method employed in the preceding section. In the present case the discussion is simplified since the singularity appearing in the intersection of the two irreducible components of the exceptional divisor is linear with eigenvalues~$-1,1$ (cf. description of $Z_{1,11}$).
To prove that the other combinations are also impossible, it is enough to repeat the argument employed in Section~5, in particular using the fact that the order $d_1\neq 0$ of $\widetilde{Y}^{r_1}$ on $D^{r_1}$ does not depend of the singularity $p_i$. If the reader takes for grant Formula~(\ref{somou2}), this verification can easily be explained. In fact, the asymptotic order of $Z_{0,12}$ (resp. $Z_{1,00}$) with respect to its strong separatrizes is already~$2$. Furthermore the asymptotic order of $Z_{1,11}$ with respect to its separatrizes is~$1$. Since the asymptotic order of a semi-complete singularity cannot be negative, it becomes obvious that two such singularities cannot co-exist on $D^{r_1}$ provided that $Y^{r_1}$ is semi-complete.
\fbox{}
Now we analyse each of the three possible cases.
\noindent $\bullet$ The normal form of $\widetilde{Y}^{r_1}$ around $p_1$ is $Z_{1,11}$: First recall that $D^{r_1}$ is an irreducible component of order~$1$ of the pole divisor of $\widetilde{Y}^{r_1}$. Since the number of singularities of regular orbits of $\widetilde{Y}^{r_1}$ is at most~$1$, it follows that none of the remaining singularities $p_2 ,\ldots , p_s$ can be a LJ-singularity for $\widetilde{\mathcal F}^{r_1}$. Otherwise there would be another curve of {\it zeros} of $\widetilde{Y}^{r_1}$ which is not invariant by $\widetilde{\mathcal F}^{r_1}$ so that ``generic'' regular orbits of $\widetilde{Y}^{r_1}$ would have~$2$ singularities (cf. above). By the same reason the holonomy of $D^{r_1} \setminus \{ p_2 ,\ldots ,p_s\}$ with respect to $\widetilde{\mathcal F}^{r_1}$ must be trivial. This implies that none of the singularities $p_2 ,\ldots ,p_s$ is a saddle-node for $\widetilde{\mathcal F}^{r_1}$. In fact, by virtue of Theorem~(\ref{selano}), a saddle-node must have strong invariant manifold contained in $D^{r_1}$ which ensures that the above mentioned holonomy is non-trivial. Then we conclude that all the remaining singularities $p_2 ,\ldots ,p_s$ have eigenvalues $m_i ,-n_i$ ($i=2,\ldots ,s$) with $m_i ,n_i \in {\mathbb N}^{\ast}$. Once again the fact that the holonomy of $D^{r_1} \setminus \{ p_2 ,\ldots ,p_s\}$ is trivial shows that $m_i /n_i \in {\mathbb N}^{\ast}$. Finally Formula~(\ref{index2}) implies that $s=2$ and $m_2=n_2 =1$. An immediate application of Formulas~(\ref{ordem2}) and~(\ref{equa2}) (or yet Formula~(\ref{somou2})) shows that the separatrix of $p_2$ transverse to $D^{r_1}$ is a component of order~$1$ of the pole divisor of $\widetilde{Y}^{r_1}$. Finally we denote by $Z_{1,11}^{(1)}$ (resp. ${\mathcal F}_{1,11}^{(1)}$) the local vector field (resp. holomorphic foliation) resulting from the collapsing of $D^{r_1}$. Summarizing one has:
\noindent $\bullet$ $Z_{1,11}^{(1)}, \; {\mathcal F}_{1,11}^{(1)}$\,: The foliation ${\mathcal F}_{1,11}^{(1)}$ has exactly two separatrizes ${\mathcal S}_1 ,{\mathcal S}_2$ which are smooth, transverse and of indices respectively equal to~$1$ and~$0$. The order of ${\mathcal F}_{1,11}^{(1)}$ at the origin is~$2$ as well as the multiplicity of ${\mathcal F}_{1,11}^{(1)}$ along ${\mathcal S}_1 , {\mathcal S}_2$. The vector field $Z^{(1)}_{1,11}$ has poles of order~$1$ on ${\mathcal S}_1 ,{\mathcal S}_2$.
Now let us discuss the second case.
\noindent $\bullet$ The normal form of $\widetilde{Y}^{r_1}$ around $p_1$ is $Z_{0,12}$\,: Recall that $D^{r_1}$ is an irreducible component of order $d\neq 0$ of the pole divisor of $\widetilde{Y}^{r_1}$. Repeating the argument of the previous section, we see that the singularities $p_2 , \ldots ,p_s$ can be neither saddle-nodes nor LJ-singularities. Again this can directly be seen from Formula~(\ref{somou2}): by virtue of Lemma~(\ref{le3.3}) and Theorem~(\ref{selano}), both types of singularities in question have asymptotic order equal to~$1$. Nonetheless the asymptotic order of $Z_{0,12}$ is already~$2$ which implies the claim. It follows that $\widetilde{\mathcal F}^{r_1}$ has eigenvalues $m_i, -n_i$ at each of the remaining singularities $p_2 , \ldots, p_s$ ($m_i ,n_i \in {\mathbb N}^{\ast}$). In particular the index of $D^{r_1}$ w.r.t. $\widetilde{\mathcal F}^{r_1}$ around each $p_i$ is strictly negative. Hence Formula~(\ref{index2}) shows that $p_1$ is, in fact, the unique singularity of $\widetilde{\mathcal F}^{r_1}$ on $D^{r_1}$. We denote by $Z_{0,12}^{(1)}$ (resp. ${\mathcal F}_{0,12}^{(1)}$) the local vector field (resp. holomorphic foliation) arising from the collapsing of $D^{r_1}$. Thus:
\noindent $\bullet$ $Z_{0,12}^{(1)}, \; {\mathcal F}_{0,12}^{(1)}$\,: The order of ${\mathcal F}_{0,12}^{(1)}$ at the origin is~$1$, besides the linear part of ${\mathcal F}_{0,12}^{(1)}$ is nilpotent. ${\mathcal F}_{0,12}^{(1)}$ has one strong separatrix ${\mathcal S}_1$ obtained through the strong separatrix of ${\mathcal F}_{0,12}$ which is transverse to $D^{r_1}$. This separatrix is smooth and has index {\it zero}, furthermore the multiplicity of ${\mathcal F}_{0,12}^{(1)}$ along ${\mathcal S}_1$ is~$2$. The foliation ${\mathcal F}_{0,12}^{(1)}$ has still another formal weak separatrix which may or may not converge. Finally the vector field $Z_{0,12}^{(1)}$ has poles of order $d\neq 0$ on ${\mathcal S}_1$.
Finally we have:
\noindent $\bullet$ The normal form of $\widetilde{Y}^{r_1}$ around $p_1$ is $Z_{1,00}$\,: Note that $D^{r_1}$ is an irreducible component of order~$d\neq 0$ of the pole divisor of $\widetilde{Y}^{r_1}$ (cf. description of the vector field $Z_{1,00}$). As before the remaining singularities cannot be saddle-nodes or LJ-singularities (the asymptotic order of $Z_{1,00}$ w.r.t. $D^{r_1}$ is already~$2$). It follows that $\widetilde{\mathcal F}^{r_1}$ has eigenvalues $m_i, -n_i$ at the remaining singularities $p_2 ,\ldots ,p_s$ ($m_i, n_i \in {\mathbb N}$). Around each singularity $p_i$ ($i=2,\ldots ,s$), the vector field $\widetilde{Y}^{r_1}$ can be written as $$ x_i^{-d} t_i^{k_i} h_i [m_i x_i (1+ {\rm h.o.t.}) \partial /\partial x_i - n_i t_i (1+ {\rm h.o.t.}) \partial /\partial t_i] $$ where $h_i (0,0) \neq 0$. We just have to repeat the argument of Section~$5$, here we summarize the discussion by using the ``fact'' that the asymptotic order of $\widetilde{Y}^{r_1}$ w.r.t. $D^{r_1}$ has to be {\it zero} at each $p_i$. Indeed, this gives us that $k_i =-1+dm_i /n_i$. Comparing this with Lemma~(\ref{le3.1}), it results that $n_i=1$. Hence Formula~(\ref{index2}) informs us that $s=2$ and $m_2 =n_2 =1$. It also follows that $k_2 =d-1$. Let us denote by $Z_{1,00}^{(1)}$ (resp. ${\mathcal F}_{1,00}^{(1)}$) the local vector field (resp. holomorphic foliation) arising from the collapsing of $D^{r_1}$.
\noindent $\bullet$ $Z_{1,00}^{(1)}, \; {\mathcal F}_{1,00}^{(1)}$\,: The order of ${\mathcal F}_{1,00}^{(1)}$ at the origin is~$2$ and it has one strong separatrix ${\mathcal S}_1$ obtained through the separatrix of $p_2$ which is transverse to $D^{r_1}$. This separatrix is smooth and has index {\it zero}, furthermore the multiplicity of ${\mathcal F}_{1,00}^{(1)}$ along ${\mathcal S}_1$ is~$2$. The foliation ${\mathcal F}_{1,00}^{(1)}$ has still two formal weak separatrizes which may or may not converge. Finally the vector field $Z_{1,00}^{(1)}$ has poles of order $d\neq 0$ on ${\mathcal S}_1$.
Summarizing what precedes, we easily obtain the following analogue of Lemma~(\ref{sequence1}):
\begin{lema} \label{sequence4} ${\mathcal E}^{r_2}$ contains (at least) one rational curve $D^{r_2}$ of self-intersection~$-1$. Moreover if $p$ is a singularity of $\widetilde{\mathcal F}$ belonging to $D^{r_2}$ then either $p$ is simple for $\widetilde{\mathcal F}^{r_2}$ or $\widetilde{Y}^{r_2}$ has one of the normal forms $Z_{1,11},\; Z_{0,12}, \; Z_{1,00}, \; Z_{1,11}^{(1)}, \; Z_{0,12}^{(1)}, \; Z_{1,00}^{(1)}$ around $p$. Finally there is at least one such singularity $p_1$ which is not simple for $\widetilde{\mathcal F}^{r_2}$.
\fbox{} \end{lema}
The argument is now by recurrence, we shall discuss only the next step in details. Again $p_1 ,\ldots ,p_s$ are the singularities of $\widetilde{\mathcal F}^{r_2}$ on $D^{r_2}$ and, without loss of generality, $\widetilde{Y}^{r_2}$ admits one of the normal forms $Z_{1,11},\; Z_{0,12}, \; Z_{1,00}, \;Z_{1,11}^{(1)}, \; Z_{0,12}^{(1)}, \; Z_{1,00}^{(1)}$ around $p_1$.
\begin{lema} \label{sequence5} All the singularities $p_2 ,\ldots ,p_s$ are simple for $\widetilde{\mathcal F}^{r_2}$. \end{lema}
\noindent {\it Proof}\,: The proof is as in Lemma~(\ref{sequence2}). Since no leaf of $\widetilde{\mathcal F}^{r_2}$ can meet the divisor of zeros of $\widetilde{Y}^{r_2}$ in more than one point, it results that we can have at most one singularity $Z_{1,11}$ or $Z_{1,11}^{(1)}$ on $D^{r_2}$. The fact that the models $Z_{0,12}, \; Z_{1,00}, \; Z_{0,12}^{(1)}$ and $Z_{1,00}^{(1)}$ cannot be combined among them or with $Z_{1,11}, \; Z_{1,11}^{(1)}$ follows from the natural generalization of the method of Section~5 (which is again explained by the fact that these $3$ vector fields have asymptotic order~$2$ w.r.t. $D^{r_2}$). The lemma is proved.
\fbox{}
So we have obtained three new possibilities according to whether the normal form of $\widetilde{Y}^{r_2}$ around $p_1$ is $Z_{1,11}^{(1)}, \; Z_{0,12}^{(1)}$ or $Z_{1,00}^{(1)}$. Let us analyse them separately.
\noindent $\bullet$ The normal form of $\widetilde{Y}^{r_2}$ around $p_1$ is $Z_{1,11}^{(1)}$\,: Note that ${\mathcal F}_{1,11}^{(1)}$ has two separatrizes which may coincide with $D^{r_2}$, one with index {\it zero} and other with index~$1$. In any case, $D^{r_2}$ is an irreducible component of order~$1$ of the divisor of poles of $\widetilde{Y}^{r_2}$. Suppose first that the index of $D^{r_2}$ w.r.t. ${\mathcal F}^{r_2}$ at $p_1$ is {\it zero}. The discussion then goes exactly as in the case of $Z_{1,11}$. We conclude that $s=2$ and that ${\mathcal F}^{r_2}$ has eigenvalues~$-1,1$ at $p_2$. The fact that the holonomy of $D^{r_2} \setminus \{ p_1 ,p_2 \}$ w.r.t. ${\mathcal F}^{r_2}$ is trivial also implies that ${\mathcal F}^{r_2}$ is linearizable at $p_2$. Formulas~(\ref{ordem2}) and~(\ref{equa2}) show that the separatrix of $p_2$ transverse to $D^{r_2}$ is a component with order~$1$ of the divisor of poles of $\widetilde{Y}^{r_2}$. Finally we denote by $Z_{1,11}^{(2)}$ (resp. ${\mathcal F}_{1,11}^{(2)}$) the local vector field (resp. holomorphic foliation) arising from the collapsing of $D^{r_2}$. One has
\noindent $\bullet$ $Z_{1,11}^{(2)}, \; {\mathcal F}_{1,11}^{(2)}$\,: The foliation ${\mathcal F}_{1,11}^{(2)}$ has exactly 2 separatrizes ${\mathcal S}_1 ,{\mathcal S}_2$ which are smooth, transverse and of indices respectively equal to {\it zero} and~$2$. ${\mathcal F}_{1,11}^{(2)}$ has order~$2$ at the origin and its multiplicity along ${\mathcal S}_1 , \; {\mathcal S}_2$ is~$2$. The vector field $Z_{1,11}^{(2)}$ has poles of order~$1$ on each of the separatrizes ${\mathcal S}_1$, ${\mathcal S}_2$.
Now let us prove that the index of $D^{r_2}$ w.r.t. ${\mathcal F}^{r_2}$ at $p_1$ cannot be~$1$. Suppose for a contradiction that this index is~$1$. Again the triviality of the holonomy of the regular leaf contained in $D^{r_2}$ implies that all the singularities $p_2 ,\ldots ,p_s$ are linearizable with eigenvalues~$-1,1$. Hence Formula~(\ref{index2}) ensures that $s=3$. In turn, Formula~(\ref{equa2}) shows that the separatrix of $p_2$ (resp. $p_3$) transverse to $D^{r_2}$ is a component with order~$1$ of the pole divisor of $\widetilde{Y}^{r_2}$. This is however impossible in view of Formula~(\ref{somou2}). Alternate, we can observe that the divisor of poles of the vector field obtained by collapsing $D^{r_2}$ consists of three smooth separatrizes. By virtue of assumptions {\bf A, B, C} one of them must be the proper transform of $\{ y=0\}$. In particular its order as component of the pole divisor should be $k \geq 2$, thus providing the desired contradiction.
Next one has:
\noindent $\bullet$ The normal form of $\widetilde{Y}^{r_2}$ around $p_1$ is $Z_{0,12}^{(1)}$\,: The divisor $D^{r_2}$ constitutes a separatrix of $\widetilde{\mathcal F}^{r_2}$ at $p_1$. Besides $\widetilde{Y}^{r_2}$ has poles of order $d \neq 0$ on $D^{r_2}$ (cf. description of $Z_{0,12}^{(1)}$). Note also that the index of $D^{r_2}$ w.r.t. $\widetilde{\mathcal F}^{r_2}$ at $p_1$ is {\it zero}. Our standard method shows that the remaining singularities cannot be saddle-nodes or LJ-singularities (the asymptotic order of $Z_{0,12}^{(1)}$ w.r.t. $D^{r_2}$ is already~$2$). Thus $\widetilde{\mathcal F}^{r_2}$ has eigenvalues $m_i, -n_i$ at the singularity $p_i$, $i=2,\ldots ,s$ ($m_i, n_i \in {\mathbb N}$). On a neighborhood of $p_i$, $\widetilde{Y}^{r_2}$ is given in appropriate coordinates by $$ x_i^{-d} t_i^{k_i} h_i [m_i x_i (1+ {\rm h.o.t.}) \partial /\partial x_i - n_i t_i (1+ {\rm h.o.t.}) \partial /\partial t_i] $$ where $h_i (0,0) \neq 0$. It is enough to repeat the discussion of Section~$5$. Using the ``fact'' that the asymptotic order of $\widetilde{Y}^{r_2}$ w.r.t. $D^{r_2}$ has to be {\it zero} at each $p_i$, this can be summarized as follows. The asymptotic order is given by $k_i + 1 -dm_i /n_i$, since it must equal {\it zero}, one has $k_i =-1 +dm_i/n_i$. Comparing with Lemma~(\ref{le3.1}), we conclude that $n_i=1$. Hence Formula~(\ref{index2}) implies that $s=2$ and $m_2=n_2 =1$. In particular $k_2=d-1$. We then denote by $Z_{0,12}^{(2)}$ (resp. ${\mathcal F}_{0,12}^{(2)}$) the local vector field (resp. holomorphic foliation) arising from the collapsing of $D^{r_2}$.
\noindent $\bullet$ $Z_{0,12}^{(2)}, \; {\mathcal F}_{0,12}^{(2)}$\,: The order of ${\mathcal F}_{0,12}^{(2)}$ at the origin is~$2$. ${\mathcal F}_{0,12}^{(2)}$ has one strong separatrix ${\mathcal S}_1$ obtained through the strong separatrix of ${\mathcal F}_{0,12}^{(1)}$ which is transverse to $D^{r_2}$. This separatrix is smooth and has index {\it zero}, furthermore the multiplicity of ${\mathcal F}_{0,12}^{(2)}$ along ${\mathcal S}_1$ is~$2$. The foliation ${\mathcal F}_{0,12}^{(2)}$ has still another formal weak separatrix which may or may not converge. Finally the vector field $Z_{0,12}^{(2)}$ has poles of order $d\neq 0$ on ${\mathcal S}_1$.
Finally let us consider $Z_{1,00}^{(1)}$.
\noindent $\bullet$ The normal form of $\widetilde{Y}^{r_2}$ around $p_1$ is $Z_{1,00}^{(1)}$\,: The discussion is totally analogous to the case $Z_{1,00}$. After collapsing $D^{r_2}$, we obtain a local vector field $Z_{1,00}^{(2)}$ (resp. holomorphic foliation ${\mathcal F}_{1,00}^{(2)}$) with the following characteristics:
\noindent $\bullet$ $Z_{1,00}^{(2)}, \; {\mathcal F}_{1,00}^{(2)}$\,: The order of ${\mathcal F}_{1,00}^{(2)}$ at the origin is~$2$ and it has one strong separatrix ${\mathcal S}_1$ obtained through the separatrix of $p_2$ which is transverse to $D^{r_2}$. This separatrix is smooth and has index {\it zero}, furthermore the multiplicity of ${\mathcal F}_{1,00}^{(2)}$ along ${\mathcal S}_1$ is~$2$. The foliation ${\mathcal F}_{1,00}^{(2)}$ has still two formal weak separatrizes which may or may not converge. Finally the vector field $Z_{1,00}^{(2)}$ has poles of order $d\neq 0$ on ${\mathcal S}_1$.
Let us inductively define a sequence of vector fields $Z_{1,11} ^{(n)}$ by combining over a rational curve with self-intersection~$-1$ a model $Z_{1,11}^{(n-1)}$ with a linear singularity $p_2$ having eigenvalues~$-1,1$. The rational curve in question induces a separatrix of index {\it zero} for $Z_{1,11}^{(n-1)}$ and both separatrizes of $p_2$ are components having order~$1$ of the pole divisor of the corresponding vector field. The model $Z_{1,11}^{(n)}$ is then obtained by collapsing the mentioned rational curve. Similarly we also define the sequences $Z_{1,00}^{(n)}, \, Z_{0,12}^{(n)}$.
\noindent {\it Proof of Theorem~(\ref{aque11})}\,: Let $Y,\; {\mathcal F}$ be as in the statement of this theorem. Suppose first that the order of ${\mathcal F}$ at $(0,0)$ is greater than one. We consider a resolution tree~(\ref{resotree}) for ${\mathcal F}$ and the recurrent procedure discussed above. Whenever we collapse a rational curve with self-intersection~$-1$ contained in one of the exceptional divisors ${\mathcal E}^j$, it results a singularity which is either simple or belongs to the list $Z_{1,11}, \; Z_{1,11}^{(n)}, \; Z_{1,00}, \; Z_{1,00}^{(n)},\; Z_{0,12}, \; Z_{0,12}^{(n)}$. After a finite number of steps of the above procedure, we arrive to the original vector field $Y$ (resp. foliation ${\mathcal F}$). Therefore $Y$ must admit one of the above indicated normal forms. Since the divisor of poles of $Y$ is constituted by the axis $\{ y=0\}$, the cases $Z_{1,11},\; Z_{1,11}^{(n)}, \; Z_{0,12}$ cannot be produced (their divisor of poles consist of two irreducible components). The case $Z_{0,12}^{(1)}$ cannot be produced either since the order of the associated foliation is supposed to be greater than or equal to~$2$. Thus we conclude that $Y=Z_{1,00}^{(n)}$ or, for $n\geq 2$, $Y=Z_{0,12}^{(n)}$ and the theorem follows in this case.
Next suppose that the order of ${\mathcal F}$ at $(0,0)$ is~$1$. Clearly if the linear part of ${\mathcal F}$ at $(0,0)$ has rank~$2$ (i.e. the corresponding holomorphic vector field with isolated singularities has~$2$ non-vanishing eigenvalues at $(0,0)$), then the conclusion is obvious. Next suppose that ${\mathcal F}$ is a saddle-node. If $\{ y=0\}$ corresponds to the strong invariant manifold of ${\mathcal F}$ then the statement is obvious. On the other hand, $\{ y=0\}$ cannot be the weak invariant manifold thanks to Theorem~(\ref{selano}).
It only remains to check the case where the linear part of ${\mathcal F}$ at $(0,0)$ is nilpotent. The blow-up $\widetilde{Y}$ (resp. $\widetilde{\mathcal F}$) of $Y$ (resp. ${\mathcal F}$) is such that $\widetilde{\mathcal F}$ has a single singularity $p \in \pi^{-1} (0)$. In addition the order of $\widetilde{\mathcal F}$ at $p$ is necessarily~$2$. On a neighborhood of $p$, the vector field $\widetilde{Y}$ is given by $x^{-k} y^{-k} Z$ where $Z$ is as in assumption~{\bf A}. The assumptions~{\bf B}, {\bf C} are clearly verified as well. An inspection in the preceding discussion immediately shows that it applies equally well to this vector field $\widetilde{Y}$. We conclude that $\widetilde{Y}$ is given on a neighborhood of $p$ by the model $Z_{0,12}$. Hence $Y$ is the model $Z_{0,12}^{(1)}$ completing the proof of the theorem.
\fbox{}
\noindent {\bf {\large $\bullet$ Conclusion}}:
\noindent {\it Proof of Theorem~A}\,: Let $X$ be a complete polynomial vector field in ${\mathbb C}^2$ with degree~$2$ or greater. We denote by ${\mathcal F}_X$ the singular holomorphic foliation induced by $X$ on ${\mathbb C} {\mathbb P} (2)$. We know from Lemma~(\ref{lema2.1}) that the line at infinity $\Delta \subset {\mathbb C} {\mathbb P} (2)$ is invariant under ${\mathcal F}_X$. On the other hand there is a dicritical singularity $p_1$ of ${\mathcal F}_X$ belonging to $\Delta$. Hence Proposition~(\ref{jouano}) ensures that ${\mathcal F}_X$ has a meromorphic first integral on ${\mathbb C} {\mathbb P} (2)$. Besides the generic leaves of ${\mathcal F}_X$ in ${\mathbb C} {\mathbb P} (2)$ are, up to normalization, rational curves (i.e. isomorphic to ${\mathbb C} {\mathbb P} (1)$). According to Saito and Suzuki (cf. \cite{suzu2}), up to polynomial automorphisms of ${\mathbb C}^2$, ${\mathcal F}_X$ is given by a first integral $R$ having one of the following forms: \begin{description}
\item[$\imath$)] $R(x,y) = x$;
\item[$\imath \imath$)] $R(x,y) = x^n y^m$, with ${\rm g.c.d} (m,n) =1$ and $m,n \in {\mathbb Z}$;
\item[$\imath \imath \imath$)] $R(x,y)=x^n(x^ly + P(x))^m$, with ${\rm g.c.d} (m,n) =1$ and $m,n \in {\mathbb Z}$, $l\geq 1$. Moreover $P$ is a polynomial of degree at most $l-1$ satisfying $P (0) \neq 0$. \end{description} To each of these first integrals there correspond the foliations associated to the vector fields $X_1 ={\partial /\partial x}$, $X_2 = mx {\partial /\partial x} - ny {\partial /\partial y}$ and $X_3 = mx^{l+1} {\partial /\partial x} -[(n+lm)x^ly + nP(x) + mxP' (x)] {\partial /\partial y}$. Therefore the original vector field $X$ has the form $X=Q. X_i$, where $Q$ is a polynomial and $i =1,2,3$. If $i=1$ then it follows at once that $Q$ has to be as in the statement in order to produce a complete vector field $X$. Assume now that $i=2$. Using for instance Lemma~(\ref{le3.1}), we see that $Q$ has again the form indicated in the statement unless $X_2 = x {\partial /\partial x} - y {\partial /\partial y}$ in which case we can also have $Q = (xy)^a (x-y)$. Nonetheless we immediately check that the resulting vector field $X$ is not complete in this case.
Finally let us assume that $i=3$. It is again easy to see that the resulting vector field cannot be complete. This follows for example from the fact that $(0,0)$ is a singularity of $X_3$ with trivial eigenvalues (cf. \cite{re4}). The theorem is proved.
\fbox{}
\noindent {\it Proof of Theorem~B}\,: Let us suppose for a contradiction that none of the singularities $p_1 ,\ldots ,p_k$ of ${\mathcal F}_X$ in $\Delta$ is dicritical. We write $X$ as $F.Z$ where $F$ is a polynomial of degree $n\in {\mathbb N}$ and $Z$ is a polynomial vector field of degree $d-n$ and having isolated zeros (where $d$ is the degree of $X$).
We consider the restriction of $X$ to a neighborhood of $p_i$. Clearly this restriction satisfies assumptions~{\bf A}, {\bf B} and~{\bf C} of Section~5. In particular Theorem~(\ref{aque11}) applies to show that the multiplicity of ${\mathcal F}$ along $\Delta$ is at most~$2$. Moreover, if this multiplicity is~$2$, then $X$ admits one of the normal forms $Z_{1,11}, \; Z_{1,11}^{(n)}, \; Z_{1,00}, \; Z_{1,00}^{(n)},\; Z_{0,12}, \; Z_{0,12}^{(n)}$ on a neighborhood of $p_i$.
From Lemma~(\ref{lema2.4}) we know that $k \leq 3$. Since the sum of the multiplicities of ${\mathcal F}$ along $\Delta$ at each $p_i$ is equal to $d-n+1$, it follows that $d-n \leq 5$.
However, if $k=3$, Corollary~(\ref{lema2.3}) shows that the top-degree homogeneous component of $X$ is as in the cases~5, 6, 7 of the corollary in question. Simple calculations guarantee that it is not possible to realize the models $Z_{1,11}, \ldots , Z_{0,12}^{(n)}$ in this way. The case $k=1$ being trivial, we just need to consider the case $k=2$. Now we have $d-n \leq 3$ and again it is very easy to conclude that none of these possibilities leads to a complete polynomial vector field. The resulting contradiction proves the theorem.
\fbox{}
\noindent {\sc Julio C. Rebelo \begin{tabbing} {\rm {\bf Permanent Address}} \hspace{3.8cm} \= {\rm {\bf Current Address}}\\ PUC-Rio \> Institute for Mathematical Sciences\\ R. Marques de S. Vicente 225 \> SUNY at Stony Brook\\ Rio de Janeiro RJ CEP 22453-900 \> Stony Brook NY 11794-3660\\ Brazil \> USA\\ {\it email [email protected]} \> {\it email [email protected]} \end{tabbing}
}
\end{document} |
\begin{document}
\title{On bi-exactness of discrete quantum groups}
\begin{abstract} We extend Ozawa's notion of bi-exactness to discrete quantum groups, and then prove some structural properties of associated von Neumann algebras. In particular, we prove that any non amenable subfactor of free quantum group von Neumann algebras, which is an image of a faithful normal conditional expectation, has no Cartan subalgebras. \end{abstract}
\section{\bf Introduction}
A countable discrete group $\Gamma$ is said to be \textit{bi-exact} (or said to be in \textit{class} $\cal S$)
if it is exact and there exists a map $\mu\colon \Gamma \rightarrow \mathrm{Prob}(\Gamma)\subset \ell^1(\Gamma)$ such that $\limsup_{x\rightarrow\infty}\|\mu(sxt)-s\mu(x)\|_1=0$ for any $s,t\in \Gamma$. This notion was introduced and studied by Ozawa in his book with Brown $\cite[\textrm{Section 15}]{BO08}$. In particular, he gave the following two different characterizations of bi-exactness (Lemma 15.1.4 and Proposition 15.2.3(2) in the book): \begin{itemize}
\item[$\rm (i)$] The group $\Gamma$ is exact and the algebra $L\Gamma$ satisfies condition $\rm (AO)^+$ with the dense $C^*$-algebra $C^*_\lambda(\Gamma)$.
\item[$\rm (ii)$] There exists a unital $C^*$-subalgebra ${\cal B}\subset \ell^\infty (\Gamma)$ such that
\begin{itemize}
\item[$\bullet$] the algebra $\cal B$ contains $c_0(\Gamma)$ (so that we can define ${\cal B}_\infty:={\cal B}/c_0(\Gamma)$);
\item[$\bullet$] the left translation action on $\ell^\infty(\Gamma)$ induces an amenable action on ${\cal B}_{\infty}$, and the right one induces the trivial action on ${\cal B}_{\infty}$.
\end{itemize} \end{itemize} Here we recall that a von Neumann algebra $M\subset \mathbb{B}(H)$ with a standard representation satisfies \textit{condition} $\rm (AO)^+$ $\cite[\textrm{Definition 3.1.1}]{Is12_2}$ if there exists a unital $\sigma$-weakly dense locally reflexive $C^*$-subalgebra $A\subset M$ and a unital completely positive (say, u.c.p.) map $\theta\colon A\otimes A^{\rm op} \rightarrow \mathbb{B}(H)$ such that $\theta(a\otimes b^{\rm op})-ab^{\rm op}\in \mathbb{K}(H)$ for any $a,b \in A$. Here $A^{\rm op}$ means the opposite algebra of $A$, which acts canonically on $H$ by right.
These characterizations of bi-exactness have been used in different contexts. For example, condition (i) is very close to condition (AO) introduced in $\cite{Oz03}$. In particular Ozawa's celebrated theorem in the same paper says that von Neumann algebras of bi-exact groups are solid, meaning that any relative commutant of a diffuse amenable subalgebra is still amenable. Condition (ii) is used to show that hyperbolic groups are bi-exact. This follows from the fact that all hyperbolic groups act amenably on their Gromov boundaries. The definition of bi-exactness itself is also interesting since it is closely related to an \textit{array}, which was introduced and studied in $\cite{CS11}$ and $\cite{CSU11}$.
In the present paper, we generalize bi-exactness to discrete quantum groups in two different ways, which correspond to conditions (i) and (ii). This is not a difficult task since condition (i) does not rely on the structure of group $C^*$-algebras (this is a $C^*$-algebraic condition) and all objects in condition (ii) are easily defined for discrete quantum groups. We then study some basic facts on these conditions. In particular, we prove that free products of free quantum groups or quantum automorphism groups are bi-exact, showing that a condition close to bi-exactness is closed under taking free products.
After these observations, we prove some structural properties of von Neumann algebras associated with bi-exact quantum groups. We first give the following theorem which generalizes $\cite[\textrm{Theorem 3.1}]{PV12}$. This is a natural generalization from discrete groups to discrete quantum groups of Kac type. In the proof, we need only slight modifications since the original proof for groups does not rely on the structures of group von Neumann algebras very much. Note that a special case of this theorem was already generalized in $\cite{Is12_2}$ for general $\rm II_1$ factors.
\begin{ThmA}\label{A} Let $\mathbb{G}$ be a compact quantum group of Kac type, whose dual acts on a tracial von Neumann algebra $B$ as a trace preserving action. Write $M:=\hat{\mathbb{G}}\ltimes B$. Let $q$ be a projection in $M$ and $A\subset qMq$ a von Neumann subalgebra, which is amenable relative to $B$ in $M$. Assume that $\hat{\mathbb{G}}$ is weakly amenable and bi-exact. Then we have either one of the following statements: \begin{itemize}
\item[$\rm (i)$] We have $A\preceq_{M} B$.
\item[$\rm (ii)$] The algebra ${\cal N}_{qMq}(A)''$ is amenable relative to $B$ in $M$ (or equivalently, $L^2(qM)$ is left ${\cal N}_{qMq}(A)''$-amenable as a $qMq$-$B$-bimodule.). \end{itemize} \end{ThmA}
The same arguments as in the group case give the following corollary. As we mentioned, free quantum groups and quantum automorphism groups are examples of the corollary (see also Theorem \ref{C}). \begin{CorA} Let $\mathbb{G}$ be a compact quantum group of Kac type. Assume that $\hat{\mathbb{G}}$ is weakly amenable and bi-exact.
\begin{itemize}
\item[$\rm (1)$] The algebra $L^\infty(\mathbb{G})$ is strongly solid. Moreover any non amenable von Neumann subalgebra of $L^\infty(\mathbb{G})$ has no Cartan subalgebras.
\item[$\rm (2)$] If $\hat{\mathbb{G}}$ is non amenable, then $L^\infty(\mathbb{G})\otimes B$ has no Cartan subalgebras for any finite von Neumann algebra $B$.
\end{itemize} \end{CorA}
We also mention that if a non-amenable, weakly amenable and bi-exact discrete quantum group $\hat{\mathbb{G}}$ admits an action on a commutative von Neumann algebra $L^\infty(X,\mu)$, so that $\hat{\mathbb{G}}\ltimes L^\infty(X,\mu)$ is a $\rm II_1$ factor and $L^\infty(X,\mu)$ is a Cartan subalgebra, then $L^\infty(X,\mu)$ is the unique Cartan subalgebra up to unitary conjugacy. However such an example is not known.
Next we investigate similar results on discrete quantum groups of non Kac type. For this, let us recall condition $\rm (AOC)^+$, which is a similar one to condition $\rm (AO)^+$ on continuous cores. We say a von Neumann algebra $M$ and a faithful normal state $\phi$ on $M$ satisfies \textit{condition} $\rm (AOC)^+$ $\cite[\textrm{Definition 3.2.1}]{Is12_2}$ if there exists a unital $\sigma$-weakly dense $C^*$-subalgebra $A\subset M$ such that the modular action of $\phi$ gives a norm continuous action on $A$ (so that we can define $A \rtimes_{\rm r} \mathbb{R}$), $A \rtimes_{\rm r} \mathbb{R}$ is locally reflexive, and there exists a u.c.p.\ map $\theta\colon (A\rtimes_{\rm r}\mathbb{R})\otimes (A\rtimes_{\rm r}\mathbb{R})^{\rm op} \rightarrow \mathbb{B}(L^2(M,\phi)\otimes L^2(\mathbb{R}))$ such that $\theta(a\otimes b^{\rm op})-ab^{\rm op}\in \mathbb{K}(L^2(M))\otimes\mathbb{B}(L^2(\mathbb{R}))$ for any $a,b\in A\rtimes_{\rm r}\mathbb{R}$.
In $\cite{Is12_2}$, we proved that von Neumann algebras of free quantum groups with Haar states satisfy condition $\rm (AOC)^+$ and then deduced that they do not have modular action (of Haar states) invariant Cartan subalgebras if they have the $\rm W^*$CBAP (and they are non-amenable). This is a partial answer for the absence of Cartan subalgebras in two senses: the $\rm W^*$CBAP of free quantum groups were not known, and we could prove only the absence of special Cartan subalgebras under the $\rm W^*$CBAP.
Very recently De Commer, Freslon and Yamashita solved the first problem. In fact, they proved that free quantum groups and quantum automorphism groups are weakly amenable and hence von Neumann algebras of them have the $\rm W^*$CBAP $\cite{DFY13}$. Thus only the second problem remains to be proved.
In this paper, we solve the second problem. In fact, the following theorem is an analogue of Theorem \ref{A} on continuous cores and it gives a complete answer for our Cartan problem (except for amenability). In the theorem below, $C_h(L^\infty(\mathbb{G}))$ means the continuous core of $L^\infty(\mathbb{G})$ with respect to the Haar state $h$. \begin{ThmA}\label{B} Let $\mathbb{G}$ be a compact quantum group, $h$ a Haar state of $\mathbb{G}$ and $(B,\tau_B)$ a tracial von Neumann algebra. Denote $M:= C_h(L^\infty(\mathbb{G}))\otimes B$ and $\mathrm{Tr}_M:=\mathrm{Tr} \otimes\tau_B$, where $\mathrm{Tr}$ is the canonical trace on $C_h(L^\infty(\mathbb{G}))$. Let $q$ be a $\mathrm{Tr}_M$-finite projection in $M$ and $A\subset qMq$ an amenable von Neumann subalgebra. Assume that $L^\infty(\mathbb{G})$ has the $ W^*$CBAP and $(L^\infty(\mathbb{G}),h)$ satisfies condition $\rm (AOC)^+$ with the dense $C^*$-algebra $C_{\rm red}(\mathbb{G})$. Then we have either one of the following statements: \begin{itemize}
\item[$\rm (i)$] We have $A\preceq_{M} L\mathbb{R}\otimes B$.
\item[$\rm (ii)$] The algebra $L^2(qM)$ is left ${\cal N}_{qMq}(A)''$-amenable as a $qMq$-$B$-bimodule. \end{itemize} \end{ThmA}
\begin{CorA} Let $\mathbb{G}$ be a compact quantum group. Assume that $L^\infty(\mathbb{G})$ has the $ W^*$CBAP and $(L^\infty(\mathbb{G}),h)$ satisfies condition $\rm (AOC)^+$ with the dense $C^*$-subalgebra $C_{\rm red}(\mathbb{G})$.
\begin{itemize}
\item[$\rm (1)$]
Any non amenable von Neumann subalgebra of $L^\infty(\mathbb{G})$, which is an image of a faithful normal conditional expectation, has no Cartan subalgebras.
\item[$\rm (2)$] If $L^\infty(\mathbb{G})$ is non amenable, then $L^\infty(\mathbb{G})\otimes B$ has no Cartan subalgebras for any finite von Neumann algebra $B$. \end{itemize} \end{CorA}
As further examples of the corollaries above, we give the following theorem. Note that this theorem does not say anything about amenability of $\hat{\mathbb{G}}$ and $L^\infty(\mathbb{G})$.
\begin{ThmA}\label{C} Let $\mathbb{G}$ be one of the following compact quantum groups. \begin{itemize}
\item[$\rm (i)$] A co-amenable compact quantum group.
\item[\rm (ii)] The free unitary quantum group $A_u(F)$ for any $F\in\mathrm{GL}(n,\mathbb{C})$.
\item[\rm (iii)] The free orthogonal quantum group $A_o(F)$ for any $F\in\mathrm{GL}(n,\mathbb{C})$.
\item[\rm (iv)] The quantum automorphism group $A_{\rm aut}(B, \phi)$ for any finite dimensional $C^*$-algebra $B$ and any faithful state $\phi$ on $B$.
\item[\rm (v)] The dual of a bi-exact and weakly amenable discrete group $\Gamma$ with $\Lambda_{\rm cb}(\Gamma)=1$.
\item[\rm (vi)] The dual of a free product $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$, where each $\mathbb{G}_i$ is as in $\rm (i)$ to $\rm (v)$ above.
\end{itemize} Then the dual $\hat{\mathbb{G}}$ is weakly amenable and bi-exact. The associated von Neumann algebra $L^\infty(\mathbb{G})$ has the $W^*$CBAP and satisfies condition $\rm (AOC)^+$ with the Haar state and the dense $C^*$-subalgebra $C_{\rm red}(\mathbb{G})$. \end{ThmA}
\noindent Throughout the paper, we always assume that discrete groups are countable, quantum group $C^*$-algebras are separable, von Neumann algebras have separable predual, and Hilbert spaces are separable.
\noindent {\bf Acknowledgement.} The author would like to thank Yuki Arano, Cyril Houdayer, Yasuyuki Kawahigashi, Narutaka Ozawa, Sven Raum and Stefaan Vaes for fruitful conversations. He was supported by JSPS, Research Fellow of the Japan Society for the Promotion of Science and FMSP, Frontiers of Mathematical Sciences and Physics. This research was carried out while he was visiting the Institut de Math\'{e}matiques de Jussieu. He gratefully acknowledges their kind hospitality.
\section{\bf Preliminaries}
\subsection{\bf Compact (and discrete) quantum groups}\label{CQG}
Let $\mathbb{G}$ be a compact quantum group. In the paper, we basically use the following notation. We denote the comultiplication by $\Phi$, Haar state by $h$, the set of all equivalence classes of all unitary corepresentations by $\mathrm{Irred}(\mathbb{G})$, and right and left regular representations by $\rho$ and $\lambda$ respectively. We regard $C_{\rm red}(\mathbb{G}):=\rho(C(\mathbb{G}))$ as our main object and we frequently omit $\rho$ when we see the dense Hopf $*$-algebra. The canonical unitary $\mathbb{V}$ is defined as $\mathbb{V}=\bigoplus_{x\in\mathrm{Irred}\mathbb{G}} U^x$. The GNS representation of $h$ is written as $L^2(\mathbb{G})$ and it has a decomposition $L^2(\mathbb{G})=\sum_{x\in\mathrm{Irred}(\mathbb{G})}\oplus (H_x\otimes H_{\bar{x}})$. Along the decomposition, the modular operator of $h$ is of the form $\Delta^{it}=\sum_{x\in\mathrm{Irred}(\mathbb{G})}\oplus (F_x^{it}\otimes F_{\bar{x}}^{-it})$. The canonical conjugation is denoted by $J$. All dual objects are written with hat (e.g.\ $\hat{\mathbb{G}}, \hat{\Phi},\ldots$). We frequently use the unitary element $U:=J\hat{J}$ which satisfies $U\rho(C(\mathbb{G}))U=\lambda(C(\mathbb{G}))$ and $U\hat{\rho}(c_0(\hat{\mathbb{G}}))U=\hat{\lambda}(c_0(\hat{\mathbb{G}}))$.
{\bf $\bullet$ Crossed products}
For crossed products of quantum groups, we refer the reader to $\cite{BS93}$.
Let $\hat{\mathbb{G}}$ be a discrete quantum group and $A$ a $C^*$-algebra. Recall following notions: \begin{itemize}
\item A (\textit{left}) \textit{action} of $\hat{\mathbb{G}}$ on $A$ is a non-degenerate $*$-homomorphism $\alpha\colon A \rightarrow M(c_0(\hat{\mathbb{G}})\otimes A)$ such that $(\iota\otimes \alpha)\alpha=(\hat{\Phi}\otimes\iota)\alpha$ and $(\hat{\epsilon}\otimes\iota)\alpha(a)=a$ for any $a\in A$, where $\hat{\epsilon}$ is the counit.
\item A \textit{covariant representation} of an action $\alpha$ into a $C^*$-algebra $B$ is a pair $(\theta,X)$ such that
\begin{itemize}
\item $\theta$ is a non-degenerate $*$-homomorphism from $A$ into $M(B)$;
\item $X \in M(c_0(\hat{\mathbb{G}})\otimes B)$ is a unitary representation of $\hat{\mathbb{G}}$;
\item they satisfy the covariant relation $(\iota\otimes \theta)\alpha(a)=X^*(1\otimes \theta(a))X$ ($a\in A$).
\end{itemize} \end{itemize}
Let $\alpha$ be an action of $\hat{\mathbb{G}}$ on $A$. Then for a covariant representation $(\theta,X)$ into $B$, the closed linear span \begin{equation*} \overline{\mathrm{span}}\{\theta(a)(\omega\otimes\iota)(X) \mid a\in A, \omega\in \ell^\infty(\hat{\mathbb{G}})_* \} \subset M(B) \end{equation*} becomes a $C^*$-algebra.
The \textit{reduced crossed product $C^*$-algebra} is the $C^*$-algebra associated with the covariant representation $((\hat{\lambda}\otimes\iota)\alpha, \hat{\mathscr{W}}\otimes 1)$ into ${\cal L}(L^2(\mathbb{G})\otimes A)$, where $\hat{\lambda}$ is the left regular representation, $\hat{\mathscr{W}}:=(\iota\otimes\rho)(\mathbb{V})$, and ${\cal L}(L^2(\mathbb{G})\otimes A)$ is the $C^*$-algebra of all right $A$-module maps on the Hilbert module $L^2(\mathbb{G})\otimes A$. The \textit{full crossed product $C^*$-algebra} is defined as a universal object for all covariant representations. When $A\subset \mathbb{B}(H)$ is a von Neumann algebra, the \textit{crossed product von Neumann algebra} is defined as the $\sigma$-weak closure of the reduced crossed product in $\mathbb{B}(L^2(\mathbb{G})\otimes H)$. We denote them by $\hat{\mathbb{G}}\ltimes_{\rm r}A$, $\hat{\mathbb{G}}\ltimes_{\rm f}A$ and $\hat{\mathbb{G}}\ltimes A$ respectively.
The crossed product von Neumann algebra has a canonical conditional expectation $E_A$ onto $A$, given by \begin{equation*} \hat{\mathbb{G}}\ltimes A \ni \theta(a)(\omega\otimes\iota\otimes\iota)(\hat{\mathscr{W}}\otimes 1) \mapsto a (\omega\otimes h \otimes\iota)(\hat{\mathscr{W}}\otimes 1)\in A, \end{equation*} where $h$ is the Haar state (more explicitly it sends $\theta(a)\rho(u_{i,j}^z)$ to $a h(u_{i,j}^z)$). For a faithful normal state $\phi$ on $A$, we can define a canonical faithful normal state on $\hat{\mathbb{G}}\ltimes A$ by $\tilde{\phi}:=\phi\circ E_A$. When $\phi$ is a trace and the given action $\alpha$ is $\phi$-preserving (i.e.\ $(\iota\otimes\phi)\alpha(a)=\phi(a)$ for $a\in A$), $\tilde{\phi}$ also becomes a trace.
{\bf $\bullet$ Free products}
For free products of discrete quantum groups, we refer the reader to $\cite{Wa95}$.
We first recall fundamental facts of free products of $C^*$-algebras. Let $A_i$ $(i=1,\ldots,n)$ be unital $C^{\ast}$-algebras and $\phi_i$ non-degenerate states on $A_i$ (i.e.\ GNS representations of $\phi_i$ are faithful). Denote GNS-representations of $\phi_i$ by $(\pi_{i},H_{i},\xi_{i})$ and decompose $H_{i}=\mathbb{C}\xi_{i}\oplus H_{i}^{0}$ as Hilbert spaces, where $H_{i}^{0}:=(\mathbb{C}\xi_{i})^{\perp}$. Define two Hilbert spaces by
\begin{eqnarray*} H_1*\cdots*H_n:=\mathbb{C}\Omega \oplus \bigoplus_{k\geq1}\hspace{0.3em}&\bigoplus_{(i_1,\dots,i_k)\in N_k}& H_{i_{1}}^{0}\otimes H_{i_{2}}^{0}\cdots \otimes H_{i_{k}}^{0},\\ H(i):=\mathbb{C}\Omega \oplus \bigoplus_{k\geq1}\hspace{0.3em}&\bigoplus_{(i_1,\dots,i_k)\in N_k, i\neq i_{1}}& H_{i_{1}}^{0}\otimes H_{i_{2}}^{0}\cdots \otimes H_{i_{k}}^{0}, \end{eqnarray*} where $\Omega$ is a fixed norm one vector and $N_k:=\{(i_1,\dots,i_k)\in \{1,\ldots,n\}^k \mid i_l\neq i_{l+1} \textrm{ for all }l=1,\dots,k-1\}$. Write $H:=H_1*\cdots*H_n$. Let $W_i$ be unitary operators given by \begin{eqnarray*} W_{i}\colon H &=& H(i)\oplus(H_{i}^{0}\otimes H(i))\\ &\simeq& (\mathbb{C}\xi_{i}\otimes H(i))\oplus(H_{i}^{0}\otimes H(i))\\ &\simeq&(\mathbb{C}\xi_{i} \oplus H_{i}^{0})\otimes H(i)\\ &=&H_{i}\otimes H(i) \end{eqnarray*} Then a canonical $A_i$-action (more generally $\mathbb{B}(H_i)$-action) on $H$ is given by $\lambda_i(a):=W_i^* (a\otimes 1)W_i$ $(a\in A_i)$. The \textit{free product $C^*$-algebra} $(A_1,\phi_1)*\cdots *(A_n,\phi_n)$ is the $C^*$-algebra generated by $\lambda_i(A_i)$ $(i=1,\ldots, n)$. The vector state of the canonical cyclic vector $\Omega$ is called the free product state and denoted by $\phi_1*\cdots *\phi_n$.
When each $A_i$ is a von Neumann algebra and $\phi_i$ is normal, the \textit{free product von Neumann algebra} is defined as the $\sigma$-weak closure of the free product $C^*$-algebra in $\mathbb{B}(H)$. We denote it by $(A_1,\phi_1)\bar{*}\cdots \bar{*}(A_n,\phi_n)$.
Let $M_i$ $(i=1,\ldots,n)$ be von Neumann algebras and $\phi_i$ be faithful normal states on $M_i$. Denote modular objects of them by $\Delta_i$ and $J_i$. Then modular objects on the free product von Neumann algebra are given by \begin{alignat*}{3} \Delta^{it} &\colon H_{i_{1}}^{0}\otimes \cdots \otimes H_{i_{k}}^{0} \ni \xi_{i_{1}}\otimes \cdots \otimes \xi_{i_{k}} \longmapsto \Delta^{it}_{i_1}\xi_{i_{1}}\otimes \cdots \otimes \Delta^{it}_{i_k}\xi_{i_{k}} &\in& H_{i_{1}}^{0}\otimes \cdots \otimes H_{i_{k}}^{0},\\ J \hspace{0.7em} &\colon H_{i_{1}}^{0}\otimes \cdots \otimes H_{i_{k}}^{0} \ni \xi_{i_{1}}\otimes \cdots \otimes \xi_{i_{k}} \longmapsto J_{i_k}\xi_{i_{k}}\otimes \cdots \otimes J_{i_1}\xi_{i_{1}} &\in& H_{i_{k}}^{0}\otimes \cdots \otimes H_{i_{1}}^{0}. \end{alignat*}
The unitary elements $V_i:=\Sigma(J_i\otimes J|_{JH(i)})W_iJ$, where $\Sigma$ is the flip from $H_i\otimes H(i)$ to $H(i)\otimes H_i$, give a right $M_i$-action (more generally $\mathbb{B}(H_i)$-action) by $\rho_i(a):=J\lambda_i(a)J=V_i^*(1\otimes J_iaJ_i)V_i$.
Let $\mathbb{G}_i$ $(i=1,\ldots,n)$ be compact quantum groups and $h_i$ Haar states of them. Then the reduced free product $(C_{\rm red}(\mathbb{G}_1), h_1)*\cdots *(C_{\rm red}(\mathbb{G}_n), h_n)$ has a structure of compact quantum group with the Haar state $h_1*\cdots *h_n$. We write its dual as $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$ and call it the \textit{free product} of $\hat{\mathbb{G}}_i$ $(i=1,\ldots,n)$. Modular objects $\hat{J}$ and $U:=J\hat{J}$ are given by \begin{alignat*}{3} \hat{J} \hspace{0.5em} &\colon H_{i_{1}}^{0}\otimes \cdots \otimes H_{i_{n}}^{0} \ni \xi_{i_{1}}\otimes \cdots \otimes \xi_{i_{n}} \longmapsto \hat{J}_{i_1}\xi_{i_1}\otimes \cdots \otimes \hat{J}_{i_n}\xi_{i_{n}} &\in& H_{i_{1}}^{0}\otimes \cdots \otimes H_{i_{n}}^{0},\\ U \hspace{0.5em} &\colon H_{i_{1}}^{0}\otimes \cdots \otimes H_{i_{n}}^{0} \ni \xi_{i_{1}}\otimes \cdots \otimes \xi_{i_{n}} \longmapsto U_{i_n}\xi_{i_{n}}\otimes \cdots \otimes U_{i_1}\xi_{i_{1}} &\in& H_{i_{n}}^{0}\otimes \cdots \otimes H_{i_{1}}^{0}. \end{alignat*}
We have a formula $W_i\hat{J}=(\hat{J}_i\otimes \hat{J}|_{H(i)}) W_i$.
By the natural inclusion $(C_{\rm red}(\mathbb{G}_i), h_i)\subset (C_{\rm red}(\mathbb{G}_1), h_1)*\cdots *(C_{\rm red}(\mathbb{G}_n), h_n)$ $(i=1,\ldots,n)$, we can regard all corepresentations of $\mathbb{G}_i$ as those of the dual of $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$. A representative of all irreducible representations of the dual of $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$ is given by the trivial corepresentation and all corepresentations of the form $w_1\boxtimes w_2\boxtimes \cdots \boxtimes w_n$, where each $w_l$ is in $\mathrm{Irred}(\mathbb{G}_i)$ for some $i$ (and not trivial corepresentation) and no two adjacent factors are taken from corepresentations of the same quantum group.
{\bf $\bullet$ Amenability of actions}
Let $\alpha$ be an action of a discrete quantum group $\hat{\mathbb{G}}$ on a unital $C^*$-algebra $A$. Denote by $L^2(\mathbb{G})\otimes A$ the Hilbert module obtained from $L^2(\mathbb{G})\otimes_{\rm alg} A$ with $\langle\xi\otimes a | \eta\otimes b\rangle:=\langle\xi|\eta\rangle b^*a$ for $\xi,\eta\in L^2(\mathbb{G})$ and $a,b\in A$. We say $\alpha$ is \textit{amenable} $\cite[\textrm{Definition 4.1}]{VV05}$ if there exists a sequence $\xi_n\in L^2(\mathbb{G})\otimes A$ satisfying \begin{itemize}
\item[$\rm (i)$] $\langle\xi_n |\xi_n\rangle \rightarrow 1$ in $A$;
\item[$\rm (ii)$] for all $x\in \mathrm{Irred}(\mathbb{G})$, $\|((\iota\otimes \alpha)(\xi_n)-\hat{\mathscr{V}}_{12}(\xi_n)_{13}) (1\otimes p_x \otimes 1)\|\rightarrow 0$;
\item[$\rm (iii)$] $((\hat{\rho}\times\hat{\lambda})\circ\hat{\Phi}\otimes \iota)\alpha(a)\xi_n =\xi_n a$ for any $n\in\mathbb{N}$ and $a\in A$, \end{itemize} where $\hat{\mathscr V}:=(\lambda\otimes \iota)(\mathbb{V}_{21})$ and $\hat{\rho}\times\hat{\lambda}$ is the multiplication map from $\ell^\infty(\hat{\mathbb{G}})\otimes\ell^\infty(\hat{\mathbb{G}})$ into $\mathbb{B}(L^2(\mathbb{G}))$, which is bounded since $\ell^\infty(\hat{\mathbb{G}})$ is amenable. It is clear that $\hat{\mathbb{G}}$ is amenable if and only if any trivial action of $\hat{\mathbb{G}}$ is amenable.
{\bf $\bullet$ Free quantum groups and quantum automorphism groups}
Let $F$ be a matrix in $\mathrm{GL}(n,\mathbb{C})$. The \textit{free unitary quantum group} (resp.\ \textit{free orthogonal quantum group}) of $F$ $\cite{VW96}\cite{Wa95}$ is the $C^*$-algebra $C(A_u(F))$ (resp.\ $C(A_o(F))$) defined as the universal unital $C^*$-algebra generated by all the entries of a unitary $n$ by $n$ matrix $u=(u_{i,j})_{i,j}$ satisfying that $F(u_{i,j}^*)_{i,j}F^{-1}$ is unitary (resp.\ $F(u_{i,j}^*)_{i,j}F^{-1}=u$).
Recall that a \textit{coaction} of a compact quantum group $\mathbb{G}$ on a unital $C^*$-algebra $B$ is a unital $*$-homomorphism $\beta\colon B\rightarrow B\otimes C(\mathbb{G})$ satisfying $(\beta\otimes \iota)\circ \beta=(\iota\otimes \Phi)\circ \beta$ and $\overline{{\rm span}}\{\beta(B)(1\otimes C(\mathbb{G}))\}=B\otimes C(\mathbb{G})$. Let $(B,\phi)$ be a pair of a finite dimensional $C^*$-algebra and a faithful state on $B$. Then the \textit{quantum automorphism group} of $(B,\phi)$ $\cite{Wa98_1}\cite{Ba01}$ is the universal compact quantum group $A_{\rm aut}(B,\phi)$ which can be endowed with a coaction $\beta\colon B\rightarrow B\otimes C(A_{\rm aut}(B,\phi))$ satisfying $(\phi\otimes \iota)\circ \beta(x)=\phi(x)1$ for $x\in B$.
\subsection{\bf Weak amenability and the $\rm \bf W^*$CBAP}
Let $\hat{\mathbb{G}}$ be a discrete quantum group. Denote the dense Hopf $*$-algebra by ${\mathscr C}(\hat{\mathbb{G}})$. For any element $a\in\ell^\infty(\hat{\mathbb{G}})$, we can associate a linear map $m_a$ from ${\mathscr C}(\hat{\mathbb{G}})$ to ${\mathscr C}(\hat{\mathbb{G}})$, given by $(m_a\otimes \iota)(u^x)=(1\otimes ap_x)u^x$ for any $x\in \mathrm{Irred}(\mathbb{G})$, where $p_x\in c_0(\hat{\mathbb{G}})$ is the canonical projection onto $x$ component. We say $\hat{\mathbb{G}}$ is \textit{weakly amenable} if there exists a net $(a_i)_i$ of elements of $\ell^\infty(\hat{\mathbb{G}})$ such that \begin{itemize}
\item each $a_i$ has finite support, namely, $a_ip_x=0$ except for finitely many $x\in \mathrm{Irred}(\mathbb{G})$;
\item $(a_i)_i$ converges to 1 pointwise, namely, $a_ip_x$ converges to $p_x$ in $\mathbb{B}(H_x)$ for any $x\in \mathrm{Irred}(\mathbb{G})$;
\item $\limsup_i\|m_{a_i}\|_{\rm c.b.}$ is finite. \end{itemize}
We also recall that a von Neumann algebra $M$ has the \textit{weak$^*$ completely bounded approximation property} (or $W^*$\textit{CBAP}, in short)
if there exists a net $(\psi_i)_i$ of completely bounded (say c.b.) maps on $M$ with normal and finite rank such that $\limsup_i\|\psi_i\|_{\rm c.b.}<\infty$ and $\psi_i$ converges to $\mathrm{id}_M$ in the point $\sigma$-weak topology.
Then the \textit{Cowling--Haagerup constants} of $\hat{\mathbb{G}}$ and $M$ are defined as \begin{eqnarray*}
&&\Lambda_{\rm c.b.}(\hat{\mathbb{G}}):=\inf\{\ \limsup_i\|m_{a_i}\|_{\rm c.b.}\mid (a_i)_i \textrm{ satisfies the above condition}\},\\
&&\Lambda_{\rm c.b.}(M):=\inf\{\ \limsup_i\|\psi_i\|_{\rm c.b.}\mid (\psi_i) \textrm{ satisfies the above condition}\}. \end{eqnarray*} It is known that $\Lambda_{\rm c.b.}(\hat{\mathbb{G}})\geq\Lambda_{\rm c.b.}(L^\infty(\mathbb{G}))$.
De Commer, Freslon, and Yamashita recently proved that $\Lambda_{\rm c.b.}(\hat{\mathbb{G}})=1$, where $\mathbb{G}$ is a free quantum group or a quantum automorphism group $\cite{DFY13}$. Note that a special case of this was already solved by Freslon $\cite{Fr12}$.
\subsection{\bf Popa's intertwining techniques and relative amenability}
We first recall Popa's intertwining techniques in both non-finite and semifinite situations.
\begin{Def}\label{popa embed def}\upshape Let $M$ be a von Neumann algebra, $p$ and $q$ projections in $M$, $A\subset qMq$ and $B\subset pMp$ von Neumann subalgebras, and let $E_{B}$ be a faithful normal conditional expectation from $pMp$ onto $B$. Assume $A$ and $B$ are finite. We say $A$ \textit{embeds in $B$ inside} $M$ and denote by $A\preceq_M B$ if there exist non-zero projections $e\in A$ and $f\in B$, a unital normal $\ast$-homomorphism $\theta \colon eAe \rightarrow fBf$, and a partial isometry $v\in M$ such that \begin{itemize} \item $vv^*\leq e$ and $v^*v\leq f$, \item $v\theta(x)=xv$ for any $x\in eAe$. \end{itemize} \end{Def}
\begin{Thm}[\textit{non-finite version, }{\cite{Po01}\cite{Po03}\cite{Ue12}\cite{HV12}}]\label{popa embed} Let $M,p,q,A,B$, and $E_{B}$ be as in the definition above, and let $\tau$ be a faithful normal trace on $B$. Then the following conditions are equivalent. \begin{itemize}
\item[$\rm (i)$]The algebra $A$ embeds in $B$ inside $M$.
\item[$\rm (ii)$]There exists no sequence $(w_n)_n$ of unitaries in $A$ such that $E_{B}(b^*w_n a)\rightarrow 0$ strongly for any $a,b\in qMp$. \end{itemize} \end{Thm}
\begin{Thm}[\textit{semifinite version, }{\cite{CH08}\cite{HR10}}]\label{popa embed2}
Let $M$ be a semifinite von Neumann algebra with a faithful normal semifinite trace $\mathrm{Tr}$, and $B\subset M$ be a von Neumann subalgebra with $\mathrm{Tr}_{B}:=\mathrm{Tr}|_{B}$ semifinite. Denote by $E_{B}$ the unique $\mathrm{Tr}$-preserving conditional expectation from $M$ onto $B$. Let $q$ be a $\mathrm{Tr}$-finite projection in $M$ and $A\subset qMq$ a von Neumann subalgebra. Then the following conditions are equivalent. \begin{itemize}
\item[$\rm (i)$]There exists a non-zero projection $p\in B$ with $\mathrm{Tr}_{B}(p)<\infty$ such that $A\preceq_{eMe}pBp$, where $e:=p\vee q$.
\item[$\rm (ii)$]There exists no sequence $(w_n)_n$ of unitaries in $A$ such that $E_{B}(b^{\ast}w_n a)\rightarrow 0$ strongly for any $a,b\in qM$.
\end{itemize} We use the same symbol $A\preceq_{M}B$ if one of these conditions holds. \end{Thm} \noindent By the proof of the semifinite one, we have that $A\not\preceq_M B$ if and only if there exists a net $(p_j)$ of $\mathrm{Tr}_B$-finite projections in $B$ which converges to 1 strongly and $A\not\preceq_{e_jMe_j}p_jBp_j$, where $e_j:=p_j\vee q$. We also mention that when $A$ is diffuse and $B$ is atomic, then $A\not\preceq_M B$. This follows from the existence of a normal unital $*$-homomorphism $\theta$ in the definition.
We next recall relative amenability introduced in $\cite{OP07}$ and $\cite{PV11}$.
\begin{Def}\upshape Let $(M,\tau)$ be a tracial von Neumann algebra, $q\in M$ a projection, and $B\subset M$ and $A\subset qMq$ be von Neumann subalgebras. We say $A$ is amenable relative to $B$ in $M$ if there exists a state $\phi$ on $q\langle M,e_B\rangle q$ such that $\phi$ is $A$-central and the restriction of $\phi$ on $A$ coincides with $\tau$. \end{Def}
\begin{Def}\upshape Let $(M,\tau)$ and $(B,\tau_B) $ be tracial von Neumann algebras and $A\subset M$ be a von Neumann subalgebra. We say an $M$-$B$-bimodule ${}_M K_B$ is left $A$-amenable if there exists a state $\phi$ on $\mathbb{B}(K)\cap (B^{\rm op})'$ such that $\phi$ is $A$-central and the restriction of $\phi$ on $A$ coincides with $\tau$. \end{Def}
We note that for any $B\subset M$ and $A\subset qMq$, amenability of $A$ relative to $B$ in $M$ is equivalent to left $A$-amenability of $qL^2(M)$ as a $qMq$-B-bimodule, since $q\langle M,e_B\rangle q= q(\mathbb{B}(L^2(M))\cap (B^{\rm op})')q=\mathbb{B}(qL^2(M))\cap (B^{\rm op})'$. We also mention that when $B$ is amenable, then since $\mathbb{B}(K)\cap (B^{\rm op})'$ is amenable, there exists a conditional expectation from $\mathbb{B}(K)$ onto $\mathbb{B}(K)\cap (B^{\rm op})'$. In this case, relative amenability of $A$ (or left $A$-amenability) means amenability of $A$.
\section{\bf Bi-exactness}
\subsection{\bf Two definitions of bi-exactness}
We introduce two notions of bi-exactness on discrete quantum groups. These notions are equivalent for discrete groups as we have seen in Introduction. Recall that $C_{\rm red}(\mathbb{G})=\rho(C(\mathbb{G}))$ and $UC_{\rm red}(\mathbb{G})U=\lambda(C(\mathbb{G}))=C_{\rm red}(\mathbb{G})^{\rm op}$, where $U=J\hat{J}=\hat{J}J$. Basically we use $UC_{\rm red}(\mathbb{G})U$ instead of $C_{\rm red}(\mathbb{G})^{\rm op}$.
\begin{Def}\upshape\label{bi-ex} Let $\hat{\mathbb{G}}$ be a discrete quantum group. We say $\hat{\mathbb{G}}$ is \textit{bi-exact} if it satisfies following conditions: \begin{itemize}
\item[$\rm (i)$] the quantum group $\hat{\mathbb{G}}$ is exact (i.e.\ $C_{\rm red}(\mathbb{G})$ is exact);
\item[$\rm (ii)$] there exists a u.c.p.\ map $\theta\colon C_{\rm red}(\mathbb{G})\otimes C_{\rm red}(\mathbb{G}) \rightarrow \mathbb{B}(L^2(\mathbb{G}))$ such that $\theta(a\otimes b)-aUbU\in \mathbb{K}(L^2(\mathbb{G}))$ for any $a,b \in C_{\rm red}(\mathbb{G})$. \end{itemize} \end{Def} \begin{Def}\upshape\label{st bi-ex} Let $\hat{\mathbb{G}}$ be a discrete quantum group. We say $\hat{\mathbb{G}}$ is \textit{strongly bi-exact} if there exists a unital $C^*$-subalgebra ${\cal B}$ in $\ell^{\infty}(\hat{\mathbb{G}})$ such that \begin{itemize}
\item[$\rm (i)$] the algebra $\cal B$ contains $c_0(\hat{\mathbb{G}})$, and the quotient ${\cal B}_{\infty}:={\cal B}/c _0(\hat{\mathbb{G}})$ is nuclear;
\item[\rm (ii)] the left translation action on $\ell^\infty(\hat{\mathbb{G}})$ induces an amenable action on ${\cal B}_{\infty}$, and the right one induces the trivial action on ${\cal B}_{\infty}$. \end{itemize} \end{Def}
\begin{Rem}\upshape Amenability implies strong bi-exactness. In fact, we can choose ${\cal B}:=c_0(\hat{\mathbb{G}})+\mathbb{C}1$. In this case, both the left and right actions on ${\cal B}_\infty(\simeq \mathbb{C})$ are trivial. \end{Rem}
We first observe the relationship between bi-exactness and strong bi-exactness. In (i) of the definition of strong bi-exactness, nuclearity of ${\cal B}_\infty$ is equivalent to that of $\cal B$. Moreover, together with the condition (ii), the $C^*$-subalgebra ${\cal C}_l\subset \mathbb{B}(L^2(\mathbb{G}))$ generated by $\hat{\lambda}({\cal B})$ and $C_{\rm red}(\mathbb{G})(=\rho(C(\mathbb{G})))$ is also nuclear. In fact, the quotient image of ${\cal C}_l$ in $\mathbb{B}(L^2(\mathbb{G}))/\mathbb{K}(L^2(\mathbb{G}))$ is nuclear since there is a canonical surjective map from $\hat{\mathbb{G}} \ltimes_{\rm f} {\cal B}_\infty$ to ${\cal C}_l$ and $\hat{\mathbb{G}} \ltimes_{\rm f} {\cal B}_\infty$ is nuclear by amenability of the action. Then ${\cal C}_l$ is an extension of $\mathbb{K}(L^2(\mathbb{G}))$ by this quotient image, and hence is nuclear. Note that ${\cal C}_l$ contains $\mathbb{K}(L^2(\mathbb{G}))$, since it contains $C_{\rm red}(\mathbb{G})$ and the orthogonal projection from $L^2(\mathbb{G})$ onto $\mathbb{C}\hat{1}$. We put ${\cal C}_r:=U{\cal C}_lU$, where $U:=J\hat{J}$. Triviality of the right action in (ii) implies that all commutators of $UC_{\rm red}(\mathbb{G})U$ and $\hat{\lambda}({\cal B})$ (respectively $C_{\rm red}(\mathbb{G})$ and $U\hat{\lambda}({\cal B})U$) are contained in $\mathbb{K}(L^2(\mathbb{G}))$. This implies that all commutators of ${\cal C}_l$ and ${\cal C}_r$ are also contained in $\mathbb{K}(L^2(\mathbb{G}))$. Thus we obtain the following $\ast$-homomorphism:
\begin{equation*} \nu\colon {\cal C}_l \otimes {\cal C}_r \longrightarrow \mathbb{B}(L^2(\mathbb{G}))/\mathbb{K}(L^2(\mathbb{G})); a\otimes b \longmapsto ab. \end{equation*} This map is an extension of the multiplication map on $C_{\rm red}(\mathbb{G}) \otimes UC_{\rm red}(\mathbb{G})U$, and so this multiplication map is nuclear since so is ${\cal C}_l \otimes {\cal C}_r$. Finally by the lifting theorem of Choi and Effros $\cite{CE76}$ (or see $\cite[\textrm{Theorem C.3}]{BO08}$), we obtain a u.c.p.\ lift $\theta$ of the multiplication map. Thus we observed that strong bi-exactness implies bi-exactness (exactness of $\hat{\mathbb{G}}$ follows from nuclearity of ${\cal C}_l$). The intermediate object ${\cal C}_l$ is important for us, and we will use this algebra in the next subsection.
We summarize these observations as follows.
\begin{Lem}\label{intermediate} Strong bi-exactness implies bi-exactness. The following condition is an intermediate condition between bi-exactness and strong bi-exactness: \begin{itemize}
\item There exists a nuclear $C^*$-algebra ${\cal C}_l\subset \mathbb{B}(L^2(\mathbb{G}))$ which contains $C_{\rm red}(\mathbb{G})$ and $\mathbb{K}(L^2(\mathbb{G}))$, and all commutators of ${\cal C}_l$ and ${\cal C}_r(:=U{\cal C}_lU)$ are contained in $\mathbb{K}(L^2(\mathbb{G}))$. \end{itemize} \end{Lem}
Examples of bi-exact quantum groups were first given by Vergnioux $\cite{Ve05}$. He constructed a u.c.p.\ lift directly for free quantum groups. Then he, in a joint work with Vaes $\cite{VV05}$, gave a new proof for bi-exactness of $\hat{A}_o(F)$ and they in fact proved strong bi-exactness. In the proof, they only used the fact that $A_o(F)$ is monoidally equivalent to some $\mathrm{SU}_q(2)$ with $-1<q<1$ and $q\neq0$, seeing some estimates on intertwiner spaces of $\mathrm{SU}_q(2)$ $\cite[\textrm{Lemma 8.1}]{VV05}$. Since the dual of $\mathrm{SO}_q(3)$ is a quantum subgroup of some dual of $\mathrm{SU}_q(2)$, intertwiner spaces of $\mathrm{SO}_q(3)$ have the same estimates. From this fact, we can deduce strong bi-exactness of a dual of a compact quantum group which is monoidally equivalent to $\mathrm{SO}_q(3)$ (by the same argument as that for $\mathrm{SU}_q(2)$). We also mention that strong bi-exactness of $A_u(F)$ was proved by the same argument $\cite{VV08}$.
We summarize these observations as follows. \begin{Thm}\label{example st bi-exact} Let $\mathbb{G}$ be a compact quantum group which is monoidally equivalent to $\mathrm{SU}_q(2)$, $\mathrm{SO}_q(3)$, or $A_u(F)$, where $-1<q<1$, $q\neq 0$, and $F$ is not a scalar multiple of a $2$ by $2$ unitary. Then the dual $\hat{\mathbb{G}}$ is strongly bi-exact. \end{Thm}
In $\cite{Is12_2}$, we introduced condition $\rm (AOC)^+$, which is similar to condition $\rm (AO)^+$ on continuous cores, and proved that von Neumann algebras of free quantum groups satisfy this condition. In the proof we also used only the fact that $A_o(F)$ is monoidally equivalent to some $\mathrm{SU}_q(2)$ and hence we actually proved the following statement.
\begin{Thm}\label{example bi-exact} Let $\mathbb{G}$ be a compact quantum group which is monoidally equivalent to $\mathrm{SU}_q(2)$, $\mathrm{SO}_q(3)$, or $A_u(F)$, where $-1<q<1$, $q\neq 0$, $F$ is not a scalar multiple of a $2$ by $2$ unitary. Then $L^\infty(\mathbb{G})$ and its Haar state satisfy condition $\rm (AOC)^+$ with the dense $C^*$-algebra $C_{\rm red}(\mathbb{G})$. \end{Thm}
In the proof we gave a sufficient condition to condition $\rm (AOC)^+$, which was formulated for general von Neumann algebras $\cite[\textrm{Lemma 3.2.3}]{Is12_2}$. When we see a quantum group von Neumann algebra, we have a more concrete sufficient condition as follows. To verify this, see Subsection 3.2 in the same paper. In the lemma below, $\pi$ means the canonical $*$-homomorphism from $\mathbb{B}(L^2(\mathbb{G}))$ into $\mathbb{B}(L^2(\mathbb{G})\otimes L^2(\mathbb{R}))$ defined by $(\pi(x)\xi)(t):=\Delta_h^{-it}x\Delta_h^{it}\xi(t)$ for $x\in\mathbb{B}(L^2(\mathbb{G}))$, $t\in\mathbb{R}$, and $\xi\in L^2(\mathbb{G})\otimes L^2(\mathbb{R})$.
\begin{Lem}\label{AOC} Let $\mathbb{G}$ be a compact quantum group and ${\cal C}_l\subset C^*\{C_{\rm red}(\mathbb{G}), \hat{\lambda}(\ell^\infty(\hat{\mathbb{G}})) \}$ a $C^*$-subalgebra which contains $C_{\rm red}(\mathbb{G})$ and $\mathbb{K}(L^2(\mathbb{G}))$. Put ${\cal C}_r:=U{\cal C}_lU$. Assume the following conditions: \begin{itemize}
\item[$\rm (i)$] the algebra ${\cal C}_l$ is nuclear;
\item[$\rm (ii)$] a family of maps $\mathrm{Ad}\Delta_h^{it}$ $(t\in\mathbb{R})$ gives a norm continuous action of $\mathbb{R}$ on ${\cal C}_l$;
\item[$\rm (iii)$] all commutators of $\pi({\cal C}_l)$ and ${\cal C}_r \otimes1$ are contained in $\mathbb{K}(L^2(\mathbb{G}))\otimes\mathbb{B}(L^2(\mathbb{R}))$. \end{itemize} Then $L^\infty(\mathbb{G})$ and its Haar state satisfy condition $\rm (AOC)^+$ with the dense $C^*$-algebra $C_{\rm red}(\mathbb{G})$. \end{Lem}
\begin{Rem}\upshape\label{amenable} When $\hat{\mathbb{G}}$ is amenable, then $L^\infty(\mathbb{G})$ and its Haar state satisfy condition $\rm (AOC)^+$. In fact, we can choose ${\cal B}:=c_0(\hat{\mathbb{G}})+\mathbb{C}1$ and ${\cal C}_l:=C^*\{C_{\rm red}(\mathbb{G}), \hat{\lambda}({\cal B})\}$. In this case, all conditions in this lemma are easily verified. \end{Rem}
\begin{Rem}\upshape\label{group} When $\hat{\mathbb{G}}$ is a strongly bi-exact discrete quantum group of Kac type (possibly bi-exact discrete group) with ${\cal B}\subset \ell^\infty(\hat{\mathbb{G}})$, then since the modular operator is trivial, ${\cal C}_l:=C^*\{C_{\rm red}(\mathbb{G}), \lambda({\cal B})\}$ satisfies these conditions. \end{Rem}
\begin{Rem}\upshape In the proof of $\cite[\textrm{Proposition 3.2.4}]{Is12_2}$, we put ${\cal C}_l=\hat{\mathbb{G}}\ltimes_{\rm r} {\cal B}_\infty$ (here we write it as $\tilde{\cal C}_l$), and in this subsection we are putting ${\cal C}_l=C^*\{C_{\rm red}(\mathbb{G}), \hat{\lambda}({\cal B})\}$. Both of them are nuclear $C^*$-algebras containing $C_{\rm red}(\mathbb{G})$ and do the same work to get condition $\rm (AOC)^+$. The difference of them is that ${\cal C}_l$ is contained in $\mathbb{B}(L^2(\mathbb{G}))$ but $\tilde{\cal C}_l$ is not. Hence ${\cal C}_l$ is more useful and $\tilde{\cal C}_l$ is more general (since $\tilde{\cal C}_l$ produces ${\cal C}_l$). In the previous paper, we preferred the generality and hence used $\tilde{\cal C}_l$ in the proof. \end{Rem}
\subsection{\bf Free products of bi-exact quantum groups}
Free products of bi-exact discrete groups (more generally, free products of von Neumann algebras with condition (AO)) were studied in $\cite{Oz04}\cite[\textrm{Section 4}]{GJ07}\cite[\textrm{Section 15.3}]{BO08}$. In this subsection we will prove similar results on discrete quantum groups. We basically follow the strategy in $\cite{Oz04}$.
\begin{Lem}[{\cite[\rm Lemma\ 2.4]{Oz04}}]\label{nuclear} Let $B_i\subset \mathbb{B}(H_i)$ $(i=1,2)$ be $C^*$-subalgebras with $B_i$-cyclic vectors $\xi_i$ and denote by $\omega_i$ the corresponding vector states (note that each $\omega_i$ is non-degenerate). If each $B_i$ contains $P_i$, the orthogonal projection onto $\mathbb{C}\xi_i$, and is nuclear, then the free product $(B_1,\omega_1)*(B_2,\omega_2)$ is also nuclear. \end{Lem} \begin{Rem}\upshape In this Lemma, each $B_i$ contains $\mathbb{K}(H_i)$ since it contains $P_i$ and the vector $\xi_i$ is $B_i$-cyclic. Projections $\lambda_i(P_i)\in(B_1,\omega_1)*(B_2,\omega_2)$ are orthogonal projections onto $H(i)$ and hence the orthogonal projection onto $\mathbb{C}\Omega$ is of the form $\lambda_1(P_1)+\lambda_2(P_2)-1$, which is contained in $(B_1,\omega_1)*(B_2,\omega_2)$. Since the vector $\Omega$ is cyclic for $(B_1,\omega_1)*(B_2,\omega_2)$, $(B_1,\omega_1)*(B_2,\omega_2)$ contains all compact operators. \end{Rem}
For free product von Neumann algebras, we use the same notation $W_i$, $V_i$, $\Delta$, $\Delta_i$, $J$ and $J_i$ as in the free product part of Subsection \ref{CQG}.
\begin{Lem}[{\cite[\rm Lemma\ 3.1]{Oz04}}] For a free product von Neumann algebra $(M_1,\phi_1)*\cdots*(M_n,\phi_n)$, we have the following equations: \begin{eqnarray*} \lambda_i(a) = J\rho_i(a)J &=&JV_i^*(1_{JH(i)}\otimes J_iaJ_i)V_iJ \\ &=&V_i^*(P_\Omega\otimes a+\lambda_i(a)\mid_{JH(i)\ominus \mathbb{C}\Omega}\otimes 1_{H_i})V_i\\ &=&V_j^*(\lambda_i(a)\mid_{JH(j)}\otimes 1_{H_j})V_j, \end{eqnarray*} for any $a\in \mathbb{B}(H_i)$ and $i\neq j$, where $P_\Omega$ is the orthogonal projection onto $\mathbb{C}\Omega$. \end{Lem} \begin{Rem}\upshape\label{commutator} Simple calculations with the lemma show that $[\lambda_i(\mathbb{B}(H_i)),J\lambda_j(\mathbb{B}(H_j))J]=0$ when $i\neq j$, and that \begin{equation*} [\lambda_i(a), J\lambda_i(b)J]= V_i^* (P_\Omega \otimes [a, J_ibJ_i])V_i \end{equation*} for $a,b\in \mathbb{B}(H_i)$.
Since $V_i=\Sigma(J_i\otimes J|_{JH(i)})W_iJ$, where $\Sigma$ is the flip, this equation means \begin{eqnarray*} [\lambda_i(a), J\lambda_i(b)J] &=& V_i^* (P_\Omega \otimes [a, J_ibJ_i])V_i \\
&=&J^* W_i^* (J_i\otimes J|_{JH(i)})^*\Sigma^* (P_\Omega \otimes [a, J_ibJ_i]) \Sigma(J_i\otimes J|_{JH(i)})W_iJ \\ &=& J^* W_i^*(J_i[a, J_ibJ_i]J_i\otimes P_\Omega) W_iJ. \end{eqnarray*} Hence we get
\begin{equation*} [\lambda_i(a), J\lambda_i(b)J] =W_i^*([a, J_ibJ_i]\otimes P_\Omega) W_i \quad (a,b\in \mathbb{B}(H_i)). \end{equation*} This means the operator $[\lambda_i(a), J\lambda_i(b)J]$ is, as an operator on $H_1*\cdots *H_n$, $[a, J_ibJ_i]$ on $\mathbb{C}\Omega\oplus H_i^0(=H_i)$ and 0 otherwise. \end{Rem}
\begin{Pro}\label{free prod bi-exact} Let $\mathbb{G}_i$ $(i=1,\ldots,n)$ be compact quantum groups. If each $\hat{\mathbb{G}}_i$ satisfies the intermediate condition in \textrm{Lemma $\ref{intermediate}$} with ${\cal C}_l^i$, then the free product $\hat{\mathbb{G}}_1*\cdots*\hat{\mathbb{G}}_n$ satisfies the same condition with the nuclear $C^*$-algebra $({\cal C}_l^1,h_1)*\cdots* ({\cal C}_l^n,h_n)$, where $h_i$ are the vector states of $\hat{1}_{\mathbb{G}_i}$. In particular, $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$ is bi-exact if each $\hat{\mathbb{G}}_i$ is strongly bi-exact. \end{Pro} \begin{proof}
We may assume $n=2$. Write $H:=L^2(\mathbb{G}_1)*L^2(\mathbb{G}_2)$. By Lemma \ref{nuclear} and the following remark, ${\cal C}_l^1* {\cal C}_l^2$ is nuclear and contains $\mathbb{K}(H)$. So all we have to show is that commutators of ${\cal C}_l^1* {\cal C}_l^2$ and $U({\cal C}_l^1* {\cal C}_l^2)U$ are contained in $\mathbb{K}(H)$. We have only to check that $[\lambda_i({\cal C}_l^i),U\lambda_j({\cal C}_l^j)U]$ $(i,j=1,2)$ are contained in $\mathbb{K}(H)$, since $\mathbb{K}(H)$ is an ideal. This is easily verified by Remark \ref{commutator}. \end{proof}
\begin{Pro}\label{free prod AOC^+} Let $\mathbb{G}_i$ $(i=1,\ldots,n)$ be compact quantum groups. If each $\hat{\mathbb{G}}_i$ satisfies conditions in \textrm{Lemma $\ref{AOC}$} with ${\cal C}_l^i$, then the free product $\hat{\mathbb{G}}_1*\cdots*\hat{\mathbb{G}}_n$ satisfies the same condition with the nuclear $C^*$-algebra $({\cal C}_l^1,h_1)*\cdots* ({\cal C}_l^n,h_n)$, where $h_i$ are the vector states of $\hat{1}_{\mathbb{G}_i}$. \end{Pro} \begin{proof} We may assume $n=2$ and write $H:=L^2(\mathbb{G}_1)*L^2(\mathbb{G}_2)$. By the same manner as in the last proposition, ${\cal C}_l^1* {\cal C}_l^2$ is nuclear and contains $\mathbb{K}(H)$. This algebra is contained in $C^*\{C_{\rm red}(\mathbb{G}_1)*C_{\rm red}(\mathbb{G}_2), \hat{\lambda}(\ell^\infty(\hat{\mathbb{G}}_1*\hat{\mathbb{G}}_2)) \}$. Norm continuity of the modular action is trivial since it is continuous on each $\lambda_k({\cal C}_l^k)$. By Remark \ref{commutator}, our commutators in the algebra $\mathbb{B}(H)$ (not the algebra $\mathbb{B}(H\otimes L^2(\mathbb{R}))$) are of the form \begin{equation*} [\lambda_k(a), U\lambda_k(b)U] =W_k^*([a, U_kbU_k]\otimes P_\Omega) W_k \quad (a,b\in {\cal C}^k_l) \end{equation*} (or $[\lambda_k(a), U\lambda_l(b)U]=0$ when $k\neq l$). Modular actions for $a$ is of the form \begin{equation*} [\Delta^{it}\lambda_k(a)\Delta^{-it}, U\lambda_k(b)U] =[\lambda_k(\Delta_k^{it}a\Delta_k^{-it}), U\lambda_k(b)U] =W_k^*([\Delta_k^{it}a\Delta_k^{-it}, U_kbU_k]\otimes P_\Omega) W_k,
\end{equation*} where $\Delta_k$ is the modular operator for $\mathbb{G}_k$. Hence when we see commutators of $\pi({\cal C}_l^k)$ and ${\cal C}_r^l\otimes 1$ in $\mathbb{B}(H\otimes L^2(\mathbb{R}))$, we can first assume $k=l$ since these commutators are zero when $k\neq l$. When we see these commutators for a fixed $k$ (and $k=l$), we actually work on $\mathbb{B}(L^2(\mathbb{G}_k)\otimes L^2(\mathbb{R}))$ with the modular action of $\mathbb{G}_k$, where we regard $L^2(\mathbb{G}_k)\simeq\mathbb{C}\Omega\oplus L^2(\mathbb{G}_k)^0\subset H$. Hence by the assumption on $\mathbb{G}_k$, we get \begin{equation*} [\pi({\cal C}^k_l), {\cal C}^k_r\otimes 1]\subset \mathbb{K}(L^2(\mathbb{G}_k))\otimes \mathbb{B}(L^2(\mathbb{R})) \subset \mathbb{K}(L^2(\mathbb{G}))\otimes \mathbb{B}(L^2(\mathbb{R})). \end{equation*} Thus we get the condition on commutators. \end{proof}
Now we can give new examples of bi-exact quantum groups and von Neumann algebras with condition $\rm (AOC)^+$.
\begin{Cor}\label{free prod AOC} Let $\mathbb{G}_i$ $(i=1,\ldots,n)$ be compact quantum groups. Assume that each $\mathbb{G}_i$ is monoidally equivalent to $\mathrm{SU}_q(2)$, $\mathrm{SO}_q(3)$, or $A_u(F)$, where $-1<q<1$, $q\neq 0$, $F$ is not a scalar multiple of a $2$ by $2$ unitary. Then the free product $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$ is bi-exact. The associated von Neumann algebra $(L^\infty(\mathbb{G}_1),h_1)\bar{*}\cdots \bar{*}(L^\infty(\mathbb{G}_n),h_n)$ and its Haar state $h_1*\cdots *h_n$ satisfies condition $\rm (AOC)^+$ with the dense $C^*$-algebra $(C_{\rm red}(\mathbb{G}_1),h_1)*\cdots *(C_{\rm red}(\mathbb{G}_n),h_n)$. \end{Cor}
\subsection{\bf Proof of Theorem \ref{C}}
We first recall some known properties on free quantum groups and quantum automorphism groups. They were originally proved in $\cite{Ba97}\cite{Wa98_2}\cite{BDV05}$ for free quantum groups and $\cite{RV06}\cite{So08}\cite{Br12}$ for quantum automorphism groups. See $\cite[\textrm{Section 4}]{DFY13}$ for the details.
When $F\in\mathrm{GL}(2,\mathbb{C})$ is a scalar multiple of a $2$ by $2$ unitary, then $A_u(F)=A_u(1_2)$ and the dual of $A_u(1_2)$ is a quantum subgroup of $\mathbb{Z}*\hat{A}_o(1_2)$. When $F\in\mathrm{GL}(n,\mathbb{C})$ is any matrix, then the dual of $A_o(F)$ is isomorphic to a free product of some $\hat{A}_o(F_1)$ and $\hat{A}_u(F_1)$ with $F_1\bar{F_1}\in \mathbb{R}\cdot \mathrm{id}$. For such a matrix $F$ as $F\bar{F}=c \cdot\mathrm{id}$ for some $c\in\mathbb{R}$, the quantum group $A_o(F)$ is monoidally equivalent to $\mathrm{SU}_q(2)$, where $-\mathrm{Tr}(FF^*)/c=q+q^{-1}$, $-1\leq q\leq 1$ and $q\neq0$.
When $q=\pm1$, then $\mathrm{dim}_q(u)=|-\mathrm{Tr}(FF^*)/c|=2$, where $u$ is the fundamental representation of $A_o(F)$, and hence $A_o(F)=\mathrm{SU}_{\pm1}(2)$. Thus every $\hat{A}_o(F)$ and $\hat{A}_u(F)$ is a discrete quantum subgroup of a free product of amenable discrete quantum groups and duals of compact quantum groups which are monoidally equivalent to $\mathrm{SU}_q(2)$ or $A_u(F)$, where $-1< q< 1$, $q\neq0$, $F\in\mathrm{GL}(n,\mathbb{C})$ is not a scalar multiple of a $2$ by $2$ unitary.
The quantum automorphism group $A_{\rm aut}(M,\phi)$ is co-amenable if and only if $\mathrm{dim}(M)\leq4$. For any finite dimensional $C^*$-algebra $M$ and any state $\phi$ on $M$, $\hat{A}_{\rm aut}(M,\phi)$ is isomorphic to a free product of duals of quantum automorphism groups with $\delta$-form. Such quantum automorphism groups are co-amenable or monoidally equivalent to $\mathrm{SO}_q(3)$, where $\delta=q+q^{-1}$ and $0< q\leq1$. When $q=1$ and $\delta=2$, since $\mathrm{dim}(M)\leq\delta^2=4$, $A_{\rm aut}(M,\phi)$ is co-amenable. Thus every $\hat{A}_{\rm aut}(M,\phi)$ is a free product of amenable discrete quantum groups and duals of compact quantum groups which are monoidally equivalent to $\mathrm{SO}_q(3)$ for some $q$ with $0<q<1$.
We see the following easy lemma before the proof. \begin{Lem} Let $\mathbb{G}$ and $\mathbb{H}$ be compact quantum groups. Assume that $\hat{\mathbb{H}}$ is a quantum subgroup of $\hat{\mathbb{G}}$. If $\hat{\mathbb{G}}$ is bi-exact (resp.\ $(L^\infty(\mathbb{G}),h)$ satisfies condition $\rm (AOC)^+$ with the $C^*$-algebra $C_{\rm red}(\mathbb{G})$), then $\hat{\mathbb{H}}$ is bi-exact (resp.\ $(L^\infty(\mathbb{H}),h)$ satisfies condition $\rm (AOC)^+$ with the $C^*$-algebra $C_{\rm red}(\mathbb{H})$). \end{Lem} \begin{proof} By assumption there exists the unique Haar state preserving conditional expectation $E_{\mathbb{H}}$ from $L^\infty(\mathbb{G})$ onto $L^\infty(\mathbb{H})$ (and from $C_{\rm red}(\mathbb{G})$ onto $C_{\rm red}(\mathbb{H})$). It extends to a projection $e_{\mathbb{H}}$ from $L^2(\mathbb{G})$ onto $L^2(\mathbb{H})$. Let $\theta$ be a u.c.p.\ map as in the definition of bi-exactness (resp.\ condition $\rm (AOC)^+$). Then a u.c.p.\ map given by $a\otimes b^{\rm op}\mapsto e_{\mathbb{H}}\theta(a\otimes b^{\rm op})e_{\mathbb{H}}$ for $a, b\in C_{\rm red}(\mathbb{H})$ (resp.\ $a\otimes b^{\rm op}\mapsto (e_{\mathbb{H}}\otimes 1)\theta(a\otimes b^{\rm op})(e_{\mathbb{H}}\otimes 1)$ for $a, b\in C_{\rm red}(\mathbb{H})\rtimes_{\rm r}\mathbb{R}$) do the work. Note that modular objects $J$ and $\Delta^{it}$ of the Haar state commute with $e_{\mathbb{H}}$. Local reflexivity of $C_{\rm red}(\mathbb{H})$ (resp.\ $C_{\rm red}(\mathbb{H})\rtimes_{\rm r}\mathbb{R}$) follows from that of $C_{\rm red}(\mathbb{G})$ (resp.\ $C_{\rm red}(\mathbb{G})\rtimes_{\rm r}\mathbb{R}$) since it is a subalgebra. \end{proof}
\begin{proof}[\bf Proof of Theorem \ref{C}] For weak amenablity and the $\rm W^*$CBAP, they are already discussed in $\cite[\textrm{Section 5}]{DFY13}$ (see also $\cite[\textrm{Theorem 4.6}]{Fr11}$). Hence we see only bi-exactness and condition $\rm (AOC)^+$.
Let $\mathbb{G}$ be as in the statement. Then thanks to the observation above, $\hat{\mathbb{G}}$ is a discrete quantum subgroup of $\hat{\mathbb{G}}_1*\cdots *\hat{\mathbb{G}}_n$, where each $\mathbb{G}_i$ is co-amenable, a dual of a bi-exact discrete group, or monoidally equivalent to $\mathrm{SU}_q(2)$, $\mathrm{SO}_q(3)$, or $A_u(F)$, where $-1<q<1$, $q\neq 0$, $F$ is not a scalar multiple of a $2$ by $2$ unitary. Hence by Theorems \ref{example st bi-exact} and \ref{example bi-exact}, Remarks \ref{amenable} and \ref{group}, Propositions \ref{free prod bi-exact} and \ref{free prod AOC^+}, and the last lemma, $\hat{\mathbb{G}}$ is bi-exact and $(L^\infty(\mathbb{G}),h)$ satisfies condition $\rm (AOC)^+$ with $C_{\rm red}(\mathbb{G})$. \end{proof}
\section{\bf Proofs of main theorems}
\subsection{\bf Proof of Theorem \ref{A}}
To prove Theorem \ref{A}, we can use the same manner as that in $\cite[\rm Theorem\ 3.1]{PV12}$ except for (i) Proposition 3.2 and (ii) Subsection 3.5 (case 2) in $\cite{PV12}$.
To see (ii) in our situation, we need a structure of quantum group von Neumann algebras, which is weaker than that of group von Neumann algebras but enough to solve our problem. Since we will see a similar (and more general) phenomenon in the next subsection (Lemma \ref{case2}), we omit it.
Hence here we give a proof of (i). To do so, we see one proposition which is a quantum analogue of a well known property for crossed products with discrete groups. \begin{Pro} Let $\mathbb{G}$ be a compact quantum group of Kac type, whose dual acts on a tracial von Neumann algebra $(B,\tau_B)$ as a trace preserving action. Write $M:= \hat{\mathbb{G}}\ltimes B$. Let $p$ be a projection in $M$ and $A\subset pMp$ a von Neumann subalgebra. Then the following conditions are equivalent:
\begin{itemize}
\item[$\rm (i)$] $A\not\preceq_M B$;
\item[$\rm (ii)$] there exists a net $(w_n)_n$ of unitaries in $A$ such that $\lim_n \|(w_n)_{i,j}^x \|_{2,\tau_B}=0$ for any $x\in \mathrm{Irred}(\mathbb{G})$ and $i,j$, where $(w_n)_{i,j}^x$ is given by $w_n=\sum_{x\in \mathrm{Irred}(\mathbb{G}), i,j}(w_n)_{i,j}^x u_{i,j}^x$ for a fixed basis $(u_{i,j}^x)$ satisfying $h(u_{i,j}^x u_{k,l}^{y*})=\delta_{i,k}\delta_{j,l}\delta_{x,y}h(u_{i,j}^x u_{i,j}^{x*})$. \end{itemize} \end{Pro} \begin{proof}
We first assume (i). Then by definition, there exists a net $(w_n)_n$ in ${\cal U}(A)$ such that $\lim_n \|E_B(b^*w_na) \|_2=0$ for any $a,b \in M$. Putting $b=1$ and $a=u_{k,l}^{y*}$ and since $E_B(w_n u_{k,l}^{y*})=\sum_{x,i,j}(w_n)_{i,j}^x h(u_{i,j}^x u_{k,l}^{y*})=(w_n)_{k,l}^y h(u_{k,l}^y u_{k,l}^{y*})$, we have \begin{equation*}
\|(w_n)_{k,l}^y\|_2= |h(u_{k,l}^y u_{k,l}^{y*})|^{-1}\|E_B(w_n u_{k,l}^{y*})\|_2\rightarrow 0 \quad (n\rightarrow \infty).
\end{equation*}
Conversely, assume (ii). We will show $\lim_n \|E_B(b^*w_na) \|_2=0$ for any $a,b \in M$. To see this, we may assume $a=u_{\alpha,\beta}^y$ and $b=u_{k,l}^z$. For any $c\in B$, $u_{k,l}^{z*}c$ is a linear combination of $c' u_{k',l'}^{z*}$ for some $c'\in B$ and $k',l'$. When we apply $E_B$ to $u_{k,l}^{z*} c u_{i,j}^x u_{\alpha,\beta}^y$, it is non-zero only if $\bar{z}\otimes x\otimes y$ contains the trivial representation. For fixed $y,z$, the number of such $x$ is finite (since this means $x\in z\otimes \bar{y}$). So we have
\begin{eqnarray*}
\|E_B(b^*w_na) \|_2 &=& \|\sum_{\textrm{finitely many }x, i,j} E_B(b^*(w_n)_{i,j}^x u_{i,j}^xa) \|_2\\
&\leq& \sum_{\textrm{finitely many }x, i,j} \|b^*(w_n)_{i,j}^x u_{i,j}^xa \|_2\\
&\leq& \sum_{\textrm{finitely many }x, i,j} \|b^*\|\|(w_n)_{i,j}^x \|_2 \|u_{i,j}^xa\| \rightarrow 0. \end{eqnarray*} \end{proof}
The following proposition is the one corresponding to (i) Proposition 3.2 in $\cite{PV12}$. \begin{Pro} Theorem $\ref{A}$ is true if it is true for any trivial action. \end{Pro} \begin{proof} Let $\mathbb{G}$, $B$, $M$, $p$, and $A$ be as in Theorem \ref{A}. By Fell's absorption principle, we have the following $*$-homomorphism: \begin{equation*} \Delta\colon M= \hat{\mathbb{G}}\ltimes B \ni bu_{i,j}^x \longmapsto \sum_{k=1}^{n_x} bu_{i,k}^x\otimes u_{k,j}^x\in M\otimes L^\infty(\mathbb{G})=:{\cal M}. \end{equation*} Put ${\cal A}:=\Delta(A)$, $\tilde{q}:=\Delta(q)$ and ${\cal P}:={\cal N}_{\tilde{q}{\cal M}\tilde{q}}({\cal A})''$. Then consider the following statements:
\begin{itemize}
\item[(1)] If $A$ is amenable relative to $B$ in $M$, then $\cal A$ is amenable relative to $M\otimes1$ in $\cal M$.
\item[(2)] If $\cal P$ is amenable relative to $M\otimes1$ in $\cal M$, then ${\cal N}_{qMq}(A)''$ is amenable relative to $B$ in $M$.
\item[(3)] If ${\cal A}\preceq_{\cal M}M\otimes 1$, then $A\preceq_M B$. \end{itemize} If one knows them, then (1) and our assumption say that we have either $\cal P$ is amenable relative to $M\otimes1$ or ${\cal A}\preceq_{\cal M}M\otimes 1$. Then (2) and (3) imply our desired conclusion.
To show (1) and (2), we only need the property $\Delta\circ E_B=E_{M\otimes 1}\circ \Delta=E_{B\otimes1}\circ \Delta$. So we can use the same strategy as in the group case. To show (3) we need the previous proposition, and once we accept it, then (3) is easily verified. \end{proof}
\subsection{\bf Proof of Theorem \ref{B}}
We use the same symbol as in the statement of Theorem \ref{B}. We moreover use the following notation: \begin{eqnarray*}
&&P:= {\cal N}_{qMq}(A)'', \ C_h:=C_h(L^\infty(\mathbb{G})),\
\tau:=\mathrm{Tr}_M(q\cdot q),\\ &&L^2(M):=L^2(M,\mathrm{Tr}_M)=L^2(B,\tau_B)\otimes L^2(C_h,\mathrm{Tr}),\\ &&\pi\colon M=B\otimes C_h \ni b\otimes x \mapsto (b\otimes_A q \otimes x) \in \mathbb{B}((L^2(M)q \otimes_A L^2(P))\otimes L^2(C_h,\mathrm{Tr})),\\ &&\theta\colon P^{\rm op} \ni y^{\rm op}\mapsto (q\otimes_A y^{\rm op}) \otimes 1 \in \mathbb{B}((L^2(M)q \otimes_A L^2(P))\otimes L^2(C_h,\mathrm{Tr})),\\ &&N:=W^*\{\pi(B\otimes 1), \theta(P^{\rm op})\},\ {\cal N}:=N\otimes C_h. \end{eqnarray*}
We first recall the following theorem which is a generalization of $\cite[\textrm{Theorem 3.5}]{OP07}$ and $\cite[\textrm{Theorem B}]{Oz10}$. Now we are working on a semifinite von Neumann algebra $\cal N$ but still the theorem is true. To verify this, we need the following observation. If a semifinite von Neumann algebra has the $\rm W^*$CBAP, $(\phi_i)_i$ is an approximate identity, and $(p_j)_j$ is a net of trace-finite projections converging to 1 strongly, then a subnet of $(\psi_j\circ\phi_i)$, where $\psi_j$ is a compression by $p_j$, is an approximate identity. Hence we can find an approximate identity whose images are contained in a trace-finite part of the semifinite von Neumann algebra. As finite rank maps relative to $B$ $\cite[\textrm{Definition 5.3}]{PV11}$, one can take linear spans of \begin{equation*} \psi_{y,z,r,t}\colon qMq\rightarrow qMq; x\mapsto y(\mathrm{id}_B\otimes \mathrm{Tr})(zxr)t, \end{equation*} where $y,z,r,t\in M$ satisfy $y=qy$, $t=tq$, $z=(1\otimes p)z$, and $r=r(1\otimes p')$ for some Tr-finite projections $p,p'\in C_h$. Note that for fixed $p\in C_h$ with $\mathrm{Tr}(p)<\infty$, $\mathrm{Tr}(p)^{-1}(\mathrm{id}_B\otimes \mathrm{Tr})$ is a conditional expectation from $B\otimes pC_hp$ onto $B$.
\begin{Thm}[{\cite[\textrm{Theorem 5.1}]{PV12}}] There exists a net $(\omega_i)_i$ of normal states on $\pi(q){\cal N}\pi(q)$ such that \begin{itemize}
\item[$\rm (i)$] $\omega_i(\pi(x))\rightarrow \tau(x)$ for any $x\in qMq$;
\item[$\rm (ii)$] $\omega_i(\pi(a)\theta(\bar{a}))\rightarrow 1$ for any $a\in {\cal U}(A)$;
\item[$\rm (iii)$] $\|\omega_i\circ\mathrm{Ad}(\pi(u)\theta(\bar{u}))-\omega_i\| \rightarrow 0$ for any $u \in {\cal N}_{qMq}(A)$. \end{itemize} Here $\bar a$ means $(a^{\rm op})^*$. \end{Thm}
Let $H$ be a standard Hilbert space of $N$ with a canonical conjugation $J_N$. From now on, as a standard representation of $C_h$, we use $L^2(C_h):=L^2(C_h,\hat{h})=L^2(\mathbb{G})\otimes L^2(\mathbb{R})$, where $\hat{h}$ is the dual weight of $h$.
Then since $H\otimes L^2(C_h)$ is standard for $\cal N$ with a canonical conjugation ${\cal J}:=J_N\otimes J_{C_h}$, there exists a unique net $(\xi_i)_i$ of unit vectors in the positive cone of $\pi(q){\cal J}\pi(q){\cal J}(H\otimes L^2(C_h))$ such that $\omega_i(\pi(q)x\pi(q))=\langle x \xi_i | \xi_i \rangle$ for $x\in {\cal N}$. Then conditions on $\omega_i$ are translated to the following conditions: \begin{itemize}
\item[$\rm (i)'$] $\langle \pi(x) \xi_i | \xi_i \rangle\rightarrow \tau(qxq)$ for any $x\in M$;
\item[$\rm (ii)'$] $\|\pi(a)\theta(\bar a)\xi_i -\xi_i\|\rightarrow 0$ for any $a\in {\cal U}(A)$;
\item[$\rm (iii)'$] $\|\mathrm{Ad}(\pi(u)\theta(\bar{u}))\xi_i -\xi_i\| \rightarrow 0$ for any $u \in {\cal N}_{qMq}(A)$. \end{itemize} Here $\mathrm{Ad}(x)\xi_i:=x{\cal J}x{\cal J}\xi_i$. To see $\rm (iii)'$, we need a generalized Powers--St{\o}rmer inequality (e.g.\ $\cite[\textrm{Theorem IX.1.2.(iii)}]{Ta2}$).
The following lemma is a very similar statement to that in $\cite[\textrm{Subsection 3.5 (case 2)}]{PV12}$. Since we treat quantum groups and moreover our object $C_h$ is semifinite and twisted (not a tensor product), we need more careful treatment. So here we give a complete proof of the lemma. \begin{Lem}\label{case2} Let $(\xi_i)_i$ be as above. Assume $A\not\preceq_{M} B\otimes L\mathbb{R}$.
Then we have \begin{equation*}
\limsup_i\| (1_{\mathbb{B}(H)}\otimes x \otimes 1_{L\mathbb{R}})\xi_i\|=0 \quad \textrm{for any } x \in \mathbb{K}(L^2(\mathbb{G})). \end{equation*} \end{Lem} \begin{proof} Suppose by contradiction that there exist $\delta>0$ and a finite subset ${\cal F}\subset \mathrm{Irred}(\mathbb{G})$ such that \begin{equation*}
\limsup_i\| (1_{\mathbb{B}(H)}\otimes P_{\cal F} \otimes 1_{L\mathbb{R}})\xi_i\| >\delta, \end{equation*} where $P_{\cal F}$ is the orthogonal projection onto $\sum_{x\in {\cal F}}H_x \otimes H_{\bar x}$. Replacing with a subnet of $(\xi_i)_i$, we may assume that \begin{equation*}
\liminf_i\| (1_{\mathbb{B}(H)}\otimes P_{\cal F} \otimes 1_{L\mathbb{R}})\xi_i\| >\delta. \end{equation*} Our goal is to find a finite set ${\cal F}_1\subset \mathrm{Irred}(\mathbb{G})$ and a subnet of $(\xi_i)_i$ which satisfy \begin{equation*}
\liminf_i\| (1_{\mathbb{B}(H)}\otimes P_{{\cal F}_1} \otimes 1_{L\mathbb{R}})\xi_i\| >2^{1/2}\delta. \end{equation*} Then repeating this argument, we get a contradiction since \begin{equation*}
1\geq\liminf_i\| (1_{\mathbb{B}(H)}\otimes P_{{\cal F}_k} \otimes 1_{L\mathbb{R}})\xi_i\| >2^{k/2}\delta \quad (k\in \mathbb{N}). \end{equation*}
\noindent \textit{{\bf claim 1.} There exist a $\mathrm{Tr}$-finite projection $r\in L\mathbb{R}$ and a subnet of $(\xi_i)_i$ such that }
\liminf_i\| (1_{\mathbb{B}(H)}\otimes P_{\cal F} \otimes r)\xi_i\| >\delta. \end{equation*} \begin{proof}[\bf proof of claim 1]
Define a state on $\mathbb{B}(H\otimes L^2(C_h))$ by $\Omega(X):=\mathrm{Lim}_i\langle X\xi_i | \xi_i\rangle$. Then the condition $\rm (i)'$ of $(\xi_i)_i$ says that $\Omega(\pi(x))=\tau(qxq)$ for $x\in M$. Let $r_j$ be a net of $\mathrm{Tr}$-finite projections in $L\mathbb{R}$ which converges to 1 strongly. Then we have \begin{eqnarray*}
|\Omega((1\otimes P_{\cal F}\otimes 1) \pi(1-r_j))|^2&\leq&\Omega((1\otimes P_{\cal F}\otimes 1))\Omega(\pi(1-r_j)) \\ &=&\Omega((1\otimes P_{\cal F}\otimes 1))\tau(q(1-r_j)q) \rightarrow 0 \quad (j\rightarrow \infty). \end{eqnarray*} This implies that \begin{eqnarray*} 0&\leq&
\mathrm{Lim}_i\| (1\otimes P_{\cal F} \otimes 1)\xi_i\| - \mathrm{Lim}_i\| (1\otimes P_{\cal F} \otimes r_j)\xi_i\| \\ &=&\Omega(1\otimes P_{\cal F} \otimes 1) - \Omega( (1\otimes P_{\cal F} \otimes 1)\pi(r_j))\\ &=&\Omega((1\otimes P_{\cal F}\otimes 1) \pi(1-r_j)) \rightarrow 0 \quad (j\rightarrow \infty) \end{eqnarray*}
Hence we can find a $\mathrm{Tr}$-finite projection $r\in L\mathbb{R}$ such that $\mathrm{Lim}_i\| (1\otimes P_{\cal F} \otimes r)\xi_i\|>\delta$. Finally by taking a subnet of $(\xi_i)_i$, we have $\liminf_i\| (1\otimes P_{\cal F} \otimes r)\xi_i\|>\delta$. \end{proof}
We fix a basis $\{u_{i,j}^x \}_{i,j}^x (=:X)$ of the dense Hopf $*$-algebra of $C(\mathbb{G})$ and use the notation \begin{eqnarray*} &&\mathrm{Irred}(\mathbb{G})_x:=\{ u_{i,j}^x\in X \mid i,j=1,\ldots,n_x\} \quad (x\in \mathrm{Irred}(\mathbb{G})),\\ &&\mathrm{Irred}(\mathbb{G})_{\cal E}:=\cup_{x\in{}\cal E}\mathrm{Irred}(\mathbb{G})_x \quad ({\cal E} \subset \mathrm{Irred}(\mathbb{G})). \end{eqnarray*} We may assume that each $\hat{u}_{i,j}^x\in L^2(\mathbb{G})$ is an eigenvector of the modular operator of $h$, namely, for any $u_{i,j}^x$ there exists $\lambda>0$ such that $\Delta_h^{it}\hat{u}_{i,j}^x=\lambda^{it}\hat{u}_{i,j}^x$ $(t\in \mathbb{R})$. In this case we have a formula $\sigma_t^h(u_{i,j}^x)=\lambda^{it}u_{i,j}^x$ and hence $\sigma_t^h(u_{i,j}^{x*} a u_{i,j}^{x})=u_{i,j}^{x*} \sigma_t^h(a) u_{i,j}^{x}$ for any $a\in L^\infty(\mathbb{G})$.
Let $P_e$ be the orthogonal projection from $L^2(\mathbb{G})$ onto $\mathbb{C}\hat 1$. For any $u\in X$, consider a compression map \begin{equation*} \Phi_u(x):=h(u^*u)^{-1}(1\otimes P_eu^* \otimes 1)\pi(x)(1\otimes uP_e \otimes 1) \end{equation*} for $x\in M$, which gives a normal map from $M$ into $B\otimes \mathbb{C}P_e\otimes\mathbb{B}(L^2(\mathbb{R}))\simeq B\otimes \mathbb{B}(L^2(\mathbb{R}))$.
\noindent \textit{{\bf claim 2.} For any $u\in X$, we have} \begin{equation*} \Phi_{u}(b\otimes af)= b\otimes h(u^*u)^{-1}h(u^{*}au)f \quad (b\in B, a\in L^\infty(\mathbb{G}), f\in L\mathbb{R}). \end{equation*} \textit{In particular, $\Phi_u$ is a normal conditional expectation from $M$ onto $B\otimes L\mathbb{R}$.} \begin{proof}[\bf proof of claim 2] Assume $B=\mathbb{C}$ for simplicity. Recall that any element $a\in L^\infty(\mathbb{G})$ in the continuous core is of the form $a=\int\sigma_{-t}^h(a)\otimes e_t \cdot dt$, which means $(a\xi)(s)=\sigma_{-s}^h(a)\xi(s)$ for $\xi \in L^2(\mathbb{G})\otimes L^2(\mathbb{R})$ and $s\in \mathbb{R}$. Hence a simple calculation shows that for any $a\in L^\infty(\mathbb{G})$, \begin{eqnarray*} h(u^*u)\Phi_u(a)&=&(P_eu^* \otimes 1)\int\sigma_{-t}^h(a)\otimes e_t \cdot dt (uP_e \otimes 1)\\ &=&\int P_eu^*\sigma_{-t}^h(a)uP_e\otimes e_t \cdot dt\\
&=&\int h(u^*au)P_e\otimes e_t \cdot dt= h(u^*au)P_e\otimes 1, \end{eqnarray*} where we used $P_eu^*\sigma_{-t}^h(a)uP_e=h(u^*\sigma_{-t}^h(a)u)P_e=h(\sigma_{-t}^h(u^*au))P_e=h(u^*au)P_e$. Thus $\Phi_u$ satisfies our desired condition. \end{proof}
\noindent \textit{{\bf claim 3.} For any $u\in X$ and $x\in M$, we have} \begin{equation*}
\limsup_i\|\pi(x)(1\otimes P_u\otimes r)\xi_i\|\leq h(u^*u)^{-1/2} \|x(1\otimes r)\|_{2,\mathrm{Tr}_M\circ\Phi_u}, \end{equation*} \textit{where $P_u$ is the orthogonal projection from $L^2(\mathbb{G})$ onto $\mathbb{C}\hat{u}$.} \begin{proof}[\bf proof of claim 3] This follows from a direct calculation. Since $P_u=h(u^*u)^{-1} uP_e u^*$, we have \begin{eqnarray*}
&&h(u^*u)\|\pi(x)(1\otimes P_u\otimes r)\xi_i\|^2\\
&=&h(u^*u)\langle (1\otimes P_u \otimes 1)\pi(\tilde{x}^*\tilde{x}) (1\otimes P_u \otimes 1)\xi_i | \xi_i\rangle \qquad (\tilde x := x(1\otimes r))\\
&=&h(u^*u)^{-1}\langle (1\otimes P_eu^* \otimes 1)\pi(\tilde{x}^*\tilde{x}) (1\otimes uP_e \otimes 1)\tilde{\xi}_i | \tilde{\xi}_i\rangle \qquad (\tilde{\xi}_i :=(1\otimes u^* \otimes 1) \xi_i)\\
&=&\langle (1\otimes P_e \otimes 1)\pi(\Phi_u(\tilde{x}^*\tilde{x})) (1\otimes P_e \otimes 1)\tilde{\xi}_i | \tilde{\xi}_i\rangle\\
&=&\| \pi(y) (1\otimes P_eu^* \otimes 1)\xi_i \|^2 \qquad (y:=\Phi_u(\tilde{x}^*\tilde{x})^{1/2})\\
&\leq& \| \pi(y) \xi_i \|^2 \qquad (\textrm{since $\pi(y)$ and $(1\otimes P_eu^* \otimes 1)$ commute})\\ &\rightarrow& \mathrm{Tr}_M(qy^*yq) \leq \mathrm{Tr}_M(y^*y) = \mathrm{Tr}_M(\Phi_u(\tilde{x}^*\tilde{x}))= \mathrm{Tr}_M\circ\Phi_u((1\otimes r)x^*x(1\otimes r)). \end{eqnarray*} \end{proof}
\noindent \textit{{\bf claim 4.} For any $\epsilon>0$ and any finite subset ${\cal E}\subset \mathrm{Irred}(\mathbb{G})$, there exist $a\in {\cal U}(A)$ and $v\in M$ such that \begin{itemize}
\item $v$ is a finite sum of elements of the form $b\otimes u f$, where $b\in B$, $f\in L\mathbb{R}$ and $u\in X \setminus \mathrm{Irred}(\mathbb{G})_{\cal E}$;
\item $h(u^*u)^{-1/2}\|(a-v)(1\otimes r)\|_{2,\mathrm{Tr}_M\circ\Phi_u}<\epsilon$ for any $u\in\mathrm{Irred}(\mathbb{G})_{\cal F}$. \end{itemize} } \begin{proof}[\bf proof of claim 4]
Since $A\not\preceq_{{}_e M_e}B\otimes L\mathbb{R}r$, where $e:=q\vee r$, for any $\epsilon>0$ and any finite subset ${\cal E}\subset \mathrm{Irred}(\mathbb{G})$, there exists $a\in {\cal U}(A)$ such that $\|E_{B\otimes L\mathbb{R}}((1\otimes r)(1\otimes u^*)a(1\otimes r))\|_{2,\mathrm{Tr}_M}<\epsilon$ for any $u\in \mathrm{Irred}(\mathbb{G})_{\cal E}$.
Since the linear span of $\{b\otimes u\lambda_t| b\in B, t\in\mathbb{R},u\in X\}$ is strongly dense in $M$, we can find a bounded net $(z_j)$ of elements in this linear span which converges to $a$ in the strong topology.
Hence we can find $z$ in the linear span such that $\|(a-z)(1\otimes r)\|_{2,\mathrm{Tr}_M}$ and $\|(a-z)(1\otimes r)\|_{2,\mathrm{Tr}_M\circ\Phi_u}$ $(u\in\mathrm{Irred}(\mathbb{G})_{\cal F})$ are very small.
In this case we may assume that $\|E_{B\otimes L\mathbb{R}}((1\otimes r)(1\otimes u^*)z(1\otimes r))\|_{2,\mathrm{Tr}_M}<\epsilon$ for any $u\in \mathrm{Irred}(\mathbb{G})_{\cal E}$.
Write $z=\sum_{\rm finite} b_{i,j}^x\otimes u_{i,j}^x f_{i,j}^x$. Then for any $y\in{\cal E}$ the above inequality implies \begin{eqnarray*}
\epsilon&>&\|E_{B\otimes L\mathbb{R}}((1\otimes r)(1\otimes u^{y*}_{k,l})z(1\otimes r))\|_{2,\mathrm{Tr}_M}\\
&=&\|\sum_{\rm finite} (1\otimes r)(b_{i,j}^x\otimes h(u^{y*}_{k,l}u_{i,j}^x)f_{i,j}^x)(1\otimes r)\|_{2,\mathrm{Tr}_M}\\
&=&\sum_{\rm finite}\delta_{x,y}\delta_{i,k}\delta_{j,l}h(u^{y*}_{k,l}u_{i,j}^x)\|b_{i,j}^x\otimes f_{i,j}^xr\|_{2,\mathrm{Tr}_M}
=\|u^{y}_{k,l}\|^2_{2,h}\|b_{k,l}^y\otimes f_{k,l}^yr\|_{2,\mathrm{Tr}_M}. \end{eqnarray*} Hence if we write $z=\sum_{x\in {\cal E}}b_{i,j}^x\otimes u_{i,j}^x f_{i,j}^x + \sum_{x\not\in {\cal E}}b_{i,j}^x\otimes u_{i,j}^x f_{i,j}^x $, and say $v$ for the second sum, then we have \begin{eqnarray*}
\|(z-v)(1\otimes r)\|_{2,\mathrm{Tr}_M\circ\Phi_u}
&\leq&\sum_{x\in{\cal E},i,j}\| b_{i,j}^x\otimes u_{i,j}^x f_{i,j}^xr \|_{2,\mathrm{Tr}_M\circ\Phi_u} \\
&\leq&\sum_{x\in{\cal E},i,j}\|u_{i,j}^x\| \| b_{i,j}^x\otimes f_{i,j}^xr \|_{2,\mathrm{Tr}_M\circ\Phi_u} \\
&\leq&\sum_{x\in{\cal E},i,j}\| b_{i,j}^x\otimes f_{i,j}^xr \|_{2,\mathrm{Tr}_M} \\ &<&C({\cal E})\cdot\epsilon, \end{eqnarray*}
for any $u\in\mathrm{Irred}(\mathbb{G})_{\cal F}$, where $C({\cal E})>0$ is a constant which depends only on ${\cal E}$. Thus this $v$ is our desired one. \end{proof}
Now we return to the proof. Since $1\otimes P_{\cal F}\otimes r$ is a projection, we have \begin{eqnarray*}
\limsup_i\|\xi_i-(1\otimes P_{\cal F}\otimes r)\xi_i\|^2
&=&\limsup_i(\|\xi_i\|^2-\|(1\otimes P_{\cal F}\otimes r)\xi_i\|^2)\\
&=&1-\liminf_i\|(1\otimes P_{\cal F}\otimes r)\xi_i\|^2<1-\delta^2. \end{eqnarray*}
So there exists $\epsilon>0$ such that $\limsup_i\|\xi_i-(1\otimes P_{\cal F}\otimes r)\xi_i\|<(1-\delta^2)^{1/2}-\epsilon$. We apply claim 4 to $(\sum_{x\in{\cal F}}\mathrm{dim}(H_x)^2)^{-1}\epsilon$ and ${\cal E}:={\cal F}\bar{\cal F}$ $(=\{z| z\in x\otimes \bar{y} \textrm{ for some }x,y\in{\cal F}\})$, and get $a$ and $v$. Then we have \begin{eqnarray*}
&&\limsup_i\|\xi_i-\theta(\bar{a})\pi(v)(1\otimes P_{\cal F}\otimes r)\xi_i\|\\
&\leq&\limsup_i\|\xi_i-\theta(\bar{a})\pi(a)(1\otimes P_{\cal F}\otimes r)\xi_i\|+\limsup_i\|\theta(\bar{a})\pi(a-v)(1\otimes P_{\cal F}\otimes r)\xi_i\|\\
&\leq&\limsup_i\|\theta(a^{\rm op})\pi(a^*)\xi_i-(1\otimes P_{\cal F}\otimes r)\xi_i\| + \sum_{u\in \mathrm{Irred}(\mathbb{G})_{\cal F}} h(u^*u)^{-1/2}\|(a-v)(1\otimes r)\|_{2, \mathrm{Tr}_M\circ\Phi_u} \\
&<&\limsup_i\|\xi_i-(1\otimes P_{\cal F}\otimes r)\xi_i\| + \epsilon < (1-\delta^2)^{1/2}, \end{eqnarray*} where we used condition $\rm (ii)'$ of $(\xi_i)$ and claim 3. By the choice of $v$, it is of the form $\sum_{x\in{\cal S},i,j}b_{i,j}^x\otimes u_{i,j}^x f_{i,j}^x$ for some finite set ${\cal S}\subset \mathrm{Irred}(\mathbb{G})$ with ${\cal S}\cap{\cal F}\bar{\cal F}=\emptyset$. Note that this means ${\cal SF}\cap{\cal F}=\emptyset$. The vector $\theta(\bar{a})\pi(v)(1\otimes P_{\cal F}\otimes r)\xi_i$ is contained in the range of $1\otimes P_{\cal SF}\otimes 1$. This is because the modular operator $\Delta_h^{it}$ commutes with $P_{\cal SF}$ and $P_{\cal F}$ and hence \begin{equation*} (1\otimes P_{{\cal SF}}\otimes 1)\pi(v)(1\otimes P_{\cal F}\otimes 1)=\pi(v)(1\otimes P_{\cal F}\otimes 1). \end{equation*} Then we have \begin{eqnarray*} 1-\delta^2
&>&\limsup_i\|\xi_i-\theta(\bar{a})\pi(v)(1\otimes P_{\cal F}\otimes r)\xi_i\|^2 \\
&\geq&\limsup_i\|(1\otimes P_{\cal SF}\otimes 1)^{\perp}\xi_i\|^2\\
&=&1-\liminf_i\|(1\otimes P_{\cal SF}\otimes 1)\xi_i\|^2. \end{eqnarray*}
This means $\delta<\liminf_i\|(1\otimes P_{\cal SF}\otimes 1)\xi_i\|$. Finally put ${\cal F}_1:={\cal F}\cup{\cal SF}$. Then since ${\cal SF}\cap{\cal F}=\emptyset$, we get \begin{equation*}
\liminf_i\|(1\otimes P_{{\cal F}_1}\otimes 1)\xi_i\|^2
\geq\liminf_i\|(1\otimes P_{\cal F}\otimes 1)\xi_i\|^2 + \liminf_i\|(1\otimes P_{\cal SF}\otimes 1)\xi_i\|^2>2\delta^2. \end{equation*} Thus ${\cal F}_1$ is our desired one and we can end the proof. \end{proof}
Now we start the proof. We follow that of $\cite[\textrm{Subsection 3.4 (case 1)}]{PV12}$. Since this part of the proof does not rely on the structure of group von Neumann algebras, we need only slight modifications, which were already observed in $\cite{Is12_2}$. Hence here we give a rough sketch of the proof. We use the following notation which is used in $\cite{PV12}$. \begin{eqnarray*} &&{\cal D}:=M\odot M^{\rm op} \odot P^{\rm op} \odot P \supset (C_{\rm red}(\mathbb{G})\rtimes_{\rm r}\mathbb{R})\odot (C_{\rm red}(\mathbb{G})\rtimes_{\rm r}\mathbb{R})^{\rm op} \odot P^{\rm op} \odot P =:{\cal D}_0,\\ &&\Psi\colon {\cal D}\rightarrow \mathbb{B}(H\otimes L^2(C_h)\otimes L^2(C_h)), \quad \Theta \colon {\cal D}\rightarrow \mathbb{B}(H\otimes L^2(C_h)),\\ &&\Psi((b_1\otimes x_1) \otimes (b_2\otimes x_2)^{\rm op}\otimes y^{\rm op}\otimes z)=b_1J_Nb_2^*J_Ny^{\rm op}J\bar{z}J\otimes x_1\otimes x_2^{\rm op}, \\ &&\Theta((b_1\otimes x_1) \otimes (b_2\otimes x_2)^{\rm op}\otimes y^{\rm op}\otimes z)=b_1J_Nb_2^*J_Ny^{\rm op}J\bar{z}J\otimes x_1x_2^{\rm op}\\ &&\phantom{\Theta((b_1\otimes x_1) \otimes (b_2\otimes x_2)^{\rm op}\otimes y^{\rm op}\otimes z)}=\pi(b_1\otimes x_1){\cal J}\pi(b_2\otimes x_2)^*{\cal J}\theta(y^{\rm op}){\cal J}\theta(\bar{z}){\cal J}. \end{eqnarray*}
\begin{proof}[\bf Proof of Theorem \ref{B}]
Define a state on $\mathbb{B}(H\otimes L^2(C_h))$ by $\Omega_1(X):=\mathrm{Lim}_i\langle X\xi_i|\xi_i\rangle$. Our condition $\rm (AOC)^+$, together with Lemma \ref{case2}, implies that $|\Omega_1(\Theta(X))|\leq \|\Psi(X)\|$ for any $X\in{\cal D}_0$. We extend this inequality to ${\cal D}$ by using an approximate identity of $C_h$ which takes values in $C_{\rm red}(\mathbb{G})\rtimes_{\rm r}\mathbb{R}$ (such a net exists, see $\cite[\textrm{Lemma 2.3.1}]{Is12_2}$).
Then a new functional $\Omega_2$ on $C^*(\Psi({\cal D}))$ is defined by $\Omega_2(\Psi(X)):=\Omega_1(\Theta(X))$ $(X\in{\cal D})$.
Its Hahn--Banach extension on $\mathbb{B}(H\otimes L^2(C_h)\otimes L^2(C_h))$ restricts to a $P$-central state on $q(B\otimes \mathbb{B}(C_h))q\otimes \mathbb{C}$. More precisely we have a $P$-central state on $\mathbb{B}(qL^2(M))\cap(B^{\rm op})'(=q(B\otimes \mathbb{B}(C_h))q)$ which restricts to the trace $\tau$ on $qMq$. \end{proof}
\subsection{\bf Proofs of corollaries}
For the Kac type case, the same proofs as in the group case work. So we give only the proof of Corollary \ref{B}.
Let $\mathbb{G}$ be a compact quantum group in the statement of the corollary, $h$ the Haar state of $\mathbb{G}$, and $(B,\tau_B)$ be a tracial von Neumann algebra. Write $M:=B\otimes L^\infty(\mathbb{G})$. Let $(A,\tau_A)$ be an amenable tracial von Neumann subalgebra in $M$ with expectation $E_A$.
Then since the modular actions of $\tau_A$ and $\tau_A\circ E_A$ satisfy the relation $\sigma^{\tau_A}=\sigma^{\tau_A\circ E_A}|_A$ (see the proof of $\cite[\rm{Theorem\ IX.4.2}]{Ta2}$), we have an inclusion $A\otimes L\mathbb{R}=C_{\tau_A}(A)\subset C_{\tau_A\circ E_A}(M)$ with a faithful normal conditional expectation $\tilde{E}_A$ given by $\tilde{E}_A(x\lambda_t)=E_A(x)\lambda_t$ for $x\in M$ and $t\in \mathbb{R}$. Since continuous cores are canonically isomorphic with each other, there is a canonical isomorphism from $C_{\tau_A\circ E_A}(M)$ onto $C_{\tau_B\otimes h}(M)=B\otimes C_h(L^\infty(\mathbb{G}))$ $(:=\tilde{M})$. We denote by $\tilde{A}$ the image of $C_{\tau_A}(A)$ in $\tilde{M}$. Put $\tilde{P}:={\cal N}_{\tilde{M}}(\tilde{A})''$. Note that there is a faithful normal conditional expectation $E_{\tilde{P}}$ from $\tilde{M}$ onto $\tilde{P}$, since $\tilde{A}$ is an image of an expectation (use $\cite[\rm{Theorem\ IX.4.2}]{Ta2}$). \begin{Lem} Under the setting above, for any projection $z\in{\cal Z}(A)\cap (1_B\otimes L^\infty(\mathbb{G}))$ (possibly $z=1$) we have either \begin{itemize}
\item[$\rm (i)$] $Az\preceq_M B$;
\item[$\rm (ii)$] there exists a conditional expectation from $z(B\otimes \mathbb{B}(L^2(C_h)))z$ onto $z\tilde{P}z$, where we regard $z\in 1_B\otimes C_h$ by the canonical inclusion $L^\infty(\mathbb{G})\subset C_h$. \end{itemize} \end{Lem} \begin{proof} Suppose $Az\not\preceq_M B$. Then by the same manner as in $\cite[\textrm{Proposition 2.10}]{BHR12}$, we have $\tilde{A}zq\not\preceq_{\tilde{M}}B\otimes L\mathbb{R}r$ for any projections $q\in {\cal Z}(\tilde{A})$ and $r\in L\mathbb{R}$ with $(\tau_B\otimes\mathrm{Tr})(q)<\infty$ and $\mathrm{Tr}(r)<\infty$. By the comment below Theorem \ref{popa embed2}, this means $\tilde{A}zq\not\preceq_{\tilde{M}}B\otimes L\mathbb{R}$ for any projection $q\in {\cal Z}(\tilde{A})$ with $(\tau_B\otimes\mathrm{Tr})(q)<\infty$. We fix such $q$ and assume $z=1$ for simplicity.
Apply Theorem \ref{B} to them and get that $L^2(q\tilde{M})$ is left ${\cal N}_{q\tilde{M}q}(q\tilde{A}q)''$-amenable as a $q\tilde{M}q$-$B$-bimodule. Note that ${\cal N}_{q\tilde{M}q}(q\tilde{A}q)''=q\tilde{P}q$ (e.g.\ $\cite[\rm Lemma\ 2.2]{FSW10}$). By $\cite[\textrm{Proposition 2.4}]{PV12}$ this means that ${}_{q\tilde{M}q}L^2(q\tilde{M}q)_{q\tilde{P}q} \prec {}_{q\tilde{M}q}L^2(q\tilde{M})\otimes_B L^2(\tilde{M}q)_{q\tilde{P}q}$. Let $\nu_q$ (or $\nu$ for $q=1$) be the following canonical multiplication $*$-homomorphism: \begin{alignat*}{5} & \hspace{5em}\mathbb{B}(\tilde{q}K) && \hspace{1em}\mathbb{B}(qq^{\rm op}L^2(\tilde{M}))\\ & \hspace{6em}\cup && \hspace{3.5em}\cup \\ & \textrm{$*$-alg}\{q\tilde{M}q\otimes_B q^{\rm op}, q\otimes_B (q\tilde{P}q)^{\rm op}\} &\quad\xrightarrow{\nu_q}\quad& \textrm{$*$-alg}\{q\tilde{M}q, (q\tilde{P}q)^{\rm op}\}& \end{alignat*} where $K:=L^2(\tilde{M})\otimes_B L^2(\tilde{M})$ and $\tilde{q}:=(q\otimes_B1)(1\otimes_B q^{\rm op})\in\mathbb{B}(K)$. The weak containment above means that $\nu_q$ is bounded. Let $(q_i)_i$ be a net of $(\tau_B\otimes\mathrm{Tr})$-finite projections in ${\cal Z}(\tilde{A})$ which converges to 1 strongly. Then since each $q_i$ satisfies the weak containment above, we have for any $x\in \textrm{$*$-alg}\{\tilde{M}\otimes_B1, 1\otimes_B (\tilde{P})^{\rm op}\}\subset\mathbb{B}(K)$, \begin{equation*}
\|x\|_{\mathbb{B}(K)}=\sup_i\|\tilde{q_i}x\tilde{q_i}\|_{\mathbb{B}(\tilde{q_i}K)}\geq
\sup_i\|\nu_{q}(\tilde{q_i}x\tilde{q_i})\|_{\mathbb{B}(q_iq_i^{\rm op}L^2(\tilde{M}))}=\|\nu(x)\|_{\mathbb{B}(L^2(\tilde{M}))}. \end{equation*} Hence $\nu$ is bounded and we have ${}_{\tilde{M}}L^2(\tilde{M})_{\tilde{P}} \prec {}_{\tilde{M}}L^2(\tilde{M})\otimes_B L^2(\tilde{M})_{\tilde{P}}$. (For a general $z$, we have ${}_{z\tilde{M}z}L^2(z\tilde{M}z)_{z\tilde{P}z} \prec {}_{z\tilde{M}z}L^2(z\tilde{M})\otimes_B L^2(\tilde{M}z)_{z\tilde{P}z}$.)
By Arveson's extension theorem, we extend $\nu$ on $C^*\{\tilde{M}\otimes_B1, 1\otimes_B (B'\cap \mathbb{B}(L^2(\tilde{M})))\}$ as a u.c.p.\ map into $\mathbb{B}(L^2(\tilde{M}))$ and denote it by $\Phi$. Then since $\tilde{M}\otimes 1$ is contained in the multiplicative domain of $\Phi$, the image of $1\otimes_B (B'\cap \mathbb{B}(L^2(\tilde{M})))$ is contained in $\tilde{M}'=\tilde{M}^{\rm op}$. Consider the following composition map: \begin{equation*} B^{\rm op}\otimes \mathbb{B}(L^2(C_h))= (B'\cap\mathbb{B}(L^2(\tilde{M})))\simeq1\otimes_B (B'\cap\mathbb{B}(L^2(\tilde{M})))\xrightarrow{\Phi}\tilde{M}^{\rm op}\xrightarrow{E_{\tilde{P}}^{\rm op}} \tilde{P}^{\rm op}. \end{equation*} (For a general $z$, we have $z^{\rm op}(B^{\rm op}\otimes \mathbb{B}(L^2(C_h)))z^{\rm op}\rightarrow z(z\tilde{M}z)^{\rm op}\simeq (z\tilde{M}z)^{\rm op}\rightarrow (z\tilde{P}z)^{\rm op}.$) Finally composing with $\mathrm{Ad}(J_{\tilde{M}})=\mathrm{Ad}(J_B\otimes J_{C_h})$, we get a conditional expectation from $B\otimes \mathbb{B}(L^2(C_h))$ onto $\tilde{P}$. \end{proof}
Now assume that $L^\infty(\mathbb{G})$ is non amenable and $A\subset M$ is a Cartan subalgebra. Let $w$ be a central projection in $L^\infty(\mathbb{G})$ such that $L^\infty(\mathbb{G})w$ has no amenable direct summand. Write $z=1_B\otimes w$. Then $z\in A$ since $A$ is maximal abelian. Since $\tilde{A}$ is Cartan in $\tilde{M}$ (e.g.\ $\cite[\textrm{Subsection 2.3}]{HR10}$), we have $\tilde{M}=\tilde{P}$. The lemma above says that we have either (i) $Az\preceq_MB$ or (ii) there exists a conditional expectation from $B\otimes \mathbb{B}(wL^2(C_h))$ onto $z\tilde{M}z=B\otimes C_hw$. If (ii) holds, then composing with $\tau_B\otimes \mathrm{id}_{C_h}$, we get a conditional expectation from $\mathbb{B}(wL^2(C_h))$ onto $C_hw$. This is a contradiction since $C_hw=(L^\infty(\mathbb{G})w)\rtimes \mathbb{R}$ is non amenable. If (i) holds, then we have $Az\preceq_{Mz}B\otimes \mathbb{C}w$ since $z\in {\cal Z}(M)$, and hence $(B\otimes \mathbb{C}w)'\cap Mz \preceq_{Mz} A'\cap Mz$ by $\cite[\textrm{Lemma 3.5}]{Va08}$ (this is true for non finite $M$). This means $1_B \otimes L^\infty(\mathbb{G})w\preceq_{Mz} Az$ and hence $L^\infty(\mathbb{G})w$ has an amenable direct summand. Thus we get a contradiction.
Next assume that $B=\mathbb{C}$. Let $N\subset L^\infty(\mathbb{G})(=M)$ be a non-amenable subalgebra with expectation $E_N$ and assume that $A\subset N$ is a Cartan subalgebra with expectation $E_A$. Let $z$ be a central projection in $N$ such that $Nz$ is non-amenable and diffuse. Since $A$ is maximal abelian, $z\in A$ and $Az$ is a Cartan subalgebra in $Nz$. Hence $Az$ is diffuse, which means $Az\not\preceq_{M}\mathbb{C}$. The above lemma implies that there exists a conditional expectation from $\mathbb{B}(zL^2(C_h))$ onto $z{\cal N}_{\tilde{M}}(\tilde{A})''z={\cal N}_{z\tilde{M}z}(\tilde{A}z)''$. Now ${\cal N}_{z\tilde{M}z}(\tilde{A}z)''$ is non amenable and hence a contradiction. In fact, we have inclusions $C_{\tau_A}(A)z\subset C_{\tau_A\circ E_A}(N)z\subset zC_{\tau_A\circ E_A \circ E_N}(M)z$, and since the first inclusion is Cartan, the normalizer of $\tilde{A}z$ in $z\tilde{M}z$ generates a non-amenable subalgebra.
\section{\bf Further remarks}
\subsection{\bf Continuous and discrete cores}
In $\cite[\textrm{Subsection 5.2}]{Is12_2}$, we discussed semisolidity and non solidity of continuous cores of free quantum group $\rm III_1$ factors. Their non-solidity follows from non amenability of the relative commutant of the diffuse algebra $L\mathbb{R}$, which contains a non-amenable centralizer algebra.
Let $\mathbb{G}$ be a compact quantum group as in Theorem \ref{C} and assume that $L^\infty(\mathbb{G})$ is a full $\rm III_1$ factor and the Haar state is $\mathrm{Sd}(L^\infty(\mathbb{G}))$-almost periodic. Denote its discrete core by $D(L^\infty(\mathbb{G}))$ (see $\cite{Co74}\cite{Dy94}$). Then $qD(L^\infty(\mathbb{G}))q$ is always strongly solid for any trace finite projection $q$, since $D(L^\infty(\mathbb{G}))$ is isomorphic to $L^\infty(\mathbb{G})_h\otimes \mathbb{B}(H)$ for some separable Hilbert space $H$ and $L^\infty(\mathbb{G})_h$ is a strongly solid $\rm II_1$ factor (this follows from Theorem \ref{B}). Hence structures of these cores are different. In the subsection, we observe this difference from some viewpoints.
Write as $\Gamma\leq \mathbb{R}^*$ the Sd-invariant of $L^\infty(\mathbb{G})$. Then regarding $\mathbb{R}\subset \hat{\Gamma}$, take the crossed product $L^\infty(\mathbb{G})\rtimes \hat{\Gamma}$ by the extended modular action of the Haar state. This algebra is an explicit realization of the discrete core, namely, $D(L^\infty(\mathbb{G}))\simeq L^\infty(\mathbb{G})\rtimes \hat{\Gamma}$. Since this extended action is the modular action on the dense subgroup $\mathbb{R}$, we can easily verify that $L^\infty(\mathbb{G})\rtimes \hat{\Gamma} (=:M)$ satisfies the same condition as condition $\rm (AOC)^+$ replacing $\mathbb{R}$-action with $\hat{\Gamma}$-action. Under this setting, we can prove Theorem \ref{B}, namely, for any amenable subalgebra $A\subset qMq$ (with trace finite projection $q\in L\hat{\Gamma}$), we have either (i) $A\preceq_{M} L\hat{\Gamma}$ or (ii) ${\cal N}_{qMq}(A)''$ is amenable. This obviously implies strong solidity of $qMq$, since all diffuse subalgebra $A$ automatically satisfies $A\not\preceq_{qMq}L\hat{\Gamma}$ (because $L\hat{\Gamma}$ is atomic).
Roughly speaking, this observation implies that the only obstruction of the solidity of $C_h(L^\infty(\mathbb{G}))(=:C_h)$ is a diffuse subalgebra $L\mathbb{R}$. More precisely, $C_h$ is non solid since the subalgebra $L\mathbb{R}$ is diffuse, and $D(L^\infty(\mathbb{G}))$ is strongly solid since the subalgebra $L\hat{\Gamma}$ is atomic.
The next viewpoint is from property Gamma (or non fullness). It is known that a $\rm II_1$ factor $M$ has the property Gamma if and only if $C^*\{M,M' \}\cap \mathbb{K}(L^2(M))=0$ $\cite{aaa}$. Moreover it is not difficult to see that if a von Neumann algebra $M$ satisfies condition (AO) and $C^*\{M,M' \}\cap \mathbb{K}(L^2(M))=0$, then it is amenable. Hence any non amenable $\rm II_1$ factor cannot have property Gamma and condition (AO) at the same time. When $L^\infty(\mathbb{G})$ is a full $\rm III_1$ factor, on the one hand, for any $\mathrm{Tr}$-finite projection $p$, $pC_hp$ is non full, since it admits a non trivial central sequence (use the almost periodicity of $h$). Hence we can deduce $C^*\{C_h,C_h' \}\cap \mathbb{K}(L^2(C_h))=0$. In particular $C_h$ (and $pC_hp$ for \textit{any} projection $p$) does not satisfy condition (AO). On the other hand, for the discrete core $D(L^\infty(\mathbb{G}))$, $qD(L^\infty(\mathbb{G}))q$ always satisfies condition $\rm (AO)^+$ for any projection $q\in L\hat{\Gamma}\simeq \ell^\infty(\Gamma)$ with finite support. This is because $D(L^\infty(\mathbb{G}))$ satisfies condition $\rm (AO)^+$ with respect to the quotient $\mathbb{K}(L^2(\mathbb{G}))\otimes \mathbb{B}(\ell^2(\Gamma))$ and hence $qD(L^\infty(\mathbb{G}))q$ satisfies $\rm (AO)^+$ for $\mathbb{K}(L^2(\mathbb{G}))\otimes q\mathbb{B}(\ell^2(\Gamma))q=\mathbb{K}(L^2(\mathbb{G}))\otimes \mathbb{K}(q\ell^2(\Gamma))$. By $\cite[\textrm{Theorem B}]{Is12_2}$, we again get the strong solidity of $qD(L^\infty(\mathbb{G}))q$. We also get fullness of $qD(L^\infty(\mathbb{G}))q$ (since this is non amenable) for any $q\in \ell^\infty(\Gamma)$ with finite support and hence that of $D(L^\infty(\mathbb{G}))$.
\subsection{\bf Primeness of crossed products by bi-exact quantum groups}
In this subsection, we consider only compact quantum groups of Kac type. Let $M=\hat{\mathbb{G}}\ltimes B$ be a finite von Neumann algebra as in the statement of Theorem \ref{A}. Assume that $B$ is amenable. Then for any amenable subalgebra $A\subset qMq$ (where $q\in M$ is a projection), we have either (i) $A\preceq_M B$ or (ii) ${\cal N}_{qMq}(A)''$ is amenable. This is a sufficient condition for semisolidity when $B$ is abelian, and for semiprimeness, which means that any tensor decomposition has an amenable tensor component, when $B$ is non abelian. Thus we proved that any non amenable von Neumann subalgebra $N\subset p(\hat{\mathbb{G}}\ltimes B)p$ is prime when $B$ is abelian, and semiprime when $B$ is amenable.
\end{document} |
\begin{document}
\title{Preference-based Teaching}
\titlerunning{Preference-based Teaching}
\author{Ziyuan Gao$^1$ \and Christoph Ries$^2$ \and Hans Ulrich Simon$^2$ \and Sandra Zilles$^1$} \authorrunning{Z.~Gao, C.~Ries, H.~Simon and S.~Zilles}
\institute{Department of Computer Science\\University of Regina, Regina, SK, Canada S4S 0A2 \\\email{\{gao257,zilles\}@cs.uregina.ca} \and Department of Mathematics, \\ Ruhr-Universit\"{a}t Bochum, D-44780 Bochum, Germany\\\email{\{christoph.ries,hans.simon\}@rub.de}}
\maketitle
\begin{abstract} We introduce a new model of teaching named ``preference-based \linebreak[4]teaching'' and a corresponding complexity parameter---the preference-based \linebreak[4]teaching dimension (PBTD)---representing the worst-case number of examples needed to teach any concept in a given concept class. Although the PBTD coincides with the well-known recursive teaching dimension (RTD) on finite classes, it is radically different on infinite ones: the RTD becomes infinite already for trivial infinite classes (such as half-intervals) whereas the PBTD evaluates to reasonably small values for a wide collection of infinite classes including classes consisting of so-called closed sets w.r.t.~a given closure operator, including various classes related to linear sets over ${\mathbb N}_0$ (whose RTD had been studied quite recently) and including the class of Euclidean half-spaces. On top of presenting these concrete results, we provide the reader with a theoretical framework (of a combinatorial flavor) which helps to derive bounds on the PBTD. \end{abstract}
\begin{keywords} teaching dimension, preference relation, recursive teaching dimension,\linebreak[4] learning halfspaces, linear sets \end{keywords}
\section{Introduction} \label{sec:introduction}
The classical model of teaching~\cite{SM1991,GK1995} formulates the following interaction protocol between a teacher and a student: \begin{itemize} \item Both of them agree on a ``classification-rule system'', formally given by a concept class ${\mathcal{L}}$. \item In order to teach a specific concept $L \in {\mathcal{L}}$, the teacher presents to the student a \emph{teaching set}, i.e., a set $T$ of labeled examples so that $L$ is the only concept in ${\mathcal{L}}$ that is consistent with $T$. \item The student determines $L$ as the unique concept in ${\mathcal{L}}$ that is consistent with $T$. \end{itemize}
Goldman and Mathias~\cite{GM1996} pointed out that this model of teaching is not powerful enough, since the teacher is required to make \emph{any\/} consistent learner successful. A challenge is to model powerful teacher/student interactions without enabling unfair ``coding tricks''. Intuitively, the term ``coding trick'' refers to any form of undesirable collusion between teacher and learner, which would reduce the learning process to a mere decoding of a code the teacher sent to the learner. There is no generally accepted definition of what constitutes a coding trick, in part because teaching an exact learner could always be considered coding to some extent: the teacher presents a set of examples which the learner ``decodes'' into a concept.
In this paper, we adopt the notion of ``valid teacher/learner pair'' introduced by \cite{GM1996}. They consider their model to be intuitively free of coding tricks while it provably allows for a much broader class of interaction protocols than the original teaching model. In particular, teaching may thus become more efficient in terms of the number of examples in the teaching sets. Further definitions of how to avoid unfair coding tricks have been suggested~\cite{ZLHZ2011}, but they were less stringent than the one proposed by Goldman and Mathias. The latter simply requests that, if the learner hypothesizes concept $L$ upon seeing a sample set $S$ of labeled examples, then the learner will still hypothesize $L$ when presented with any sample set $S\cup S'$, where $S'$ contains only examples labeled consistently with $L$. A coding trick would then be any form of exchange between the teacher and the learner that does not satisfy this definition of validity.
The model of recursive teaching~\cite{ZLHZ2011,MGZ2014}, which is free of coding tricks according to the Goldman-Mathias definition, has recently gained attention because its complexity parameter, the recursive teaching dimension (RTD), has shown relations to the VC-dimension and to sample compression~\cite{ChenCT16,DFSZ2014,MSWY2015,SZ2015}, when focusing on finite concept classes. Below though we will give examples of rather simple infinite concept classes with infinite RTD, suggesting that the RTD is inadequate for addressing the complexity of teaching infinite classes.
In this paper, we introduce a model called \emph{preference-based teaching}, in which the teacher and the student do not only agree on a classification-rule system ${\mathcal{L}}$ but also on a preference relation (a strict partial order) imposed on ${\mathcal{L}}$. If the labeled examples presented by the teacher allow for several consistent explanations (= consistent concepts) in ${\mathcal{L}}$, the student will choose a concept $L \in {\mathcal{L}}$ that she prefers most. This gives more flexibility to the teacher than the classical model: the set of labeled examples need not distinguish a target concept $L$ from any other concept in ${\mathcal{L}}$ but only from those concepts $L'$ over which $L$ is not preferred.\footnote{Such a preference relation can be thought of as a kind of bias in learning: the student is ``biased'' towards concepts that are preferred over others, and the teacher, knowing the student's bias, selects teaching sets accordingly.} At the same time, preference-based teaching yields valid teacher/learner pairs according to Goldman and Mathias's definition. We will show that the new model, despite avoiding coding tricks, is quite powerful. Moreover, as we will see in the course of the paper, it often allows for a very natural design of teaching sets.
Assume teacher and student choose a preference relation that minimizes the worst-case number $M$ of examples required for teaching any concept in the class $\mathcal{L}$. This number $M$ is then called the preference-based teaching dimension (PBTD) of $\mathcal{L}$. In particular, we will show the following:
(i) Recursive teaching is a special case of preference-based teaching where the preference relation satisfies a so-called ``finite-depth condition''. It is precisely this additional condition that renders recursive teaching useless for many natural and apparently simple infinite concept classes. Preference-based teaching successfully addresses these shortcomings of recursive teaching, see Section~\ref{sec:rtd}. For finite classes, PBTD and RTD are equal.
(ii) A wide collection of geometric and algebraic concept classes with infinite RTD can be taught very efficiently, i.e., with low PBTD. To establish such results, we show in Section~\ref{sec:closure-operator} that spanning sets can be used as preference-based teaching sets with positive examples only --- a result that is very simple to obtain but quite useful.
(iii) In the preference-based model, linear sets over ${\mathbb N}_0$ with origin 0 and at most $k$ generators can be taught with $k$ positive examples, while recursive teaching with a bounded number of positive examples was previously shown to be impossible and it is unknown whether recursive teaching with a bounded number of positive and negative examples is possible for $k \geq 4$. We also give some almost matching upper and lower bounds on the PBTD for other classes of linear sets, see Section~\ref{sec:linsets}.
(iv) The PBTD of halfspaces in $\mathbbm{R}^d$ is upper-bounded by $6$, independent of the dimensionality $d$ (see Section~\ref{sec:halfspaces}), while its RTD is infinite.
(v) We give full characterizations of concept classes that can be taught with only one example (or with only one example, which is positive) in the preference-based model (see Section~\ref{sec:pbtd1}).
Based on our results and the naturalness of the teaching sets and preference relations used in their proofs, we claim that
preference-based teaching is far more suitable to the study of infinite concept classes than recursive teaching.
Parts of this paper were published in a previous conference version~\cite{GRSZ2016}.
\section{Basic Definitions and Facts} \label{sec:definitions}
${\mathbb N}_0$ denotes the set of all non-negative integers and ${\mathbb N}$ denotes the set of all positive integers. A {\em concept class} ${\mathcal{L}}$ is a family of subsets over a universe ${\mathcal{X}}$, i.e., ${\mathcal{L}} \subseteq 2^{\mathcal{X}}$ where $2^{\mathcal{X}}$ denotes the powerset of ${\mathcal{X}}$. The elements of ${\mathcal{L}}$ are called {\em concepts}. A {\em labeled example} is an element of ${\mathcal{X}} \times\{-,+\}$. We slightly deviate from this notation in Section~\ref{sec:halfspaces}, where our treatment of halfspaces makes it more convenient to use $\{-1,1\}$ instead of $\{-,+\}$, and in Section~\ref{sec:pbtd1}, where we perform Boolean operations on the labels and therefore use $\{0,1\}$ instead of $\{-,+\}$. Elements of ${\mathcal{X}}$ are called {\em examples}. Suppose that $T$ is a set of labeled examples. Let $T^+ = \{x\in{\mathcal{X}} : (x,+) \in T\}$ and $T^- = \{x\in{\mathcal{X}} : (x,-) \in T\}$. A set $L \subseteq{\mathcal{X}}$ is {\em consistent with $T$} if it includes all examples in $T$ that are labeled ``$+$'' and excludes all examples in $T$ that are labeled ``$-$'', i.e, if $T^+ \subseteq L$ and $T^- \cap L = \emptyset$. A set of labeled examples that is consistent with $L$ but not with $L'$ is said to {\em distinguish $L$ from $L'$}. The classical model of teaching is then defined as follows.
\begin{definition}[\cite{SM1991,GK1995}] \label{def:classical-model} A {\em teaching set} for a concept $L \in {\mathcal{L}}$ w.r.t.~${\mathcal{L}}$ is a set $T$ of labeled examples such that $L$ is the only concept in ${\mathcal{L}}$ that is consistent with $T$, i.e., $T$ distinguishes $L$ from any other concept in ${\mathcal{L}}$. Define
$\mathrm{TD}(L,{\mathcal{L}}) = \inf\{|T| : T\mbox{ is a teaching\ }$ $\mbox{set for $L$ w.r.t.~${\mathcal{L}}$}\}$, i.e., $\mathrm{TD}(L,{\mathcal{L}})$ is the smallest possible size of a teaching set for $L$ w.r.t.~${\mathcal{L}}$. If $L$ has no finite teaching set w.r.t.~${\mathcal{L}}$, then $\mathrm{TD}(L,{\mathcal{L}})=\infty$. The number $\mathrm{TD}({\mathcal{L}}) = \sup_{L \in {\mathcal{L}}}\mathrm{TD}(L,{\mathcal{L}}) \in {\mathbb N}_0\cup\{\infty\}$ is called the {\em teaching dimension of ${\mathcal{L}}$}. \end{definition}
For technical reasons, we will occasionally deal with the number $\mathrm{TD}_{min}({\mathcal{L}}) = \inf_{L \in {\mathcal{L}}}\mathrm{TD}(L,$ ${\mathcal{L}})$, i.e., the number of examples needed to teach the concept from ${\mathcal{L}}$ that is easiest to teach.
In this paper, we will examine a teaching model in which the teacher and the student do not only agree on a classification-rule system ${\mathcal{L}}$ but also on a preference relation, denoted as $\prec$, imposed on ${\mathcal{L}}$. We assume that $\prec$ is a {\em strict partial order} on ${\mathcal{L}}$, i.e., $\prec$ is asymmetric and transitive.
The partial order that makes every pair $L \neq L' \in {\mathcal{L}}$ incomparable is denoted by $\prec_\emptyset$. For every $L \in{\mathcal{L}}$, let \[ {\mathcal{L}}_{\prec L} = \{L'\in{\mathcal{L}}: L' \prec L\} \] be the set of concepts over which $L$ is strictly preferred. Note that ${\mathcal{L}}_{\prec_\emptyset L} = \emptyset$ for every $L\in{\mathcal{L}}$.
As already noted above, a teaching set $T$ of $L$ w.r.t.~${\mathcal{L}}$ distinguishes $L$ from any other concept in ${\mathcal{L}}$. If a preference relation comes into play, then $T$ will be exempted from the obligation to distinguish $L$ from the concepts in ${\mathcal{L}}_{\prec L}$ because $L$ is strictly preferred over them anyway.
\begin{definition} \label{def:td-prec-L} A {\em teaching set for $L \subseteq X$ w.r.t.~$({\mathcal{L}},\prec)$} is defined as a teaching set for $L$ w.r.t.~${\mathcal{L}}\setminus{\mathcal{L}}_{\prec L}$. Furthermore define \[
\mathrm{PBTD}(L,{\mathcal{L}},\prec) = \inf\{|T| : T\mbox{ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$}\} \in {\mathbb N}_0\cup\{\infty\} \enspace . \] The number $\mathrm{PBTD}({\mathcal{L}},\prec) = \sup_{L \in {\mathcal{L}}}\mathrm{PBTD}(L,{\mathcal{L}},\prec) \in {\mathbb N}_0\cup\{\infty\}$ is called the {\em teaching dimension of $({\mathcal{L}},\prec)$}. \end{definition} Definition~\ref{def:td-prec-L} implies that \begin{equation} \label{eq:td-prec-L} \mathrm{PBTD}(L,{\mathcal{L}},\prec) = \mathrm{TD}(L,{\mathcal{L}}\setminus{\mathcal{L}}_{\prec L}) \enspace . \end{equation} Let $L \mapsto T(L)$ be a mapping that assigns a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$ to every $L\in{\mathcal{L}}$. It is obvious from Definition~\ref{def:td-prec-L} that $T$ must be injective, i.e., $T(L) \neq T(L')$ if $L$ and $L'$ are distinct concepts from ${\mathcal{L}}$. The classical model of teaching is obtained from the model described in Definition~\ref{def:td-prec-L} when we plug in the empty preference relation $\prec_\emptyset$ for $\prec$. In particular, $\mathrm{PBTD}({\mathcal{L}},\prec_\emptyset)$ $= \mathrm{TD}({\mathcal{L}})$.
\iffalse To avoid unfair coding tricks, \cite{GM1996} required that the student identify a concept $L\in{\mathcal{L}}$ even from any superset $S\supseteq T(L)$ of the set $T(L)$ presented by the teacher, as long as $L$ is consistent with $S$. It is easy to see that preference-based teaching with respect to a fixed preference relation fulfills this requirement. \fi
We are interested in finding the partial order that is optimal for the purpose of teaching and we aim at determining the corresponding teaching dimension. This motivates the following notion: \begin{definition} \label{def:td-best-le} The {\em preference-based teaching dimension of ${\mathcal{L}}$} is given by \[ \mathrm{PBTD}({\mathcal{L}}) = \inf\{\mathrm{PBTD}({\mathcal{L}},\prec) : \mbox{$\prec$ is a strict partial order on ${\mathcal{L}}$}\} \enspace . \] \end{definition}
A relation $R'$ on ${\mathcal{L}}$ is said to be an {\em extension of a relation $R$} if $R \subseteq R'$. The {\em order-extension principle} states that any partial order has a linear extension \cite{Jech-1973}. The following result (whose second assertion follows from the first one in combination with the order-extension principle) is pretty obvious:
\begin{lemma} \label{lem:extension} \begin{enumerate} \item Suppose that $\prec'$ extends $\prec$. If $T$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$, then $T$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec')$. Moreover $\mathrm{PBTD}({\mathcal{L}},\prec') \le \mathrm{PBTD}({\mathcal{L}},$ $\prec)$. \item $\mathrm{PBTD}({\mathcal{L}}) = \inf\{\mathrm{PBTD}({\mathcal{L}},\prec) : \mbox{$\prec$ is a strict linear order on ${\mathcal{L}}$}\}$. \end{enumerate} \end{lemma}
Recall that Goldman and Mathias \cite{GM1996} suggested to avoid coding tricks by requesting that any superset $S$ of a teaching set for a concept $L$ remains a teaching set, if $S$ is consistent with $L$. This property is obviously satisfied in preference-based teaching. A preference-based teaching set needs to distinguish a concept $L$ from all concepts in ${\mathcal{L}}$ that are preferred over $L$. Adding more labeled examples from $L$ to such a teaching set will still result in a set distinguishing $L$ from all concepts in ${\mathcal{L}}$ that are preferred over $L$.
\paragraph{Preference-based teaching with positive examples only.}
Suppose that ${\mathcal{L}}$ contains two concepts $L,L'$ such that $L \subset L'$. In the classical teaching model, any teaching set for $L$ w.r.t.~${\mathcal{L}}$ has to employ a negative example in order to distinguish $L$ from $L'$. Symmetrically, any teaching set for $L'$ w.r.t.~${\mathcal{L}}$ has to employ a positive example. Thus classical teaching cannot be performed with one type of examples only unless ${\mathcal{L}}$ is an antichain w.r.t.~inclusion. As for preference-based teaching, the restriction to one type of examples is much less severe, as our results below will show.
A teaching set $T$ for $L \in {\mathcal{L}}$ w.r.t.~$({\mathcal{L}},\prec)$ is said to be {\em positive} if it does not make use of negatively labeled examples, i.e., if $T^- = \emptyset$. In the sequel, we will occasionally identify a positive teaching set $T$ with $T^+$. A positive teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$ can clearly not distinguish $L$ from a proper superset of $L$ in ${\mathcal{L}}$. Thus, the following holds:
\begin{lemma} \label{lem:ts-pos} Suppose that $L \mapsto T^+(L)$ maps each $L \in {\mathcal{L}}$ to a positive teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$. Then $\prec$ must be an extension of $\supset$ (so that proper subsets of a set $L$ are strictly preferred over $L$) and, for every $L \in {\mathcal{L}}$, the set $T^+(L)$ must distinguish $L$ from every proper subset of $L$ in ${\mathcal{L}}$. \end{lemma}
\noindent Define \begin{equation} \label{eq:td-plus-L}
\mathrm{PBTD}^+(L,{\mathcal{L}},\prec) = \inf\{|T| : T\mbox{ is a positive teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec$})\} \enspace . \end{equation} The number $\mathrm{PBTD}^+({\mathcal{L}},\prec) = \sup_{L \in {\mathcal{L}}}\mathrm{PBTD}^+(L,{\mathcal{L}},\prec)$ (possibly $\infty$) is called the {\em positive teaching dimension of $({\mathcal{L}},\prec)$}. The {\em positive preference-based teaching dimension of ${\mathcal{L}}$} is then given by \begin{equation} \label{eq:td-plus-cL} \mathrm{PBTD}^+({\mathcal{L}}) = \inf\{\mathrm{PBTD}^+({\mathcal{L}},\prec) : \mbox{$\prec$ is a strict partial order on ${\mathcal{L}}$}\} \enspace . \end{equation}
\paragraph{Monotonicity.}
A complexity measure $K$ that assigns a number $K({\mathcal{L}}) \in {\mathbb N}_0$ to a concept class ${\mathcal{L}}$ is said to be {\em monotonic} if ${\mathcal{L}}' \subseteq {\mathcal{L}}$ implies that $K({\mathcal{L}}') \le K({\mathcal{L}})$. It is well known (and trivial to see) that $\mathrm{TD}$ is monotonic. It is fairly obvious that $\mathrm{PBTD}$ is monotonic, too:
\begin{lemma} \label{lem:monotonicity} $\mathrm{PBTD}$ and $\mathrm{PBTD}^+$ are monotonic. \end{lemma}
\noindent As an application of monotonicity, we show the following result:
\begin{lemma} \label{lem:lb-tdmin} For every finite subclass ${\mathcal{L}}'$ of ${\mathcal{L}}$, we have $ \mathrm{PBTD}({\mathcal{L}}) \ge \mathrm{PBTD}({\mathcal{L}}') \ge \mathrm{TD}_{min}({\mathcal{L}}') $. \end{lemma}
\begin{proof} The first inequality holds because $\mathrm{PBTD}$ is monotonic. The second inequality follows from the fact that a finite partially ordered set must contain a minimal element. Thus, for any fixed choice of $\prec$, ${\mathcal{L}}'$ must contain a concept $L'$ such that ${\mathcal{L}}'_{\prec L'} = \emptyset$. Hence, \[ \mathrm{PBTD}({\mathcal{L}}',\prec) \ge \mathrm{PBTD}(L',{\mathcal{L}}',\prec) \stackrel{(\ref{eq:td-prec-L})}{=} \mathrm{TD}(L',{\mathcal{L}}'\setminus{\mathcal{L}}'_{\prec L'}) = \mathrm{TD}(L',{\mathcal{L}}') \ge \mathrm{TD}_{min}({\mathcal{L}}') \enspace . \] Since this holds for any choice of $\prec$, we get $\mathrm{PBTD}({\mathcal{L}}') \ge \mathrm{TD}_{min}({\mathcal{L}}')$, as desired. \end{proof}
\section{Preference-based versus Recursive Teaching} \label{sec:rtd}
The preference-based teaching dimension is a relative of the recursive teaching dimension. In fact, both notions coincide on finite classes, as we will see shortly. We first recall the definitions of the recursive teaching dimension and of some related notions~\cite{ZLHZ2011,MGZ2014}.
A {\em teaching sequence for ${\mathcal{L}}$} is a sequence of the form ${\mathcal{S}} = ({\mathcal{L}}_i,d_i)_{i\ge1}$ where ${\mathcal{L}}_1,{\mathcal{L}}_2,{\mathcal{L}}_3,\ldots$ form a partition of ${\mathcal{L}}$ into non-empty sub-classes and, for every $i\ge1$, we have that \begin{equation} \label{eq:rtd} d_i = \sup_{L\in{\mathcal{L}}_i}\mathrm{TD}\left(L,{\mathcal{L}}\setminus\cup_{j=1}^{i-1}{\mathcal{L}}_j\right) \enspace . \end{equation} If, for every $i\ge 1$, $d_i$ is the supremum over all $L \in {\mathcal{L}}_i$ of the smallest size of a \emph{positive teaching set} for $L$ w.r.t.\ $\cup_{j\geq i}{\mathcal{L}}_j$ (and $d_i = \infty$ if some $L \in {\mathcal{L}}_i$ does not have a positive teaching set w.r.t.\ $\cup_{j\ge i}{\mathcal{L}}_j$), then ${\mathcal{S}}$ is said to be a \emph{positive teaching sequence for ${\mathcal{L}}$}. The {\em order} of a teaching sequence or a positive teaching sequence ${\mathcal{S}}$ (possibly $\infty$) is defined as $\mathrm{ord}({\mathcal{S}}) = \sup_{i\ge1}d_i$. The {\em recursive teaching dimension of ${\mathcal{L}}$} (possibly $\infty$) is defined as the order of the teaching sequence of lowest order for ${\mathcal{L}}$. More formally, $\mathrm{RTD}({\mathcal{L}}) = \inf_{{\mathcal{S}}}\mathrm{ord}({\mathcal{S}})$ where ${\mathcal{S}}$ ranges over all teaching sequences for ${\mathcal{L}}$. Similarly, $\mathrm{RTD}^+({\mathcal{L}}) = \inf_{{\mathcal{S}}}\mathrm{ord}({\mathcal{S}})$, where ${\mathcal{S}}$ ranges over all positive teaching sequences for ${\mathcal{L}}$. 
Note that the following holds for every ${\mathcal{L}}' \subseteq {\mathcal{L}}$ and for every teaching sequence ${\mathcal{S}} = ({\mathcal{L}}_i,d_i)_{i\ge1}$ for ${\mathcal{L}}'$ such that $\mathrm{ord}({\mathcal{S}}) = \mathrm{RTD}({\mathcal{L}}')$: \begin{equation} \label{eq:rtd-tdmin} \mathrm{RTD}({\mathcal{L}}) \ge \mathrm{RTD}({\mathcal{L}}') = \mathrm{ord}({\mathcal{S}}) \ge d_1 = \sup_{L\in{\mathcal{L}}_1}\mathrm{TD}(L,{\mathcal{L}}') \ge \mathrm{TD}_{min}({\mathcal{L}}') \enspace . \end{equation}
Note an important difference between $\mathrm{PBTD}$ and $\mathrm{RTD}$: while $\mathrm{RTD}({\mathcal{L}}) \ge$ $\mathrm{TD}_{min}$ $({\mathcal{L}}')$ for \emph{all}\/ ${\mathcal{L}}'\subseteq{\mathcal{L}}$, in general the same holds for $\mathrm{PBTD}$ only when restricted to finite ${\mathcal{L}}'$, cf.\ Lemma~\ref{lem:lb-tdmin}. This difference will become evident in the proof of Lemma~\ref{lem:huge-gap}.
The {\em depth} of $L \in {\mathcal{L}}$ w.r.t.~a strict partial order $\prec$ imposed on ${\mathcal{L}}$ is defined as the length of the longest chain in $({\mathcal{L}},\prec)$ that ends with the $\prec$-maximal element $L$ (resp.~as $\infty$ if there is no bound on the length of these chains). The recursive teaching dimension is related to the preference-based teaching dimension as follows:
\begin{lemma} \label{lem:rtd-pbtd} $\mathrm{RTD}({\mathcal{L}}) = \inf_{\prec}\mathrm{PBTD}({\mathcal{L}},\prec)$ and $\mathrm{RTD}^+({\mathcal{L}}) = \inf_{\prec}\mathrm{PBTD}^+({\mathcal{L}},\prec)$ where $\prec$ ranges over all strict partial orders on ${\mathcal{L}}$ that satisfy the following ``finite-depth condition'': every $L \in {\mathcal{L}}$ has a finite depth w.r.t.~$\prec$. \end{lemma}
The following is an immediate consequence of Lemma~\ref{lem:rtd-pbtd} and the trivial observation that the finite-depth condition is always satisfied if ${\mathcal{L}}$ is finite:
\begin{corollary} \label{cor:rtd-pb} $\mathrm{PBTD}({\mathcal{L}}) \le \mathrm{RTD}({\mathcal{L}})$, with equality if ${\mathcal{L}}$ is finite. \end{corollary}
\noindent While $\mathrm{PBTD}({\mathcal{L}})$ and $\mathrm{RTD}({\mathcal{L}})$ refer to the same finite number when ${\mathcal{L}}$ is finite, there are classes for which $\mathrm{RTD}$ is finite and yet larger than $\mathrm{PBTD}$, as Lemma~\ref{lem:huge-gap} will show. Generally, for infinite classes, the gap between $\mathrm{PBTD}$ and $\mathrm{RTD}$ can be arbitrarily large:
\begin{lemma} \label{lem:huge-gap} There exists an infinite class ${\mathcal{L}}_\infty$ of VC-dimension $1$ such that $\mathrm{PBTD}^+({\mathcal{L}}_\infty)=1$ and $\mathrm{RTD}({\mathcal{L}}_\infty)=\infty$. Moreover, for every $k\ge1$, there exists an infinite class ${\mathcal{L}}_k$ such that $\mathrm{PBTD}^+({\mathcal{L}}_k)=1$ and $\mathrm{RTD}({\mathcal{L}}_k)=k$. \end{lemma}
\iffalse The complete proof of Lemma~\ref{lem:huge-gap} is given in Appendix~\ref{app:rtd}. Here we only specify the classes ${\mathcal{L}}_\infty$ and ${\mathcal{L}}_k$ that are employed in this proof: \begin{itemize} \item Choose ${\mathcal{L}}_\infty$ as the class of half-intervals $[0,a]$, where $0 \le a < 1$, over the universe $[0,1)$.\footnote{$\mathrm{RTD}({\mathcal{L}}_\infty)=\infty$ had been observed by~\cite{MSWY2015} already.} \item Let ${\mathcal{X}} = [0,2)$. For each $a = \sum_{n \ge 1}\alpha_n2^{-n}\in [0,1)$ and for all $i=1,\ldots,k$, let $1 \le a_i < 2$ be given by $a_i = 1+\sum_{n \ge 0}\alpha_{kn+i}2^{-n}$. Finally, let $I_a = [0,a) \cup \{a_1,\ldots,a_k\} \subseteq {\mathcal{X}}$ and let ${\mathcal{L}}_k = \{I_a: 0 \le a < 1\}$. \end{itemize}. \fi
\begin{proof} We first show that there exists a class of VC-dimension $1$, say ${\mathcal{L}}_\infty$, such that $\mathrm{PBTD}^+({\mathcal{L}}_\infty)=1$ while $\mathrm{RTD}({\mathcal{L}}_\infty)=\infty$. To this end, let ${\mathcal{L}}_\infty$ be the family of closed half-intervals over $[0,1)$, i.e., ${\mathcal{L}}_\infty = \{ [0,a]: 0 \le a < 1\}$. We first prove that $\mathrm{PBTD}^+({\mathcal{L}}_\infty) = 1$. Consider the preference relation given by $[0,b] \prec [0,a]$ iff $a<b$. Then, for each $0 \le a <1$, we have \[ \mathrm{PBTD}([0,a],{\mathcal{L}}_\infty,\prec) \stackrel{(\ref{eq:td-prec-L})}{=} \mathrm{TD}([0,a],\{[0,b]:\ 0 \le b \le a\}) = 1 \] because the single example $(a,+)$ suffices for distinguishing $[0,a]$ from any interval $[0,b]$ with $b<a$.
It was observed by~\cite{MSWY2015} already that $\mathrm{RTD}({\mathcal{L}}_\infty)=\infty$ because every teaching set for some $[0,a]$ must contain an infinite sequence of distinct reals that converges from above to $a$. Thus, using Equation~(\ref{eq:rtd-tdmin}) with ${\mathcal{L}}'={\mathcal{L}}_\infty$, we have $\mathrm{RTD}({\mathcal{L}}_\infty) \ge \mathrm{TD}_{min}({\mathcal{L}}_\infty) = \infty$.
Next we show that, for every $k\ge1$, there exists a class, say ${\mathcal{L}}_k$, such that $\mathrm{PBTD}^+$ $({\mathcal{L}}_k)=1$ while $\mathrm{RTD}({\mathcal{L}}_k)=k$. To this end, let ${\mathcal{X}} = [0,2)$. For each $a \in [0,1)$, fix a binary representation $\sum_{n \ge 1}\alpha_n2^{-n}$ of $a$,
where $\alpha_n\in\{0,1\}$ are binary coefficients, and for all $i=1,\ldots,k$, let $1 \le a_i < 2$ be given by $a_i = 1+\sum_{n \ge 0}\alpha_{kn+i}2^{-(kn+i)}$.\footnote{Note that, for $a=\frac{m}{2^N}$ with $m,N\in\mathbb{N}$, there are two binary representations. We can pick either one to define the $\alpha_n$ and $a_i$ values.} Let $A$ be the set of all $a \in [0,1)$ such that if $\sum_{n \ge 1}\alpha_n2^{-n}$ is the binary representation of $a$ fixed earlier, then for all $i \in \{1,\ldots,k\}$, there is some $n \geq 0$ for which $\alpha_{nk+i} \neq 0$. Finally, let $I_a = [0,a] \cup \{a_1,\ldots,a_k\} \subseteq {\mathcal{X}}$ and let ${\mathcal{L}}_k = \{I_a: 0 \le a < 1 \wedge a \in A\}$.
Clearly $\mathrm{PBTD}^+({\mathcal{L}}_k)=1$ because, using the preference relation given by $I_b \prec I_a$ iff $a<b$, we can teach $I_a$ w.r.t.~${\mathcal{L}}_k$ by presenting the single example $(a,+)$ (the same strategy as for half-intervals). Moreover, note that $I_a$ is the only concept in ${\mathcal{L}}_k$ that contains $a_1,\ldots,a_k$, i.e., $\{a_1,\ldots,a_k\}$ is a positive teaching set for $I_a$ w.r.t.~${\mathcal{L}}_k$. It follows that $\mathrm{RTD}({\mathcal{L}}_k) \le \mathrm{TD}({\mathcal{L}}_k) \le k$. It remains to show that $\mathrm{RTD}({\mathcal{L}}_k) \ge k$. To this end, we consider the subclass ${\mathcal{L}}'_k$ consisting of all concepts $I_a$ such that $a \in A$ and $a$ has only finitely many $1$'s in its binary representation $(\alpha_n)_{n\in\mathbb{N}}$, i.e., all but finitely many of the $\alpha_n$ are zero. Pick any concept $I_a \in {\mathcal{L}}'_k$. Let $T$ be any set of at most $k-1$ examples labeled consistently according to $I_a$. At least one of the positive examples $a_1,\ldots,a_k$ must be missing, say $a_i$ is missing. Let $J_{a,i}$ be the set of indices given by $J_{a,i} = \{n\in{\mathbb N}_0:\ \alpha_{kn+i}=0\}$. The following observations show that there exists some $a' \in {\mathcal{X}}\setminus\{a\}$ such that $I_{a'}$ is consistent with $T$. \begin{itemize} \item When we set some (at least one but only finitely many) of the bits $\alpha_{kn+i}$ with $n \in J_{a,i}$ from $0$ to $1$ (while keeping fixed the remaining bits of the binary representation of $a$), then we obtain a number $a' \neq a$ such that $I_{a'}$ is still consistent with all positive examples in $T$ (including the example $(a,+)$ which might be in $T$). \item Note that $J_{a,i}$ is an infinite set. It is therefore possible to choose the bits that are set from $0$ to $1$ in such a fashion that the finitely many bit patterns represented by the numbers in $T^- \cap [1,2)$ are avoided. 
\item It is furthermore possible to choose the bits that are set from $0$ to $1$ in such a fashion that the resulting number $a'$ is as close to $a$ as we like so that $I_{a'}$ is also consistent with the negative examples from $T^- \cap[0,1)$ and $a' \in A$. \end{itemize} It follows from this reasoning that no set with less than $k$ examples can possibly be a teaching set for~$I_a$. Since this holds for an arbitrary choice of $a$, we may conclude that $\mathrm{RTD}({\mathcal{L}}_k) \ge \mathrm{RTD}({\mathcal{L}}'_k) \ge \mathrm{TD}_{min}({\mathcal{L}}'_k) = k$. \end{proof}
\section{Preference-based Teaching with Positive Examples Only} \label{sec:closure-operator}
The main purpose of this section is to relate positive preference-based teaching to ``spanning sets" and ``closure operators", which are well-studied concepts in the computational learning theory literature. Let ${\mathcal{L}}$ be a concept class over the universe ${\mathcal{X}}$. We say that $S\subseteq{\mathcal{X}}$ is a {\em spanning set} of $L\in{\mathcal{L}}$ w.r.t.~${\mathcal{L}}$ if $S \subseteq L$ and any set in ${\mathcal{L}}$ that contains $S$ must contain $L$ as well.\footnote{This generalizes the classical definition of a spanning set~\cite{HSW1990}, which is given w.r.t.~intersection-closed classes only.} In other words, $L$ is the unique smallest concept in ${\mathcal{L}}$ that contains $S$. We say that $S\subseteq{\mathcal{X}}$ is a {\em weak spanning set} of $L\in{\mathcal{L}}$ w.r.t.~${\mathcal{L}}$ if $S\subseteq L$ and $S$ is not contained in any proper subset of $L$ in ${\mathcal{L}}$.\footnote{Weak spanning sets have been used in the field of recursion-theoretic inductive inference under the name ``tell-tale sets''~\cite{Ang1980}.} We denote by $I({\mathcal{L}})$ (resp.~$I'({\mathcal{L}})$) the smallest number $k$ such that every concept $L \in {\mathcal{L}}$ has a spanning set (resp.~a weak spanning set) w.r.t.~${\mathcal{L}}$ of size at most $k$. Note that $S$ is a spanning set of $L$ w.r.t.~${\mathcal{L}}$ iff $S$ distinguishes $L$ from all concepts in ${\mathcal{L}}$ except for supersets of $L$, i.e., iff $S$ is a positive teaching set for $L$ w.r.t.~$({\mathcal{L}},\supset)$. Similarly, $S$ is a weak spanning set of $L$ w.r.t.~${\mathcal{L}}$ iff $S$ distinguishes $L$ from all its proper subsets in ${\mathcal{L}}$ (which is necessarily the case when $S$ is a positive teaching set). These observations can be summarized as follows:
\begin{equation} \label{eq:span-pbtd} I'({\mathcal{L}}) \le \mathrm{PBTD}^+({\mathcal{L}}) \le \mathrm{PBTD}^+({\mathcal{L}},\supset) \le I({\mathcal{L}}) \enspace . \end{equation}
The last two inequalities are straightforward. The inequality $I'({\mathcal{L}}) \le \mathrm{PBTD}^+({\mathcal{L}})$ follows from Lemma~\ref{lem:ts-pos}, which implies that no concept $L$ can have a preference-based teaching set $T$ smaller than its smallest weak spanning set. Such a set $T$ would be consistent with some proper subset of $L$, which is impossible by Lemma~\ref{lem:ts-pos}.
Suppose ${\mathcal{L}}$ is intersection-closed. Then $\cap_{L\in{\mathcal{L}}:S \subseteq L}L$ is the unique smallest concept in ${\mathcal{L}}$ containing $S$. If $S \subseteq L_0$ is a weak spanning set of $L_0 \in {\mathcal{L}}$, then $\cap_{L\in{\mathcal{L}}:S \subseteq L}L = L_0$ because, on the one hand, $\cap_{L\in{\mathcal{L}}:S \subseteq L}L \subseteq L_0$ and, on the other hand, no proper subset of $L_0$ in ${\mathcal{L}}$ contains $S$. Thus the distinction between spanning sets and weak spanning sets is blurred for intersection-closed classes:
\begin{lemma} \label{lem:cap-closed} Suppose that ${\mathcal{L}}$ is intersection-closed. Then $I'({\mathcal{L}}) = \mathrm{PBTD}^+({\mathcal{L}}) = I({\mathcal{L}})$. \end{lemma}
\begin{example} Let ${\mathcal{R}}_d$ denote the class of $d$-dimensional axis-parallel hyper-rectangles ($=d$-dimensional boxes). This class is intersection-closed and clearly $I({\mathcal{R}}_d)=2$. Thus $\mathrm{PBTD}^+({\mathcal{R}}_d)=2$. \end{example}
A mapping $\mathrm{cl}:2^{\mathcal{X}} \rightarrow 2^{\mathcal{X}}$ is said to be a {\em closure operator} on the universe ${\mathcal{X}}$ if the following conditions hold for all sets $A,B \subseteq {\mathcal{X}}$: \[ A \subseteq B \Rightarrow \mathrm{cl}(A) \subseteq \mathrm{cl}(B)\ \mbox{ and }\ A \subseteq \mathrm{cl}(A) = \mathrm{cl}(\mathrm{cl}(A)) \enspace . \] The following notions refer to an arbitrary but fixed closure operator. The set $\mathrm{cl}(A)$ is called the {\em closure} of $A$. A set $C$ is said to be {\em closed} if $\mathrm{cl}(C) = C$. It follows that precisely the sets $\mathrm{cl}(A)$ with $A \subseteq {\mathcal{X}}$ are closed. With this notation, we observe the following lemma.
\begin{lemma} \label{lem:span-closure} Let ${\mathcal{C}}$ be the set of all closed subsets of ${\mathcal{X}}$ under some closure operator $\mathrm{cl}$, and let $L\in{\mathcal{C}}$. If $L = \mathrm{cl}(S)$, then $S$ is a spanning set of $L$ w.r.t.~${\mathcal{C}}$. \end{lemma}
\begin{proof} Suppose $L'\in{\mathcal{C}}$ and $S\subseteq L'$. Then $L = \mathrm{cl}(S) \subseteq \mathrm{cl}(L') = L'$. \end{proof}
For every closed set $L \in {\mathcal{L}}$, let $s_{cl}(L)$ denote the size (possibly $\infty$) of the smallest set $S \subseteq {\mathcal{X}}$ such that $\mathrm{cl}(S) = L$. With this notation, we get the following (trivial but useful) result:
\begin{theorem} \label{th:span} Given a closure operator, let ${\mathcal{C}}[m]$ be the class of all closed subsets $C \subseteq {\mathcal{X}}$ with $s_{cl}(C) \le m$. Then $\mathrm{PBTD}^+({\mathcal{C}}[m]) \le \mathrm{PBTD}^+({\mathcal{C}}[m],\supset) \le m$. Moreover, this holds with equality provided that ${\mathcal{C}}[m] \setminus {\mathcal{C}}[m-1] \neq \emptyset$. \end{theorem}
\begin{proof} The inequality $\mathrm{PBTD}^+({\mathcal{C}}[m],\supset) \le m$ follows directly from Equation~(\ref{eq:span-pbtd}) and Lemma~\ref{lem:span-closure}. \\ Pick a concept $C_0 \in {\mathcal{C}}[m]$ such that $s_{cl}(C_0) = m$. Then any subset $S$ of $C_0$ of size less than $m$ spans only a proper subset of $C_0$, i.e., $\mathrm{cl}(S) \subset C_0$. Thus $S$ does not distinguish $C_0$ from $\mathrm{cl}(S)$. However, by Lemma~\ref{lem:ts-pos}, any preference-based learner must strictly prefer $\mathrm{cl}(S)$ over $C_0$. It follows that there is no positive teaching set of size less than $m$ for $C_0$ w.r.t.~${\mathcal{C}}[m]$. \end{proof}
Many natural classes can be cast as classes of the form ${\mathcal{C}}[m]$ by choosing the universe and the closure operator appropriately; the following examples illustrate the usefulness of Theorem~\ref{th:span} in that regard.
\begin{example}\label{exmp:pbtdpluslinset} Let
\[ \mathrm{LINSET}_k = \{\spn{G}: (G \subset {\mathbb N}) \wedge (1 \le |G| \le k)\} \] where $\spn{G} = \left\{\sum_{g \in G}a(g)g: a(g)\in{\mathbb N}_0\right\}$. In other words, $\mathrm{LINSET}_k$ is the set of all non-empty linear subsets of $\mathbb{N}_0$ that are generated by at most $k$ generators. Note that the mapping $G \mapsto \spn{G}$ is a closure operator over the universe ${\mathbb N}_0$. Since obviously $\mathrm{LINSET}_k \setminus \mathrm{LINSET}_{k-1} \neq \emptyset$, we obtain $\mathrm{PBTD}^+(\mathrm{LINSET}_k) = k$. \end{example}
\begin{example} \label{ex:polygons} Let ${\mathcal{X}} = \mathbbm{R}^2$ and let $\mathcal{C}_k$ be the class of convex polygons with at most $k$ vertices. Defining $\mathrm{cl}(S)$ to be the convex closure of $S$, we obtain ${\mathcal{C}}[k]=\mathcal{C}_k$ and thus $\mathrm{PBTD}^+(\mathcal{C}_k) = k$. \end{example}
\begin{example} Let $\mathcal{X}=\mathbb{R}^n$ and let $\mathcal{C}_k$ be the class of polyhedral cones that can be generated by $k$ (or less) vectors in $\mathbbm{R}^n$. If we take $\mathrm{cl}(S)$ to be the conic closure of $S \subseteq\mathbbm{R}^n$, then $\mathcal{C}[k]=\mathcal{C}_k$ and thus $\mathrm{PBTD}^+(\mathcal{C}_k)=k$. \end{example}
\section{A Convenient Technique for Proving Upper Bounds} \label{sec:admissible-mappings}
In this section, we give an alternative definition of the preference-based teaching dimension using the notion of an ``admissible mapping". Given a concept class ${\mathcal{L}}$ over a universe ${\mathcal{X}}$, let $T$ be a mapping $L \mapsto T(L) \subseteq {\mathcal{X}} \times \{-,+\}$ that assigns a set $T(L)$ of labeled examples to every set $L \in {\mathcal{L}}$ such that the labels in $T(L)$ are consistent with $L$. The {\em order} of $T$, denoted as $\mathrm{ord}(T)$, is defined
as $\sup_{L \in {\mathcal{L}}}|T(L)| \in {\mathbb N}\cup\{\infty\}$. Define the mappings $T^+$ and $T^-$ by setting $T^+(L) = \{x : (x,+) \in T(L)\}$ and $T^-(L) = \{x : (x,-) \in T(L)\}$ for every $L\in{\mathcal{L}}$. We say that $T$ is {\em positive} if $T^-(L)=\emptyset$ for every $L\in{\mathcal{L}}$. In the sequel, we will occasionally identify a positive mapping $L \mapsto T(L)$ with the mapping $L \mapsto T^+(L)$. The symbol ``$+$'' as an upper index of $T$ will always indicate that the underlying mapping $T$ is positive.
\noindent The following relation will help to clarify under which conditions the sets $(T(L))_{L\in{\mathcal{L}}}$ are teaching sets w.r.t.~a suitably chosen preference relation: \[ R_T = \{(L,L')\in{\mathcal{L}}\times{\mathcal{L}}:\ (L \neq L') \wedge (\mbox{$L$ is consistent with $T(L')$})\} \enspace . \] The transitive closure of $R_T$ is denoted as $\mathrm{trcl}(R_T)$ in the sequel. The following notion will play an important role in this paper:
\begin{definition} \label{def:admissible} A mapping $L \mapsto T(L)$ with $L$ ranging over all concepts in ${\mathcal{L}}$ is said to be {\em admissible for ${\mathcal{L}}$} if the following holds: \begin{enumerate} \item For every $L \in{\mathcal{L}}$, $L$ is consistent with $T(L)$. \item The relation $\mathrm{trcl}(R_T)$ is asymmetric (which clearly implies that $R_T$ is asymmetric too). \end{enumerate} \end{definition} If $T$ is admissible, then $\mathrm{trcl}(R_T)$ is transitive and asymmetric, i.e., $\mathrm{trcl}(R_T)$ is a strict partial order on ${\mathcal{L}}$. We will therefore use the notation $\prec_T$ instead of $\mathrm{trcl}(R_T)$ whenever $T$ is known to be admissible.
\begin{lemma} Suppose that $T^+$ is a positive admissible mapping for ${\mathcal{L}}$. Then the relation $\prec_{T^+}$ on ${\mathcal{L}}$ extends the relation $\supset$ on ${\mathcal{L}}$. More precisely, the following holds for all $L,L' \in {\mathcal{L}}$: \[ L' \subset L \Rightarrow (L,L') \in R_{T^+} \Rightarrow L \prec_{T^+} L' \enspace . \] \end{lemma}
\begin{proof} If $T^+$ is admissible, then $L'$ is consistent with $T^+(L')$. Thus $T^+(L') \subseteq L' \subset L$ so that $L$ is consistent with $T^+(L')$ too. Therefore $(L,L') \in R_{T^+}$, i.e., $L \prec_{T^+} L'$. \end{proof}
\noindent The following result clarifies how admissible mappings are related to preference-based teaching:
\begin{lemma} For each concept class ${\mathcal{L}}$, the following holds: \[ \mathrm{PBTD}({\mathcal{L}}) = \inf_T \mathrm{ord}(T)\ \mbox{ and }\ \mathrm{PBTD}^+({\mathcal{L}}) = \inf_{T^+} \mathrm{ord}(T^+) \] where $T$ ranges over all mappings that are admissible for ${\mathcal{L}}$ and $T^+$ ranges over all positive mappings that are admissible for ${\mathcal{L}}$. \end{lemma}
\begin{proof} We restrict ourselves to the proof for $\mathrm{PBTD}({\mathcal{L}}) = \inf_T \mathrm{ord}(T)$ because the equation $\mathrm{PBTD}^+({\mathcal{L}}) = \inf_{T^+} \mathrm{ord}(T^+)$ can be obtained in a similar fashion. We first prove that $\mathrm{PBTD}({\mathcal{L}}) \le \inf_T \mathrm{ord}(T)$. Let $T$ be an admissible mapping for ${\mathcal{L}}$. It suffices to show that, for every $L\in{\mathcal{L}}$, $T(L)$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec_T)$. Suppose $L'\in{\mathcal{L}}\setminus\{L\}$ is consistent with $T(L)$. Then $(L',L) \in R_T$ and thus $L' \prec_T L$. It follows that $\prec_T$ prefers $L$ over all concepts $L'\in{\mathcal{L}}\setminus\{L\}$ that are consistent with $T(L)$. Thus $T(L)$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec_T)$, as desired.
We now prove that $\inf_T \mathrm{ord}(T) \le \mathrm{PBTD}({\mathcal{L}})$. Let $\prec$ be a strict partial order on ${\mathcal{L}}$ and let $T$ be a mapping such that, for every $L\in{\mathcal{L}}$, $T(L)$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$. It suffices to show that $T$ is admissible for ${\mathcal{L}}$. Consider a pair $(L',L) \in R_T$. The definition of $R_T$ implies that $L' \neq L$ and that $L'$ is consistent with $T(L)$. Since $T(L)$ is a teaching set w.r.t.~$({\mathcal{L}},\prec)$, it follows that $L' \prec L$. Thus, $\prec$ is an extension of $R_T$. Since $\prec$ is transitive, it is even an extension of $\mathrm{trcl}(R_T)$. Because $\prec$ is asymmetric, $\mathrm{trcl}(R_T)$ must be asymmetric, too. It follows that $T$ is admissible. \end{proof}
\section{Preference-based Teaching of Linear Sets} \label{sec:linsets}
Some work in computational learning theory \cite{Abe89,GSZ2015,Takada92} is concerned with learning \emph{semi-linear sets}, i.e., unions of linear subsets of $\mathbb{N}^k$ for some fixed $k\ge 1$, where each linear set consists of exactly those elements that can be written as the sum of some constant vector $c$ and a linear combination of the elements of some fixed set of generators, see Example~\ref{exmp:pbtdpluslinset}. While semi-linear sets are of common interest in mathematics in general, they play a particularly important role in the theory of formal languages, due to \emph{Parikh's theorem}, by which the so-called Parikh vectors of strings in a context-free language always form a semi-linear set~\cite{Parikh66}.
A recent study \cite{GSZ2015} analyzed computational teaching of classes of linear subsets of $\mathbb{N}$ (where $k=1$) and some variants thereof, as a substantially simpler yet still interesting special case of semi-linear sets. In this section, we extend that study to preference-based teaching.
Within the scope of this section, all concept classes are formulated over the universe ${\mathcal{X}} = {\mathbb N}_0$. Let $G = \{g_1,\ldots,g_k\}$ be a finite subset of ${\mathbb N}$. We denote by $\spn{G}$ resp.~by $\spn{G}_+$ the following sets: \[ \spn{G} = \left\{\sum_{i=1}^{k}a_ig_i:\ a_1,\ldots,a_k\in{\mathbb N}_0\right\}\ \mbox{ and }\ \spn{G}_+ = \left\{\sum_{i=1}^{k}a_ig_i:\ a_1,\ldots,a_k\in{\mathbb N}\right\} \enspace . \]
We will determine (at least approximately) the preference-based teaching dimension of the following concept classes over ${\mathbb N}_0$: \begin{eqnarray*}
\mathrm{LINSET}_k & = & \{\spn{G}:\ (G \subset {\mathbb N}) \wedge (1 \le |G| \le k)\} \enspace . \\
\mathrm{CF}\mbox{-}\mathrm{LINSET}_k & = & \{\spn{G}:\ (G \subset {\mathbb N}) \wedge (1 \le |G| \le k) \wedge (\gcd(G)=1)\} \enspace . \\
\mathrm{NE}\mbox{-}\mathrm{LINSET}_k & = & \{\spn{G}_+:\ (G \subset {\mathbb N}) \wedge (1 \le |G| \le k) \} \enspace . \\
\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k & = & \{\spn{G}_+:\ (G \subset {\mathbb N}) \wedge (1 \le |G| \le k) \wedge (\gcd(G)=1)\} \enspace . \end{eqnarray*}
A subset of ${\mathbb N}_0$ whose complement in ${\mathbb N}_0$ is finite is said to be {\em co-finite}. The letters ``CF'' in $\mathrm{CF}\mbox{-}\mathrm{LINSET}$ mean ``co-finite''. The concepts in $\mathrm{LINSET}_k$ have the algebraic structure of a monoid w.r.t.~addition. The concepts in $\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$ are also known as ``numerical semigroups''~\cite{RG-S2009}. A zero coefficient $a_j=0$ erases $g_j$ in the linear combination $\sum_{i=1}^{k}a_ig_i$. Coefficients from ${\mathbb N}$ are non-erasing in this sense. The letters ``NE'' in ``$\mathrm{NE}\mbox{-}\mathrm{LINSET}$'' mean ``non-erasing''.
The {\em shift-extension} ${\mathcal{L}}'$ of a concept class ${\mathcal{L}}$ over the universe ${\mathbb N}_0$ is defined as follows: \begin{equation} \label{eq:shift-extension} {\mathcal{L}}' = \{c+L:\ (c\in{\mathbb N}_0) \wedge (L\in{\mathcal{L}})\} \enspace . \end{equation}
The following bounds on $\mathrm{RTD}$ and $\mathrm{RTD}^+$ (for sufficiently large values of $k$)\footnote{For instance, $\mathrm{RTD}^+(\mathrm{LINSET}_k)=\infty$ holds for all $k\ge2$ and $\mathrm{RTD}(\mathrm{LINSET}_k) = \mbox{?}$ (where ``?'' means ``unknown'') holds for all $k\ge4$.} are known from~\cite{GSZ2015}: \[
\begin{array}{|l|l|l|} \hline
& \mathrm{RTD}^+ & \mathrm{RTD} \\ \hline \mathrm{LINSET}_k & =\infty & \mbox{?} \\ \mathrm{CF}\mbox{-}\mathrm{LINSET}_k & =k & \in\{k-1,k\} \\ \mathrm{NE}\mbox{-}\mathrm{LINSET}'_k & =k+1 & \in\{k-1,k,k+1\} \\ \hline \end{array} \] Here $\mathrm{NE}\mbox{-}\mathrm{LINSET}'_k$ denotes the shift-extension of $\mathrm{NE}\mbox{-}\mathrm{LINSET}_k$.
The following result shows the corresponding bounds with PBTD in place of RTD:
\begin{theorem} \label{th:bounds-linset} The bounds in the following table are valid: \[
\begin{array}{|l|l|l|} \hline
& \mathrm{PBTD}^+ & \mathrm{PBTD} \\ \hline \mathrm{LINSET}_k & =k & \in\{k-1,k\} \\ \mathrm{CF}\mbox{-}\mathrm{LINSET}_k & =k & \in\{k-1,k\} \\ \mathrm{NE}\mbox{-}\mathrm{LINSET}_k &
\in \left[k-1:k\right] &
\in \left[\left\lfloor\frac{k-1}{2}\right\rfloor:k\right] \\ \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k &
\in \left[k-1:k\right] &
\in \left[\left\lfloor\frac{k-1}{2}\right\rfloor:k\right] \\ \hline \end{array} \] Moreover \begin{equation} \label{eq:more-bounds} \mathrm{PBTD}^+({\mathcal{L}}') = k+1\ \wedge\ \mathrm{PBTD}({\mathcal{L}}') \in \{k-1,k,k+1\} \end{equation} holds for all ${\mathcal{L}}\in\{\mathrm{LINSET}_k,\mathrm{CF}\mbox{-}\mathrm{LINSET}_k,\mathrm{NE}\mbox{-}\mathrm{LINSET}_k,\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k\}$. \end{theorem}
Note that the equation $\mathrm{PBTD}^+(\mathrm{LINSET}_k) = k$ was already proven in Example~\ref{exmp:pbtdpluslinset}, using the fact that $G\mapsto\spn{G}$ is a closure operator. Since $G\mapsto\spn{G}_+$ is not a closure operator, we give a separate argument to prove an upper bound of $k$ on $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}_k)$ (see Lemma~\ref{lem:nelinset-ub} in Appendix~\ref{app:linsets}). All other upper bounds in Theorem~\ref{th:bounds-linset} are then easy to derive. The lower bounds in Theorem~\ref{th:bounds-linset} are much harder to obtain. A complete proof of Theorem~\ref{th:bounds-linset} will be given in Appendix~\ref{app:linsets}.
\iffalse \section{Hierarchical Preference-based Teaching} \label{sec:hierarchical-pbt}
In this section, we introduce a technique for defining preference relations that we use later on in Section~\ref{sec:halfspaces} to obtain upper bounds on the $\mathrm{PBTD}$ of classes of halfspaces.
Suppose that ${\mathcal{L}}$ is a parametrized concept class in the sense that any concept of ${\mathcal{L}}$ can be fixed by assigning real values $q = (q_1,\ldots,q_d)$ to ``programmable parameters'' $Q = (Q_1,\ldots,Q_d)$. The concept resulting from setting $Q_i = q_i$ for $i=1,\ldots,d$ is denoted as $L_q$. Let ${\mathcal{D}} \subseteq \mathbbm{R}^d$ be a set which makes the representation $q$ of $L_q$ unique, i.e., $L_q = L_{q'}$ with $q,q' \in {\mathcal{D}}$ implies that $q=q'$. We will then identify a preference relation over ${\mathcal{L}}$ with a preference relation over ${\mathcal{D}}$. For every $p \in \{\downarrow,\uparrow\}^d$, let $\prec_p$ be the following algorithmically defined (lexicographic) preference relation: \begin{enumerate} \item Given $q \neq q' \in {\mathcal{D}}$, find the smallest index $i \in [d]$ such that $q_i \neq q'_i$, say $q_i < q'_i$. \item If $p_i = \downarrow$ (resp.~$p_i=\uparrow$), then $q' \prec_p q$ (resp.~$q \prec_p q')$. \end{enumerate} Imagine a student with this preference relation who has seen a collection of labeled examples. The following hierarchical system of Rules $i=1,\ldots,d$ clarifies which value $q'_i$ she should assign to the unknown parameter $Q_i$: \begin{description} \item[Rule i:] With $i$-highest priority do the following. Choose $q'_i$ as small as possible if $p_i=\downarrow$ and as large as possible if $p_i=\uparrow$. Assign the value $q'_i$ to the parameter $Q_i$. \end{description} It is important to understand that this rule system is hierarchical (as expressed by the distinct priorities of the rules): when Rule $i$ becomes active, then the values of the parameters $Q_1,\ldots,Q_{i-1}$ (in accordance with the rules $1,\ldots,i-1$) have been chosen already.
Suppose that $L_q$ with $q \in {\mathcal{D}}$ is the target concept. A teacher who designs a teaching set for $L_q$ w.r.t.~$({\mathcal{L}},\prec_p)$ can proceed in stages $i=1,\ldots,d$ as follows: \begin{description} \item[Stage i:] Suppose that $p_i=\downarrow$ (resp.~$p_i=\uparrow$). Choose the next part $T_i$ of the teaching set so that every hypothesis $L_{q'}$ with $q' \in {\mathcal{D}}$, and $q'_1=q_1,\ldots,q'_{i-1}=q_{i-1}$ satisfies the following condition: if $L_{q'}$ is consistent with $T_1 \cup \ldots \cup T_{i-1} \cup T_i$, then $q'_i \ge q_i$ (resp.~$q'_i \le q_i$). \end{description} In other words, the teacher chooses $T_1$ so that the student with preference relation $\prec_p$ will assign the value $q_1$ to $Q_1$. Given that $Q_1=q_1$, the teacher chooses $T_2$ so that the student will next assign the value $q_2$ to $Q_2$, and so on.
The use of this basic technique can be made more convenient by allowing more than one parameter to be handled in a single stage.
\fi
\section{Preference-based Teaching of Halfspaces} \label{sec:halfspaces}
In this section, we study preference-based teaching of halfspaces. We will denote the all-zeros vector as $\vec{0}$. The vector with $1$ in coordinate $i$ and with $0$ in the remaining coordinates is denoted as $\vec{e}_i$. The dimension of the Euclidean space in which these vectors reside will always be clear from the context. The sign of a real number $x$ (with value $1$ if $x>0$, value $-1$ if $x<0$, and value $0$ if $x=0$) is denoted by $\mathrm{sign}(x)$.
Suppose that $w\in\mathbbm{R}^d\setminus\{\vec0\}$ and $b\in\mathbbm{R}$. The {\em (positive) halfspace induced by $w$ and $b$} is then given by \[ H_{w,b} = \{x\in\mathbbm{R}^d:\ w^\top x + b \ge 0\} \enspace . \] Instead of $H_{w,0}$, we simply write $H_w$. Let ${\mathcal{H}}_d$ denote the class of $d$-dimensional Euclidean half\-spaces: \[ {\mathcal{H}}_d = \{H_{w,b}:\ w\in\mathbbm{R}^d\setminus\{\vec{0}\} \wedge b \in \mathbbm{R}\} \enspace . \] Similarly, ${\mathcal{H}}_d^0$ denotes the class of $d$-dimensional homogeneous Euclidean halfspaces: \[ {\mathcal{H}}_d^0 = \{H_w:\ w\in\mathbbm{R}^d\setminus\{\vec{0}\}\} \enspace . \] Let $S_{d-1}$ denote the $(d-1)$-dimensional unit sphere in $\mathbbm{R}^d$. Moreover $S_{d-1}^+ = \{x\in S_{d-1}: x_d>0\}$ denotes the ``northern hemisphere''. If not stated explicitly otherwise, we will represent homogeneous halfspaces with normalized vectors residing on the unit sphere. We remind the reader of the following well-known fact:
\begin{remark} \label{rem:orthogonal-group} The orthogonal group in dimension $d$ (i.e., the multiplicative group of orthogonal $(d \times d)$-matrices) acts transitively on $S_{d-1}$ and it conserves the inner product. \end{remark}
We now prove a helpful lemma, stating that each vector $w^*$ in the northern hemisphere may serve as a representative for some homogeneous halfspace $H_u$ in the sense that all other elements of $H_u$ in the northern hemisphere have a strictly smaller $d$-th component than $w^*$. This will later help to teach homogeneous halfspaces with a preference that orders vectors by the size of their last coordinate.
\begin{lemma} \label{lem:close2northpole} Let $d\ge2$, let $0<h\le1$ and let $R_{d,h} = \{w \in S_{d-1}: w_d = h\}$. With this notation the following holds. For every $w^* \in R_{d,h}$, there exists $u\in\mathbbm{R}^d\setminus\{\vec{0}\}$ such that \begin{equation} \label{eq:good-choice} (w^* \in H_u) \wedge (\forall w \in (S_{d-1}^+ \cap H_u)\setminus\{w^*\}: w_d < h) \enspace . \end{equation} \end{lemma}
\begin{proof} For $h=1$, the statement is trivial, since $R_{d,1} = \{\vec{e}_d\}$. So let $h<1$.
Because of Remark~\ref{rem:orthogonal-group}, we may assume without loss of generality that the vector $w^* \in R_{d,h}$ equals $(0,\ldots,0,\sqrt{1-h^2},h)$. It suffices therefore to show that, with this choice of $w^*$, the vector $u = (0,\ldots,0,w_d^*,-w_{d-1}^*)$ satisfies~(\ref{eq:good-choice}). Note that $w \in H_{u}$ iff $\spn{u,w} = w_d^*w_{d-1} - w_{d-1}^*w_d \ge 0$. Since $\spn{u,w^*}=0$, we have $w^* \in H_u$. Moreover, it follows that \[ S_{d-1}^+ \cap H_{u} = \left\{w\in S_{d-1}^+: \frac{w_{d-1}}{w_d} \ge \frac{w_{d-1}^*}{w_d^*} > 0 \right\} \enspace . \] It is obvious that no vector $w \in S_{d-1}^+ \cap H_u$ can have a $d$-th component $w_d$ exceeding $w_d^*=h$ and that setting $w_d=h=w_d^*$ forces the settings $w_{d-1}=w_{d-1}^*=\sqrt{1-h^2}$ and $w_1 = \ldots = w_{d-2} = 0$. Consequently,~(\ref{eq:good-choice}) is satisfied, which concludes the proof. \end{proof}
With this lemma in hand, we can now prove an upper bound of 2 for the preference-based teaching dimension of the class of homogeneous halfspaces, independent of the underlying dimension~$d$.
\begin{theorem} \label{th:halfspace0-ub} $\mathrm{PBTD}({\mathcal{H}}_1^0) = \mathrm{TD}({\mathcal{H}}_1^0) = 1$ and, for every $d\ge2$, we have $\mathrm{PBTD}({\mathcal{H}}_d^0)$ $\le 2$. \end{theorem}
\begin{proof} Clearly, $\mathrm{PBTD}({\mathcal{H}}_1^0) = \mathrm{TD}({\mathcal{H}}_1^0) = 1$ since ${\mathcal{H}}_1^0$ consists of the two sets $\{x\in\mathbbm{R}: x\ge0\}$ and $\{x\in\mathbbm{R}: x\le0\}$.
\noindent Suppose now that $d\ge2$. Let $w^*$ be the target weight vector (i.e., the weight vector that has to be taught). Under the following conditions, we may assume without loss of generality that $w_d^*\neq0$: \begin{itemize} \item For any $0<s_1<s_2$, the student prefers any weight vector that ends with $s_2$ zero coordinates over any weight vector that ends with only $s_1$ zero coordinates. \item If the target vector ends with (exactly) $s$ zero coordinates, then the teacher presents only examples ending with (at least) $s$ zero coordinates. \end{itemize} In the sequel, we specify a student and a teacher such that these conditions hold, so that we will consider only target weight vectors $w^*$ with $w_d^*\neq0$.
\noindent The student has the following preference relation: \begin{itemize} \item Among the weight vectors $w$ with $w_d\neq0$,
the student prefers vectors with larger values of $|w_d|$
over those with smaller values of $|w_d|$. \end{itemize}
\noindent The teacher will use two examples. The first one is chosen as \[ \left\{ \begin{array}{ll}
(-\vec{e}_d,-) & \mbox{if $w^*_d>0$} \\
(\vec{e}_d,-) &\mbox{if $w^*_d<0$}
\end{array} \right. \enspace . \] This example reveals whether the unknown weight vector $w^* \in S_{d-1}$ has a strictly positive or a strictly negative $d$-th component. For reasons of symmetry, we may assume that $w_d^*>0$. We are now precisely in the situation that is described in Lemma~\ref{lem:close2northpole}. Given $w^*$ and $h=w_d^*$, the teacher picks as a second example $(u,+)$ where $u\in\mathbbm{R}^d\setminus\{\vec{0}\}$ has the properties described in the lemma. It follows immediately that the student's preferences will make her choose the weight vector $w^*$. \end{proof}
The upper bound of 2 given in Theorem~\ref{th:halfspace0-ub} is tight, as is stated in the following lemma.
\begin{lemma} \label{lem:halfspace-neg} For every $d\ge2$, we have $\mathrm{PBTD}({\mathcal{H}}_d^0)\ge 2$. \end{lemma}
\begin{proof} We verify this lemma via Lemma~\ref{lem:lb-tdmin}, by providing a finite subclass ${\mathcal{F}}$ of ${\mathcal{H}}_2^0$ such that $\mathrm{TD}_{min}({\mathcal{F}})=2$. Let ${\mathcal{F}}=\{H_w : \vec{0} \neq w\in\{-1,0,1\}^2\}$. It is easy to verify that each of the $8$ halfspaces in ${\mathcal{F}}$ has a teaching dimension of 2 with respect to ${\mathcal{F}}$. This example can be extended to higher dimensions in the obvious way. \end{proof}
We thus conclude that the class of homogeneous halfspaces has a preference-based teaching dimension of 2, independent of the dimensionality $d\ge 2$.
\begin{corollary} For every $d\ge2$, we have $\mathrm{PBTD}({\mathcal{H}}_d^0)=2$. \end{corollary}
By contrast, we will show next that the recursive teaching dimension of the class of homogeneous halfspaces grows with the dimensionality.
\begin{theorem}\label{thm:tdrtdhalfspace} For any $d \ge 2$, $\mathrm{TD}(\mathcal{H}^0_d) = \mathrm{RTD}(\mathcal{H}^0_d) = d+1$. \end{theorem}
\begin{proof} Assume by normalization that the target weight vector has norm $1$, i.e., it is taken from $S_{d-1}$. Remark~\ref{rem:orthogonal-group} implies that all weight vectors in $S_{d-1}$ are equally hard to teach. It suffices therefore to show that $\mathrm{TD}(H_{\vec{e}_1},{\mathcal{H}}^0_d) = d+1$.
We first show that $\mathrm{TD}(H_{\vec{e}_1},{\mathcal{H}}^0_d) \le d+1$. Define $u = -\sum_{i=2}^{d}\vec{e}_i$. We claim that $T = \{ (\vec{e}_i,+): 2 \leq i \leq d\} \cup \{(u,+),(\vec{e}_1,+)\}$ is a teaching set for $H_{\vec{e}_1}$ w.r.t.~${\mathcal{H}}^0_d$. Consider any $w \in S_{d-1}$ such that $H_w$ is consistent with $T$. Note that $w_i = \spn{\vec{e}_i,w} \geq 0$ for all $i \in \{2,\ldots,d\}$ and $\spn{u,w} = -\sum_{i=2}^{d}w_i \geq 0$ together imply that $w_i = 0$ for all $i\in\{2,\ldots,d\}$ and therefore $w = \pm \vec{e}_1$. Furthermore, $w_1 = \spn{w,\vec{e}_1} \geq 0$, and so $w = \vec{e}_1$, as required.
Now we show that $\mathrm{TD}(H_{\vec{e}_1},{\mathcal{H}}^0_d) \geq d+1$ holds for all $d\ge2$. It is easy to see that two examples do not suffice for distinguishing $\vec{e}_1\in\mathbbm{R}^2$ from all weight vectors in $S_1$. In other words, $\mathrm{TD}(H_{\vec{e}_1},{\mathcal{H}}^0_2) \ge3$. Suppose now that $d\ge3$. It is furthermore easy to see that a teaching set $T$ which distinguishes $\vec{e}_1$ from all weight vectors in $S_{d-1}$ must contain at least one positive example $u$ that is orthogonal to $\vec{e}_1$. The inequality $\mathrm{TD}(H_{\vec{e}_1},{\mathcal{H}}^0_d) \geq d+1$ is now obtained inductively because the example $(u,+) \in T$ leaves open a problem that is not easier than teaching $\vec{e}_1$ w.r.t.~the $(d-2)$-dimensional sphere $\{x \in S_{d-1}: x \perp u\}$. \end{proof}
We have thus established that the class of homogeneous halfspaces has a recursive teaching dimension growing linearly with $d$, while its preference-based teaching dimension is constant. In the case of general (i.e., not necessarily homogeneous) $d$-dimensional halfspaces, the difference between $\mathrm{RTD}$ and $\mathrm{PBTD}$ is even more extreme. On the one hand, by generalizing the proof of Lemma~\ref{lem:huge-gap}, it is easy to see that $\mathrm{RTD}({\mathcal{H}}_d)=\infty$ for all $d\ge 1$. On the other hand, we will show in the remainder of this section that $\mathrm{PBTD}({\mathcal{H}}_d) \le 6$, independent of the value of $d$.
We will assume in the sequel (by way of normalization) that an inhomogeneous halfspace has a bias $b\in\{\pm1\}$. We start with the following result:
\begin{lemma} \label{lem:3-examples} Let $w^* \in \mathbbm{R}^d$ be a vector with a non-trivial $d$-th component $w^*_d\neq0$ and let $b^*\in\{\pm1\}$ be a bias. Then there exist three examples labeled according to $H_{w^*,b^*}$ such that the following holds. Every weight-bias pair $(w,b)$ consistent with these examples satisfies $b=b^*$, $\mathrm{sign}(w_d) = \mathrm{sign}(w_d^*)$ and \begin{equation} \label{eq:w-d-constraint} \left\{ \begin{array}{ll}
|w_d| \ge |w_d^*| & \mbox{if $b^*=-1$} \\
|w_d| \le |w_d^*| & \mbox{if $b^*=+1$}
\end{array} \right. \enspace . \end{equation} \end{lemma}
\begin{proof} Within the proof, we use the label ``$1$'' instead of ``$+$'' and the label ``$-1$'' instead of ``$-$''. The pair $(w,b)$ denotes the student's hypothesis for the target weight-bias pair $(w^*,b^*)$. The examples shown to the student will involve the unknown quantities $w^*$ and $b^*$. Each example will lead to a new constraint on $w$ and $b$. We will see that the collection of these constraints reveals the required information. We proceed in three stages: \begin{enumerate} \item The first example is chosen as $(\vec{0},b^*)$. The pair $(w,b)$ can be consistent with this example only if $b = -1$ in the case that $b^*=-1$ and $b\in\{0,1\}$ in the case that $b^*=1$. \item The next example is chosen as $\vec{a}_2 = -\frac{2b^*}{w_d^*}\cdot\vec{e}_d$ and labeled ``$-b^*$''. Note that $\spn{w^*,\vec{a}_2}+b^* = -b^*$. We obtain the following new constraint: \[ \spn{w,\vec{a}_2}+b = \left\{ \begin{array}{ll}
-2\frac{w_d}{w_d^*}+\overbrace{b}^{\in\{0,1\}} < 0 & \mbox{if $b^*=1$} \\
+2\frac{w_d}{w_d^*}+\underbrace{b}_{=-1} \ge 0 & \mbox{if $b^*=-1$}
\end{array} \right. \enspace . \] The pair $(w,b)$ with $b=b^*$ if $b^*=-1$ and $b\in\{0,1\}$ if $b^*=1$ can satisfy the above constraint only if the sign of $w_d$ equals the sign of $w_d^*$. \item The third example is chosen as the example $\vec{a}_3 = -\frac{b^*}{w_d^*}\cdot\vec{e}_d$ with label ``$1$''. Note that $\spn{w^*,\vec{a}_3}+b^* = 0$. We obtain the following new constraint: \[ \spn{w,\vec{a}_3}+b = -\frac{b^*w_d}{w_d^*}+b \ge 0 \enspace . \] Given that $w$ is already constrained to weight vectors satisfying $\mathrm{sign}(w_d)=\mathrm{sign}(w_d^*)$, we can safely
replace $w_d/w_d^*$ by $|w_d|/|w_d^*|$. This yields
$|w_d|/|w_d^*| \le b$ if $b^*=1$ and $|w_d|/|w_d^*|\ge -b$
if $b^*=-1$. Since $b$ is already constrained as described in stage 1 above, we obtain $|w_d|/|w_d^*| \le b \in \{0,1\}$
if $b^*=1$ and $|w_d|/|w_d^*|\ge -b = 1$ if $b^*=-1$. The weight-bias pair $(w,b)$ satisfies these constraints only if $b=b^*$ and if~(\ref{eq:w-d-constraint}) is valid. \end{enumerate} The assertion of the lemma is immediate from this discussion. \end{proof}
\begin{theorem} \label{th:halfspace-ub} $\mathrm{PBTD}({\mathcal{H}}_d) \le 6$. \end{theorem}
\begin{proof} As in the proof of Lemma~\ref{lem:3-examples}, we use the label ``$1$'' instead of ``$+$'' and the label ``$-1$'' instead of ``$-$''. As in the proof of Theorem~\ref{th:halfspace0-ub}, we may assume without loss of generality that the target weight vector $w^*\in\mathbbm{R}^d$ satisfies $w_d^* \neq 0$. The proof will proceed in stages. On the way, we specify six rules which determine the preference relation of the student.
{\bf Stage 1} is concerned with teaching homogeneous halfspaces given by $w^*$ (and $b^*=0$). The student respects the following rules: \begin{description} \item[Rule 1:] She prefers any pair $(w,0)$ over any pair $(w',b)$ with $b\neq0$. In other words, any homogeneous halfspace is preferred over any non-homogeneous halfspace. \item[Rule 2:] Among homogeneous halfspaces, her preferences are the same as the ones that were used within the proof of Theorem~\ref{th:halfspace0-ub} for teaching homogeneous halfspaces. \end{description} Thus, if $b^*=0$, then we can simply apply the teaching protocol for homogeneous halfspaces. In this case, $w^*$ can be taught at the expense of only two examples.
Stage~1 reduces the problem to teaching inhomogeneous halfspaces given by $(w^*,b^*)$ with $b^*\neq0$. We assume, by way of normalization, that $b^*\in\{\pm1\}$, but note that $w^*$ can no longer be assumed to be of unit (or any other fixed) length.
In {\bf stage 2}, the teacher presents three examples in accordance with Lemma~\ref{lem:3-examples}. It follows that the student will take into consideration only weight-bias pairs $(w,b)$ such that the constraints $b=b^*$, $\mathrm{sign}(w_d) = \mathrm{sign}(w_d^*)$ and~(\ref{eq:w-d-constraint}) are satisfied. The following rule will then induce the constraint $w_d=w_d^*$: \begin{description} \item[Rule 3:] Among the pairs $(w,b)$ such that $w_d\neq0$ and $b\in\{\pm1\}$, the student's preferences are as follows. If $b=-1$ (resp.~$b=1$), then she prefers vectors $w$ with a smaller (resp.~larger) value
of $|w_d|$ over those with a larger (resp.~smaller) value of $|w_d|$. \end{description} Thanks to Lemma~\ref{lem:3-examples} and thanks to Rule 3, we may from now on assume that $b=b^*$ and $w_d=w_d^*$. In the sequel, let $w^*$ be decomposed according to $w^* = (\vec{w}_{d-1}^*,w_d^*) \in \mathbbm{R}^{d-1}\times\mathbbm{R}$. We think of $\vec{w}_{d-1}$ as the student's hypothesis for $\vec{w}_{d-1}^*$.
{\bf Stage~3} is concerned with the special case where $\vec{w}_{d-1}^*=\vec{0}$. The student will automatically set $\vec{w}_{d-1}=\vec{0}$ if we add the following to the student's rule system: \begin{description} \item[Rule 4:] Given that the values for $w_d$ and $b$ have been fixed already (and are distinct from $0$), the student prefers weight-bias pairs with $\vec{w}_{d-1}=\vec{0}$ over any weight-bias pair with $\vec{w}_{d-1}\neq\vec{0}$. \end{description}
Stage 3 reduces the problem to teaching $(w^*,b^*)$ with fixed non-zero values for $w_d$ and $b^*$ (known to the student) and with $\vec{w}_{d-1}^* \neq \vec{0}$. Thus, essentially, only $\vec{w}_{d-1}^*$ has still to be taught. In the next stage, we will argue that the problem of teaching $\vec{w}_{d-1}^*$ is equivalent to teaching a homogeneous halfspace.
In {\bf stage 4}, the teacher will present only examples $a$ such that $a_d = -\frac{b^*}{w_d^*}$ so that the contribution of the $d$-th component to the inner product of $w^*$ and $a$ cancels with the bias $b^*$. Given this commitment for $a_d$, the first $d-1$ components of the examples can be chosen so as to teach the homogeneous halfspace $H_{\vec{w}_{d-1}^*}$. According to Theorem~\ref{th:halfspace0-ub}, this can be achieved at the expense of two more examples. Of course the student's preferences must match with the preferences that were used in the proof of this theorem: \begin{description} \item[Rule 5:] Suppose that the values of $w_d$ and $b$ have been fixed already (and are distinct from $0$) and suppose that $\vec{w}_{d-1}\neq\vec{0}$. Then the preferences for the choice of $\vec{w}_{d-1}$ match with the preferences that were used in the protocol for teaching homogeneous halfspaces. \end{description}
After stage 4, the student takes into consideration only weight-bias pairs $(w,b)$ such that $w_d=w_d^*$, $b=b^*$ and $H_{\vec{w}_{d-1}} = H_{\vec{w}_{d-1}^*}$. However, since we had normalized the bias and not the weight vector, this does not necessarily mean that $\vec{w}_{d-1} = \vec{w}_{d-1}^*$. On the other hand, the two weight vectors already coincide modulo a positive scaling factor, say \begin{equation} \label{eq:scaling} \vec{w}_{d-1} = s \cdot \vec{w}_{d-1}^* \mbox{ for some $s>0$} \enspace . \end{equation} In order to complete the proof, it suffices to teach the $L_1$-norm of $\vec{w}_{d-1}^*$ to the student (because~(\ref{eq:scaling})
and $\|\vec{w}_{d-1}\|_1 = \|\vec{w}_{d-1}^*\|_1$ imply that $\vec{w}_{d-1} = \vec{w}_{d-1}^*$). The next (and final) stage serves precisely this purpose.
As for {\bf stage~5}, we first fix some notation. For $i=1,\ldots,d-1$, let $\beta_i = \mathrm{sign}(w_i^*)$. Note that~(\ref{eq:scaling}) implies
that $\beta_i = \mathrm{sign}(w_i)$. Let $L = \|\vec{w}_{d-1}^*\|_1$ denote the $L_1$-norm of $\vec{w}_{d-1}^*$. The final example is chosen as $\vec{a}_6 = (\beta_1,\ldots,\beta_{d-1},-(L+b^*)/w_d^*)$ and labeled ``$1$''. Note that \[
\spn{w^*,\vec{a}_6} + b^* = |w_1^*|+\ldots+|w_{d-1}^*|-L = 0 \enspace . \] Given that $\beta_i=\mathrm{sign}(w_i)$, $w_d=w_d^*$ and $b=b^*$, the student can derive from $\vec{a}_6$ and its label the following constraint on~$\vec{w}_{d-1}$: \[
\spn{w,\vec{a}_6} + b = |w_1|+\ldots+|w_{d-1}| - L \ge 0 \enspace . \]
In combination with the following rule, we can now force the constraint $\|\vec{w}_{d-1}\|_1 = L$: \begin{description} \item[Rule 6:] Suppose that the values of $w_d$ and $b$ have been fixed already (and are distinct from $0$) and suppose that $H_{\vec{w}_{d-1}}$ has already been fixed. Then, among the vectors representing $H_{\vec{w}_{d-1}}$, the ones with a smaller $L_1$-norm are preferred over the ones with a larger $L_1$-norm. \end{description} An inspection of the six stages reveals that at most six examples altogether were shown to the student (three in stage 2, two in stage 4, and one in stage 5). This completes the proof of the theorem. \\ \mbox{} \end{proof}
Note that Theorems~\ref{th:halfspace0-ub} and~\ref{th:halfspace-ub} remain valid when we allow $w$ to be the all-zero vector, which extends $\mathcal{H}_d^0$ by $\{{\mathbb{R}}^d\}$ and $\mathcal{H}_d$ by $\{{\mathbb{R}}^d,\emptyset\}$. ${\mathbb{R}}^d$ will be taught with a single positive example, and $\emptyset$ with a single negative example. The student will give the highest preference to ${\mathbb{R}}^d$, the second highest to $\emptyset$, and among the remaining halfspaces, the student's preferences stay the same.
\section{Classes with $\mathrm{PBTD}$ or $\mathrm{PBTD}^+$ Equal to One}\label{sec:pbtd1}
In this section, we will give complete characterizations of (i) the concept classes with a positive preference-based teaching dimension of $1$, and (ii) the concept classes with a preference-based teaching dimension of $1$. Throughout this section, we use the label ``$1$'' to indicate positive examples and the label ``$0$'' to indicate negative examples.
Let $I$ be a (possibly infinite) index set. We will consider a mapping $A: I \times I \rightarrow \{0,1\}$ as a binary matrix $A \in \{0,1\}^{I \times I}$. $A$ is said to be {\em lower-triangular} if there exists a linear ordering $\prec$ on $I$ such that $A(i,i')=0$ for every pair $(i,i')$ such that $i \prec i'$.
We will occasionally identify a set $L\subseteq{\mathcal{X}}$ with its indicator function by setting $L(x) = \mathbbm{1}_{[x \in L]}$.
\noindent For each $M \subseteq {\mathcal{X}}$, we define \[ M \oplus L=(L \setminus M) \cup (M \setminus L) \] and \[ M \oplus {\mathcal{L}} = \{M \oplus L: L\in{\mathcal{L}}\} \enspace . \] For $T \subseteq {\mathcal{X}}\times\{0,1\}$, we define similarly \[ M \oplus T = \{(x,\bar y): (x,y) \in T\mbox{ and } x \in M\} \cup \{(x,y) \in T: x \notin M\} \enspace . \] Moreover, given $M\subseteq{\mathcal{X}}$ and a linear ordering $\prec$ on ${\mathcal{L}}$, we define a linear ordering $\prec_M$ on $M \oplus {\mathcal{L}}$ as follows: \[ M \oplus L' \prec_M M \oplus L \Longleftrightarrow \underbrace{M \oplus (M \oplus L')}_{=L'} \prec \underbrace{M \oplus (M \oplus L)}_{=L} \enspace . \]
\begin{lemma} \label{lem:flip} With this notation, the following holds. If the mapping ${\mathcal{L}} \ni L \mapsto T(L) \subseteq{\mathcal{X}}\times\{0,1\}$ assigns a teaching set to $L$ w.r.t.~$({\mathcal{L}},\prec)$, then the mapping $M \oplus {\mathcal{L}} \ni M \oplus L \mapsto M \oplus T(L) \subseteq {\mathcal{X}}\times\{0,1\}$ assigns a teaching set to $M \oplus L$ w.r.t.~$(M\oplus{\mathcal{L}},\prec_M)$. \end{lemma} Since this result is rather obvious, we skip its proof.
We say that ${\mathcal{L}}$ and ${\mathcal{L}}'$ are {\em equivalent} if ${\mathcal{L}}' = M\oplus{\mathcal{L}}$ for some $M\subseteq{\mathcal{X}}$ (and this clearly is an equivalence relation). As an immediate consequence of Lemma~\ref{lem:flip}, we obtain the following result:
\begin{lemma} If ${\mathcal{L}}$ is equivalent to ${\mathcal{L}}'$, then $\mathrm{PBTD}({\mathcal{L}}) = \mathrm{PBTD}({\mathcal{L}}')$. \end{lemma}
The following lemma provides a necessary condition for a concept class to have a preference-based teaching dimension of one.
\begin{lemma} \label{lem:single-occurrence} Suppose that ${\mathcal{L}} \seq2^{\mathcal{X}}$ is a concept class of $\mathrm{PBTD}$ $1$. Pick a linear ordering $\prec$ on ${\mathcal{L}}$ and a mapping ${\mathcal{L}} \ni L \mapsto (x_L,y_L) \in {\mathcal{X}}\times\{0,1\}$ such that, for every $L\in{\mathcal{L}}$, $\{(x_L,y_L)\}$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$. Then \begin{itemize} \item either every instance $x\in{\mathcal{X}}$ occurs at most once in $(x_L)_{L\in{\mathcal{L}}}$ \item or there exists a concept $L^*\in{\mathcal{L}}$ that is preferred over all other concepts in ${\mathcal{L}}$ and $x_{L^*}$ is the only instance from ${\mathcal{X}}$ that occurs twice in $(x_L)_{L\in{\mathcal{L}}}$. \end{itemize} \end{lemma}
\begin{proof} Since the mapping $T$ must be injective, no instance can occur twice in $(x_L)_{L\in{\mathcal{L}}}$ with the same label. Suppose that there exists an instance $x\in{\mathcal{X}}$ and concepts $L \prec L^*$ such that $x = x_L = x_{L^*}$ and, w.l.o.g., $y_L=1$ and $y_{L^*}=0$. Since $\{(x,1)\}$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$, every concept $L' \succ L$ (including the ones that are preferred over $L^*$) must satisfy $L'(x) = 0$. For analogous reasons, every concept $L' \succ L^*$ (if any) must satisfy $L'(x)=1$. A concept $L'\in{\mathcal{L}}$ that is preferred over $L^*$ would have to satisfy $L'(x)=0$ and $L'(x)=1$, which is impossible. It follows that there can be no concept that is preferred over~$L^*$. \end{proof}
The following result is a consequence of Lemmas~\ref{lem:flip} and~\ref{lem:single-occurrence}.
\begin{theorem} \label{th:pbtd1-equivalence} If $\mathrm{PBTD}({\mathcal{L}})=1$, then there exists a concept class ${\mathcal{L}}'$ that is equivalent to ${\mathcal{L}}$ and satisfies $\mathrm{PBTD}({\mathcal{L}}')=\mathrm{PBTD}^+({\mathcal{L}}')=1$. \end{theorem}
\begin{proof} Pick a linear ordering $\prec$ on ${\mathcal{L}}$ and, for every $L\in{\mathcal{L}}$, a pair $(x_L,y_L) \in {\mathcal{X}}\times\{0,1\}$ such that $T(L)=\{(x_L,y_L)\}$ is a teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$. \begin{description} \item[Case 1:] Every instance $x\in{\mathcal{X}}$ occurs at most once in $(x_L)_{L\in{\mathcal{L}}}$. \\ Then choose $M = \{x_L: y_L=0\}$ and apply Lemma~\ref{lem:flip}. \item[Case 2:] There exists a concept $L^*\in{\mathcal{L}}$ that is preferred over all other concepts in ${\mathcal{L}}$ and $x_{L^*}$ is the only instance from ${\mathcal{X}}$ that occurs twice in $(x_L)_{L\in{\mathcal{L}}}$. \\ Then choose $M = \{x_L: y_L=0 \wedge L \neq L^*\}$ and apply Lemma~\ref{lem:flip}. With this choice, we obtain $M \oplus T(L) = \{(x_L,1)\}$ for every $L\in{\mathcal{L}}\setminus\{L^*\}$. Since $L^*$ is preferred over all other concepts in ${\mathcal{L}}$, we may teach $L^*$ w.r.t.~$({\mathcal{L}},\prec)$ by the empty set (instead of employing a possibly $0$-labeled example). \end{description} The discussion shows that there is a class ${\mathcal{L}}'$ that is equivalent to ${\mathcal{L}}$ and can be taught in the preference-based model with positive teaching sets of size $1$ (or size $0$ in case of $L^*$). \end{proof}
We now have the tools required for characterizing the concept classes whose positive PBTD equals $1$.
\begin{theorem} \label{th:pbtd-plus1} $\mathrm{PBTD}^+({\mathcal{L}})=1$ if and only if there exists a mapping ${\mathcal{L}} \ni L \mapsto x_L \in {\mathcal{X}}$ such that the matrix $A\in\{0,1\}^{({\mathcal{L}}\setminus\{\emptyset\})\times({\mathcal{L}}\setminus\{\emptyset\})}$ given by $A(L,L') = L'(x_L)$ is lower-triangular. \end{theorem}
\begin{proof} Suppose first that $\mathrm{PBTD}^+({\mathcal{L}})=1$. Pick a linear ordering $\prec$ on ${\mathcal{L}}$ and, for every $L\in{\mathcal{L}}\setminus\{\emptyset\}$, pick $x_L\in{\mathcal{X}}$ such that $\{x_L\}$ is a positive teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$.\footnote{Such an $x_L$ always exists, even if $\emptyset$ is a teaching set for $L$, because every superset of a teaching set for $L$ that is still consistent with $L$ is still a teaching set for $L$, cf.\ the discussion immediately after Lemma~\ref{lem:extension}.} If $L \prec L'$ (so that $L'$ is preferred over $L$), we must have $L'(x_L)=0$. It follows that the matrix $A$, as specified in the theorem, is lower-triangular.
Suppose conversely that there exists a mapping ${\mathcal{L}} \ni L \mapsto x_L \in {\mathcal{X}}$ such that the matrix $A\in\{0,1\}^{({\mathcal{L}}\setminus\{\emptyset\})\times({\mathcal{L}}\setminus\{\emptyset\})}$ given by $A(L,L') = L'(x_L)$ is lower-triangular, say w.r.t.~the linear ordering $\prec$ on ${\mathcal{L}}\setminus\{\emptyset\}$. Then, for every $L\in{\mathcal{L}}\setminus\{\emptyset\}$, the singleton $\{x_L\}$ is a positive teaching set for $L$ w.r.t.~$({\mathcal{L}},\prec)$ because it distinguishes $L$ from $\emptyset$ (of course) and also from every concept $L' \in {\mathcal{L}}\setminus\{\emptyset\}$ such that $L' \succ L$. If $\emptyset \in{\mathcal{L}}$, then extend the linear ordering $\prec$ by preferring $\emptyset$ over every other concept from ${\mathcal{L}}$ (so that $\emptyset$ is a positive teaching set for $\emptyset$ w.r.t.~$({\mathcal{L}},\prec)$). \end{proof}
In view of Theorem~\ref{th:pbtd1-equivalence}, Theorem~\ref{th:pbtd-plus1} characterizes every class ${\mathcal{L}}$ with $\mathrm{PBTD}({\mathcal{L}})=1$ up to equivalence.
Let $\mathrm{Sg}({\mathcal{X}}) = \{\{x\}: x \in {\mathcal{X}}\}$ denote the class of singletons over ${\mathcal{X}}$ and suppose that $\mathrm{Sg}({\mathcal{X}})$ is a sub-class of ${\mathcal{L}}$ and $\mathrm{PBTD}({\mathcal{L}})=1$. We will show that only fairly trivial extensions of $\mathrm{Sg}({\mathcal{X}})$ with a preference-based dimension of $1$ are possible.
\begin{lemma} \label{lem:pbtd1-implications} Let ${\mathcal{L}}\seq2^{\mathcal{X}}$ be a concept class of $\mathrm{PBTD}$ $1$ that contains $\mathrm{Sg}({\mathcal{X}})$. Let $T$ be an admissible mapping for ${\mathcal{L}}$ that assigns a labeled example $(x_L,y_L)\in{\mathcal{X}}\times\{0,1\}$ to each $L\in{\mathcal{L}}$. For $b=0,1$, let ${\mathcal{L}}^b = \{L\in{\mathcal{L}}: y_L=b\}$. Similarly, let ${\mathcal{X}}^b = \{x\in{\mathcal{X}}: \{x\}\in{\mathcal{L}}^b\}$. With this notation, the following holds: \begin{enumerate} \item If $L\in{\mathcal{L}}^1$ and $L \subset L' \in {\mathcal{L}}$, then $L'\in{\mathcal{L}}^1$. \item If $L'\in{\mathcal{L}}^0$ and $L' \supset L \in {\mathcal{L}}$, then $L\in{\mathcal{L}}^0$. \item
$|{\mathcal{X}}^0| \le 2$. Moreover if $|{\mathcal{X}}^0|=2$, then there exist $q \neq q' \in{\mathcal{X}}$ such that ${\mathcal{X}}^0=\{q,q'\}$ and $x_{\{q\}} = q'$. \end{enumerate} \end{lemma}
\begin{proof} Recall that $R_T = \{(L,L')\in{\mathcal{L}}\times{\mathcal{L}}:\ (L \neq L') \wedge (\mbox{$L$ is consistent with $T(L')$})\}$ and that $R_T$ (and even the transitive closure of $R_T$) is asymmetric if $T$ is admissible. \begin{enumerate} \item If $L \in {\mathcal{L}}^1$ and $L \subset L'$, then $y_L=1$ so that $L'$ is consistent with the example $(x_L,y_L)$. It follows that $(L',L) \in R_T$. $L' \in {\mathcal{L}}^0$ would similarly imply that $(L,L') \in R_T$ so that $R_T$ would not be asymmetric. This is in contradiction with the admissibility of $T$. \item The second assertion in the lemma is a logically equivalent reformulation of the first assertion. \item Suppose for the sake of contradiction that ${\mathcal{X}}^0$ contains three distinct points, say $q_1,q_2,q_3$. Since, for $i=1,2,3$, $T$ assigns a $0$-labeled example to $\{q_i\}$, at least one of the remaining two points is consistent with $T(\{q_i\})$. Let $G$ be the digraph with the nodes $q_1,q_2,q_3$ and with an edge from $q_j$ to $q_i$ iff $\{q_j\}$ is consistent with $T(\{q_i\})$. Then each of the three nodes has an indegree of at least $1$. Digraphs of this form must contain a cycle so that $\mathrm{trcl}(R_T)$ is not asymmetric. This is in contradiction with the admissibility of $R_T$.
A similar argument holds if ${\mathcal{X}}^0$ contains only two distinct elements, say $q$ and $q'$. If neither $x_{\{q\}} = q'$ nor $x_{\{q'\}} = q$, then $(\{q'\},\{q\}) \in R_T$ and $(\{q\},\{q'\}) \in R_T$ so that $R_T$ is not asymmetric --- again a contradiction to the admissibility of $T$. \end{enumerate} \end{proof}
\noindent We are now in the position to characterize those classes of $\mathrm{PBTD}$ one that contain all singletons.
\begin{theorem} \label{th:pbtd1} Suppose that ${\mathcal{L}}\seq2^{\mathcal{X}}$ is a concept class that contains $\mathrm{Sg}({\mathcal{X}})$. Then $\mathrm{PBTD}({\mathcal{L}})=1$ if and only if the following holds. Either ${\mathcal{L}}$ coincides with $\mathrm{Sg}({\mathcal{X}})$ or ${\mathcal{L}}$ contains precisely one additional concept, which is either the empty set or a set of size $2$. \end{theorem}
\begin{proof} We start with proving ``${\Leftarrow}$''. It is well known that $\mathrm{PBTD}^+({\mathcal{L}})=1$ for ${\mathcal{L}} = \mathrm{Sg}({\mathcal{X}})\cup\{\emptyset\}$: prefer $\emptyset$ over any singleton set, set $T(\emptyset)=\emptyset$ and, for every $x\in{\mathcal{X}}$, set $T(\{x\})=\{(x,1)\}$. In a similar fashion, we can show that $\mathrm{PBTD}({\mathcal{L}})=1$ for ${\mathcal{L}} = \mathrm{Sg}({\mathcal{X}})\cup\{\{q,q'\}\}$ for any choice of $q \neq q' \in {\mathcal{X}}$. Prefer $\{q,q'\}$ over $\{q\}$ and $\{q'\}$, respectively. Furthermore, prefer $\{q\}$ and $\{q'\}$ over all other singletons. Finally, set $T(\{q,q'\})=\emptyset$, $T(\{q\})=\{(q',0)\}$, $T(\{q'\})=\{(q,0)\}$ and, for every $x\in{\mathcal{X}}\setminus\{q,q'\}$, set $T(\{x\}) = \{(x,1)\}$.
As for the proof of ``${\Rightarrow}$'', we make use of the notions $T,x_L,y_L,{\mathcal{L}}^0,{\mathcal{L}}^1,{\mathcal{X}}^0,{\mathcal{X}}^1$ that had been introduced in Lemma~\ref{lem:pbtd1-implications} and we proceed by case analysis.
\begin{description} \item[Case 1:] ${\mathcal{X}}^0 = \emptyset$. \\ Since ${\mathcal{X}}^0 = \emptyset$, we have ${\mathcal{X}}={\mathcal{X}}^1$. In combination with the first assertion in Lemma~\ref{lem:pbtd1-implications}, it follows that ${\mathcal{L}}\setminus\{\emptyset\} = {\mathcal{L}}^1$. We claim that no concept in ${\mathcal{L}}$ contains two distinct elements. Assume for the sake of contradiction that there is a concept $L\in{\mathcal{L}}$
such that $|L|\ge2$. It follows that, for every $q \in L$, $x_{\{q\}}=q$ and $y_{\{q\}}=1$ so that $(L,\{q\}) \in R_T$. Moreover, there exists $q_0 \in L$ such that $x_L=q_0$ and $y_L=1$. It follows that $(\{q_0\},L) \in R_T$, which contradicts the fact that $R_T$ is asymmetric. \item[Case 2:] ${\mathcal{X}}^0 = \{q\}$ for some $q\in{\mathcal{X}}$. \\ Set $q' = x_{\{q\}}$ and note that $y_{\{q\}}=0$. Moreover, since ${\mathcal{X}}^1 = {\mathcal{X}}\setminus\{q\}$, we have $x_{\{p\}}=p$ and $y_{\{p\}}=1$
for every $p\in{\mathcal{X}}\setminus\{q\}$. We claim that ${\mathcal{L}}$ cannot contain a concept $L$ of size at least $2$ that contains an element of ${\mathcal{X}}\setminus\{q,q'\}$. Assume for the sake of contradiction, that there is a set $L$ such that $|L|\ge2$ and $p \in L$ for some $p\in{\mathcal{X}}\setminus\{q,q'\}$. The first assertion in Lemma~\ref{lem:pbtd1-implications} implies that $y_L=1$ (because $y_{\{p\}}=1$ and $\{p\} \subseteq L$). Since all pairs $(x,1)$ with $x \neq q$ are already in use for teaching the corresponding singletons, we may conclude that $q \in L$ and $T(L) = \{(q,1)\}$. This contradicts the fact that $\mathrm{trcl}(R_T)$ is asymmetric, because our discussion implies that $(L,\{p\}) , (\{p\},\{q\}) , (\{q\},L) \in R_T$. We may therefore safely assume that there is no concept of size at least $2$ in ${\mathcal{L}}$ that has a non-empty intersection with ${\mathcal{X}}\setminus\{q,q'\}$. Thus, except for the singletons, the only remaining sets that possibly belong to ${\mathcal{L}}$ are $\emptyset$ and $\{q,q'\}$. We still have to show that not both of them can belong to ${\mathcal{L}}$. Assume for the sake of contradiction that $\emptyset,\{q,q'\}\in{\mathcal{L}}$. Since $\emptyset$ is consistent with $T(\{q\})=\{(q',0)\}$, we have $(\emptyset,\{q\}) \in R_T$. Clearly, $y_\emptyset=0$. Since $\{q\}$ is consistent with every pair $(x,0)$ except for $(q,0)$, we must have $x_\emptyset = q$. (Otherwise, we have $(\{q\},\emptyset) \in R_T$ and arrive at a contradiction.) Let us now inspect the possible teaching sets for $L = \{q,q'\}$. Since $\{q,q'\}$ is consistent with $T(\{q'\}) = \{(q',1)\}$, setting $y_L = 0$ would lead to a contradiction. The example $(q',1)$ is already in use for teaching $\{q'\}$. It is therefore necessary to set $T(L) = \{(q,1)\}$. 
An inspection of the various teaching sets shows that $(\emptyset,\{q\}) , (\{q\},L) , (L,\{q'\}), (\{q'\},\emptyset) \in R_T$, which contradicts the fact that $\mathrm{trcl}(R_T)$ is asymmetric. \item[Case 3:] ${\mathcal{X}}^0=\{q,q'\}$ for some $q \neq q' \in {\mathcal{X}}$. \\ Note first that $y_{\{q\}}=y_{\{q'\}}=0$ and $y_{\{p\}}=1$ for every $p\in{\mathcal{X}}\setminus\{q,q'\}$. We claim that $\emptyset\notin{\mathcal{L}}$. Assume for the sake of contradiction that $\emptyset\in{\mathcal{L}}$. Then $(\emptyset,\{q\}) , (\emptyset,\{q'\}) \in R_T$ since $\emptyset$ is consistent with the teaching sets for instances from ${\mathcal{X}}^0$. But then, no matter how $x$ in $T(\emptyset) = \{(x,0)\}$ is chosen, at least one of the sets $\{q\}$ and $\{q'\}$ will be consistent with $T(\emptyset)$ so that at least one of the pairs $(\{q\},\emptyset)$ and $(\{q'\},\emptyset)$ belongs to $R_T$. This contradicts the fact that $R_T$ must be asymmetric. Thus $\emptyset\notin{\mathcal{L}}$, indeed. Now it suffices to show that ${\mathcal{L}}$ cannot contain a concept of size at least $2$ that contains an element of ${\mathcal{X}}\setminus\{q,q'\}$. Assume for the sake of contradiction that there is a set $L\in{\mathcal{L}}$
such that $|L|\ge2$ and $p \in L$ for some $p\in{\mathcal{X}}\setminus\{q,q'\}$. Observe that $(L,\{p\}) \in R_T$. Another application of the first assertion in Lemma~\ref{lem:pbtd1-implications} shows that $y_L=1$ (because $y_{\{p\}}=1$ and $p \in L$) and $x_L \in \{q,q'\}$ (because the other $1$-labeled instances are already in use for teaching the corresponding singletons). It follows that one of the pairs $(\{q\},L)$ and $(\{q'\},L)$ belongs to $R_T$. The third assertion of Lemma~\ref{lem:pbtd1-implications} implies that $T(\{q\})=\{(q',0)\}$ or $T(\{q'\})=\{(q,0)\}$. For reasons of symmetry, we may assume that $T(\{q\})=\{(q',0)\}$. This implies that $(\{p\},\{q\}) \in R_T$. Let $q''$ be given by $T(\{q'\}) = \{(q'',0)\}$. Note that either $q''=q$ or $q''\in{\mathcal{X}}\setminus\{q,q'\}$. In the former case, we have that $(\{p\},\{q'\}) \in R_T$ and in the latter case we have that $(\{q\},\{q'\}) \in R_T$. Since $(\{p\},\{q\}) \in R_T$ (which was observed above already), we conclude that in both cases, $(\{p\},\{q\}) , (\{p\},\{q'\}) \in \mathrm{trcl}(R_T)$. Combining this with our observations above that $(L,\{p\}) \in R_T$ and that one of the pairs $(\{q\},L)$ and $(\{q'\},L)$ belongs to $R_T$, yields a contradiction to the fact that $\mathrm{trcl}(R_T)$ is asymmetric. \end{description} \end{proof}
\begin{corollary} Let ${\mathcal{L}}\seq2^{\mathcal{X}}$ be a concept class that contains $\mathrm{Sg}({\mathcal{X}})$. If $\mathrm{PBTD}({\mathcal{L}})=1$, then $\mathrm{RTD}({\mathcal{L}})=1$. \end{corollary}
\begin{proof} According to Theorem~\ref{th:pbtd1}, either ${\mathcal{L}}$ coincides with $\mathrm{Sg}({\mathcal{X}})$ or ${\mathcal{L}}$ contains precisely one additional concept that is $\emptyset$ or a set of size $2$. The partial ordering $\prec$ on ${\mathcal{L}}$ that is used in the first part of the proof of Theorem~\ref{th:pbtd1} (proof direction ``${\Leftarrow}$'') is easily compiled into a recursive teaching plan of order $1$ for ${\mathcal{L}}$.\footnote{This also follows from Lemma~\ref{lem:rtd-pbtd} and the fact that there are no chains of a length exceeding $2$ in $({\mathcal{L}},\prec)$.} \end{proof}
The characterizations proven above can be applied to certain geometric concept classes.
Consider a class ${\mathcal{L}}$, consisting of bounded and topologically closed objects in the $d$-dimensional Euclidean space, that satisfies the following condition: for every pair $(A,B)\in\mathbbm{R}^d\times\mathbbm{R}^d$, there is exactly one object in ${\mathcal{L}}$, denoted as $L_{A,B}$
in the sequel, such that $A,B \in L_{A,B}$ and such that $\|A-B\|$ coincides with the diameter of $L_{A,B}$. This assumption implies
that $|{\mathcal{L}} \setminus \mathrm{Sg}(\mathbbm{R}^d)|=\infty$. By setting $A=B$, it furthermore implies $\mathrm{Sg}(\mathbbm{R}^d) \subseteq {\mathcal{L}}$. Let us prefer objects with a small diameter over objects with a larger diameter. Then, obviously, $\{A,B\}$ is a positive teaching
set for $L_{A,B}$. Because of $|{\mathcal{L}} \setminus \mathrm{Sg}(\mathbbm{R}^d)|=\infty$, ${\mathcal{L}}$ does clearly not satisfy the condition in Theorem~\ref{th:pbtd1}, which is necessary for ${\mathcal{L}}$ to have a PBTD of $1$. We may therefore conclude that $\mathrm{PBTD}({\mathcal{L}}) = \mathrm{PBTD}^+({\mathcal{L}}) = 2$.
The family of classes with the required properties is rich and includes, for instance, the class of $d$-dimensional balls as well as the class of $d$-dimensional axis-parallel rectangles.
\section{Conclusions}
Preference-based teaching uses the natural notion of preference relation to extend the classical teaching model. The resulting model (i) is more powerful than the classical one, (ii) resolves difficulties with the recursive teaching model in the case of infinite concept classes, and (iii) is at the same time free of coding tricks even according to the definition by \cite{GM1996}. Our examples of algebraic and geometric concept classes demonstrate that preference-based teaching can be achieved very efficiently with naturally defined teaching sets and based on intuitive preference relations such as inclusion. We believe that further studies of the PBTD will provide insights into structural properties of concept classes that render them easy or hard to learn in a variety of formal learning models.
We have shown that spanning sets lead to a general-purpose construction for \linebreak[4]preference-based teaching sets of only positive examples. While this result is fairly obvious, it provides further justification of the model of preference-based teaching, since the teaching sets it yields are often intuitively exactly those a teacher would choose in the classroom (for instance, one would represent convex polygons by their vertices, as in Example~\ref{ex:polygons}). It should be noted, too, that it can sometimes be difficult to establish whether the upper bound on PBTD obtained this way is tight, or whether the use of negative examples or preference relations other than inclusion yield smaller teaching sets.
Generally, the choice of preference relation provides a degree of freedom that increases the power of the teacher but also increases the difficulty of establishing lower bounds on the number of examples required for teaching.
\noindent\textbf{Acknowledgements.} Sandra Zilles was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC), in the Discovery Grant and Canada Research Chairs programs. We thank the anonymous referees for their numerous thoughtful comments, which greatly helped to improve the presentation of the paper.
\appendix
\iffalse \section{Proof of Lemma~\ref{lem:huge-gap}} \label{app:rtd} \fi
\section{Proof of Theorem~\ref{th:bounds-linset}} \label{app:linsets}
In Section~\ref{subsec:shift-lemma}, we present a general result which helps to verify the upper bounds in Theorem~\ref{th:bounds-linset}. These upper bounds are then derived in Section~\ref{subsec:linset-ubs}. Section~\ref{subsec:linset-lbs} is devoted to the derivation of the lower bounds.
\subsection{The Shift Lemma} \label{subsec:shift-lemma}
In this section, we assume that ${\mathcal{L}}$ is a concept class over a universe ${\mathcal{X}} \in \{{\mathbb N}_0,{\mathbb Q}_0^+,\mathbbm{R}_0^+\}$. We furthermore assume that $0$ is contained in every concept $L \in {\mathcal{L}}$. We can extend ${\mathcal{L}}$ to a larger class, namely the shift-extension ${\mathcal{L}}'$ of ${\mathcal{L}}$, by allowing each of its concepts to be shifted by some constant which is taken from ${\mathcal{X}}$: \[ {\mathcal{L}}' = \{c+L:\ (c\in{\mathcal{X}}) \wedge (L\in{\mathcal{L}})\} \enspace . \] The next result states that this extension has only a minor effect on the complexity measures $\mathrm{PBTD}$ and $\mathrm{PBTD}^+$:
\begin{lemma} [Shift Lemma]\label{lem:shift} With the above notation and assumptions, the following holds: \[ \mathrm{PBTD}({\mathcal{L}}) \le \mathrm{PBTD}({\mathcal{L}}') \le 1+\mathrm{PBTD}({\mathcal{L}})\ \mbox{ and }\ \mathrm{PBTD}^+({\mathcal{L}}) \le \mathrm{PBTD}^+({\mathcal{L}}') \le 1+\mathrm{PBTD}^+({\mathcal{L}}) \enspace . \] \end{lemma}
\begin{proof} It suffices to verify the inequalities $\mathrm{PBTD}({\mathcal{L}}') \le 1+\mathrm{PBTD}({\mathcal{L}})$ and $\mathrm{PBTD}^+$ $({\mathcal{L}}') \le 1+\mathrm{PBTD}^+({\mathcal{L}})$ because the other inequalities hold by virtue of monotonicity. Let $T$ be an admissible mapping for ${\mathcal{L}}$. It suffices to show that $T$ can be transformed into an admissible mapping $T'$ for ${\mathcal{L}}'$ such that $\mathrm{ord}(T') \le 1+\mathrm{ord}(T)$ and such that $T'$ is positive provided that $T$ is positive. To this end, we define $T'$ as follows: \[ T'(c+L) = \{(c,+)\} \cup \{(c+x,b):\ (x,b) \in T(L)\} \enspace . \] Obviously $\mathrm{ord}(T') \le 1+\mathrm{ord}(T)$. Note that $c \in c+L$ because of our assumption that $0$ is contained in every concept in ${\mathcal{L}}$. Moreover, since the admissibility of $T$ implies that $L$ is consistent with $T(L)$, the above definition of $T'(c+L)$ makes sure that $c+L$ is consistent with $T'(c+L)$. It suffices therefore to show that the relation $\mathrm{trcl}(R_{T'})$ is asymmetric. Consider a pair $(c'+L',c+L) \in R_{T'}$. By the definition of $R_{T'}$, it follows that $c'+L'$ is consistent with $T'(c+L)$. Because of $(c,+) \in T'(c+L)$, we must have $c' \le c$. Suppose that $c'=c$. In this case, $L'$ must be consistent with $T(L)$. Thus $L' \prec_T L$. This reasoning implies that $(c'+L',c+L) \in R_{T'}$ can happen only if either $c'<c$ or $(c'=c) \wedge (L' \prec_T L)$. Since $\prec_T$ is asymmetric, we may now conclude that $\mathrm{trcl}(R_{T'})$ is asymmetric, as desired. Finally note that, according to our definition above, the mapping $T'$ is positive provided that $T$ is positive. This concludes the proof. \end{proof}
\subsection{The Upper Bounds in Theorem~\ref{th:bounds-linset}} \label{subsec:linset-ubs}
We remind the reader that the equality $\mathrm{PBTD}^+(\mathrm{LINSET}_k)=k$ was stated in Example~\ref{exmp:pbtdpluslinset}. We will show in Lemma~\ref{lem:nelinset-ub} that $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}_k)\le k$. In combination with the Shift Lemma, this implies that $\mathrm{PBTD}^+(\mathrm{LINSET}'_k) \le k+1$ and $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}'_k) \le k+1$. All remaining upper bounds in Theorem~\ref{th:bounds-linset} follow now by virtue of monotonicity.
\begin{lemma}\label{lem:nelinset-ub} $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}_k)\le k$. \end{lemma}
\begin{proof} We want to show that there is a preference relation for which $k$ positive examples suffice to teach any concept in $\mathrm{NE}\mbox{-}\mathrm{LINSET}_k$. To this end, let $G=\{g_1,\ldots,g_\ell\}$ be a generator set with $\ell \le k$ where $g_1 < \ldots < g_\ell$. We use $\mathrm{sum}(G) = g_1 + \ldots + g_\ell$ to denote the sum of all generators in $G$. We say that $g_i$ is a {\em redundant generator} in $G$ if $g_i \in \spn{\{g_1,\ldots,g_{i-1}\}}$. Let $G^* = \{g_1^*,\ldots,g^*_{\ell^*}\} \subseteq G$ with $g^*_1 < \ldots < g^*_{\ell^*}$ be the set of non-redundant generators in $G$ and let $\mathrm{tuple}(G) = (g_1^*,\ldots,g_{\ell^*}^*)$ be the corresponding ordered sequence. Then $G^*$ is an independent subset of $G$ generating the same linear set as $G$ when allowing zero coefficients, i.e., we have $\spn{G^*} = \spn{G}$ (although $\spn{G^*}_+ \neq \spn{G}_+$ whenever $G^*$ is a proper subset of $G$).
To define a suitable preference relation, let $G,\widehat G$ be generator sets of size $k$ or less with $\mathrm{tuple}(G) = (g^*_1,\ldots,g^*_{\ell^*})$ and $\mathrm{tuple}(\widehat G) = (\widehat g^*_1,\ldots,\widehat g^*_{\widehat\ell^*})$. Let the student prefer $G$ over $\widehat G$ if any of the following conditions is satisfied: \begin{description} \item[Condition 1:] $\mathrm{sum}(G)>\mathrm{sum}(\widehat G)$. \item[Condition 2:] $\mathrm{sum}(G)=\mathrm{sum}(\widehat G)$ and $\mathrm{tuple}(G)$ is lexicographically greater than \linebreak[4]$\mathrm{tuple}$ $(\widehat G)$ without having $\mathrm{tuple}(\widehat G)$ as prefix. \item[Condition 3:] $\mathrm{sum}(G)=\mathrm{sum}(\widehat G)$ and $\mathrm{tuple}(G)$ is a proper prefix of $\mathrm{tuple}(\widehat G)$. \end{description}
To teach a concept $\spn{G}_+\in\mathrm{NE}\mbox{-}\mathrm{LINSET}_k$ with $\mathrm{sum}(G)=g$ and $\mathrm{tuple}(G) = (g_1^*,\ldots,g_{\ell^*}^*)$, one uses the teaching set \[ S = \{ (g,+) , (g+g_1^*,+) ,\ldots, (g+g_{h}^*,+) \} \] where \begin{equation} \label{eq:def-h} h = \left\{ \begin{array}{ll}
\ell^*-1 & \mbox{if $G^*=G$} \\
\ell^* & \mbox{if $G^* \subset G$}
\end{array} \right. \enspace . \end{equation}
Note that $S$ contains at most $|G| \le k$ examples. Let $\widehat G$ with $\spn{\widehat G}_+ \in \mathrm{NE}\mbox{-}\mathrm{LINSET}_k$ denote the generator set that is returned by the student. Clearly $\spn{\widehat G}$ satisfies $\mathrm{sum}(\widehat G)=g$ since \begin{itemize} \item concepts with larger generator sums are inconsistent with $(g,+)$, and \item concepts with smaller generator sums have a lower preference (compare with Condition~1 above). \end{itemize} It follows that $g+g^*_i \in \spn{\widehat G}_+$ is equivalent to $g^*_i \in \spn{\widehat G} = \spn{\widehat G^*}$. We conclude that the smallest generator in $\mathrm{tuple}(\widehat G)$ equals $g^*_1$ since \begin{itemize} \item a smallest generator in $\mathrm{tuple}(\widehat G)$ that is greater than $g^*_1$ would cause an inconsistency with $(g+g^*_1,+)$, and \item a smallest generator in $\mathrm{tuple}(\widehat G)$ that is smaller than $g^*_1$ would have a lower preference (compare with Condition~2 above). \end{itemize} Assume inductively that the $i-1$ smallest generators in $\mathrm{tuple}(\widehat G)$ are $g^*_1,\ldots,g^*_{i-1}$. Since $g^*_i \notin \spn{\{g^*_1,\ldots,g^*_{i-1}\}}$, we may apply a reasoning that is similar to the above reasoning concerning $g^*_1$ and conclude that the $i$'th smallest generator in $\mathrm{tuple}(\widehat G)$ equals $g^*_i$. The punchline of this discussion is that the sequence $\mathrm{tuple}(\widehat G)$ starts with $g^*_1,\ldots,g^*_{h}$ with $h$ given by~(\ref{eq:def-h}). Let $G' = G \setminus G^*$ be the set of redundant generators in $G$ and note that \[ g - \sum_{i=1}^{h}g^*_i = \left\{ \begin{array}{ll}
g^*_{\ell^*} & \mbox{if $G^* = G$} \\
\sum_{g' \in G'}g' & \mbox{if $G^* \subset G$} \\
\end{array} \right. \enspace . \] Let $\widehat G' = \widehat G \setminus \{g^*_1,\ldots,g^*_h\}$. We proceed by case analysis: \begin{description} \item[Case 1:] $G^*=G$. \\ Since $\widehat G$ is consistent with $(g,+)$, we have $\sum_{g' \in \widehat G'}g' = g_{\ell^*}^*$. Since $g^*_{\ell^*} \notin \langle\{g^*_1,\ldots,$ $g^*_{\ell^*-1}\}\rangle$, the set $\widehat G'$ must contain an element that cannot be generated by $g^*_1,\ldots,$ $g^*_{\ell^*-1}$. Given the preferences of the student (compare with Condition~2), she will choose $\widehat G' = \{g^*_{\ell^*}\}$. It follows that $\widehat G = G$. \item[Case 2:] $G^* \subset G$. \\ Here, we have $\sum_{g' \in \widehat G'}g' = \sum_{g' \in G'}g'$. Given the preferences of the student (compare with Condition~3), she will choose $\widehat G$ such that $\widehat G^* = G^*$ and $\widehat G'$ consists of elements from $\spn{G^*}$ that sum up to $\sum_{g' \in G'}g'$ (with $\widehat G' = \left\{\sum_{g' \in G'}g'\right\}$ among the possible choices). Clearly, $\spn{\widehat G}_+ = \spn{G}_+$. \end{description} Thus, in both cases, the student comes up with the right hypothesis. \end{proof}
\subsection{The Lower Bounds in Theorem~\ref{th:bounds-linset}} \label{subsec:linset-lbs}
The lower bounds in Theorem~\ref{th:bounds-linset} are an immediate consequence of the following result:
\begin{lemma} \label{lem:lbs-linset} The following lower bounds are valid: \begin{eqnarray} \mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k) & \ge & k+1 \enspace . \label{eq:lb1} \\ \mathrm{PBTD}(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k) & \ge & k-1 \enspace . \label{eq:lb2} \\ \mathrm{PBTD}(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) & \ge & \frac{k-1}{2} \enspace . \label{eq:lb3} \\ \mathrm{PBTD}(\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) & \ge & k-1 \enspace . \label{eq:lb4} \\ \mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) & \ge & k-1 \enspace . \label{eq:lb5} \end{eqnarray} \end{lemma}
This lemma can be seen as an extension and a strengthening of a similar result in~\cite{GSZ2015} where the following lower bounds were shown: \begin{eqnarray*} \mathrm{RTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}'_k) & \ge & k+1 \enspace .\\ \mathrm{RTD}(\mathrm{NE}\mbox{-}\mathrm{LINSET}'_k) & \ge & k-1 \enspace .\\ \mathrm{RTD}(\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) & \ge & k-1 \enspace . \end{eqnarray*} The proof of Lemma~\ref{lem:lbs-linset} builds on some ideas that are found in~\cite{GSZ2015} already, but it requires some elaboration to obtain the stronger results.
We now briefly explain why the lower bounds in Theorem~\ref{th:bounds-linset} directly follow from Lemma~\ref{lem:lbs-linset}. Note that the lower bound $k-1$ in~(\ref{eq:more-bounds}) is immediate from~(\ref{eq:lb2}) and a monotonicity argument. This is because $\mathrm{NE}\mbox{-}\mathrm{LINSET}'_k\supseteq\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$ as well as $\mathrm{LINSET}'_k\supseteq\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k\supseteq\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$. Note furthermore that $\mathrm{PBTD}^+$ $(\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k) \ge k+1$ because of~(\ref{eq:lb1}) and a monotonicity argument. Then the Shift Lemma implies that $\mathrm{PBTD}^+(\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) \ge k$. Similarly, $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}_k$ $)$ $\geq k-1$ follows from~(\ref{eq:lb5}) and a monotonicity argument. All remaining lower bounds in Theorem~\ref{th:bounds-linset} are obtained from these observations by virtue of monotonicity.
The proof of Theorem~\ref{th:bounds-linset} can therefore be accomplished by proving Lemma~\ref{lem:lbs-linset}. It turns out that the proof of this lemma is quite involved. We will present in Section~\ref{subsubsec:lb0} some theoretical prerequisites. Sections~\ref{subsubsec:lb1} and~\ref{subsubsec:lb234} are devoted to the actual proof of the lemma.
\subsubsection{Some Basic Concepts in the Theory of Numerical Semigroups} \label{subsubsec:lb0}
Recall from Section~\ref{sec:linsets} that $\spn{G} = \left\{\sum_{g \in G}a(g)g: a(g)\in{\mathbb N}_0\right\}$. The elements of $G$ are called {\em generators} of $\spn{G}$. A set $P \subset {\mathbb N}$ is said to be {\em independent} if none of the elements in $P$ can be written as a linear combination (with coefficients from ${\mathbb N}_0$) of the remaining elements (so that $\spn{P'}$ is a proper subset of $\spn{P}$ for every proper subset $P'$ of $P$). It is well known~\cite{RG-S2009} that independence makes generating systems unique, i.e., if $P,P'$ are independent, then $\spn{P} = \spn{P'}$ implies that $P=P'$. Moreover, for every independent set $P$, the following implication is valid: \begin{equation} \label{eq:independent-set} (S \subseteq \spn{P} \wedge P \not\subseteq S)\ \Rightarrow\ (\spn{S} \subset \spn{P}) \enspace . \end{equation}
Let $P = \{a_1,\ldots,a_k\}$ be independent with $a_1 = \min P$. It is well known\footnote{E.g., see~\cite{RG-S2009}} and easy to see that the residues of $a_1,a_2,\ldots,a_k$ modulo $a_1$ must be pairwise distinct (because, otherwise, we would obtain
a dependence). If $a_1$ is a prime and $|P|\ge2$, then the independence of $P$ implies that $\gcd(P)=1$. Thus the following holds:
\begin{lemma} \label{lem:ind-gcd} If $P \subset {\mathbb N}$ is an independent set of cardinality at least $2$ and $\min P$ is a prime, then $\gcd(P)=1$. \end{lemma}
\noindent In the remainder of the paper, the symbols $P$ and $P'$ are reserved for denoting independent sets of generators.
It is well known that $\spn{G}$ is co-finite iff $\gcd(G)=1$~\cite{RG-S2009}. Let $P$ be a finite (independent) subset of ${\mathbb N}$ such that $\gcd(P)=1$. The largest number in ${\mathbb N}\setminus\spn{P}$ is called the {\em Frobenius number of $P$} and is denoted as $F(P)$. It is well known~\cite{RG-S2009} that \begin{equation} \label{eq:frobenius} F(\{p,q\}) = pq-p-q \end{equation} provided that $p,q \ge 2$ satisfy $\gcd(p,q)=1$.
\subsubsection{Proof of~(\ref{eq:lb1})} \label{subsubsec:lb1}
The shift-extension of $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$ is (by way of definition) the following class: \begin{equation} \label{eq:necflinset-extension} \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k =
\{c+\spn{P}_+:\ (c\in{\mathbb N}_0) \wedge (P\subset{\mathbb N}) \wedge (|P| \le k) \wedge (\gcd(P)=1)\} \enspace . \end{equation} It is easy to see that this can be written alternatively in the form \begin{equation} \label{eq:necflinset-rewritten} \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k = \left\{N+\spn{P}:\
N\in{\mathbb N}_0 \wedge P\subset{\mathbb N} \wedge |P| \le k \wedge \gcd(P)=1 \wedge \sum_{p\in P}p \le N\right\} \end{equation} where $N$ in~(\ref{eq:necflinset-rewritten}) corresponds to $c+\sum_{p \in P}p$ in~(\ref{eq:necflinset-extension}).
For technical reasons, we define the following subfamilies of $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$. For each $N\ge0$, let \[ \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N] = \{N+L : L\in\mathrm{LINSET}_k[N]\} \] where \[ \mathrm{LINSET}_k[N] = \left\{\spn{P}\in\mathrm{LINSET}_k : (\gcd(P)=1) \wedge \left(\sum_{p \in P}p \le N\right)\right\} \enspace . \] In other words, $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]$ is the subclass consisting of all concepts in $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$ (written in the form~(\ref{eq:necflinset-rewritten})) whose constant is $N$.
\noindent A central notion for proving~(\ref{eq:lb1}) is the following one: \begin{definition} \label{def:special-set} Let $k,N\ge2$ be integers. We say that a set $L\in\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'$ is {\em $(k,N)$-special} if it is of the form $L = N+\spn{P}$ such that the following holds: \begin{enumerate} \item $P$ is an independent set of cardinality $k$ and $\min P$ is a prime (so that $\gcd(P)=1$ according to Lemma~\ref{lem:ind-gcd}, which furthermore implies that $\spn{P}$ is co-finite). \item Let $q(P)$ denote the smallest prime that is greater than $F(P)$ and greater than $\max P$. For $a = \min P$ and $r=0,\ldots,a-1$, let \[ t_r(P) = \min \{s\in\spn{P} : s \equiv r \pmod{a}\}\ \mbox{ and }\ t_{max}(P) = \max_{0 \le r \le a-1}t_r(P) \enspace . \] Then \begin{equation} \label{eq:large-constant} N \ge k(a+t_{max}(P))\ \mbox{ and }\ N \ge q(P)+\sum_{p \in P\setminus\{a\}}p \enspace . \end{equation} \end{enumerate} \end{definition}
We need at least $k$ positive examples in order to distinguish a $(k,N)$-special set from all its proper subsets in $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]$, as the following result shows:
\begin{lemma} \label{lem:special-sets} For all $k\ge2$, the following holds. If $L\in\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'$ is $(k,N)$-special, then $L \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]$ and $I'(L,\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]) \ge k$. \end{lemma}
\begin{proof} Suppose that $L = N+\spn{P}$ is of the form as described in Definition~\ref{def:special-set}. Let $P = \{a,a_2,\ldots,a_k\}$ with $a = \min P$. For the sake of simplicity, we will write $t_r$ instead of $t_r(P)$ and $t_{max}$ instead of $t_{max}(P)$. The independence of $P$ implies that $t_{a_i \bmod a} = a_i$ for $i=2,\ldots,k$. It follows that $t_{max} \ge \max P$. Since, by assumption, $N \ge k \cdot t_{max}$, it becomes obvious that $L \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]$.
Assume by way of contradiction that the following holds: \begin{itemize} \item[(A)] There is a weak spanning set $S$ of size $k-1$ for $L$ w.r.t.~$\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]$. \end{itemize} Since $N$ is contained in any concept from $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k[N]$, we may assume that $N \notin S$ so that $S$ is of the form $S = \{N+x_1,\ldots,N+x_{k-1}\}$ for integers $x_i\ge1$. For $i = 1,\ldots,k-1$, let $r_i = x_i \bmod{a} \in \{0,1,\ldots,a-1\}$. It follows that each $x_i$ is of the form $x_i = q_ia+t_{r_i}$ for some integer $q_i\ge0$. Let $X = \{x_1,\ldots,x_{k-1}\}$. We proceed by case analysis: \begin{description} \item[Case 1:] $X \subseteq \{a_2,\ldots,a_k\}$
(so that, in view of $|X|=k-1$, we even have $X = \{a_2,\ldots,a_k\}$). \\ Let $L' = N+\spn{X}$. Then $S \subseteq L'$. Note that $X \subseteq P$ but $P \not\subseteq X$. We may conclude from~(\ref{eq:independent-set}) that $\spn{X} \subset \spn{P}$ and, therefore, $L' \subset L$. Thus $L'$ is a proper subset of $L$ which contains $S$. Note that~(\ref{eq:large-constant}) implies that $N \ge \sum_{i=2}^{k}a_i = \sum_{i=1}^{k-1}x_i$. If $\gcd(X)=1$, then $L' \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}[N]$ and we have an immediate contradiction to the above assumption~(A). Otherwise, if $\gcd(X)\ge2$, then we define $L'' = N+\spn{X\cup\{q(P)\}}$. Note that $S \subseteq L' \subseteq L''$. Since $q(P)>F(P)$, we have $X\cup\{q(P)\} \subseteq \spn{P}$ and, since $q(P) > \max P$, we have $P \not\subseteq X\cup\{q(P)\}$. We may conclude from~(\ref{eq:independent-set}) that $\spn{X\cup\{q(P)\}} \subset \spn{P}$ and, therefore, $L'' \subset L$. Thus, $L''$ is a proper subset of $L$ which contains $S$. Because $X = \{a_2,\ldots,a_k\}$ and $q(P)$ is a prime that is greater than $\max P$, it follows that $\gcd(X\cup\{q(P)\}) = 1$. In combination with~(\ref{eq:large-constant}), it easily follows now that $L'' \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}[N]$. Putting everything together, we arrive at a contradiction to the assumption~(A). \item[Case 2:] $X \not\subseteq \{a_2,\ldots,a_k\}$. \\ If $r_i=0$ for $i=1,\ldots,k-1$, then each $x_i$ is a multiple of $a$. In this case, $N+\spn{a,q(P)}$ is a proper subset of $L = N+\spn{P}$ that is consistent with $S$, which yields a contradiction. We may therefore assume that there exists $i' \in\{1,\ldots,k-1\}$ such that $r_{i'}\neq0$. From the case assumption, $X \not\subseteq \{a_2,\ldots,a_k\}$, it follows that there must exist an index $i''\in\{1,\ldots,k-1\}$ such that $q_{i''} \ge 1$ or $t_{r_{i''}}\notin\{a_2,\ldots,a_k\}$. For $i=1,\ldots,k-1$, let $q'_i = \min\{q_i,1\}$ and $x'_i = q'_ia+t_{r_i}$. 
Note that $q'_{i''}=1$ iff $q_{i''} \ge 1$. Define $L'' = N+\spn{X'}$ for $X' = \{a,x'_1,\ldots,x'_{k-1}\}$ and observe the following. First, the set $L''$ clearly contains $S$. Second, the choice of $x'_1,\ldots,x'_{k-1}$ implies that $X' \subseteq \spn{P}$. Third, it easily follows from $q'_{i''}=1$ or $t_{r_{i''}}\notin\{a_2,\ldots,a_k\}$ that $P \not\subseteq \{a,x'_1,\ldots,x'_{k-1}\}$. We may conclude from~(\ref{eq:independent-set}) that $\spn{X'} \subset \spn{P}$ and, therefore, $L'' \subset L$. Thus, $L''$ is a proper subset of $L$ which contains $S$. Since $r_{i'} \neq 0$ and $a$ is a prime, it follows that $\gcd(a,x'_{i'})=1$ and, therefore, $\gcd(X')=1$. In combination with~(\ref{eq:large-constant}), it easily follows now that $L'' \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}[N]$. Putting everything together, we obtain again a contradiction to the assumption~(A). \end{description} \end{proof}
For the sake of brevity, let ${\mathcal{L}} = \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'$. Assume by way of contradiction that there exists a positive mapping $T$ of order $k$ that is admissible for ${\mathcal{L}}_k$. We will pursue the following strategy: \begin{enumerate} \item We define a set $L \in {\mathcal{L}}_k$ of the form $L = N+p+\spn{1}$. \item We define a second set $L' = N+\spn{G} \in {\mathcal{L}}$ that is $(k,N)$-special and consistent with $T^+(L)$. Moreover, $L' \setminus L = \{N\}$. \end{enumerate} If this can be achieved, then the proof will be accomplished as follows: \begin{itemize} \item According to Lemma~\ref{lem:special-sets}, $T^+(L')$ must contain at least $k$ examples (all of which are different from $N$) for distinguishing $L'$ from all its proper subsets in ${\mathcal{L}}_k[N]$. \item Since $L'$ is consistent with $T^+(L)$, the set $T^+(L')$ must contain an example which distinguishes $L'$ from $L$. But the only example which fits this purpose is $(N,+)$. \item The discussion shows that $T^+(L')$ must contain $k$ examples in order to distinguish $L'$ from all its proper subsets in ${\mathcal{L}}_k$ plus one additional example, $N$, needed to distinguish $L'$ from $L$. \item We obtain a contradiction to our initial assumption that $T^+$ is of order $k$. \end{itemize} We still have to describe how our proof strategy can actually be implemented. We start with the definition of $L$. Pick the smallest prime $p \ge k+1$. Then $\{p,p+1,\ldots,p+k\}$ is independent. Let $M = F(\{p,p+1\}) \stackrel{(\ref{eq:frobenius})}{=} p(p+1)-p-(p+1)$. An easy calculation shows that $k\ge2$ and $p \ge k+1$ imply that $M \ge p+k$. Let $I = \{p,p+1,\ldots,M\}$. Choose $N$ large enough so that all concepts of the form
\[ N+\spn{P}\ \mbox{ where}\ |P|=k,\ p = \min P \mbox{ and } P \subseteq I \] are $(k,N)$-special. With these choices of $p$ and $N$, let $L = N+p+\spn{1}$. Note that $N+p,N+p+1 \in T^+(L)$ because, otherwise, one of the concepts $N+p+1+\spn{1},N+p+\spn{2,3} \subset L$ would be consistent with $T^+(L)$ whereas $T^+(L)$ must distinguish $L$ from all its proper subsets in
${\mathcal{L}}_k$. Setting $A = \{x: N+x \in T^+(L)\}$, it follows that $|A| = |T^+(L)| \le k$ and $p,p+1 \in A$. The set $A$ is not necessarily independent but it contains an independent subset $B$ such that $p,p+1 \in B$ and $\spn{A} = \spn{B}$. Since $M = F(\{p,p+1\})$, it follows that any integer greater than $M$ is contained in $\spn{p,p+1}$. Since $B$ is an independent extension of $\{p,p+1\}$, it cannot contain any integer greater than $M$.
It follows that $B \subseteq I$. Clearly, $|B| \le k$ and $\gcd(B)=1$. We would like to transform $B$ into another generating system $G \subseteq I$ such that
\[ \spn{B} \subseteq \spn{G}, \gcd(G) = 1 \mbox{ and } |G|=k \enspace . \]
If $|B|=k$, we can simply set $G=B$. If $|B|<k$, then we make use of the elements in the independent set $\{p,p+1,\ldots,p+k\} \subseteq I$ and add them, one after the other, to $B$ (thereby removing other elements from $B$ whenever their removal leaves $\spn{B}$ invariant) until the resulting set $G$ contains $k$ elements. We now define the set $L'$ by setting $L' = N+\spn{G}$. Since $G \subseteq I = \{p,p+1,\ldots,M\}$, and $p,p+1 \in G$, it follows that $p = \min G$, $\gcd(G)=1$ and $\min(L' \setminus\{N\})$ is $N+p$. Thus, $L' \setminus L = \{N\}$, as desired. Moreover, since $N$ had been chosen large enough, the set $L'$ is $(k,N)$-special. Thus $L$ and $L'$ have all properties that are required by our proof strategy and the proof of~(\ref{eq:lb1}) is complete.
\subsubsection{Proof of~(\ref{eq:lb2}),~(\ref{eq:lb3}),~(\ref{eq:lb4}) and~(\ref{eq:lb5})} \label{subsubsec:lb234}
\noindent We make use of some well-known (and trivial) lower bounds on $\mathrm{TD}_{min}$:
\begin{example} \label{ex:powerset} For every $k\in{\mathbb N}$, let $[k] = \{1,2,\ldots,k\}$, let $2^{[k]}$ denote the powerset of $[k]$ and, for all $\ell = 0,1,\ldots,k$, let
\[ {[k] \choose \ell} = \{S \subseteq [k]:\ |S|=\ell\} \] denote the class of those subsets of $[k]$ that have exactly $\ell$ elements. It is trivial to verify that \[ \mathrm{TD}_{min}\left(2^{[k]}\right) = k\ \mbox{ and }\ \mathrm{TD}_{min}\left({[k] \choose \ell}\right) = \min\{\ell,k-\ell\} \enspace . \] \end{example}
In view of $\mathrm{PBTD}^+(\mathrm{LINSET}_k) = k$, the next results
show that negative examples are of only limited help as far as preference-based teaching of concepts from $\mathrm{LINSET}_k$ is concerned:
\begin{lemma} \label{lem:td-min} For every $k\ge1$ and for all $\ell=0,\ldots,k-1$, let \begin{eqnarray*} {\mathcal{L}}_k & = & \{\spn{k,p_1,\ldots,p_{k-1}}:\ p_i \in \{k+i,2k+i\}\} \enspace , \\
{\mathcal{L}}_{k,\ell} & = & \{\spn{k,p_1,\ldots,p_{k-1}}\in{\mathcal{L}}_k:\ |\{i : p_i=k+i\}| = \ell\} \enspace . \end{eqnarray*} With this notation, the following holds: \[ \mathrm{TD}_{min}({\mathcal{L}}_k) \ge k-1\ \mbox{ and }\ \mathrm{TD}_{min}({\mathcal{L}}_{k,\ell}) \ge \min\{\ell,k-1-\ell\} \enspace . \] \end{lemma}
\begin{proof} For $k=1$, the assertion in the lemma is vacuous. Suppose therefore that $k\ge2$. An inspection of the generators $k,p_1,\ldots,p_{k-1}$ with $p_i \in \{k+i,2k+i\}$ shows that \begin{eqnarray*} {\mathcal{L}}_k & = & \{L_{k,S}:\ S \subseteq \{k+1,k+2,\ldots,2k-1\}\} \\
${\mathcal{L}}_{k,\ell} & = & \{L_{k,S}:\ (S \subseteq \{k+1,k+2,\ldots,2k-1\}) \wedge (|S|=\ell)\}\ \end{eqnarray*} where \[ L_{k,S} = \{0,k\} \cup \{2k,2k+1,\ldots\} \cup S \enspace . \] Note that the examples in $\{0,1,\ldots,k\} \cup \{2k,2k+1,\ldots\}$ are redundant because they do not distinguish between distinct concepts from ${\mathcal{L}}_k$. The only useful examples are therefore contained in the interval $\{k+1,k+2,\ldots,2k-1\}$. From this discussion, it follows that teaching the concepts of ${\mathcal{L}}_k$ (resp.~of ${\mathcal{L}}_{k,\ell}$) is not essentially different from teaching the concepts of $2^{[k-1]}$ $\left(\mbox{resp.~of }{[k-1] \choose \ell}\right)$. This completes the proof of the lemma because we know from Example~\ref{ex:powerset} that $\mathrm{TD}_{min}(2^{[k-1]}) = k-1$ and $\mathrm{TD}_{min}\left({[k-1] \choose \ell}\right) = \min\{\ell,k-1-\ell\}$. \end{proof}
We claim now that the inequalities~(\ref{eq:lb2}),~(\ref{eq:lb3}) and~(\ref{eq:lb4}) are valid, i.e., we claim that the following holds: \begin{enumerate} \item $\mathrm{PBTD}(\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) \ge k-1$. \item $\mathrm{PBTD}(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) \ge \lfloor (k-1)/2 \rfloor$. \item $\mathrm{PBTD}(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k) \ge k-1$. \end{enumerate}
\begin{proof} For $k=1$, the inequalities are obviously valid. Suppose therefore that $k\ge2$. \begin{enumerate} \item Since $\gcd(k,k+1) = \gcd(k,2k+1) = 1$, it follows that ${\mathcal{L}}_k$ is a finite subclass of $\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$. Thus $\mathrm{PBTD}(\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) \ge \mathrm{PBTD}({\mathcal{L}}_k) \ge \mathrm{TD}_{min}({\mathcal{L}}_k) \ge k-1$. \item Define ${\mathcal{L}}_{k}[N] = \{N+L:\ L\in{\mathcal{L}}_k\}$ and ${\mathcal{L}}_{k,\ell}[N] = \{N+L:\ L\in {\mathcal{L}}_{k,\ell}\}$. Clearly $\mathrm{TD}_{min}({\mathcal{L}}_k[N]) = \mathrm{TD}_{min}({\mathcal{L}}_k)$ and $\mathrm{TD}_{min}({\mathcal{L}}_{k,\ell}[N]) = \mathrm{TD}_{min}({\mathcal{L}}_{k,\ell})$ holds for every $N\ge0$. It follows that the lower bounds in Lemma~\ref{lem:td-min} are also valid for the classes ${\mathcal{L}}_k[N]$ and ${\mathcal{L}}_{k,\ell}[N]$ in place of ${\mathcal{L}}_k$ and ${\mathcal{L}}_{k,\ell}$, respectively. Let \begin{equation} \label{eq:special-shift} N(k) = k^2 + (k-1-\lfloor (k-1)/2 \rfloor)k + \sum_{i=1}^{k-1}i = k^2+(k-1-\lfloor (k-1)/2 \rfloor)k+\frac{1}{2}(k-1)k \enspace . \end {equation} It suffices to show that $N(k)+{\mathcal{L}}_{k,\lfloor (k-1)/2 \rfloor}$ is a finite subclass of $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$. To this end, first note that \[ \spn{k,p_1,\ldots,p_{k-1}}_+ = k+\sum_{i=1}^{k-1}p_i + \spn{k,p_1,\ldots,p_{k-1}} \enspace . \] Call $p_i$ ``light'' if $p_i = k+i$ and call it ``heavy'' if $p_i = 2k+i$. Note that a concept $L$ from $N(k)+{\mathcal{L}}_{k,\ell}$ is of the general form \begin{equation} \label{eq:general-form} L = N(k) + \spn{k,p_1,\ldots,p_{k-1}} \end{equation} with exactly $\ell$ light parameters among $p_1,\ldots,p_{k-1}$. A straightforward calculation shows that, for $\ell = \lfloor (k-1)/2 \rfloor$, the sum $k+\sum_{i=1}^{k-1}p_i$ equals the number $N(k)$ as defined in~(\ref{eq:special-shift}). 
Thus, the concept $L$ from~(\ref{eq:general-form}) with exactly $\lfloor (k-1)/2 \rfloor$ light parameters among $\{p_1,\ldots,p_{k-1}\}$ can be rewritten as follows: \[ L = N(k) + \spn{k,p_1,\ldots,p_{k-1}} = \spn{k,p_1,\ldots,p_{k-1}}_+ \enspace . \] This shows that $L \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$. As $L$ is a concept from $N(k)+$ \linebreak[4]${\mathcal{L}}_{k,\lfloor (k-1)/2 \rfloor}$ in general form, we may conclude that $N(k)+{\mathcal{L}}_{k,\lfloor (k-1)/2 \rfloor}$ is a finite subclass of $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$, as desired. \item The proof of the third inequality is similar to the above proof of the second one. It suffices to show that, for every $k\ge2$, there exists $N \in {\mathbb N}$ such that $N+{\mathcal{L}}_k$ is a subclass of $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$. To this end, we set $N = 3k^2$. A concept $L$ from $3k^2+{\mathcal{L}}_k$ is of the general form \[ L = 3k^2+\spn{k,p_1,\ldots,p_{k-1}} \] with $p_i \in \{k+i,2k+i\}$ (but without control over the number of light parameters). It is easy to see that the constant $3k^2$ is large enough so that $L$ can be rewritten as \[ L = 3k^2 - \left(k+\sum_{i=1}^{k-1}p_i\right) + \spn{k,p_1,\ldots,p_{k-1}}_+ \] where $3k^2 - \left(k+\sum_{i=1}^{k-1}p_i\right) \ge 0$. This shows that $L \in \mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$. As $L$ is a concept from $3k^2+{\mathcal{L}}_k$ in general form, we may conclude that $3k^2+{\mathcal{L}}_k$ is a finite subclass of $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}'_k$, as desired.
\end{enumerate} \end{proof}
We conclude with the proof of the inequality~(\ref{eq:lb5}).
\begin{lemma} \label{lem:pbtdplusnecflinsetklb} $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{LINSET}_k) \geq \mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k) \geq k-1$. \end{lemma}
\begin{proof} The class $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_1$ contains only ${\mathbb N}$, and so $\mathrm{PBTD}^+(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_1)$ $= 0$. The class $\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_2$ contains at least two members so that $\mathrm{PBTD}^+$ \linebreak[4]$(\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_2)$ $\geq 1$. Now assume $k \geq 3$. Set \[ N = \sum_{i=0}^{k-1} \left(k+i\right) \] and \[ L = \spn{k,k+1,\ldots,2k-1}_+ = N+\spn{k,k+1,\ldots,2k-1}
= \{N\} \cup \{N+k,N+k+1,\ldots\} \enspace . \] Choose and fix an arbitrary set $S \subseteq L$ of size $k-2$. It suffices to show $S$ is not a weak spanning set for $L$ w.r.t.~$\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$. If $S$ does not contain $N$, then the set \[ L' = \spn{N+k-1,1}_+ = L\setminus\{N\} \] satisfies $S \subset L' \subset L$ so that $S$ cannot be a weak spanning set for $L$. Suppose therefore from now on that $N \in S$. We proceed by case analysis: \begin{description} \item[Case 1:] $k = 3$. \\
Then $N = 12$, $L = 12+\spn{3,4,5} = \{12\} \cup \{15,16,17,\ldots\}$. Moreover $|S|=1$ so that $S=\{12\}$. Now the set $L' = \spn{5,7}_+ = 12+\spn{5,7}$ satisfies $S \subset L' \subset L$ so that $S$ cannot be a weak spanning set for $L$. \item[Case 2:] $k = 4$. \\
Then $N = 22$, $L = 22+\spn{4,5,6,7} = \{22\} \cup \{26,27,28,\ldots\}$. Moreover $|S|=2$ so that $S=\{22\} \cup \{26+x\}$ for some $x\ge0$. Let $a = (x \bmod 4) \in \{0,1,2,3\}$. It is easy to check that the set \[ L' = \left\{ \begin{array}{ll}
22+\spn{4,5,13} & \mbox{if $a\in\{0,1\}$} \\
22+\spn{4,7,11} & \mbox{if $a=3$} \\
22+\spn{5,6,11} & \mbox{if $x=a=2$} \\
22+\spn{4,5,13} & \mbox{if $x>a=2$}
\end{array} \right . \] satisfies $S \subset L' \subset L$ so that $S$ cannot be a weak spanning set for $L$. \item[Case 3:] $k \geq 5$. \\ Then the set $S$ has the form $S = \{N\} \cup \{N+k+x_1,\ldots,N+k+x_{k-3}\}$ for distinct integers $x_1,\ldots,x_{k-3}\ge0$. For $i=1,\ldots,k-3$, let $a_i = (x_i \bmod k) \in \{0,\ldots,k-1\}$. Then the set \[ L' = N + \spn{k,k+a_1,\ldots,k+a_{k-3},N-(k-2)k-(a_1+\ldots+a_{k-3})} \] satisfies $S \subset L' \subset L$ so that $S$ cannot be a weak spanning set for $L$. \end{description} In any case, we came to the conclusion that a subset of $L$ with only $k-2$ elements cannot be a weak spanning set for $L$ w.r.t.~$\mathrm{NE}\mbox{-}\mathrm{CF}\mbox{-}\mathrm{LINSET}_k$. \end{proof}
\end{document} |
\begin{document}
\title{Information propagation through quantum chains with fluctuating disorder}
\author{Christian K.\ \surname{Burrell}} \affiliation{Department of Mathematics, Royal Holloway University of London, Egham, Surrey, TW20 0EX, UK}
\author{Jens \surname{Eisert}} \affiliation{Institute of Physics and Astronomy, University of Potsdam, 14476 Potsdam, Germany} \affiliation{QOLS, Blackett Laboratory, Imperial College London, Prince Consort Road, London SW7 2BW, UK}
\author{Tobias J.\ \surname{Osborne}} \affiliation{Department of Mathematics, Royal Holloway University of London, Egham, Surrey, TW20 0EX, UK}
\begin{abstract} We investigate the propagation of information through one-dimensional quantum chains in fluctuating external fields. We find that information propagation is suppressed, but in a quite different way compared to the situation with static disorder. We study two settings: (i) a general model where an unobservable fluctuating field acts as a source of decoherence; (ii) the XX model with both observable and unobservable fluctuating fields. In the first setting we establish a noise threshold below which information can propagate ballistically and above which information is localised. In the second setting we find localisation for all levels of unobservable noise, whilst an observable field can yield diffusive propagation of information. \end{abstract}
\maketitle
Quantum lattice models as frequently encountered in the condensed-matter context or in quantum optics obey a kind of locality: once locally excited the excitation will travel through the lattice at a finite velocity. For spin models this speed at which information can propagate is generally limited by the \emph{Lieb-Robinson bound} \cite{LieRob72} which says that there is an effective \emph{``light cone''} (or \emph{``sound cone''}) for correlations, with exponentially decaying tails, whose radius grows \emph{linearly} with time \cite{NacSim05}. The importance of understanding information propagation in interacting quantum systems was only understood recently when it was exploited to establish the Lieb-Schultz-Mattis theorem in higher dimensions \cite{Has04}. In generalising the proof of this breakthrough result an intimate link between the speed of information propagation and the efficient simulation of these systems has been revealed \cite{Osb05,Has08}.
The argument underlying the Lieb-Robinson bound relies only on the ultra-violet cutoff imposed by the lattice structure and is therefore very general. Hence there are some situations where the Lieb-Robinson bound is not the best available: when we know more about the structure of the interactions it should be possible to construct tighter bounds---an intuition which has been borne out in Refs.\ \cite{BurOsb07, ZniProPre07} where it was shown that for the XX model with static disorder there exists an effective ``light cone'' whose radius grows \emph{logarithmically} with time \cite{BurOsb07}. In this example \emph{Anderson localisation} \cite{And57} for the tight-binding model was combined with a Lieb-Robinson type argument to supply the stronger bound on the multiparticle dynamics.
\begin{figure}
\caption{Schematic illustration of the different ``light cones'' for
the different regimes: (a) the (lower) \emph{linear} one
for ordered systems; (b) the (upper) \emph{logarithmic} one
for static disorder; (c) the (middle) \emph{diffusive} one for dynamic disorder. In each regime information is strongly attenuated outside the associated light cone.}
\label{fig:light-cones}
\end{figure}
Interacting quantum lattice systems are also of great interest in quantum information science as they could be used as \emph{quantum wires} to join together different parts of quantum computers or quantum devices \cite{Bur06, Bos08}. When used in this manner quantum spin chains become \emph{quantum channels} and the standard Lieb-Robinson bound then supplies a fundamental lower bound on the time required to transmit information: the receiver must wait for a time at least proportional to the length of the chain before the message arrives. For the disordered XX model mentioned above the bound of Ref.\ \cite{BurOsb07} shows that the receiver must wait for a time which is \emph{exponential} in the chain length before information about the message can be extracted, thus rendering such systems essentially useless as quantum channels.
In this work we obtain bounds on the propagation of information through interacting quantum spin chains in a \emph{fluctuating} disordered field, a setting which is ubiquitous in the condensed matter context \cite{CM}. It will become apparent that---in some situations---information propagates diffusively; in other situations the system becomes localised in the sense that information can propagate effectively by at most a constant number of sites.
We study two models: (i) a spin chain with general nearest-neighbour interactions in a noisy field fluctuating both in strength and direction; (ii) an XX spin chain in a field fluctuating in strength only. To solve the dynamics of both these models we find a master equation which describes the time evolution. We then explicitly calculate an improved Lieb-Robinson bound for the first model before finding various correlation functions for the second model. This allows us to identify several different possible regimes of information propagation: either localised or ballistic propagation for the first model and either localised or diffusive propagation for the second.
{\it Master equation ---} We derive a master equation for each of our models with time-dependent Hamiltonian
\begin{eqnarray}\label{eqn:hamiltonian}
H(\xi,t) = H_0 + \sum_{\alpha,j} \xi^\alpha_j(t) \sigma^\alpha_j
\end{eqnarray} where $\sigma^x, \sigma^y, \sigma^z$ are the Pauli matrices, $H_0$ is the intrinsic system Hamiltonian and the second term describes the interaction with the noisy field. For the first model $\alpha$ runs over the three directions $x,y,z$ whilst for the second model the field direction is fixed ($\alpha=z$); $j$ runs over all lattice sites and $\xi_j^\alpha(t)$ are the field strengths at time $t$, which must be distributed according to a probability distribution with finite second moment (e.g. a Gaussian distribution, in which case $\xi_j^\alpha = dB_j^\alpha$ describes a Brownian motion $B_j^\alpha$).
After time $t$ an initial state $\rho(0)$ evolves to $\rho(\xi,t) = U(\xi,t)\rho(0)U(\xi,t)^\dag$ where the propagator $U(\xi,t)$ is given by the time-ordered exponential $U(\xi,t)=\mathcal{T}\exp{(-i\int_0^t H(\xi,s)ds)}$. We now describe the derivation of the master equation for the \emph{ensemble averaged density operator} $\rho(t) = \mathbb{E}_\xi \rho(\xi,t)$ where $\mathbb{E}_\xi$ represents an average over all possible realisations of the disorder. We split the time evolution up into $M$ small time steps and at each step we couple each site to ancillas or meters---one for each direction $x,y,z$ (only one meter per site is used for the model with fixed field direction). When each meter is initialised in a Gaussian state, one can take the small time step limit $M\to\infty$, following the steps of continuous-measurement theory in \cite{CavMil87}, resulting in the master equation
\begin{equation}\label{eqn:master1}
\partial_t \rho(t) = -i[\rho(t), H_0] - \gamma \sum_{\alpha,j}
[[\rho(t),\sigma^\alpha_j],\sigma^\alpha_j]
\end{equation} where $\gamma = c\mathbb{E}(\xi^\alpha_j)^2$, $c>0$, is a constant measuring the strength of the disorder. As before, for the second model we omit the sum over $\alpha$ and simply set $\alpha=z$. The above derivation is valid when all times scales of interest are longer than the typical fluctuation timescale.
{\it General spin chain model ---} In this section we derive a Lieb-Robinson bound for a general spin chain of length $n$ in an unobservable field whose strength and direction fluctuate independently on each site. The Hamiltonian is given by Eq.\ (\ref{eqn:hamiltonian}) with intrinsic Hamiltonian equal to a sum of general nearest-neighbor interactions $H_0 = \sum_j h_j$, where $h_j$ acts on sites $j$ and $j+1$. Note that this model is general enough to include all the standard quantum lattice systems, including the ferromagnetic and antiferromagnetic Heisenberg models and also frustrated systems.
A Lieb-Robinson bound governs the speed of ``light'' or ``sound'' in the lattice model. It is commonly expressed as an upper bound on the \emph{Lieb-Robinson commutator},
\begin{equation*}
C_B(x,t) = \sup_{A_x} \frac{\|[A_x,B(t)]\|}{\|A_x\|}
\end{equation*}
where $A_x$ is an operator acting non-trivially only on site $x$, $B(t) =\mathbb{E}_\xi U^\dag(\xi,t) B U(\xi,t)$ is an ensemble average over all possible realisations of the operator $B$ in the Heisenberg picture, and $\|\cdot\|$ is the operator norm. While the Lieb-Robinson commutator is not directly observable, any bound for it easily translates to a bound on \emph{all} observable time-dependent two-point correlation functions $\langle A B(t) \rangle - \langle A\rangle \langle B(t)\rangle$. Hence, the Lieb-Robinson bound is a convenient bound to express the decay of correlations.
In the Heisenberg picture $B$ obeys an equation similar to the master equation derived in the previous section, namely $\partial_t B(t) = i[H_0,B(t)] - \gamma \sum_j \mathcal{D}_j(B(t))$ where the decoherence operators are defined as $\mathcal{D}_j(B(t))=\sum_\alpha[\sigma^\alpha_j,[\sigma^\alpha_j,B(t)]]$. Defining $\mathcal{D}(B) = \sum_j \mathcal{D}_j(B)$ and using a Taylor expansion we arrive at
\begin{equation*}
\begin{array}{rcl}
\|[A_x,B(t)]\| & \leq & (1-8\varepsilon\gamma n) \, \|[A_x,B(t)+i\varepsilon[H_0,B(t)]]\|\\
& + & \varepsilon\gamma\|[A_x,8 n B(t)-\mathcal{D}(B(t))]\| + O(\varepsilon^2)
\end{array}
\end{equation*}
We deal with each of the non-negligible terms on the right-hand side in turn. Making repeated use of Taylor expansions and the unitary invariance of the operator norm it is easy to see that the first term can be bounded as $\|[A_x,B(t)+i\varepsilon[H_0,B(t)]]\| \leq \varepsilon \|[[H_0,A_x],B(t)]\| + \|[A_x,B(t)]\| + O(\varepsilon^2)$. In order to bound the second term, we note the following two identities: (i) $\mathcal{D}_k(\sigma^\alpha_j) = 8\delta_{j,k}\sigma^\alpha_k$ for all $\alpha\neq 0$ and $\mathcal{D}_k(\sigma^0_j) = 0$, where $\sigma^0_k = \mathbb{I}$; (ii) for all $\beta\neq 0$
\begin{equation}
\sum_{\alpha\in\{0,x,y,z\}} \sigma^\alpha_k \sigma^\beta_k
\sigma^\alpha_k = 0,\,
\sum_{\alpha\in\{0,x,y,z\}} \sigma^\alpha_k \sigma^0_k
\sigma^\alpha_k = 4\mathbb{I}
\end{equation} If we define $B_{\bm\alpha} = \sigma^{\alpha_1}_1 \otimes\cdots\otimes \sigma^{\alpha_n}_n$ to be a tensor product operator indexed by the vector $\bm\alpha = (\alpha_1, \ldots, \alpha_n)$, it is straightforward to use the above identities to show that
\begin{equation}\label{eqn:commutator2}
\begin{array}{l}
\|[A_x,8nB_{\bm\alpha} - \mathcal{D}(B_{\bm\alpha})]\| \\
\qquad \leq
\sum_{k\neq x,\beta\in\{0,x,y,z\}} \|[A_x,\sigma^\beta_k B_{\bm\alpha} \sigma^\beta_k]\|\\
\qquad = 8(n-1)\|[A_x,B_{\bm\alpha}]\|
\end{array}
\end{equation}
We can extend this result to general $B$ by linearity: $B(t) = \sum_{\bm\alpha} c_{\bm\alpha}(t)B_{\bm\alpha}$. This implies that the second term obeys the bound (\ref{eqn:commutator2}) with $B_{\bm\alpha}$ replaced by $B(t)$. Combining the above equations allows us to bound the time derivative $\partial_t C_B(x,t) \leq -8\gamma C_B(x,t) + \sup_{A_x} {\|[[H_0,A_x],B(t)]\|}/{\|A_x\|}$. It remains to rewrite the off-diagonal term, the right-most term containing the double commutator. Note that $[H_0,A_x] = 2\|H_0\|\|A_x\|V$ with $V = \sum_{\alpha,\beta\in\{0,x,y,z\}} (u^{\alpha,\beta}_{x-1}\sigma^\alpha_{x-1}\sigma^\beta_x + u^{\alpha,\beta}_x \sigma^\alpha_x\sigma^\beta_{x+1} )$ where $|u^{\alpha,\beta}_y| \leq \|V\| \leq 1$. By defining the vector $\textbf{\emph{C}}_B(t) = (C_B(1,t), \ldots, C_B(n,t))$ and employing the above expansion for $V$ we are able to rewrite the bound on the time-derivative of the Lieb-Robinson commutator as
\begin{equation*}
\partial_t \textbf{\emph{C}}_B(t) \leq (-8(\gamma-8\|H_0\|)\mathbb{I} +32\|H_0\|R)
\textbf{\emph{C}}_B(t)
\end{equation*} where the inequality is to be understood componentwise and $R_{j,k}=\delta_{j,k+1} + \delta_{j+1,k}$. This has solution
\begin{eqnarray}\label{eqn:lrbound}
C_B(x,t) \leq e^{-8(\gamma-8\|H_0\|)t} \sum_j \left(e^{32\|H_0\|Rt}\right)_{x,j} C_B(j,0).
\end{eqnarray} This is the main result of this section: a new Lieb-Robinson bound for our class of models. The key idea here is that the first term can (provided the noise $\gamma$ is large enough) exponentially suppress information propagation, whilst the second term can cause ballistic propagation of information. We now analyse the interplay between these two effects:
{\it Discussion of the new bound ---} Noting that $\|R\|=2$, it is clear that if $\gamma < 16\|H_0\|$ then our bound is exponentially growing and the original Lieb-Robinson bound is superior to ours: it limits us only to ballistic propagation of information. If, however, $\gamma > 16\|H_0\|$ then the right-hand-side of our new bound (\ref{eqn:lrbound}) is negligible if either (i) $|j-k| \geq t\kappa_\varepsilon$ for some constant $\kappa_\varepsilon>0$ or (ii) $t \geq t_\varepsilon =\log{({C}/{\varepsilon})} /\Gamma$ where $C=\sum_j C_B(j,0)$ and $\Gamma=8\gamma - 128\|H_0\|$ (which is positive precisely when $\gamma > 16\|H_0\|$). Equivalently, the right hand side of the new bound is non-negligible only when $|j-k| < \kappa_\varepsilon t_\varepsilon$, which is to say that non-negligible correlations can propagate by at most a constant number of sites.
\begin{figure}
\caption{Illustration of the dynamics of the correlation function
$c_{j,1}(\xi,t) = \langle\Omega| a_{j}(0) a^\dag_1(t)|\Omega\rangle$ generated by Eq.~(\ref{eq:noisexx}) for a 50 site chain for a specific realisation of the fluctuating field $\xi^z_j(t)$. The horizontal axis is site number $j$ and the vertical axis is the time (for $\gamma = 0.05$, $\gamma = 0.1$, $\gamma = 0.2$ and $\gamma = 0.5$).}
\label{fig:numerics}
\end{figure}
{\it XX spin chain ---} In this section we abandon the Lieb-Robinson commutator in favor of various correlation functions as they are easier to calculate in this specific scenario. In the previous section we studied a noisy field fluctuating in both strength and direction but here we consider a fluctuating field oriented in the $z$-direction. For convenience we study the XX model, although one can expect similar results to hold for other spin chain models with nearest-neighbor interactions and a fixed field direction. We begin by applying the \emph{Jordan Wigner transformation} to map our system of qubits into a system of spinless fermions
\begin{equation}\label{eq:noisexx}
H(\xi,t) = \sum_j \left(\adagop{j}\aop{j+1} + \adagop{j}\aop{j-1} +
\xi^z_j(t)\adagop{j}\aop{j}\right)
\end{equation} The sum is over an infinite chain or a finite ring (in which case we impose periodic boundary conditions). In order to study the propagation of information through the system we introduce \emph{disorder dependent correlation functions} which are the probability amplitude for a fermion to hop from one site to another in a given time interval
\begin{equation*}
c_{j,k}(\xi,t) = \bra{\Omega} \aop{j} U(\xi,t) \adagop{k} \ket{\Omega}
\end{equation*} where $\ket{\Omega}$ is the vacuum. We further define \emph{ensemble averaged correlation functions} $c_{j,k}(t) =\mathbb{E}_\xi c_{j,k}(\xi,t) = \bra{\Omega} \aop{j}(t)\adagop{k}\ket{\Omega}$ where $\aop{j}(t) =\mathbb{E}_\xi U^\dag(\xi,t) \aop{j} U(\xi,t)$ is the ensemble averaged annihilation operator in the Heisenberg picture. Using Eq.\ (\ref{eqn:master1}) we form a differential equation for $\text{tr}[\aop{j}(t)\rho]$, yielding $ \partial_t\aop{j}(t) = -i(\aop{j+1}(t) + \aop{j-1}(t)) - \gamma \aop{j}(t)$. Introducing $\textbf{\emph{a}}(t) = (\aop{1}(t), \aop{2}(t), \ldots)$ allows us to rewrite this as a vector differential equation with solution $\textbf{\emph{a}}(t) = e^{-\gamma t} e^{iRt} \textbf{\emph{a}}(0)$. This in turn implies that the ensemble averaged correlation functions are
\begin{equation*}
c_{j,k}(t) = e^{-\gamma t} ( e^{-iRt} )_{j,k}
\end{equation*}
An analysis similar to that performed previously for the Lieb-Robinson bound reveals that all ensemble averaged correlation functions are exponentially small excepting those for which $|j-k| < \kappa(\varepsilon,\gamma) = \text{const}$. In other words, the information can---on average---propagate by at most a constant number of sites. In parallel with the results of the first model we have localisation of information but in contrast we have no noise threshold. See Fig.\ \ref{fig:numerics}.
It must be noted that this result holds only when we average over the disorder: specific realisations of the disorder could lead to less localised dynamics. In order to see how far the dynamics of a specific realisation of the noise can stray from the averaged dynamics found above we restrict ourselves to the infinite chain. We introduce the discrete \emph{squared position operator} and the \emph{momentum operator}
\begin{equation*}
{x}^2 = \sum_{j=-\infty}^{\infty} j^2 |j\rangle\langle j|,\,
p=\sum_{j} i\left(
|j+1\rangle\langle j| - |j\rangle\langle j+1|
\right)
\end{equation*}
(where $|j\rangle$ denotes the state vector of a single excitation at site $j$). The Heisenberg picture time derivatives obey equations similar to Eq.\ (\ref{eqn:master1}), e.g.,
$\partial_t {x}^2(t) = i[H_0,{x}^2(t)] - \gamma \mathcal{E}({x}^2(t))$ where $ \mathcal{E}(M)/2 = M-\sum_j |j\rangle\langle j| M |j\rangle\langle j|$ eliminates the diagonal entries of $M$. When the initial state is a single particle at site $0$, $\rho(0)=|0\rangle\langle 0|$, then
\begin{equation*}
\langle{x}^2\rangle(t) = \mathbb{E}_\xi\text{tr}[{x}^2(t)\rho(0)]
= \frac{2t}{\gamma} + \frac{e^{-2\gamma t}-1}{\gamma^2} =:
f(\gamma,t)
\end{equation*}
Recalling the trivial bound $\rho_{j,j}(t) \leq 1$, using translational invariance, and noting that $\langle{x}^2\rangle(t) = \sum_j j^2 \rho_{j,j}(t)$ allows us to conclude that when we place the particle initially at site $k$, $\rho(0)=|k\rangle\langle k|$, then
\begin{equation}\label{eqn:variance-bound}
\rho_{j,j}(t) \leq \min \left\{ 1, {f(\gamma,t)}/{|j-k|^2}\right\}
\end{equation} One can define \emph{ensemble variance correlation functions} by
$v_{j,k}(t) =\mathbb{E}_\xi |c_{j,k}(\xi,t)|^2 - |\mathbb{E}_\xi c_{j,k}(\xi,t)|^2$. A little algebra verifies that $\mathbb{E}_\xi |c_{j,k}(\xi,t)|^2$ are precisely the diagonal elements $\rho_{j,j}(t)$ when $\rho(0)=|k\rangle\langle k|$; consequently the $v_{j,k}(t)$ also obey the bound (\ref{eqn:variance-bound}).
This bound implies that it is unlikely that we will stray far from the averaged dynamics given by $c_{j,k}(t)$ for small times $(2t/\gamma)^{1/2}\ll |j-k|$. By employing Chebyshev's inequality, which states that for a random variable $X$ with mean $\mu$ and finite variance $\sigma^2$, and for any positive real number $\kappa$, $\mathbb{P}\left( \left| X-\mu\right| \geq \kappa\sigma\right) \leq {1}/{\kappa^2}$, we can conclude that there is an effective light cone whose radius grows proportionally to ${f(\gamma,t)}^{1/2} \sim (2t/\gamma)^{1/2}$. In other words, information can---on average---propagate by a distance proportional to the square-root of the time elapsed, reminiscent of a classical random walk \cite{TailNote}.
To aid understanding of the above result, we express the averaged motion in the Heisenberg picture to find that {\it wave packets stop} in their motion: since $[H_0,p]=0$ and $\mathcal{E}(p)=2p$, the Heisenberg picture momentum obeys $\langle p\rangle (t)=\mathbb{E}_\xi \text{tr}[p(t)\rho(0)] = e^{-\gamma t} \langle p\rangle (0)$, so an initial excitation ``localizes'' in momentum space exponentially fast.
{\it Mixing properties of generic fluctuating disorder --- } What is the steady state the dynamics converges to on average for very long times? We now finally link the dynamics under disorder to unitary designs \cite{Design,Harrow}, showing that generically one will obtain the maximally mixed state on average. Consider the following master equation for a spin chain (with $\gamma>0$)
\begin{equation}\label{eqn:master2}
\partial_t \rho(t) = -i[\rho(t), H_0] - \gamma \sum_{j}
[[\rho(t),X_j],X_j]
\end{equation} Define the matrix $F$ by $-i [ H_0,B_{\bm\alpha}]= \sum_{\bm\beta} F_{{\bm\alpha},{\bm\beta}} B_{\bm\beta}$, where $B_{\bm\alpha} = \sigma^{\alpha_1}_1 \otimes\cdots\otimes \sigma^{\alpha_n}_n$ as before. The operators $X_j$ can without loss of generality be taken to be of the form $X_j=x\sigma_j^{0}+y\sigma_j^{z}$, and we write $I= \{ {\bm\alpha}: \alpha_j\in \{x,y\} \text{ for some } j\}$. If the operators governing the local disorder have full rank and if the submatrix of $F$ of entries $F_{{\bm\alpha},{\bm\beta}}$ for which ${\bm\alpha}\not\in I$ and ${\bm\beta}\in I$ is a matrix of maximum rank, then $\lim_{t\rightarrow\infty}\rho(t)=\lim_{t\rightarrow\infty} \mathbb{E}_\xi \rho(\xi,t)= (\mathbb{I}/2)^{\otimes n}$ \cite{Proof}, meaning that the state becomes maximally mixed. Almost all Hamiltonians $H_0$ and local fluctuations have this property. Time evolution under fluctuating disorder therefore approximates a unitary $1$-design \cite{Design,Harrow} arbitrarily well for large times and the system ``relaxes'' on average.
{\it Summary and outlook --- } In conclusion we have studied various regimes of fluctuating on-site disorder (noise) in quantum spin chains and we have identified several regimes of information propagation. These have been compared to the propagation regimes for similar models with both static disorder and no disorder. We have found that in some instances (namely those with low noise levels or fixed field direction) the localization due to fluctuating disorder is weaker than that caused by static disorder; conversely we have identified other regimes (those with high levels of unobservable noise) for which the localization is stronger than that caused by static noise. This highlights the complicated nature of disordered quantum systems. Indeed, such localization under time-dependent disorder should also be directly observable in experiments with cold atoms, exploiting similar ideas of fluctuating speckle potentials as used in the seminal experiment of Ref.\ \cite{Aspect}. One could even think of using fluctuating disorder in cold atom systems as a ``disorder filter'', shaping traveling wavepackets in their form or letting them stop.
It is a simple matter to extend the results above by replacing the intrinsic Hamiltonian $H_0$ with a time dependent one $H_0(t)$: the same master equation is obeyed. This would allow us to implement quantum logic gates on neighboring qubits hence providing a means with which to accomplish quantum computation. The important point is that this provides a more realistic model of noise than has been hitherto used by many authors, including Refs.\ \cite{Buh-et-al06}: the usual scheme is to assume perfect gate operation followed by an instantaneous error; our model allows one to analyse systems where the qubits are subject to noise and decoherence at all times and in particular whilst a gate is being applied.
{\it Acknowledgements ---} This work was supported by the EPSRC, the Nuffield Foundation, the EU (QAP, COMPAS), and the EURYI.
\end{document} |
\begin{document}
\begin{abstract}
We study the strong instability of standing waves for a system of nonlinear Schr\"odinger equations with quadratic interaction under the mass resonance condition in dimension $d=5$.
\end{abstract}
\maketitle
\section{Introduction}
\label{S:0}
We consider the system NLS equations
\begin{equation} \label{Syst-NLS}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
i\partial_t u + \frac{1}{2m} \Delta u & = & \lambda v \overline{u}, \\
i\partial_t v + \frac{1}{2M} \Delta v & = & \mu u^2,
\end{array}
\right.
\end{equation}
where $u, v: \mathbb{R} \times \mathbb{R}^d \rightarrow \mathbb{C}$, $m$ and $M$ are positive constants, $\Delta$ is the Laplacian in $\mathbb{R}^d$ and $\lambda, \mu$ are complex constants.
The system \eqref{Syst-NLS} is regarded as a non-relativistic limit of the system of nonlinear Klein-Gordon equations
\[
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
\frac{1}{2c^2m}\partial^2_t u - \frac{1}{2m} \Delta u + \frac{mc^2}{2} u& = & -\lambda v \overline{u}, \\
\frac{1}{2c^2M}\partial^2_t v - \frac{1}{2M} \Delta v + \frac{Mc^2}{2} v& = & -\mu u^2,
\end{array}
\right.
\]
under the mass resonance condition
\begin{align} \label{mas-res}
M=2m.
\end{align}
Indeed, the modulated wave functions $(u_c,v_c):= (e^{itmc^2} u, e^{itMc^2} v)$ satisfy
\begin{align}\label{klei-gord}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
\frac{1}{2c^2m} \partial^2_t u_c - i\partial_t u_c - \frac{1}{2m} \Delta u_c &=& - e^{itc^2(2m-M)} \lambda v_c \overline{u}_c,\\
\frac{1}{2c^2M} \partial^2_t v_c - i\partial_t v_c - \frac{1}{2M} \Delta v_c &=& - e^{itc^2(M-2m)} \mu u^2_c.
\end{array}
\right.
\end{align}
We see that the phase oscillations on the right-hand sides vanish if and only if \eqref{mas-res} holds, and the system \eqref{klei-gord} formally yields \eqref{Syst-NLS} as the speed of light $c$ tends to infinity. The system \eqref{Syst-NLS} also appears in the interaction process of wave propagation in quadratic media (see e.g. \cite{CMS}).
The system \eqref{Syst-NLS} has attracted a lot of interest in the past several years. The scattering theory and the asymptotic behavior of solutions have been studied in \cite{HLN, HLN-modi, HLO, OU}. The Cauchy problem for \eqref{Syst-NLS} in $L^2$, $H^1$ and in the weighted $L^2$ space $\langle x \rangle^{-1} L^2 = \mathcal{F}(H^1)$ under the mass resonance condition has been studied in \cite{HOT}. The space-time analytic smoothing effect has been studied in \cite{HO-1, HO-2, Hoshino}. The sharp threshold for scattering and blow-up for \eqref{Syst-NLS} under the mass resonance condition in dimension $d=5$ has been studied in \cite{Hamano}. The existence and stability of standing waves and the characterization of finite time blow-up solutions with minimal mass have been studied recently in \cite{Dinh}.
Let us recall the local well-posedness in $H^1$ for \eqref{Syst-NLS} due to \cite{HOT}. To ensure the conservation law of total charge, it is natural to consider the following condition:
\begin{align} \label{mas-con}
\exists ~ c \in \mathbb{R} \backslash \{0\} \ : \ \lambda = c \overline{\mu}.
\end{align}
\begin{proposition}[LWP in $H^1$ \cite{HOT}] \label{prop-lwp-h1}
Let $d\leq 6$ and let $\lambda$ and $\mu$ satisfy \eqref{mas-con}. Then for any $(u_0,v_0) \in H^1\times H^1$, there exists a unique pair of local solutions $(u,v) \in Y(I)\times Y(I)$ of \eqref{Syst-NLS} with initial data $(u(0), v(0))=(u_0,v_0)$, where
\begin{align*}
Y(I) = (C\cap L^\infty)(I,H^1) \cap L^4(I,W^{1,\infty}) &\text{ for } d=1, \\
Y(I) = (C\cap L^\infty)(I,H^1) \cap L^{q_0}(I,W^{1,{r_0}}) &\text{ for } d=2,
\end{align*}
where $0<\frac{2}{q_0}=1-\frac{2}{r_0}<1$ with $r_0$ sufficiently large,
\begin{align*}
Y(I) = (C\cap L^\infty)(I, H^1) \cap L^2(I, W^{1,\frac{2d}{d-2}}) &\text{ for } d\geq 3.
\end{align*}
Moreover, the solution satisfies the conservation of mass and energy: for all $t\in I$,
\begin{align*}
M(u(t),v(t))&:= \|u(t)\|^2_{L^2}+ c\|v(t)\|^2_{L^2} = M(u_0,v_0), \\
E(u(t),v(t))&:= \frac{1}{2m}\|\nabla u(t)\|^2_{L^2} + \frac{c}{4M} \|\nabla v(t)\|^2_{L^2} + \emph{Re} (\lambda \langle v(t), u^2(t) \rangle ) = E(u_0,v_0),
\end{align*}
where $\langle \cdot, \cdot \rangle$ is the scalar product in $L^2$.
\end{proposition}
We now assume that $\lambda$ and $\mu$ satisfy \eqref{mas-con} with $c>0$ and $\lambda \ne 0, \mu \ne 0$. By the change of variables
\[
u(t,x) \mapsto \sqrt{\frac{c}{2}} |\mu| u \left(t,\sqrt{\frac{1}{2m}} x \right), \quad v(t,x) \mapsto -\frac{\lambda}{2} v\left( t, \sqrt{\frac{1}{2m}} x\right),
\]
the system \eqref{Syst-NLS} becomes
\begin{equation} \label{cha-Syst}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
i\partial_t u + \Delta u & = & -2 v \overline{u}, \\
i\partial_t v + \kappa \Delta v & = & - u^2,
\end{array}
\right.
\end{equation}
where $\kappa=\frac{m}{M}$ is the mass ratio. Note that the mass and the energy now become
\begin{align*}
M(u(t),v(t)) &= \|u(t)\|^2_{L^2} + 2 \|v(t)\|^2_{L^2}, \\
E(u(t),v(t)) &= \frac{1}{2} (\|\nabla u(t)\|^2_{L^2} + \kappa \|\nabla v(t)\|^2_{L^2} ) - \text{Re}( \langle v(t), u^2(t)\rangle).
\end{align*}
The local well-posedness in $H^1$ for \eqref{cha-Syst} reads as follows.
\begin{proposition} [LWP in $H^1$] \label{prop-lwp-wor}
Let $d\leq 6$. Then for any $(u_0, v_0) \in H^1 \times H^1$, there exists a unique pair of local solutions $(u,v) \in Y(I) \times Y(I)$ of \eqref{cha-Syst} with initial data $(u(0), v(0))=(u_0,v_0)$. Moreover, the solution satisfies the conservation of mass and energy: for all $t \in I$,
\begin{align*}
M(u(t),v(t)) &:= \|u(t)\|^2_{L^2} + 2 \|v(t)\|^2_{L^2} = M(u_0,v_0), \\
E(u(t),v(t)) &:= \frac{1}{2} (\|\nabla u(t)\|^2_{L^2} + \kappa \|\nabla v(t)\|^2_{L^2}) - \emph{Re} (\langle v(t),u^2(t)\rangle) = E(u_0,v_0).
\end{align*}
\end{proposition}
The main purpose of this paper is to study the strong instability of standing waves for the system \eqref{cha-Syst} under the mass resonance condition $\kappa=\frac{1}{2}$ in dimension $d=5$. Let $d=5$ and consider
\begin{equation} \label{mas-res-Syst}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
i\partial_t u + \Delta u & = & -2 v \overline{u}, \\
i\partial_t v + \frac{1}{2} \Delta v & = & - u^2,
\end{array}
\right.
\end{equation}
We call a standing wave a solution to \eqref{mas-res-Syst} of the form $(e^{i\omega t} \phi_\omega, e^{i2\omega t} \psi_\omega)$, where $\omega \in \mathbb{R}$ is a frequency and $(\phi_\omega, \psi_\omega) \in H^1 \times H^1$ is a non-trivial solution to the elliptic system
\begin{equation} \label{ell-equ}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl} -\Delta \phi_\omega + \omega \phi_\omega & = & 2 \psi_\omega \overline{\phi}_\omega, \\ -\frac{1}{2} \Delta \psi_\omega + 2\omega \psi_\omega & = & \phi_\omega^2.\end{array}
\right.
\end{equation}
We are interested in showing the strong instability of ground state standing waves for \eqref{mas-res-Syst}. Let us first introduce the notion of ground states related to \eqref{mas-res-Syst}. Denote
\[
S_\omega(u,v):= E(u,v) + \frac{\omega}{2} M(u,v) = \frac{1}{2} K(u,v) + \frac{\omega}{2} M(u,v) - P(u,v),
\]
where
\[
K(u,v) = \|\nabla u\|^2_{L^2} + \frac{1}{2} \|\nabla v\|^2_{L^2}, \quad M(u,v) = \|u\|^2_{L^2} + 2 \|v\|^2_{L^2}, \quad P(u,v) = \text{Re} \int \overline{v} u^2 dx.
\]
We also denote the set of non-trivial solutions of \eqref{ell-equ} by
\[
\mathcal{A}_\omega:= \{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ S'_\omega(u,v) =0 \}.
\]
\begin{definition} \label{def-gro-sta-ins}
A pair of functions $(\phi,\psi) \in H^1 \times H^1$ is called ground state for \eqref{ell-equ} if it is a minimizer of $S_\omega$ over the set $\mathcal{A}_\omega$. The set of ground states is denoted by $\mathcal{G}_\omega$. In particular,
\[
\mathcal{G}_\omega= \{(\phi,\psi) \in \mathcal{A}_\omega \ : \ S_\omega(\phi,\psi) \leq S_\omega(u,v), \forall (u,v) \in \mathcal{A}_\omega \}.
\]
\end{definition}
We have the following result on the existence of ground states for \eqref{ell-equ}.
\begin{proposition} \label{prop-exi-gro-sta-ins}
Let $d=5$, $\kappa = \frac{1}{2}$ and $\omega>0$. Then the set $\mathcal{G}_\omega$ is not empty, and it is characterized by
\[
\mathcal{G}_\omega = \{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ S_\omega(u,v) = d(\omega), K_\omega(u,v) =0 \},
\]
where
\[
K_\omega(u,v) = \left. \partial_\gamma S_\omega(\gamma u, \gamma v) \right|_{\gamma=1} = K(u,v) + \omega M(u,v) - 3 P(u,v)
\]
is the Nehari functional and
\begin{align} \label{d-ome}
d(\omega) := \inf \{ S_\omega(u,v) \ : \ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\}, K_\omega(u,v) =0 \}.
\end{align}
\end{proposition}
The existence of real-valued ground states for \eqref{ell-equ} was proved in \cite{HOT} (actually for $d\leq 5$ and $\kappa>0$). Here we prove the existence of ground states (not necessarily real-valued) and establish their characterization. This characterization plays an important role in the study of the strong instability of ground state standing waves for \eqref{mas-res-Syst}. We only state and prove Proposition $\ref{prop-exi-gro-sta-ins}$ for $d=5$ and $\kappa=\frac{1}{2}$; however, the result remains valid for $d\leq 5$ and $\kappa>0$.
We also recall the definition of the strong instability of standing waves.
\begin{definition} \label{def-str-ins}
We say that the standing wave $(e^{i\omega t} \phi_\omega, e^{i2\omega t} \psi_\omega)$ is strongly unstable if for any ${\varepsilon}>0$, there exists $(u_0,v_0) \in H^1 \times H^1$ such that $\|(u_0,v_0) - (\phi_\omega, \psi_\omega)\|_{H^1 \times H^1} <{\varepsilon}$ and the corresponding solution $(u(t),v(t))$ to \eqref{mas-res-Syst} with initial data $(u(0), v(0))=(u_0,v_0)$ blows up in finite time.
\end{definition}
Our main result of this paper is the following.
\begin{theorem} \label{theo-str-ins}
Let $d=5$, $\kappa=\frac{1}{2}$, $\omega>0$ and $(\phi_\omega, \psi_\omega) \in \mathcal{G}_\omega$. Then the ground state standing waves $(e^{i\omega t} \phi_\omega, e^{i2\omega t} \psi_\omega)$ for \eqref{mas-res-Syst} is strongly unstable.
\end{theorem}
To our knowledge, this paper is the first one addressing the strong instability of standing waves for a system of nonlinear Schr\"odinger equations with quadratic interaction. In \cite{CCO-ins}, Colin-Colin-Ohta proved the instability of standing waves for a system of nonlinear Schr\"odinger equations with three-wave interaction. However, they only studied the orbital instability, not the strong instability by blow-up, and they only considered a special standing wave solution $(0,0,e^{2i\omega t} \varphi)$, where $\varphi$ is the unique positive radial solution to the elliptic equation
\[
-\Delta \varphi + 2 \omega \varphi - |\varphi|^{p-1} \varphi=0.
\]
This paper is organized as follows. In Section $\ref{S:1}$, we show the existence of ground states and its characterization given in Proposition $\ref{prop-exi-gro-sta-ins}$. In Section $\ref{S:2}$, we give the proof of the strong instability of standing waves given in Theorem $\ref{theo-str-ins}$.
\section{Existence of ground states}
\label{S:1}
We first show the existence of ground states given in Proposition $\ref{prop-exi-gro-sta-ins}$. To do so, we need the following profile decomposition.
\begin{proposition}[Profile decomposition] \label{prop-pro-dec-5D}
Let $d=5$ and $\kappa=\frac{1}{2}$. Let $(u_n,v_n)_{n\geq 1}$ be a bounded sequence in $H^1 \times H^1$. Then there exist a subsequence, still denoted by $(u_n,v_n)_{n\geq 1}$, a family $(x^j_n)_{n\geq 1}$ of sequences in $\mathbb{R}^5$ and a sequence $(U^j, V^j)_{j\geq 1}$ of $H^1\times H^1$-functions such that
\begin{itemize}
\item[(1)] for every $j\ne k$,
\begin{align} \label{ort-pro-dec-5D}
|x^j_n-x^k_n| \rightarrow \infty \text{ as } n\rightarrow \infty;
\end{align}
\item[(2)] for every $l\geq 1$ and every $x \in \mathbb{R}^5$,
\[
u_n(x) = \sum_{j=1}^l U^j(x-x^j_n) + u^l_n(x), \quad v_n(x)= \sum_{j=1}^l V^j(x-x^j_n) + v^l_n(x),
\]
with
\begin{align} \label{err-pro-dec-5D}
\limsup_{n\rightarrow \infty} \|(u^l_n, v^l_n)\|_{L^q\times L^q} \rightarrow 0 \text{ as } l \rightarrow \infty,
\end{align}
for every $q\in (2, 10/3)$.
\end{itemize}
Moreover, for every $l\geq 1$,
\begin{align}
M(u_n,v_n) &= \sum_{j=1}^l M(U^j, V^j) + M(u^l_n,v^l_n) + o_n(1), \label{mas-pro-dec-5D} \\
K(u_n,v_n) &= \sum_{j=1}^l K(U^j,V^j) + K(u^l_n, v^l_n) + o_n(1), \label{kin-pro-dec-5D} \\
P(u_n,v_n) &= \sum_{j=1}^l P(U^j,V^j) + P(u^l_n, v^l_n) + o_n(1), \label{sca-pro-dec-5D}
\end{align}
where $o_n(1) \rightarrow 0$ as $n\rightarrow \infty$.
\end{proposition}
We refer the reader to \cite[Proposition 3.5]{Dinh} for the proof of this profile decomposition.
The proof of Proposition $\ref{prop-exi-gro-sta-ins}$ is done by several lemmas. To simplify the notation, we denote for $\omega>0$,
\[
H_\omega(u,v):= K(u,v) + \omega M(u,v).
\]
It is easy to see that for $\omega>0$ fixed,
\begin{align} \label{equ-nor}
H_\omega(u,v) \sim \|(u,v)\|_{H^1 \times H^1}.
\end{align}
Note also that
\[
S_\omega(u,v)=\frac{1}{2} K_\omega(u,v)+\frac{1}{2}P(u,v)= \frac{1}{3}K_\omega(u,v)+\frac{1}{6} H_\omega(u,v).
\]
\begin{lemma} \label{lem-pos-d-ome}
$d(\omega)>0$.
\end{lemma}
\begin{proof}
Let $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$ be such that $K_\omega(u,v)=0$, that is, $H_\omega(u,v) = 3 P(u,v)$. We have from the Sobolev embedding $H^1 \hookrightarrow L^3$ (valid in dimension $5$) and \eqref{equ-nor} that
\begin{align*}
P(u,v) \leq \int |v| |u|^2 dx \lesssim \|v\|_{L^3} \|u\|^2_{L^3} \lesssim \|v\|_{H^1} \|u\|^2_{H^1} \lesssim [H_\omega(u,v)]^{3/2} \lesssim [P(u,v)]^{3/2}.
\end{align*}
Since $H_\omega(u,v)>0$ for $(u,v) \ne (0,0)$, we have $P(u,v) = \frac{1}{3} H_\omega(u,v)>0$, and the above estimate implies that there exists $C>0$ such that
\[
P(u,v) \geq \frac{1}{C^2}>0.
\]
Thus
\[
S_\omega(u,v) = \frac{1}{2} K_\omega(u,v) + \frac{1}{2} P(u,v) = \frac{1}{2} P(u,v) \geq \frac{1}{2C^2}>0.
\]
Taking the infimum over all $(u,v)\in H^1 \times H^1 \backslash \{(0,0)\}$ satisfying $K_\omega(u,v)=0$, we get the result.
\end{proof}
We now denote the set of all minimizers for $d(\omega)$ by
\[
\mathcal{M}_\omega := \left\{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ K_\omega(u,v) =0, S_\omega(u,v) = d(\omega) \right\}.
\]
\begin{lemma} \label{lem-no-emp-M-ome}
The set $\mathcal{M}_\omega$ is not empty.
\end{lemma}
\begin{proof}
Let $(u_n,v_n)_{n\geq 1}$ be a minimizing sequence for $d(\omega)$, i.e. $(u_n, v_n) \in H^1 \times H^1 \backslash \{(0,0)\}$, $K_\omega(u_n,v_n) =0$ for any $n\geq 1$ and $\lim_{n\rightarrow \infty} S_\omega(u_n,v_n) = d(\omega)$. Since $K_\omega(u_n,v_n) = 0$, we have that $H_\omega(u_n,v_n) = 3 P(u_n,v_n)$ for any $n\geq 1$. We also have that
\[
S_\omega (u_n, v_n) = \frac{1}{3} K_\omega(u_n,v_n) + \frac{1}{6}H_\omega(u_n,v_n) \rightarrow d(\omega) \text{ as } n\rightarrow \infty.
\]
This yields that there exists $C>0$ such that
\[
H_\omega(u_n,v_n) \leq 6 d(\omega) + C
\]
for all $n\geq 1$. By \eqref{equ-nor}, $(u_n,v_n)_{n\geq 1}$ is a bounded sequence in $H^1 \times H^1$. We apply the profile decomposition given in Proposition $\ref{prop-pro-dec-5D}$ to get up to a subsequence,
\[
u_n(x) = \sum_{j=1}^l U^j(x-x^j_n) + u^l_n(x), \quad v_n(x) = \sum_{j=1}^l V^j(x-x^j_n) + v^l_n(x)
\]
for some family of sequences $(x^j_n)_{n\geq 1}$ in $\mathbb{R}^5$ and $(U^j,V^j)_{j\geq 1}$ a sequence of $H^1 \times H^1$-functions satisfying \eqref{err-pro-dec-5D} -- \eqref{sca-pro-dec-5D}. We see that
\[
H_\omega (u_n,v_n)=\sum_{j=1}^l H_\omega (U^j,V^j) + H_\omega(u^l_n, v^l_n) + o_n(1).
\]
This implies that
\begin{align*}
K_\omega(u_n,v_n)&=H_\omega(u_n,v_n)-3P(u_n,v_n)\\
&=\sum_{j=1}^l H_\omega(U^j,V^j) + H_\omega(u^l_n,v^l_n) - 3P(u_n,v_n)+ o_n(1) \\
&=\sum_{j=1}^l K_\omega(U^j,V^j) + 3\sum_{j=1}^l P(U^j,V^j)- 3 P(u_n,v_n) + H_\omega(u^l_n,v^l_n)+o_n(1).
\end{align*}
Since $K_\omega(u_n,v_n)=0$ for any $n\geq 1$, $P(u_n,v_n) \rightarrow 2 d(\omega)$ as $n\rightarrow \infty$ and $H_\omega(u^l_n,v^l_n) \geq 0$ for any $n\geq 1$, we infer that
\[
\sum_{j=1}^l K_\omega(U^j,V^j) + 3 \sum_{j=1}^l P(U^j,V^j) - 6 d(\omega) \leq 0
\]
or
\[
\sum_{j=1}^l H_\omega (U^j,V^j) - 6d(\omega) \leq 0.
\]
By H\"older's inequality and \eqref{err-pro-dec-5D}, it is easy to see that $\limsup_{n\rightarrow \infty} P(u_n^l, v^l_n) =0$ as $l\rightarrow \infty$. Thanks to \eqref{sca-pro-dec-5D}, we have that
\[
2d(\omega) = \lim_{n\rightarrow \infty} P(u_n,v_n) = \sum_{j=1}^\infty P(U^j,V^j).
\]
We thus obtain
\begin{align} \label{pro-dec-app-5D}
\sum_{j=1}^\infty K_\omega(U^j,V^j) \leq 0 \text{ and } \sum_{j=1}^\infty H_\omega(U^j,V^j) \leq 6d(\omega).
\end{align}
We now claim that $K_\omega(U^j,V^j) =0$ for all $j\geq 1$. Indeed, suppose that there exists $j_0 \geq 1$ such that $K_\omega(U^{j_0},V^{j_0}) <0$. Then we see that the equation $K_\omega(\gamma U^{j_0}, \gamma V^{j_0}) = \gamma^2 H_\omega(U^{j_0},V^{j_0}) - 3 \gamma^3 P(U^{j_0},V^{j_0})=0$ admits a unique non-zero solution
\[
\gamma_0 := \frac{H_\omega(U^{j_0},V^{j_0})}{3 P(U^{j_0}, V^{j_0})} \in (0,1).
\]
By the definition of $d(\omega)$, we have
\[
d(\omega) \leq S_\omega(\gamma_0 U^{j_0}, \gamma_0 V^{j_0}) = \frac{1}{6} H_\omega(\gamma_0 U^{j_0}, \gamma_0 V^{j_0}) = \frac{\gamma_0^2}{6} H_\omega(U^{j_0},V^{j_0}) <\frac{1}{6} H_\omega(U^{j_0},V^{j_0})
\]
which contradicts the second inequality in \eqref{pro-dec-app-5D}. We next claim that there exists only one $j$ such that $(U^j,V^j)$ is non-zero. Indeed, if there are $(U^{j_1},V^{j_1})$ and $(U^{j_2},V^{j_2})$ non-zero, then by \eqref{pro-dec-app-5D}, both $H_\omega(U^{j_1},V^{j_1})$ and $H_\omega(U^{j_2},V^{j_2})$ are strictly smaller than $6d(\omega)$. Moreover, since $K_\omega(U^{j_1},V^{j_1}) =0$,
\[
d(\omega) \leq S_\omega(U^{j_1},V^{j_1}) = \frac{1}{6} H_\omega(U^{j_1},V^{j_1}) <d(\omega)
\]
which is absurd. Therefore, without loss of generality we may assume that the only one non-zero profile is $(U^1,V^1)$. We will show that $(U^1,V^1) \in \mathcal{M}_\omega$. Indeed, we have $P(U^1,V^1) = 2d(\omega)>0$ which implies $(U^1,V^1) \ne (0,0)$. We also have
\[
K_\omega(U^1,V^1) =0 \text{ and } S_\omega(U^1,V^1) = \frac{1}{2} P(U^1,V^1) =d(\omega).
\]
This shows that $(U^1,V^1)$ is a minimizer for $d(\omega)$. The proof is complete.
\end{proof}
\begin{lemma} \label{lem-inc-M-G}
$\mathcal{M}_\omega \subset \mathcal{G}_\omega$.
\end{lemma}
\begin{proof}
Let $(\phi,\psi) \in \mathcal{M}_\omega$. Since $K_\omega(\phi,\psi) =0$, we have $H_\omega(\phi,\psi) = 3 P(\phi,\psi)$. On the other hand, since $(\phi,\psi)$ is a minimizer for $d(\omega)$, there exists a Lagrange multiplier $\gamma \in \mathbb{R}$ such that
\[
S'_\omega(\phi,\psi) = \gamma K'_\omega(\phi,\psi).
\]
This implies that
\[
0 = K_\omega(\phi,\psi) = \langle S'_\omega(\phi,\psi), (\phi,\psi)\rangle = \gamma \langle K'_\omega(\phi,\psi), (\phi,\psi)\rangle.
\]
A direct computation shows that
\[
\langle K'_\omega(\phi,\psi), (\phi,\psi)\rangle = 2 K(\phi,\psi) + 2 \omega M(\phi,\psi) - 9 P(\phi,\psi) = 2 H_\omega(\phi,\psi) - 9 P(\phi,\psi) = - 3P(\phi,\psi) <0.
\]
Therefore, $\gamma = 0$ and $S'_\omega(\phi,\psi) =0$ or $(\phi,\psi) \in \mathcal{A}_\omega$. It remains to show that $S_\omega(\phi,\psi) \leq S_\omega(u,v)$ for all $(u,v) \in \mathcal{A}_\omega$. Let $(u,v) \in\mathcal{A}_\omega$. We have $K_\omega(u,v) = \langle S'_\omega(u,v), (u,v) \rangle =0$. By the definition of $d(\omega)$, we get $S_\omega(\phi,\psi) \leq S_\omega(u,v)$. The proof is complete.
\end{proof}
\begin{lemma} \label{lem-inc-G-M}
$\mathcal{G}_\omega \subset \mathcal{M}_\omega$.
\end{lemma}
\begin{proof}
Let $(\phi_\omega, \psi_\omega) \in \mathcal{G}_\omega$. Since $\mathcal{M}_\omega$ is not empty, we take $(\phi,\psi) \in \mathcal{M}_\omega$. We have from Lemma $\ref{lem-inc-M-G}$ that $(\phi,\psi) \in \mathcal{G}_\omega$. Thus, $S_\omega(\phi_\omega,\psi_\omega) = S_\omega(\phi, \psi)=d(\omega)$. It remains to show that $K_\omega(\phi_\omega,\psi_\omega)=0$. Since $(\phi_\omega,\psi_\omega) \in \mathcal{A}_\omega$, $S'_\omega(\phi_\omega,\psi_\omega)=0$. This implies that
\[
K_\omega(\phi_\omega,\psi_\omega) = \langle S'_\omega(\phi_\omega,\psi_\omega), (\phi_\omega,\psi_\omega) \rangle =0.
\]
The proof is complete.
\end{proof}
\noindent \textit{Proof of Proposition $\ref{prop-exi-gro-sta-ins}$.}
The proof of Proposition $\ref{prop-exi-gro-sta-ins}$ follows immediately from Lemmas $\ref{lem-no-emp-M-ome}$, $\ref{lem-inc-M-G}$ and $\ref{lem-inc-G-M}$.
$\Box$
\section{Strong instability of standing waves}
\label{S:2}
We are now able to study the strong instability of standing waves for \eqref{mas-res-Syst}. Note that the local well-posedness in $H^1 \times H^1$ for \eqref{mas-res-Syst} in 5D is given in Proposition $\ref{prop-lwp-wor}$. Let us start with the following so-called Pohozaev's identities.
\begin{lemma} \label{lem-poh-ide}
Let $d=5$, $\kappa=\frac{1}{2}$ and $\omega>0$. Let $(\phi_\omega,\psi_\omega) \in H^1 \times H^1$ be a solution to \eqref{ell-equ}. Then the following identities hold
\[
2 K(\phi_\omega, \psi_\omega) = 5 P(\phi_\omega, \psi_\omega), \quad 2\omega M(\phi_\omega, \psi_\omega) = P(\phi_\omega,\psi_\omega).
\]
\end{lemma}
\begin{proof}
We only make a formal calculation. The rigorous proof follows from a standard approximation argument. Multiplying both sides of the first equation in \eqref{ell-equ} with $\overline{\phi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we have
\[
\|\nabla \phi_\omega\|^2_{L^2} + \omega \|\phi_\omega\|^2_{L^2}= 2 \text{Re} \int \overline{\psi}_\omega \phi_\omega^2 dx.
\]
Multiplying both sides of the second equation in \eqref{ell-equ} with $\overline{\psi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we get
\[
\frac{1}{2} \|\nabla \psi_\omega\|^2_{L^2} + 2 \omega \|\psi_\omega\|^2_{L^2} = \text{Re} \int \overline{\psi}_\omega \phi_\omega^2 dx.
\]
We thus obtain
\begin{align} \label{poh-ide-pro-1}
K(\phi_\omega,\psi_\omega) + 2 \omega M(\phi_\omega,\psi_\omega) = 3 P(\phi_\omega,\psi_\omega).
\end{align}
Multiplying both sides of the first equation in \eqref{ell-equ} with $x \cdot \nabla \overline{\phi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we see that
\[
-\text{Re} \int \Delta \phi_\omega x \cdot \nabla \overline{\phi}_\omega dx + \omega \text{Re} \int \phi_\omega x \cdot \nabla \overline{\phi}_\omega dx = 2 \text{Re} \int \psi_\omega \overline{\phi}_\omega x \cdot \nabla \overline{\phi}_\omega dx.
\]
A direct computation shows that
\begin{align*}
\text{Re} \int \Delta\phi_\omega x \cdot \nabla \overline{\phi}_\omega dx &=\frac{3}{2} \|\nabla \phi_\omega\|^2_{L^2}, \\
\text{Re} \int \phi_\omega x \cdot \nabla \overline{\phi}_\omega dx &= -\frac{5}{2} \|\phi_\omega\|^2_{L^2}, \\
\text{Re} \int \psi_\omega \overline{\phi}_\omega x \cdot \nabla \overline{\phi}_\omega dx &= -\frac{5}{2} \text{Re} \int \overline{\psi}_\omega (\phi_\omega)^2 dx - \frac{1}{2} \text{Re} \int \phi_\omega^2 x\cdot \nabla \overline{\psi}_\omega dx.
\end{align*}
It follows that
\[
-\frac{3}{2} \|\nabla \phi_\omega\|^2_{L^2} - \frac{5}{2} \omega \|\phi_\omega\|^2_{L^2} = - 5 \text{Re} \int \overline{\psi}_\omega \phi^2_\omega dx - \text{Re} \int \phi^2_\omega x \cdot \nabla \overline{\psi}_\omega dx.
\]
Similarly, multiplying both sides of the second equation in \eqref{ell-equ} with $x \cdot \nabla \overline{\psi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we have
\[
-\frac{3}{4} \|\nabla \psi_\omega\|^2_{L^2} - 5 \omega \|\psi_\omega\|^2_{L^2} = \text{Re} \int \phi^2_\omega x\cdot \nabla \overline{\psi}_\omega dx.
\]
We thus get
\begin{align} \label{poh-ide-pro-2}
\frac{3}{2} K(\phi_\omega,\psi_\omega) + \frac{5}{2} \omega M(\phi_\omega,\psi_\omega) = 5 P(\phi_\omega,\psi_\omega).
\end{align}
Combining \eqref{poh-ide-pro-1} and \eqref{poh-ide-pro-2}, we prove the result.
\end{proof}
We also have the following exponential decay of solutions to \eqref{ell-equ}.
\begin{lemma} \label{lem-dec-pro-gro-sta}
Let $d=5$, $\kappa=\frac{1}{2}$ and $\omega>0$. Let $(\phi_\omega,\psi_\omega) \in H^1 \times H^1$ be a solution to \eqref{ell-equ}. Then the following properties hold
\begin{itemize}
\item $(\phi_\omega,\psi_\omega) \in W^{3,p} \times W^{3,p}$ for every $2 \leq p <\infty$. In particular, $(\phi_\omega,\psi_\omega) \in C^2 \times C^2$ and $|D^\beta \phi_\omega(x)| + |D^\beta \psi_\omega (x)| \rightarrow 0$ as $|x| \rightarrow \infty$ for all $|\beta| \leq 2$;
\item
\[
\int e^{|x|} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx <\infty, \quad \int e^{|x|} (|\nabla \psi_\omega|^2 + 4|\psi_\omega|^2) dx <\infty.
\]
In particular, $(|x| \phi_\omega, |x| \psi_\omega) \in L^2 \times L^2$.
\end{itemize}
\end{lemma}
\begin{proof}
The proof follows the argument of \cite[Theorem 8.1.1]{Cazenave}. Let us prove the first item. We note that if $(\phi_\omega, \psi_\omega) \in L^p \times L^p$ for some $2 \leq p<\infty$, then $\psi_\omega \overline{\phi}_\omega, \phi^2_\omega \in L^{\frac{p}{2}}$. It follows that $(\phi_\omega, \psi_\omega) \in W^{2,\frac{p}{2}} \times W^{2,\frac{p}{2}}$. By Sobolev embedding, we see that
\begin{align} \label{dec-pro-pro-1}
(\phi_\omega,\psi_\omega) \in L^q \times L^q \text{ for some } q \geq \frac{p}{2} \text{ satisfying } \frac{1}{q} \geq \frac{2}{p} - \frac{2}{5}.
\end{align}
We claim that $(\phi_\omega,\psi_\omega) \in L^p \times L^p$ for any $2 \leq p<\infty$. Since $(\phi_\omega,\psi_\omega) \in H^1 \times H^1$, the Sobolev embedding implies that $(\phi_\omega, \psi_\omega) \in L^p \times L^p$ for any $2 \leq p<\frac{10}{3}$. It remains to show the claim for any $p$ sufficiently large. To see it, we define the sequence
\[
\frac{1}{q_n} = 2^n \left( -\frac{1}{15} + \frac{2}{5 \times 2^n} \right).
\]
We have
\[
\frac{1}{q_{n+1}} -\frac{1}{q_n} = -\frac{1}{15} \times 2^n <0.
\]
This implies that $\frac{1}{q_n}$ is decreasing and $\frac{1}{q_n} \rightarrow -\infty$ as $n\rightarrow \infty$. Since $q_0= 3$ (we take $(\phi_\omega, \psi_\omega) \in L^3 \times L^3$ to prove our claim), it follows that there exists $k \geq 0$ such that
\[
\frac{1}{q_n} >0 \text{ for } 0 \leq n \leq k \text{ and } \frac{1}{q_{n+1}} \leq 0.
\]
We will show that $(\phi_\omega, \psi_\omega) \in L^{q_k} \times L^{q_k}$. If $(\phi_\omega, \psi_\omega) \in L^{q_{n_0}} \times L^{q_{n_0}}$ for some $0 \leq n_0 \leq k-1$, then by \eqref{dec-pro-pro-1}, $(\phi_\omega,\psi_\omega) \in L^q \times L^q$ for some $q \geq \frac{q_{n_0}}{2}$ satisfying $\frac{1}{q} \geq \frac{2}{q_{n_0}} - \frac{2}{5}$. By the choice of $q_n$, it is easy to check that $\frac{2}{q_{n_0}} - \frac{2}{5} = \frac{2}{q_{n_0+1}}$. In particular, $(\phi_\omega,\psi_\omega) \in L^{q_{n_0+1}} \times L^{q_{n_0+1}}$. By induction, we prove $(\phi_\omega, \psi_\omega) \in L^{q_k} \times L^{q_k}$. Applying again \eqref{dec-pro-pro-1}, we have
\[
(\phi_\omega,\psi_\omega) \in L^q \times L^q \text{ for all } q \geq \frac{q_k}{2} \text{ such that } \frac{1}{q} \geq \frac{1}{q_{k+1}}.
\]
This shows that $(\phi_\omega, \psi_\omega)$ belongs to $L^p \times L^p$ for any $p$ sufficiently large. The claim follows. Using the claim, we have in particular $\psi_\omega \overline{\phi}_\omega, \phi^2_\omega \in L^p$ for any $2 \leq p<\infty$. Hence $(\phi_\omega, \psi_\omega) \in W^{2,p} \times W^{2,p}$ for any $2 \leq p<\infty$. By H\"older's inequality, we see that $\partial_j(\psi_\omega \overline{\phi}_\omega), \partial_j(\phi^2_\omega) \in L^p$ for any $2 \leq p<\infty$ and any $ 1\leq j \leq 5$. Thus $(\partial_j \phi_\omega, \partial_j \psi_\omega) \in W^{2,p} \times W^{2,p}$ for any $2 \leq p<\infty$ and any $1 \leq j \leq 5$, or $(\phi_\omega,\psi_\omega) \in W^{3,p} \times W^{3,p}$ for any $2 \leq p<\infty$. By Sobolev embedding, $(\phi_\omega,\psi_\omega) \in C^{2,\delta} \times C^{2,\delta}$ for all $0<\delta <1$. In particular, $|D^\beta \phi_\omega(x)| + |D^\beta \psi_\omega(x)| \rightarrow 0$ as $|x| \rightarrow \infty$ for all $|\beta| \leq 2$.
We now prove the second item. Let ${\varepsilon}>0$ and set $\chi_{\varepsilon}(x) := e^{\frac{|x|}{1+{\varepsilon} |x|}}$. For each ${\varepsilon}>0$, the function $\chi_{\varepsilon}$ is bounded, Lipschitz continuous and satisfies $|\nabla \chi_{\varepsilon}| \leq \chi_{\varepsilon}$ a.e. Multiplying both sides of the first equation in \eqref{ell-equ} by $\chi_{\varepsilon} \overline{\phi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we have
\[
\text{Re} \int \nabla \phi_\omega \cdot \nabla (\chi_{\varepsilon} \overline{\phi}_\omega) dx + \int \chi_{\varepsilon} |\phi_\omega|^2 dx = 2 \text{Re} \int \chi_{\varepsilon} \psi_\omega \overline{\phi}^2_\omega dx.
\]
Since $\nabla(\chi_{\varepsilon} \overline{\phi}_\omega) = \chi_{\varepsilon} \nabla \overline{\phi}_\omega + \nabla \chi_{\varepsilon} \overline{\phi}_\omega$, the Cauchy-Schwarz inequality implies that
\begin{align*}
\text{Re} \int \nabla \phi_\omega \cdot \nabla (\chi_{\varepsilon} \overline{\phi}_\omega) dx &= \int \chi_{\varepsilon} |\nabla \phi_\omega|^2 dx + \text{Re} \int \nabla \chi_{\varepsilon} \nabla \phi_\omega \overline{\phi}_\omega dx \\
&\geq \int \chi_{\varepsilon} |\nabla \phi_\omega|^2 dx - \int |\nabla \chi_{\varepsilon}| |\nabla \phi_\omega| |\phi_\omega| dx \\
& \geq \int \chi_{\varepsilon} |\nabla \phi_\omega|^2 dx - \frac{1}{2} \int \chi_{\varepsilon} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx.
\end{align*}
We thus get
\begin{align} \label{dec-pro-pro-2}
\int \chi_{\varepsilon} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx \leq 4 \text{Re} \int \chi_{\varepsilon} \psi_\omega \overline{\phi}^2_\omega dx.
\end{align}
Similarly, multiplying both sides of the second equation in \eqref{ell-equ} with $\chi_{\varepsilon} \overline{\psi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we get
\begin{align} \label{dec-pro-pro-3}
\int \chi_{\varepsilon}(|\nabla \psi_\omega|^2 + 4 |\psi_\omega|^2) dx \leq \frac{8}{3} \text{Re} \int \chi_{\varepsilon} \overline{\psi}_\omega \phi^2_\omega dx.
\end{align}
By the first item, there exists $R>0$ large enough such that $|\psi_\omega(x)| \leq \frac{1}{8}$ for $|x| \geq R$. We have that
\begin{align*}
4 \text{Re} \int \chi_{\varepsilon} \psi_\omega \overline{\phi}^2_\omega dx & \leq 4 \int \chi_{\varepsilon} |\psi_\omega| |\phi_\omega|^2 dx \\
&=4 \int_{|x| \leq R} \chi_{\varepsilon} |\psi_\omega| |\phi_\omega|^2 dx + 4 \int_{|x| \geq R} \chi_{\varepsilon} |\psi_\omega| |\phi_\omega|^2 dx \\
&\leq 4 \int_{|x| \leq R} e^{|x|} |\psi_\omega| |\phi_\omega|^2 dx + \frac{1}{2} \int \chi_{\varepsilon} |\phi_\omega|^2 dx.
\end{align*}
We thus get from \eqref{dec-pro-pro-2} that
\begin{align} \label{dec-pro-pro-4}
\int \chi_{\varepsilon} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx \leq 8 \int_{|x| \leq R} e^{|x|} |\psi_\omega||\phi_\omega|^2 dx.
\end{align}
Letting ${\varepsilon} \rightarrow 0$, we obtain
\[
\int e^{|x|}( |\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx \leq 8 \int_{|x| \leq R} e^{|x|} |\psi_\omega||\phi_\omega|^2 dx <\infty.
\]
Similarly, by \eqref{dec-pro-pro-3} and \eqref{dec-pro-pro-4},
\begin{align*}
\int \chi_{\varepsilon} (|\nabla \psi_\omega|^2 + 4 |\psi_\omega|^2) dx &\leq \frac{2}{3} \left(4 \int_{|x| \leq R} e^{|x|} |\psi_\omega| |\phi_\omega|^2 dx + \frac{1}{2} \int \chi_{\varepsilon} |\phi_\omega|^2 dx \right) \\
&\leq \frac{16}{3} \int_{|x| \leq R} e^{|x|} |\psi_\omega| |\phi_\omega|^2 dx.
\end{align*}
Letting ${\varepsilon} \rightarrow 0$, we get
\[
\int e^{|x|}( |\nabla \psi_\omega|^2 + 4|\psi_\omega|^2) dx \leq \frac{16}{3} \int_{|x| \leq R} e^{|x|} |\psi_\omega||\phi_\omega|^2 dx <\infty.
\]
The proof is complete.
\end{proof}
We also need the following virial identity related to \eqref{mas-res-Syst}.
\begin{lemma} \label{lem-vir-ide-ins}
Let $d=5$ and $\kappa=\frac{1}{2}$. Let $(u_0,v_0) \in H^1 \times H^1$ be such that $(|x|u_0, |x| v_0) \in L^2 \times L^2$. Then the corresponding solution to \eqref{mas-res-Syst} with initial data $(u(0),v(0)) = (u_0,v_0)$ satisfies
\begin{align*}
\frac{d^2}{dt^2} (\|xu(t)\|^2_{L^2} + 2 \|xv(t)\|^2_{L^2}) = 8 \left(\|\nabla u(t)\|^2_{L^2} + \frac{1}{2} \|\nabla v(t)\|^2_{L^2}\right) - 20 \mathrm{Re} \int \overline{v}(t) u^2(t) dx.
\end{align*}
\end{lemma}
\begin{proof}
The above identity follows immediately from \cite[Lemma 3.1]{Dinh} with $\chi(x) = |x|^2$.
\end{proof}
Now let us denote for $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$,
\[
Q(u,v) := K(u,v) - \frac{5}{2} P(u,v).
\]
It is obvious that
\begin{align} \label{vir-ide-ins}
\frac{d^2}{dt^2} (\|xu(t)\|^2_{L^2} + 2 \|xv(t)\|^2_{L^2}) = 8 Q(u(t),v(t)).
\end{align}
Note that if we take
\begin{align} \label{scaling}
u^\gamma(x) = \gamma^{\frac{5}{2}} u (\gamma x), \quad v^\gamma(x) = \gamma^{\frac{5}{2}} v(\gamma x),
\end{align}
then
\begin{align*}
S_\omega(u^\gamma, v^\gamma) &= \frac{1}{2} K(u^\gamma,v^\gamma) + \frac{\omega}{2} M(u^\gamma,v^\gamma) - P(u^\gamma,v^\gamma) \\
&=\frac{\gamma^2}{2} K(u,v) + \frac{\omega}{2} M(u,v) - \gamma^{\frac{5}{2}} P(u,v).
\end{align*}
It is easy to see that
\[
Q(u,v) = \left. \partial_\gamma S_\omega(u^\gamma, v^\gamma) \right|_{\gamma=1}.
\]
\begin{lemma} \label{lem-cha-gro-sta-5D}
Let $d=5$, $\kappa=\frac{1}{2}$ and $\omega>0$. Let $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. Then
\[
S_\omega(\phi_\omega,\psi_\omega) = \inf \left\{ S_\omega(u,v) \ : \ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\}, Q(u,v)=0 \right\}.
\]
\end{lemma}
\begin{proof}
Denote $m:= \inf \left\{ S_\omega(u,v) \ : \ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\}, Q(u,v)=0 \right\}$. Since $(\phi_\omega,\psi_\omega)$ is a solution of \eqref{ell-equ}, it follows from Lemma $\ref{lem-poh-ide}$ that $Q(\phi_\omega,\psi_\omega) =K_\omega(\phi_\omega,\psi_\omega)=0$. Thus
\begin{align} \label{cha-gro-sta-5D-pro-1}
S_\omega(\phi_\omega,\psi_\omega) \geq m.
\end{align}
Now let $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$ be such that $Q(u,v) =0$. If $K_\omega(u,v) =0$, then by Proposition $\ref{prop-exi-gro-sta-ins}$, $S_\omega(u,v) \geq S_\omega(\phi_\omega,\psi_\omega)$. If $K_\omega(u,v) \ne 0$, we consider $K_\omega(u^\gamma, v^\gamma) = \gamma^2 K(u,v) + \omega M(u,v) - \gamma^{\frac{5}{2}} P(u,v)$, where $(u^\gamma, v^\gamma)$ is as in \eqref{scaling}. Since $\lim_{\gamma \rightarrow 0} K_\omega(u^\gamma, v^\gamma)= \omega M(u,v) >0$ and $\lim_{\gamma \rightarrow \infty} K_\omega(u^\gamma, v^\gamma) = -\infty$, there exists $\gamma_0>0$ such that $K_\omega(u^{\gamma_0},v^{\gamma_0}) =0$. It again follows from Proposition $\ref{prop-exi-gro-sta-ins}$ that $S_\omega(u^{\gamma_0},v^{\gamma_0}) \geq S_\omega(\phi_\omega,\psi_\omega)$. On the other hand,
\[
\partial_\gamma S_\omega(u^\gamma,v^\gamma) = \gamma K(u,v) - \frac{5}{2} \gamma^{\frac{3}{2}} P(u,v).
\]
We see that the equation $\partial_\gamma S_\omega(u^\gamma, v^\gamma) =0$ admits a unique non-zero solution
\[
\gamma = \left( \frac{2K(u,v)}{5P(u,v)} \right)^2=1
\]
since $Q(u,v) =0$. This implies that $\partial_\gamma S_\omega(u^\gamma, v^\gamma)>0$ if $\gamma \in (0,1)$ and $\partial_\gamma S_\omega(u^\gamma,v^\gamma)<0$ if $\gamma \in (1,\infty)$. In particular, $S_\omega(u^\gamma,v^\gamma) \leq S_\omega(u,v)$ for all $\gamma >0$. Hence $S_\omega(u^{\gamma_0},v^{\gamma_0}) \leq S_\omega(u,v)$. We thus obtain $S_\omega(\phi_\omega,\psi_\omega) \leq S_\omega(u,v)$ for any $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$ satisfying $Q(u,v)=0$. Therefore,
\begin{align} \label{cha-gro-sta-5D-pro-2}
S_\omega(\phi_\omega,\psi_\omega) \leq m.
\end{align}
Combining \eqref{cha-gro-sta-5D-pro-1} and \eqref{cha-gro-sta-5D-pro-2}, we prove the result.
\end{proof}
Let $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. Define
\[
\mathcal{B}_\omega:= \left\{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ S_\omega(u,v) < S_\omega(\phi_\omega,\psi_\omega), Q(u,v) <0 \right\}.
\]
\begin{lemma} \label{lem-inv-set}
Let $d=5$, $\kappa=\frac{1}{2}$, $\omega>0$ and $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. The set $\mathcal{B}_\omega$ is invariant under the flow of \eqref{mas-res-Syst}.
\end{lemma}
\begin{proof}
Let $(u_0,v_0) \in \mathcal{B}_\omega$. We will show that the corresponding solution $(u(t),v(t))$ to \eqref{mas-res-Syst} with initial data $(u(0),v(0)) = (u_0,v_0)$ satisfies $(u(t),v(t)) \in \mathcal{B}_\omega$ for any $t$ in the existence time. Indeed, by the conservation of mass and energy, we have
\begin{align} \label{inv-set-pro}
S_\omega(u(t),v(t)) = S_\omega(u_0,v_0) < S_\omega (\phi_\omega,\psi_\omega)
\end{align}
for any $t$ in the existence time. It remains to show that $Q(u(t),v(t))<0$ for any $t$ as long as the solution exists. Suppose that there exists $t_0 >0$ such that $Q(u(t_0),v(t_0)) \geq 0$. By the continuity of the function $t\mapsto Q(u(t),v(t))$, there exists $t_1 \in (0,t_0]$ such that $Q(u(t_1),v(t_1)) =0$. It follows from Lemma $\ref{lem-cha-gro-sta-5D}$ that $S_\omega(u(t_1),v(t_1)) \geq S_\omega(\phi_\omega,\psi_\omega)$ which contradicts \eqref{inv-set-pro}. The proof is complete.
\end{proof}
\begin{lemma} \label{lem-key-lem}
Let $d=5$, $\kappa=\frac{1}{2}$, $\omega>0$ and $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. If $(u,v) \in \mathcal{B}_\omega$, then
\[
Q(u,v) \leq 2 (S_\omega(u,v) - S_\omega(\phi_\omega,\psi_\omega)).
\]
\end{lemma}
\begin{proof}
Let $(u,v) \in \mathcal{B}_\omega$. Set
\[
f(\gamma):= S_\omega(u^\gamma,v^\gamma) = \frac{\gamma^2}{2} K(u,v) + \frac{\omega}{2} M(u,v) - \gamma^{\frac{5}{2}} P(u,v).
\]
We have
\[
f'(\gamma) = \gamma K(u,v) - \frac{5}{2} \gamma^{\frac{3}{2}} P(u,v) = \frac{Q(u^\gamma, v^\gamma)}{\gamma}.
\]
We see that
\begin{align} \label{key-lem-pro}
(\gamma f'(\gamma))' &= 2\gamma K(u,v) - \frac{25}{4} \gamma^{\frac{3}{2}} P(u,v) \nonumber \\
&= 2 \left(\gamma K(u,v) - \frac{5}{2} \gamma^{\frac{3}{2}} P(u,v) \right) - \frac{5}{4} \gamma^{\frac{3}{2}} P(u,v) \nonumber \\
&\leq 2f'(\gamma)
\end{align}
for all $\gamma >0$. Note that $P(u,v) > 0$, which follows from the fact that $Q(u,v) <0$. We also note that since $Q(u,v) <0$, the equation $\partial_\gamma S_\omega(u^\gamma, v^\gamma)=0$ admits a unique non-zero solution
\[
\gamma_0 = \left(\frac{2K(u,v)}{5P(u,v)} \right)^2 \in (0,1),
\]
and $Q(u^{\gamma_0},v^{\gamma_0}) = \gamma_0 \times \left.\partial_\gamma S_\omega(u^\gamma,v^\gamma)\right|_{\gamma=\gamma_0} =0$. Integrating \eqref{key-lem-pro} over $(\gamma_0,1)$ and using the fact that $\gamma f'(\gamma) = Q(u^\gamma,v^\gamma)$, we get
\[
Q(u,v) - Q(u^{\gamma_0},v^{\gamma_0}) \leq 2 (S_\omega(u,v) - S_\omega(u^{\gamma_0},v^{\gamma_0})).
\]
The result then follows from the fact that $S_\omega(\phi_\omega,\psi_\omega) \leq S_\omega(u^{\gamma_0},v^{\gamma_0})$ since $Q(u^{\gamma_0},v^{\gamma_0}) = 0$.
\end{proof}
We are now able to prove the strong instability of standing waves given in Theorem $\ref{theo-str-ins}$.
\noindent \textit{Proof of Theorem $\ref{theo-str-ins}$.}
Let ${\varepsilon}>0$. Since $(\phi^{\gamma}_\omega, \psi^{\gamma}_\omega) \rightarrow (\phi_\omega,\psi_\omega)$ as $\gamma \rightarrow 1$, there exists $\gamma_0>1$ such that $\|(\phi^{\gamma_0}_\omega,\psi^{\gamma_0}_\omega) - (\phi_\omega,\psi_\omega)\|_{H^1 \times H^1} <{\varepsilon}$. We claim that $(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega) \in \mathcal{B}_\omega$. Indeed, we have
\begin{align*}
S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) &= \frac{\gamma^2}{2} K(\phi_\omega,\psi_\omega) +\frac{\omega}{2} M(\phi_\omega,\psi_\omega) -\gamma^{\frac{5}{2}} P(\phi_\omega,\psi_\omega), \\
\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) &= \gamma K(\phi_\omega, \psi_\omega) - \frac{5}{2} \gamma^{\frac{3}{2}} P(\phi_\omega,\psi_\omega) = \frac{Q(\phi^\gamma_\omega, \psi^\gamma_\omega)}{\gamma}.
\end{align*}
Since $Q(\phi_\omega,\psi_\omega)=0$, the equation $\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) =0$ admits a unique non-zero solution
\[
\gamma = \left( \frac{2K(\phi_\omega,\psi_\omega)}{5P(\phi_\omega,\psi_\omega)} \right)^2 =1.
\]
This implies that $\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) >0$ if $\gamma \in (0,1)$ and $\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega)<0$ if $\gamma \in (1,\infty)$. In particular, $S_\omega(\phi^\gamma_\omega,\psi^\gamma_\omega)<S_\omega(\phi_\omega,\psi_\omega)$ for any $\gamma>0$ and $\gamma \ne 1$. On the other hand, since $Q(\phi^\gamma_\omega,\psi^\gamma_\omega)= \gamma \partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega)$, we see that $Q(\phi^\gamma_\omega, \psi^\gamma_\omega) >0$ if $\gamma \in (0,1)$ and $Q(\phi^\gamma_\omega, \psi^\gamma_\omega)<0$ if $\gamma \in (1,\infty)$. Since $\gamma_0>1$, we see that
\[
S_\omega(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega)< S_\omega(\phi_\omega,\psi_\omega) \text{ and } Q(\phi^{\gamma_0}_\omega,\psi^{\gamma_0}_\omega) <0.
\]
Therefore, $(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega) \in \mathcal{B}_\omega$ and the claim follows.
By the local well-posedness, there exists a unique solution $(u(t), v(t)) \in C([0,T), H^1 \times H^1)$ to \eqref{mas-res-Syst} with initial data $(u(0),v(0)) = (\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega)$, where $T>0$ is the maximal time of existence. By Lemma $\ref{lem-inv-set}$, we see that $(u(t),v(t)) \in \mathcal{B}_\omega$ for any $t\in [0,T)$. Thus, applying Lemma $\ref{lem-key-lem}$, we get
\[
Q(u(t),v(t)) \leq 2 (S_\omega(u(t),v(t)) - S_\omega (\phi_\omega,\psi_\omega)) = 2(S_\omega(\phi^{\gamma_0}_\omega,\psi^{\gamma_0}_\omega) - S_\omega(\phi_\omega, \psi_\omega)) =- \delta
\]
for any $t\in [0,T)$, where $\delta= 2 (S_\omega(\phi_\omega, \psi_\omega) - S_\omega(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega)) >0$. Since $(|x|\phi_\omega, |x|\psi_\omega) \in L^2 \times L^2$, it follows that $(|x| \phi^{\gamma_0}_\omega, |x| \psi^{\gamma_0}_\omega) \in L^2 \times L^2$. Thanks to the virial identity \eqref{vir-ide-ins}, we obtain
\[
\frac{d^2}{dt^2} \left( \|xu(t)\|^2_{L^2} + 2 \|xv(t)\|^2_{L^2} \right) = 8 Q(u(t),v(t)) \leq -8 \delta <0,
\]
for any $t\in [0,T)$. The classical argument of Glassey \cite{Glassey} implies that the solution blows up in finite time. The proof is complete.
$\Box$
\end{document} |
\begin{document}
\title{\huge On the usage of lines in $GC_n$ sets} \author{Hakop Hakopian, Vahagn Vardanyan} \date{}
\maketitle
\begin{abstract}
A planar node set $\mathcal X,$ with $|\mathcal X|=\binom{n+2}{2}$ is called $GC_n$ set if each node possesses a fundamental polynomial in the form of a product of $n$ linear factors. We say that a node uses a line $Ax+By+C=0$ if $Ax+By+C$ divides the fundamental polynomial of the node. A line is called $k$-node line if it passes through exactly $k$ nodes of $\mathcal X.$ At most $n+1$ nodes can be collinear in $GC_n$ sets and an $(n+1)$-node line is called maximal line. The Gasca--Maeztu conjecture (1982) states that every $GC_n$ set has a maximal line. Until now the conjecture has been proved only for the cases $n \le 5.$ Here we adjust and prove a conjecture proposed in the paper -- V. Bayramyan, H. H., Adv Comput Math, 43: 607--626, 2017. Namely, by assuming that the Gasca-Maeztu conjecture is true, we prove that for any $GC_n$ set $\mathcal X$ and any $k$-node line $\ell$ the following statement holds:
\noindent Either the line $\ell$ is not used at all, or it is used by exactly $\binom{s}{2}$ nodes of $\mathcal X,$ where $s$ satisfies the condition $\sigma:=2k-n-1\le s\le k.$ If in addition $\sigma \ge 3$ and $\mu(\mathcal X)>3$ then the first case here is excluded, i.e., the line $\ell$ is necessarily a used line. Here $\mu(\mathcal X)$ denotes the number of maximal lines of $\mathcal X.$
At the end, we bring a characterization for the usage of $k$-node lines in $GC_n$ sets when $\sigma=2$ and $\mu(\mathcal X)>3.$ \end{abstract}
{\bf Key words:} Polynomial interpolation, Gasca-Maeztu conjecture, $n$-poised set, $n$-independent set, $GC_n$ set, fundamental polynomial, maximal line.
{\bf Mathematics Subject Classification (2010):} \\ 41A05, 41A63.
\section{Introduction\label{sec:intro}} An $n$-poised set $\mathcal X$ in the plane is a node set for which the interpolation problem with bivariate polynomials of total degree at most $n$ is unisolvent. Node sets with geometric characterization: $GC_n$ sets, introduced by Chang and Yao \cite{CY77}, form an important subclass of $n$-poised sets. In a $GC_n$ set the fundamental polynomial of each node is a product of $n$ linear factors. We say that a node uses a line if the line is a factor of the fundamental polynomial of this node. A line is called $k$-node line if it passes through exactly $k$ nodes of $\mathcal X.$ It is a simple fact that at most $n+1$ nodes can be collinear in $GC_n$ sets. An $(n+1)$-node line is called a maximal line. The conjecture of M. Gasca and J. I. Maeztu \cite{GM82} states that every $GC_n$ set has a maximal line. Until now the conjecture has been proved only for the cases $n \le 5$ (see \cite{B90} and \cite{HJZ14}). For a maximal line $\lambda$ in a $GC_n$ set $\mathcal X$ the following statement is evident: the line $\lambda$ is used by all $\binom{n+1}{2}$ nodes in $\mathcal X\setminus \lambda.$ This immediately follows from the fact that if a polynomial of total degree at most $n$ vanishes at $n+1$ points of a line then the line divides the polynomial (see Proposition \ref{prp:n+1points}, below). Here we consider a conjecture proposed in the paper \cite{BH} by V. Bayramyan and H. H., concerning the usage of any $k$-node line of $GC_n$ set. In this paper we make a correction in the mentioned conjecture and then prove it. Namely, by assuming that the Gasca-Maeztu conjecture is true, we prove that for any $GC_n$ set $\mathcal X$ and any $k$-node line $\ell$ the following statement holds:
\noindent The line $\ell$ is not used at all, or it is used by exactly $\binom{s}{2}$ nodes of $\mathcal X,$ where $s$ satisfies the condition $\sigma=\sigma(\mathcal{X},\ell):=2k-n-1\le s\le k.$ If in addition $\sigma \ge 3$ and $\mu(\mathcal X)>3$ then the first case here is excluded, i.e., the line $\ell$ is necessarily a used line. Here $\mu(\mathcal X)$ denotes the number of maximal lines of $\mathcal X.$ We prove also that the subset of nodes of $\mathcal X$ that use the line $\ell$ forms a $GC_{s-2}$ set if it is not an empty set. Moreover we prove that actually it is a $\ell$-proper subset of $\mathcal X,$ meaning that it can be obtained from $\mathcal X$ by subtracting the nodes in subsequent maximal lines, which do not intersect the line $\ell$ at a node of $\mathcal{X}$ or the nodes in pairs of maximal lines intersecting $\ell$ at the same node of $\mathcal{X}.$ At the last step, when the line $\ell$ becomes maximal, the nodes in $\ell$ are subtracted (see the forthcoming Definition \ref{def:proper}).
At the end, we bring a characterization for the usage of $k$-node lines in $GC_n$ sets when $\sigma=2$ and $\mu(\mathcal X)>3.$
Let us mention that earlier Carnicer and Gasca proved that a $k$-node line $\ell$ can be used by at most $\binom{k}{2}$ nodes of a $GC_n$ set $\mathcal X$ and in addition there are no $k$ collinear nodes that use $\ell$, provided that the GM conjecture is true (see \cite{CG03}, Theorem 4.5).
\subsection{Poised sets}
Denote by $\Pi_n$ the space of bivariate polynomials of total degree at most $n:$ \begin{equation*} \Pi_n=\left\{\sum_{i+j\leq{n}}c_{ij}x^iy^j \right\}. \end{equation*} We have that \begin{equation} \label{N=} N:=\dim \Pi_n=\binom{n+2}{2}. \end{equation} Let $\mathcal{X}$ be a set of $s$ distinct nodes (points): \begin{equation*} {\mathcal X}={\mathcal X}_s=\{ (x_1, y_1), (x_2, y_2), \dots , (x_s, y_s) \} . \end{equation*} The Lagrange bivariate interpolation problem is: for given set of values $\mathcal{C}_s:=\{ c_1, c_2, \dots , c_s \}$ find a polynomial $p \in \Pi_n$ satisfying the conditions \begin{equation}\label{int cond} p(x_i, y_i) = c_i, \ \ \quad i = 1, 2, \dots s. \end{equation}
\begin{definition} A set of nodes ${\mathcal X}_s$ is called \emph{$n$-poised} if for any set of values $\mathcal{C}_s$ there exists a unique polynomial $p \in \Pi_n$ satisfying the conditions \eqref{int cond}. \end{definition} It is an elementary Linear Algebra fact that if a node set $\mathcal{X}_s$ is $n$-poised then $s=N.$ Thus from now on we will consider sets $\mathcal{X}=\mathcal{X}_N$ when $n$-poisedness is studied. If a set $\mathcal{X}$ is $n$-poised then we say that $n$ is the degree of the set $\mathcal{X}.$
\begin{proposition} \label{prp:poised} The set of nodes ${\mathcal X}_N$ is $n$-poised if and only if the following implication holds: $$p \in \Pi_n,\ p(x_i, y_i) = 0, \quad i = 1, \dots , N \Rightarrow p = 0.$$ \end{proposition}
A polynomial $p \in \Pi_n$ is called an \emph{$n$-fundamental polynomial} for a node $ A = (x_k, y_k) \in {\mathcal X}_s,\ 1\le k\le s$, if \begin{equation*} p(x_i, y_i) = \delta _{i k}, \ i = 1, \dots , s , \end{equation*} where $\delta$ is the Kronecker symbol.
Let us denote the $n$-fundamental polynomial of the node $A \in{\mathcal X}_s$ by $p_A^\star=p_{A, {\mathcal X}}^\star.$
A polynomial vanishing at all nodes but one is also called fundamental, since it is a nonzero constant times the fundamental polynomial.
\begin{definition} Given an $n$-poised set $ {\mathcal X}.$ We say that a node $A\in{\mathcal X}$ \emph{uses a line $\ell\in \Pi_1$,} if \begin{equation*}
p_A^\star = \ell q, \ \text{where} \ q\in\Pi_{n-1}. \end{equation*} \end{definition} The following proposition is well-known (see, e.g., \cite{HJZ09b} Proposition 1.3): \begin{proposition}\label{prp:n+1points} Suppose that a polynomial $p \in \Pi_n$ vanishes at $n+1$ points of a line $\ell.$ Then we have that \begin{equation*} p = \ell r, \ \text{where} \ r\in\Pi_{n-1}. \end{equation*} \end{proposition}
\noindent Thus at most $n+1$ nodes of an $n$-poised set $\mathcal{X}$ can be collinear. A line $\lambda$ passing through $n+1$ nodes of the set ${\mathcal X}$ is called a \emph{maximal line}. Clearly, in view of Proposition \ref{prp:n+1points}, any maximal line $\lambda$ is used by all the nodes in $\mathcal{X}\setminus \lambda.$
Below we bring other properties of maximal lines: \begin{corollary}[\cite{CG00}, Prop. 2.1]\label{properties} Let ${\mathcal X}$ be an $n$-poised set. Then we have that
\begin{enumerate} \setlength{\itemsep}{0mm} \item Any two maximal lines of $\mathcal{X}$ intersect necessarily at a node of $\mathcal{X};$ \item Any three maximal lines of $\mathcal{X}$ cannot be concurrent; \item $\mathcal{X}$ can have at most $n+2$ maximal lines. \end{enumerate} \end{corollary}
\section{$GC_n$ sets and the Gasca-Maeztu conjecture \label{ss:GMconj}}
Now let us consider a special type of $n$-poised sets satisfying a geometric characterization (GC) property introduced by K.C. Chung and T.H. Yao: \begin{definition}[\cite{CY77}] An n-poised set ${\mathcal X}$ is called \emph{$GC_n$ set} (or $GC$ set)
if the $n$-fundamental polynomial of each node $A\in{\mathcal X}$ is a product of $n$ linear factors. \end{definition} \noindent Thus, $GC_n$ sets are the sets each node of which uses exactly $n$ lines.
\begin{corollary}[\cite{CG03}, Prop. 2.3] \label{crl:minusmax} Let $\lambda$ be a maximal line of a $GC_n$ set ${\mathcal X}.$ Then the set ${\mathcal X}\setminus \lambda$ is a $GC_{n-1}$ set. Moreover, for any node $A\in \mathcal X\setminus \lambda$ we have that \begin{equation}\label{aaaaa}p_{A, {\mathcal X}}^\star= \lambda p_{A, {\{\mathcal X\setminus \lambda}\}}^\star.\end{equation} \end{corollary}
Next we present the Gasca-Maeztu conjecture, briefly called GM conjecture:
\begin{conjecture}[\cite{GM82}, Sect. 5]\label{conj:GM} Any $GC_n$ set possesses a maximal line. \end{conjecture}
\noindent Till now, this conjecture has been confirmed for the degrees $n\leq 5$ (see \cite{B90}, \cite{HJZ14}). For a generalization of the Gasca-Maeztu conjecture to maximal curves see \cite{HR}.
In the sequel we will make use of the following important result:
\begin{theorem}[\cite{CG03}, Thm. 4.1]\label{thm:CG} If the Gasca-Maeztu conjecture is true for all $k\leq n$, then any $GC_n$ set possesses at least three maximal lines. \end{theorem}
This yields, in view of Corollary \ref{properties} (ii) and Proposition \ref{prp:n+1points}, that each node of a $GC_n$ set $\mathcal{X}$ uses at least one maximal line.
Denote by $\mu:=\mu(\mathcal{X})$ the number of maximal lines of the node set $\mathcal{X}.$
\begin{proposition}[\cite{CG03}, Crl. 3.5]\label{prp:CG-1} Let $\lambda$ be a maximal line of a $GC_n$ set $\mathcal{X}$ such that $\mu(\mathcal{X}\setminus \lambda)\ge 3.$ Then we have that $$\mu(\mathcal{X}\setminus \lambda)=\mu(\mathcal{X})\quad \hbox{or}\quad \mu(\mathcal{X})-1.$$ \end{proposition}
\begin{definition}[\cite{CG01}] Given an $n$-poised set $\mathcal{X}$ and a line $\ell.$ Then $\mathcal{X}_\ell$ is the subset of nodes of $\mathcal{X}$ which use the line $\ell.$ \end{definition} Note that a statement on maximal lines we have mentioned already can be expressed as follows \begin{equation}\label{maxaaa} \mathcal{X}_\ell=\mathcal{X}\setminus \ell, \ \hbox{if $\ell$ is a maximal line}. \end{equation}
Suppose that $\lambda$ is a maximal line of $\mathcal{X}$ and $\ell\neq \lambda$ is any line. Then in view of the relation \eqref{aaaaa} we have that \begin{equation}\label{rep} \mathcal{X}_\ell\setminus \lambda=(\mathcal{X}\setminus \lambda)_\ell. \end{equation}
In the sequel we will use frequently the following two lemmas of Carnicer and Gasca.
Let $\mathcal{X}$ be an $n$-poised set and ${\ell}$ be a line with $|\ell\cap\mathcal{X}|\le n.$ A maximal line $\lambda$ is called $\ell$-\emph{disjoint} if \begin{equation}\label{aaaa} \lambda \cap {\ell} \cap \mathcal{X} =\emptyset. \end{equation}
\begin{lemma}[\cite{CG03}, Lemma 4.4]\label{lem:CG1}
Let $\mathcal{X}$ be an $n$-poised set and ${\ell}$ be a line with $|\ell\cap\mathcal{X}|\le n.$ Suppose also that a maximal line $\lambda$ is $\ell$-disjoint. Then we have that \begin{equation}\label{aaa} \mathcal{X}_{\ell} = {(\mathcal{X} \setminus \lambda)}_{\ell}. \end{equation} Moreover, if ${\ell}$ is an $n$-node line then we have that $\mathcal{X}_{\ell} = \mathcal{X} \setminus (\lambda \cup {\ell}),$ hence $\mathcal{X}_{\ell}$ is an $(n-2)$-poised set. \end{lemma}
Let $\mathcal{X}$ be an $n$-poised set and ${\ell}$ be a line with $|\ell\cap\mathcal{X}|\le n.$ Two maximal lines $\lambda', \lambda''$ are called $\ell$-\emph{adjacent} if \begin{equation}\label{bbbb} \lambda' \cap \lambda''\cap {\ell} \in \mathcal{X}.\end{equation}
\begin{lemma}[\cite{CG03}, proof of Thm. 4.5]\label{lem:CG2}
Let $\mathcal{X}$ be an $n$-poised set and ${\ell}$ be a line with $3\le|\ell\cap\mathcal{X}|\le n.$ Suppose also that two maximal lines $\lambda', \lambda''$ are $\ell$-adjacent. Then we have that \begin{equation}\label{bbb} \mathcal{X}_{\ell} = {(\mathcal{X} \setminus (\lambda' \cup \lambda''))}_{\ell}. \end{equation} Moreover, if ${\ell}$ is an $n$-node line then we have that $\mathcal{X}_{\ell} = \mathcal{X} \setminus (\lambda' \cup \lambda'' \cup {\ell}),$ hence $\mathcal{X}_\ell$ is an $(n-3)$-poised set. \end{lemma}
\noindent Next, by the motivation of above two lemmas, let us introduce the concept of an $\ell$-reduction of a $GC_n$ set. \begin{definition}\label{def:reduct} Let $\mathcal{X}$ be a $GC_n$ set, $\ell$ be a $k$-node line $k\ge 2.$ We say that a set $\mathcal{Y}\subset\mathcal{X}$ is an $\ell$-reduction of $\mathcal{X},$ and briefly denote this by $\mathcal{X}\searrow_\ell\mathcal{Y},$ if $$\mathcal{Y}=\mathcal{X} \setminus \left(\mathcal{C}_0\cup \mathcal{C}_1\cup \cdots \cup\mathcal{C}_k\right),$$ where \begin{enumerate} \item $\mathcal{C}_0$ is an $\ell$-disjoint maximal line of $\mathcal{X},$ or $\mathcal{C}_0$ is the union of a pair of $\ell$-adjacent maximal lines of $\mathcal{X};$ \item $\mathcal{C}_i$ is an $\ell$-disjoint maximal line of the $GC$ set $\mathcal{Y}_i:=\mathcal{X}\setminus (\mathcal{C}_0\cup \mathcal{C}_1\cup \cdots \cup \mathcal{C}_{i-1}),$ or $\mathcal{C}_i$ is the union of a pair of $\ell$-adjacent maximal lines of $\mathcal{Y}_i,\ i=1,\ldots k;$ \item $\ell$ passes through at least $2$ nodes of $\mathcal{Y}.$ \end{enumerate} \end{definition}
Note that, in view of Corollary \ref{crl:minusmax}, the set $\mathcal{Y}$ here is a $GC_m$ set, where $m=n-\sum_{i=0}^{k}\delta_i,$ and $\delta_i =1$ or $2$ if $\mathcal{C}_i$ is an
$\ell$-disjoint maximal line or a union of a pair of $\ell$-adjacent maximal lines, respectively.
We get immediately from Lemmas \ref{lem:CG1} and \ref{lem:CG2} that \begin{equation}\label{abcd}\mathcal{X}\searrow_\ell\mathcal{Y} \Rightarrow \mathcal{X}_\ell=\mathcal{Y}_\ell.\end{equation} Notice that we cannot do any further $\ell$-reduction with the set $\mathcal{Y}$ if the line $\ell$ is a maximal line here. For this situation we have the following \begin{definition}\label{def:proper} Let $\mathcal{X}$ be a $GC_n$ set, $\ell$ be a $k$-node line, $k\ge 2.$ We say that the set $\mathcal{X}_\ell$ is $\ell$-proper $GC_m$ subset of $\mathcal{X}$ if there is a $GC_{m+1}$ set $\mathcal{Y}$ such that \begin{enumerate} \item $\mathcal{X}\searrow_\ell\mathcal{Y};$ \item The line $\ell$ is a maximal line in $\mathcal{Y}.$ \end{enumerate} \end{definition} Note that, in view of the relations \eqref{abcd} and \eqref{maxaaa} here we have that $$\mathcal{X}_\ell = \mathcal{Y}\setminus \ell=\mathcal{X} \setminus \left(\mathcal{C}_0\cup \mathcal{C}_1\cup \cdots \cup\mathcal{C}_k\cup\ell\right),$$ where the sets $\mathcal{C}_i$ satisfy conditions listed in Definition \ref{def:reduct}.
In view of this relation and Corollary \ref{crl:minusmax} we get that in fact $\mathcal{X}_\ell$ is a $GC_m$ set, if it is an $\ell$-proper $GC_m$ subset of $\mathcal{X}.$
Note also that the node set $\mathcal{X}_\ell$ in Lemma \ref{lem:CG1} or in Lemma \ref{lem:CG2} is an $\ell$-proper subset of $\mathcal{X}$ if $\ell$ is an $n$-node line.
We immediately get from Definitions \ref{def:reduct} and \ref{def:proper} the following \begin{proposition} \label{proper} Suppose that $\mathcal{X}$ is a $GC_n$ set.
If $\mathcal{X}\searrow_\ell\mathcal{Y}$ and $\mathcal{Y}_\ell$ is a proper $GC_m$ subset of $\mathcal{Y}$ then $\mathcal{X}_\ell$ is a proper $GC_m$ subset of $\mathcal{X}.$ \end{proposition}
\subsection{Classification of $GC_n$ sets\label{ssec:SE}}
Here we will consider the results of Carnicer, Gasca, and God\'es concerning the classification of $GC_n$ sets according to the number of maximal lines the sets possesses. Let us start with
\begin{theorem}[\cite{CGo10}]\label{th:CGo10} Let $\mathcal{X}$ be a $GC_n$ set with $\mu(\mathcal{X})$ maximal lines. Suppose also that $GM$ conjecture is true for the degrees not exceeding $n.$ Then $\mu(\mathcal{X})\in\left\{3, n-1, n,n+1,n+2\right\}.$ \end{theorem}
\noindent \emph{1. Lattices with $n+2$ maximal lines - the Chung-Yao natural lattices.}
Let a set $\mathcal{L}$ of $n+2$ lines be in general position, i.e., no two lines are parallel and no three lines are concurrent, $n\ge 1.$ Then the Chung-Yao set is defined as the set $\mathcal{X}$ of all $\binom{n+2}{2}$ intersection points of these lines. Notice that the $n+2$ lines of $\mathcal{L}$ are maximal for $\mathcal{X}.$ Each fixed node here is lying in exactly $2$ lines and does not belong to the remaining $n$ lines. Observe that the product of the latter $n$ lines gives the fundamental polynomial of the fixed node. Thus $\mathcal{X}$ is a $GC_n$ set.
Let us mention that any $n$-poised set with $n+2$ maximal lines clearly forms a Chung-Yao lattice. Recall that there are no $n$-poised sets with more maximal lines (Proposition \ref{properties}, (iii)).
\noindent \emph{2. Lattices with $n+1$ maximal lines - the Carnicer-Gasca lattices.}
Let a set $\mathcal{L}$ of $n+1$ lines be in general position, $n\ge 2.$ Then the Carnicer-Gasca lattice $\mathcal{X}$ is defined as $\mathcal{X}:=\mathcal{X}^\times\cup\mathcal{X}',$
where $\mathcal{X}^\times$ is the set of all intersection points of these $n+1$ lines and $\mathcal{X}'$ is a set of other $n+1$ non-collinear nodes, called "free" nodes, one in each line, to make the line maximal. We have that $|\mathcal{X}|=\binom{n+1}{2}+(n+1)=\binom{n+2}{2}.$ Each fixed "free" node here is lying in exactly $1$ line. Observe, that the product of the remaining $n$ lines gives the fundamental polynomial of the fixed "free" node. Next, each fixed intersection node is lying in exactly $2$ lines. The product of the remaining $n-1$ lines and the line passing through the two "free" nodes in the $2$ lines gives the fundamental polynomial of the fixed intersection node. Thus $\mathcal{X}$ is a $GC_n$ set. It is easily seen that $\mathcal{X}$ has exactly $n+1$ maximal lines, i.e., the lines of $\mathcal{L}.$
Let us mention that any $n$-poised set with exactly $n+1$ maximal lines clearly forms a Carnicer-Gasca lattice (see \cite{CG00}, Proposition 2.4).
\noindent \emph{3. Lattices with $n$ maximal lines.}
Suppose that a $GC_n$ set $\mathcal{X}$ possesses exactly $n$ maximal lines, $n\ge 3.$ These lines are in a general position and we have that $\binom{n}{2}$ nodes of $\mathcal{X}$ are intersection nodes of these lines. Clearly in each maximal line there are $n-1$ intersection nodes and therefore there are $2$ more nodes, called "free" nodes, to make the line maximal. Thus $N-1=\binom{n}{2}+2n$ nodes are identified. The last node $O,$ called outside node, is outside the maximal lines.
Thus the lattice $\mathcal{X}$ has the following construction \begin{equation}\label{01O}\mathcal{X}:=\mathcal{X}^\times\cup\mathcal{X}''\cup \{O\},\end{equation} where $\mathcal{X}^\times$ is the set of intersection nodes, and $\mathcal{X}''$ is the set of $2n$ "free" nodes.
In the sequel we will need the following characterization of $GC_n$ sets with exactly $n$ maximal lines due to Carnicer and Gasca:
\begin{proposition}[\cite{CG00}, Prop. 2.5]\label{prp:nmax} A set $\mathcal{X}$ is a $GC_n$ set with exactly $n,\ n\ge 3,$ maximal lines $\lambda_1,\ldots,\lambda_n,$ if and only if the representation \eqref{01O} holds with the following additional properties:
\begin{enumerate} \setlength{\itemsep}{0mm} \item There are $3$ lines $\ell_1^o, \ell_2^o,\ell_3^o$ concurrent at the outside node $O: \ O=\ell_1^o\cap \ell_2 ^o\cap \ell_3^o$ such that $\mathcal{X}''\subset \ell_1^o\cup \ell_2^o \cup \ell_3^o;$ \item No line $\ell_i^o,\ i=1,2,3,$ contains $n+1$ nodes of $\mathcal{X}.$ \end{enumerate} \end{proposition}
\noindent \emph{4. Lattices with $n-1$ maximal lines.}
Suppose that a $GC_n$ set $\mathcal{X}$ possesses exactly $n-1$ maximal lines: $\lambda_1,\ldots,\lambda_{n-1},$ where $n\ge 4.$ These lines are in a general position and we have that $\binom{n-1}{2}$ nodes of $\mathcal{X}$ are intersection nodes of these lines. Now, clearly in each maximal line there are $n-2$ intersection nodes and therefore there are $3$ more nodes, called "free" nodes, to make the line maximal. Thus $N-3=\binom{n-1}{2}+3(n-1)$ nodes are identified. The last $3$ nodes $O_1,O_2,O_3$ called outside nodes, are outside the maximal lines. Clearly, the outside nodes are non-collinear. Indeed, otherwise the set $\mathcal{X}$ is lying in $n$ lines, i.e., $n-1$ maximal lines and the line passing through the outside nodes. This, in view of Proposition \ref{prp:poised}, contradicts the $n$-poisedness of $\mathcal{X}.$
Thus the lattice $\mathcal{X}$ has the following construction
\begin{equation}\label{01OOO} \mathcal{X}:=\mathcal{X}^\times\cup\mathcal{X}'''\cup \{O_1,O_2,O_3\},\end{equation} where $\mathcal{X}^\times$ is the set of intersection nodes, and $\mathcal{X}''' =\left\{ A_{i}^1, A_i^2,A_i^3\in \lambda_i : 1\le i\le n-1\right\},$ is the set of $3(n-1)$ "free" nodes.
Denote by $\ell_{i}^{oo},\ 1\le i\le 3,$ the line passing through the two outside nodes $\{O_1,O_2,O_3\}\setminus \{O_i\}.$ We call these lines $OO$ lines.
In the sequel we will need the following characterization of $GC_n$ sets with exactly $n-1$ maximal lines due to Carnicer and God\'es:
\begin{proposition}[\cite{CGo07}, Thm. 3.2]\label{prp:n-1max} A set $\mathcal{X}$ is a $GC_n$ set with exactly $n-1$ maximal lines $\lambda_1,\ldots,\lambda_{n-1},$ where $n\ge 4,$ if and only if, with some permutation of the indexes of the maximal lines and "free" nodes, the representation \eqref{01OOO} holds with the following additional properties:
\begin{enumerate} \setlength{\itemsep}{0mm} \item $\mathcal{X}'''\setminus \{ A_{1}^1, A_2^2,A_3^3\}\subset \ell_{1}^{oo}\cup \ell_{2}^{oo} \cup \ell_{3}^{oo};$ \item Each line $\ell_{i}^{oo}, i=1,2,3,$ passes through exactly $n-2$ "free" nodes (and through $2$ outside nodes). Moreover, $\ell_{i}^{oo}\cap\lambda_i\notin\mathcal{X},\ i=1,2,3;$ \item The triples $\{O_1,A_2^2,A_3^3\},\ \{O_2,A_1^1,A_3^3\},\ \{O_3,A_1^1,A_2^2\}$ are collinear. \end{enumerate} \end{proposition}
\noindent \emph{5. Lattices with $3$ maximal lines - generalized principal lattices.}
A principal lattice is defined as an affine image of the set $$PL_n:=\left\{(i,j)\in{\mathbb N}_0^2 : \quad i+j\le n\right\}.$$ Observe that the following $3$ sets of $n+1$ lines, namely $\{x=i:\ i=0,1,\ldots,n\},\ \ \{y=j:\ j=0,1,\ldots,n\}$ and $\{x+y=k:\ k=0,1,\ldots,n\},$ intersect at $PL_n.$ We have that $PL_n$ is a $GC_n$ set. Moreover, clearly the following polynomial is the fundamental polynomial of the node $(i_0,j_0)\in PL_n:$ \begin{equation}\label{aaabbcPL} p_{i_0j_0}^\star(x,y) =\prod_{0\le i<i_0,\ 0\le j<j_0,\ 0\le k<k_0} (x-i)(y-j)(x+y-n+k),\end{equation} where $k_0=n-i_0-j_0.$
Next let us bring the definition of the generalized principal lattice due to Carnicer, Gasca and God\'es (see \cite{CG05}, \cite{CGo06}): \begin{definition}[\cite{CGo06}] \label{def:GPL} A node set $\mathcal{X}$ is called a generalized principal lattice, briefly $GPL_n$ if there are $3$ sets of lines each containing $n+1$ lines \begin{equation}\label{aaagpl}\ell_i^j(\mathcal{X})_{i\in \{0,1,\ldots,n\}}, \ j=0,1,2,\end{equation} such that the $3n+3$ lines are distinct, $$\ell_i^0(\mathcal{X})\cap \ell_j^1(\mathcal{X}) \cap \ell_k^2(\mathcal{X}) \cap \mathcal{X} \neq \emptyset \iff i+j+k=n$$ and
$$\mathcal{X}=\left\{x_{ijk}\ |\ x_{ijk}:=\ell_i^0(\mathcal{X})\cap \ell_j^1(\mathcal{X}) \cap \ell_k^2(\mathcal{X}), 0\le i,j,k\le n, i+j+k=n\right\}.$$ \end{definition}
Observe that if $0\le i,j,k\le n,\ i+j+k=n$ then the three lines $\ell_i^0(\mathcal{X}), \ell_j^1(\mathcal{X}), \ell_k^2(\mathcal{X})$ intersect at a node $x_{ijk}\in \mathcal{X}.$ This implies that a node of $\mathcal{X}$ belongs to only one line of each of the three sets of $n+1$ lines. Therefore $|\mathcal{X}|=(n+1)(n+2)/2.$
One can find readily, as in the case of $PL_n$, the fundamental polynomial of each node $x_{ijk}\in \mathcal{X},\ i+j+k=n:$ \begin{equation}\label{aaabbc} p_{i_0j_0k_0}^\star =\prod_{0\le i<i_0,\ 0\le j<j_0,\ 0\le k<k_0}\ell_{i}^0(\mathcal{X}) \ell_{j}^1(\mathcal{X}) \ell_{k}^2(\mathcal{X}).\end{equation} Thus $\mathcal{X}$ is a $GC_n$ set.
Now let us bring a characterization for $GPL_n$ set due to Carnicer and God\'es:
\begin{theorem}[\cite{CGo06}, Thm. 3.6]\label{th:CGo06} Assume that GM Conjecture holds for all degrees up to $n-3$. Then the following statements are equivalent:
\begin{enumerate} \setlength{\itemsep}{0mm} \item $\mathcal{X}$ is a generalized principal lattice of degree $n$; \item $\mathcal{X}$ is a $GC_n$ set with exactly $3$ maximal lines. \end{enumerate} \end{theorem}
\section{A conjecture concerning $GC_n$ sets \label{s:conj}}
Now we are in a position to formulate and prove the corrected version of the conjecture proposed in the paper \cite{BH} of V. Bayramyan and H. H.:
\begin{conjecture}[\cite{BH}, Conj. 3.7]\label{mainc} Assume that GM Conjecture holds for all degrees up to $n$. Let $\mathcal{X}$ be a $GC_n$ set, and ${\ell}$ be a $k$-node line, $2\le k\le n+1.$ Then we have that
\begin{equation} \label{1bbaa} \mathcal{X}_{\ell} =\emptyset,\ \hbox{or} \ \mathcal{X}_\ell \ \hbox{is an $\ell$-proper $GC_{s-2}$ subset of $\mathcal{X},$ hence}\ |\mathcal{X}_{\ell}| = \binom{s}{2}, \end{equation} where $\sigma:=2k-n-1\le s \le k.$\\ Moreover, if $\sigma\ge 3$ and $\mu(\mathcal{X}) >3$ then we have that $\mathcal{X}_{\ell}\neq \emptyset.$\\ Furthermore, for any maximal line $\lambda$ we have:
$|\lambda\cap \mathcal{X}_{\ell}| = 0$ or $|\lambda\cap \mathcal{X}_{\ell}| = s-1.$ \end{conjecture}
In the last subsection we characterize constructions of $GC_n$ sets for which there are non-used $k$-node lines with $\sigma=2$ and $\mu(\mathcal{X})>3.$
Let us mention that in the original conjecture in \cite{BH} the possibility that the set $\mathcal{X}_{\ell}$ may be empty in \eqref{1bbaa} was not foreseen. Also we added here the statement that $\mathcal{X}_\ell$ is an $\ell$-proper $GC$ subset.
\subsection{Some known special cases of Conjecture \ref{mainc} \label{ss:known}}
The following theorem concerns the special case $k=n$ of Conjecture \ref{mainc}. It is a corrected version of the original result in \cite{BH}: Theorem 3.3. This result was the first step toward the Conjecture \ref{mainc}.
The corrected version appears in \cite{HV}, Theorem 3.1. \begin{theorem}\label{thm:corrected} \label{th:corrected} Assume that GM Conjecture holds for all degrees up to $n$. Let $\mathcal{X}$ be a $GC_n$ set, $n \ge 1,\ n\neq 3,$ and ${\ell}$ be an $n$-node line. Then we have that
\begin{equation} \label{2bin} |\mathcal{X}_{\ell}| = \binom{n}{2}\quad \hbox{or} \quad \binom{n-1}{2}. \end{equation} Moreover, the following hold:
\begin{enumerate} \setlength{\itemsep}{0mm} \item
$|\mathcal{X}_{\ell}| = \binom{n}{2}$ if and only if there is an $\ell$-disjoint maximal line $\lambda,$ i.e., $\lambda \cap {\ell} \cap \mathcal{X} =\emptyset.$ In this case we have that $\mathcal{X}_{\ell}=\mathcal{X}\setminus (\lambda\cup \ell).$ Hence it is an $\ell$-proper $GC_{n-2}$ set;
\item
$|\mathcal{X}_{\ell}| = \binom{n-1}{2}$ if and only if there is a pair of $\ell$-adjacent maximal lines $\lambda', \lambda'',$ i.e., $\lambda' \cap \lambda'' \cap {\ell} \in \mathcal{X}.$ In this case we have that $\mathcal{X}_{\ell}=\mathcal{X}\setminus (\lambda'\cup \lambda''\cup \ell).$ Hence it is an $\ell$-proper $GC_{n-3}$ set. \end{enumerate} \end{theorem}
Next let us bring a characterization for the case $n=3,$ which is not covered in the above Theorem (see \cite{HT}, Corollary 6.1). \begin{proposition}[\cite{HV}, Prop. 3.3]\label{prp:n=3} Let $\mathcal{X}$ be a $3$-poised set and ${\ell}$ be a $3$-node line. Then we have that
\begin{equation} \label{2bin3} |\mathcal{X}_{\ell}| = 3,\quad 1,\quad\hbox{or} \quad 0. \end{equation} Moreover, the following hold:
\begin{enumerate} \setlength{\itemsep}{0mm} \item
$|\mathcal{X}_{\ell}| = 3$ if and only if there is a maximal line $\lambda_0$ such that $\lambda_0 \cap {\ell} \cap \mathcal{X} =\emptyset.$ In this case we have that $\mathcal{X}_{\ell}=\mathcal{X}\setminus (\lambda_0\cup \ell).$ Hence it is an $\ell$-proper $GC_{1}$ set. \item
$|\mathcal{X}_{\ell}| = 1$ if and only if there are two maximal lines $\lambda', \lambda'',$ such that $\lambda' \cap \lambda'' \cap {\ell} \in \mathcal{X}.$ In this case we have that $\mathcal{X}_{\ell}=\mathcal{X}\setminus (\lambda'\cup \lambda''\cup \ell).$ Hence it is an $\ell$-proper $GC_{0}$ set.
\item $|\mathcal{X}_{\ell}| = 0$ if and only if there are exactly three maximal lines in $\mathcal{X}$ and they intersect $\ell$ at three distinct nodes. \end{enumerate} \end{proposition} Let us mention that the statement \eqref{2bin} of Proposition \ref{prp:n=3} (without the ``Moreover" part) is valid for $3$-node lines in any $n$-poised set (see \cite{HT}, Corollary 6.1). More precisely the following statement holds:
\begin{equation*} \hbox{If $\mathcal{X}$ is an $n$-poised set and $\ell$ is a $3$-node line then\ } |\mathcal{X}_{\ell}| = 3,\quad 1,\quad\hbox{or} \quad 0. \end{equation*}
Note that this statement contains all conclusions of Conjecture \ref{mainc} for the case of $3$-node lines, except the claim that the set $\mathcal{X}_\ell$ is an $\ell$-proper $GC$ subset in the cases $ |\mathcal{X}_{\ell}| = 3,1.$ And for this reason we cannot use it in proving Conjecture \ref{mainc}.
Let us mention that, in view of the relation \eqref{maxaaa}, Conjecture \ref{mainc} is true if the line $\ell$ is a maximal line. Also Conjecture \ref{mainc} is true in the case when the $GC_n$ set $\mathcal{X}$ is a Chung--Yao lattice. Indeed, in this lattice the only used lines are the maximal lines. Also for any $k$-node line in $\mathcal{X}$ with $k\le n$ we have that $2k\le n+2,$ since through any node there pass two maximal lines. Thus for these non-used $k$-node lines we have $\sigma\le 1$ (see \cite{BH}).
The following proposition reveals a rich structure of the Carnicer-Gasca lattice. \begin{proposition}[\cite{BH}, Prop. 3.8]\label{CGl} Let $\mathcal{X}$ be a Carnicer-Gasca lattice of degree $n$ and ${\ell}$ be a $k$-node line. Then we have that
\begin{equation} \label{2bba} \mathcal{X}_\ell \ \hbox{is an $\ell$-proper $GC_{s-2}$ subset of $\mathcal{X},$ hence}\ |\mathcal{X}_{\ell}| = \binom{s}{2}, \end{equation} where $\sigma:=2k-n-1\le s \le k.$\\ Moreover, for any maximal line $\lambda$ we have:
$|\lambda\cap \mathcal{X}_{\ell}| = 0$ or $|\lambda\cap \mathcal{X}_{\ell}| = s-1.$\\ Furthermore, for each $n, k$ and $s$ with $\sigma\le s \le k,$ there is a Carnicer-Gasca lattice of degree $n$ and a $k$-node line $\ell$ such that \eqref{2bba} is satisfied. \end{proposition} Note that the phrase ``$\ell$-proper" is not present in the formulation of Proposition in \cite{BH} but it follows readily from the proof there.
Next consider the following statement of Carnicer and Gasca (see \cite{CG03}, Proposition 4.2):
\begin{equation} \label{2nodel}\hbox{If $\mathcal{X}$ is a $GC_n$ set and $\ell$ is a $2$-node line then\ } |\mathcal{X}_{\ell}| = 1\ \ \hbox{or}\ \ 0. \end{equation}
Let us adjoin this with the following statement:
If $\mathcal{X}$ is a $GC_n$ set, $\ell$ is a $2$-node line, and $|\mathcal{X}_\ell|=1,$ then $\mathcal{X}_\ell$ is an $\ell$-proper $GC_0$ subset, provided that $GM$ conjecture is true for all degrees up to $n.$
Indeed, suppose that $\mathcal{X}_\ell=\{A\}$ and $\ell$ passes through the nodes $B,C\in\mathcal{X}.$ The node $A$ uses a maximal $(n+1)$-node line in $\mathcal{X}$ which we denote by $\lambda_0.$ Next, $A$ uses a maximal $n$-node line in $\mathcal{X}\setminus \lambda_0$ which we denote by $\lambda_1.$ Continuing this way we find consecutively the lines $\lambda_2, \lambda_3,\ldots, \lambda_{n-1}$ and obtain that $$\{A\}=\mathcal{X}\setminus (\lambda_0\cup \lambda_1\cup \cdots \cup \lambda_{n-1}).$$ To finish the proof it suffices to show that $\lambda_{n-1}=\ell$ and the remaining lines $\lambda_i, i=0,\ldots,n-2$ are $\ell$-disjoint. Indeed, the node $A$ uses $\ell$ and since it is a $2$-node line it may coincide only with the last maximal line $\lambda_{n-1}.$ Now, suppose conversely that a maximal line $\lambda_k,\ 0\le k\le n-2,$ intersects $\ell$ at a node, say $B.$ Then consider the polynomial of degree $n:$ $$p=\ell_{A,C}\prod_{i\in\{0,\ldots,n-1\}\setminus\{k\}}\lambda_i,$$ where $\ell_{A,C}$ is the line through $A$ and $C.$ Clearly $p$ passes through all the nodes of $\mathcal{X}$ which contradicts Proposition \ref{prp:poised}.
Now, in view of the statement \eqref{2nodel} and the adjoint statement, we conclude that Conjecture \ref{mainc} is true for the case of $2$-node lines in any $GC_n$ sets.
It is worth mentioning that the statement \eqref{2nodel} is true also for any $n$-poised set $\mathcal{X}$ (see \cite{BH}, relation (1.4), due to V. Bayramyan).
\subsection{Some preliminaries for the proof of Conjecture \ref{mainc} \label{ss:prelim}}
Here we prove two propositions. The following proposition shows that Conjecture \ref{mainc} is true if the node set $\mathcal{X}$ has exactly $3$ maximal lines.
\begin{proposition}\label{prop:3max} Assume that GM Conjecture holds for all degrees up to $n-3$. Let $\mathcal{X}$ be a $GC_n$ set with exactly $3$ maximal lines, and ${\ell}$ be an $m$-node line, $2\le m\le n+1.$ Then we have that
\begin{equation} \label{1bin3} \mathcal{X}_{\ell}=\emptyset,\ \hbox{or} \ \mathcal{X}_\ell \ \hbox{is a proper $GC_{m-2}$ subset of $\mathcal{X},$ hence}\ |\mathcal{X}_{\ell}| = \binom{m}{2}. \end{equation} Moreover, if $\mathcal{X}_{\ell}\neq\emptyset$ and $m\le n$ then for a maximal line $\lambda_1$ of $\mathcal{X}$ we have:
\begin{enumerate} \setlength{\itemsep}{0mm} \item
$\lambda_1 \cap {\ell} \notin \mathcal{X}$ and $|\lambda_1\cap \mathcal{X}_{\ell}| = 0.$
\item
$|\lambda\cap \mathcal{X}_{\ell}| = m-1\ $ for the remaining two maximal lines. \end{enumerate} Furthermore, if the line $\ell$ intersects each maximal line at a node then $\mathcal{X}_\ell=\emptyset.$ \end{proposition} \begin{proof} According to Theorem \ref{th:CGo06} the set $\mathcal{X}$ is a generalized principal lattice of degree $n$ with some three sets of $n+1$ lines \eqref{aaagpl}. Then we obtain from \eqref{aaabbc} that the only used lines in $\mathcal{X}$ are the lines $\ell_{s}^r(\mathcal{X}),$ where $0\le s< n,\ r=0,1,2.$ Therefore the only used $m$-node lines are the lines $\ell_{n-m+1}^r(\mathcal{X}),\ r=0,1,2.$ Consider the line, say with $r=0,$ i.e., $\ell\equiv \ell_{n-m+1}^0(\mathcal{X}).$ It is used by all the nodes $x_{ijk}\in \mathcal{X}$ with $i>n-m+1,$ i.e., $i=n-m+2, n-m+3,\ldots,n.$ Thus, $\ell$ is used by exactly $\binom{m}{2}=(m-1)+(m-2)+\cdots+1$ nodes. This implies also that $\mathcal{X}_\ell= \mathcal{X}\setminus (\ell_0^0(\mathcal{X})\cup \ell_1^0(\mathcal{X})\cup\cdots\cup \ell_{n-m+1}^0(\mathcal{X})).$ Hence $\mathcal{X}_\ell$ is a proper $GC_{m-2}$ subset of $\mathcal{X}.$ The part ``Moreover" also follows readily from here. Now it remains to notice that the part ``Furthermore" is a straightforward consequence of the part ``Moreover". \end{proof}
The following statement on the presence and usage of $(n-1)$-node lines in $GC_n$ sets with exactly $n-1$ maximal lines will be used in the sequel (cf. Proposition 4.2, \cite{HV}). \begin{proposition}\label{prp:n-1} Let $\mathcal{X}$ be a $GC_n$ set with exactly $n-1$ maximal lines and $\ell$ be an $(n-1)$-node line, where $n\ge 4.$ Assume also that through each node of $\ell$ there passes exactly one maximal line. Then we have that either $n=4$ or $n=5.$ Moreover, in both these cases we have that $\mathcal{X}_\ell=\emptyset.$ \end{proposition} \begin{proof} Consider a $GC_n$ set with exactly $n-1$ maximal lines. In this case we have the representation \eqref{01OOO}, i.e., $\mathcal{X}:=\mathcal{X}^\times\cup\mathcal{X}'''\cup \{O_1,O_2,O_3\},$ satisfying the conditions of Proposition \ref{prp:n-1max}. Here $\mathcal{X}^\times$ is the set of all intersection nodes of the maximal lines, $\mathcal{X}'''$ is the set of the remaining ``free" nodes in the maximal lines, and $O_1,O_2,O_3$ are the three nodes outside the maximal lines. Let $\ell$ be an $(n-1)$-node line.
First notice that, according to the hypothesis of Proposition, all the nodes of the line $\ell$ are ``free" nodes. Therefore $\ell$ does not coincide with any $OO$ line, i.e., line passing through two outside nodes.
From Proposition \ref{prp:n-1max} we have that all the ``free" nodes except the three special nodes $A_1^1,A_2^2,A_3^3,$ which we will call here $(s)$ nodes, belong to the three $OO$ lines. We have also, in view of Proposition \ref{prp:n-1max}, (iii), that the nodes $A_1^1,A_2^2,A_3^3$ are not collinear. Therefore there are three possible cases:
\begin{enumerate} \setlength{\itemsep}{0mm} \item $\ell$ does not pass through any $(s)$ node, \item $\ell$ passes through two $(s)$ nodes, \item $\ell$ passes through one $(s)$ node. \end{enumerate} In the first case $\ell$ may pass only through nodes lying in three $OO$ lines. Then it may pass through at most three nodes, i.e., $n\le 4.$ Therefore, in view of the hypothesis $n\ge 4,$ we get $n=4.$ Since $\mu(\mathcal{X})=3$ we get, in view of Proposition \ref{prop:3max}, part ``Furthermore", that $\mathcal{X}_\ell=\emptyset.$
Next, consider the case when $\ell$ passes through two $(s)$ nodes. Then, according to Proposition \ref{prp:n-1max}, (iii), it passes through an outside $O$ node. Recall that this case is excluded since $\ell$ passes through ``free" nodes only.
Finally, consider the third case when $\ell$ passes through exactly one $(s)$ node. Then it may pass through at most three other ``free" nodes lying in $OO$ lines. Therefore $\ell$ may pass through at most four nodes.
First suppose that $\ell$ passes through exactly $3$ nodes. Then again we obtain that $n=4$ and $\mathcal{X}_\ell=\emptyset.$
Next suppose that $\ell$ passes through exactly $4$ nodes. Then we have that $n=5.$ Without loss of generality we may assume that the (s) node $\ell$ passes through is, say, $A_1^1.$
Next let us show first that $|\mathcal{X}_\ell|\le 1.$ Here we have exactly $4=n-1$ maximal lines. Consider the maximal line $\lambda_4$ for which, in view of Proposition \ref{prp:n-1max}, the intersection with each $OO$ line is a node in $\mathcal{X}$ (see Fig. \ref{pic1}). Denote $B:=\ell\cap \lambda_4.$ Assume that the node $B$ belongs to the line $\ell_i^{oo},\ 1\le i\le 3,$ i.e., the line passing through $\{O_1, O_2,O_3\}\setminus \{O_i\}$ ($i=2$ in Fig. \ref{pic1}). According to the condition (ii) of Proposition \ref{prp:n-1max} we have that $\ell_i^{oo}\cap \lambda_i\notin \mathcal{X}$. Denote $C:=\lambda_i\cap \lambda_4.$ \begin{figure}
\caption{The node set $\mathcal{X}$ for $n=5.$}
\label{pic1}
\end{figure}
Now let us prove that $\mathcal{X}_\ell\subset \{C\}$ which implies $|\mathcal{X}_\ell|\le 1.$
Consider the $GC_4$ set $\mathcal{X}_i=\mathcal{X}\setminus \lambda_i.$ Here we have two maximal lines: $\lambda_4$ and $\ell_i^{oo},$ intersecting at the node $B\in \ell.$ Therefore we conclude from Lemma \ref{lem:CG2} that no node from these two maximal lines uses $\ell$ in $\mathcal{X}_i.$ Thus, in view of \eqref{rep}, no node from $\lambda_4,$ except possibly $C,$ uses $\ell$ in $\mathcal{X}.$ Now consider the $GC_4$ set $\mathcal{X}_4=\mathcal{X}\setminus \lambda_4.$ Observe, on the basis of the characterization of Proposition \ref{prp:n-1max}, that $\mathcal{X}_4$ has exactly $3$ maximal lines. On the other hand here the line $\ell$ intersects each maximal line at a node. Therefore, in view of Proposition \ref{prop:3max}, part "Furthermore", we have that ${(\mathcal{X}_4)}_\ell=\emptyset.$ Hence, in view of \eqref{rep}, we conclude that $\mathcal{X}_\ell\subset \{C\}.$
Now, to complete the proof it suffices to show that the node $C$ does not use $\ell.$ Let us determine which lines the node $C$ uses. Since $C=\lambda_i\cap \lambda_4$ first of all it uses the two maximal lines $\{\lambda_1,\lambda_2,\lambda_3\}\setminus \{\lambda_i\}.$ It is easily seen that the next two lines $C$ uses are $OO$ lines: $\{\ell_1^{oo},\ell_2^{oo},\ell_3^{oo}\}\setminus\{\ell_i^{oo}\}.$ Now notice that, the two nodes, except $C$, which do not belong to the four used lines are $B$ and the (s) node $A_i^i.$ Hence the fifth line $C$ uses is the line passing through the latter two nodes. This line coincides with $\ell$ if and only if $i=1.$
In the final and most interesting part of the proof we will show that the case $i=1,$ when the node $C$ uses the line $\ell,$ i.e., the case when the (s) node $\ell$ passes through is $A_1^1$ and $B\in \ell_1^{oo},$ where $B=\ell\cap\lambda_4,$ is impossible. \begin{figure}
\caption{The case $i=1.$}
\label{pic12}
\end{figure} More precisely, we will do the following (see Fig. \ref{pic12}). By assuming that \begin{enumerate} \item the maximal lines $\lambda_i, i=1,\ldots,4,$ are given, the three outside nodes $O_1,O_2,O_3$ are given, \item the two $OO$ lines $\ell_2^{oo},\ell_3^{oo}$ are intersecting the three maximal lines at $6$ distinct points, and \item the three conditions in Proposition \ref{prp:n-1max}, part ``Moreover" are satisfied, i.e., the line through the two special nodes $\{A_1^1,A_2^2,A_3^3\}\setminus \{A_i^i\}$ passes through the outside node $O_i$ for each $i=1,2,3,$ \end{enumerate} we will prove that the third $OO$ line $\ell_1^{oo}$ passes necessarily through the node $D,$ i.e., intersection node of the two maximal lines $\lambda_2$ and $\lambda_3.$ Since this makes the number of all nodes of the set $\mathcal{X}$ only $19$ instead of $21,$ we may conclude that the above-mentioned case is impossible.
For this end, to simplify the Fig. \ref{pic12} let us delete from it the maximal lines $\lambda_1$ and $\lambda_4$ to obtain the following Fig. \ref{pic15}. \begin{figure}
\caption{The $(9_3)$ configuration with $3$ triangles.}
\label{pic15}
\end{figure} Let us now apply the well-known Pappus hexagon theorem for the pair of triple collinear nodes here $$A_1^1,\ \ E,\ \ F;$$ $$O_1,\ \ A_2^2,\ \ A_3^3.$$ Now observe that $$\ell(A_1^1,A_2^2) \cap \ell(E,O_1)=O_3,\ \ell(E,A_3^3) \cap \ell(F,A_2^2)=D,\ \ell(A_1^1,A_3^3) \cap \ell(F,O_1)=O_2,$$ where $\ell(A,B)$ denotes the line passing through the points $A$ and $B.$ Thus, according to the Pappus theorem we get that the triple of nodes $D,O_2,O_3$ is collinear. \end{proof}
Notice that in Fig. \ref{pic15} we have a $(9_3)$ configuration of 9 lines and 9 points (three triangles) that occurs with each line meeting 3 of the points and each point meeting 3 lines (see \cite{HCV}, Chapter 3, Section 17). \begin{figure}
\caption{A non-used $4$-node line $\ell_4^*$ in a $GC_5$ set $\mathcal{X}^*$}
\label{pic11}
\end{figure} \begin{remark} \label{rm} Let us show that the case of a non-used $4$-node line in Figure \ref{pic1} is possible nevertheless. The problem with this is that we have to confirm that the conditions in Proposition \ref{prp:n-1max} are satisfied. More precisely: \begin{enumerate} \item $\ell_i^{oo}\cap \lambda_i\notin\mathcal{X}$ for each $i=1,2,3;$ \item The line through the two special nodes $\{A_1^1,A_2^2,A_3^3\}\setminus \{A_i^i\}$ passes through the outside node $O_i$ for each $i=1,2,3.$ \end{enumerate} Let us outline how one can get a desired figure (see Fig. \ref{pic11}). Let us start the figure with the three maximal lines (non-concurrent) $\lambda_1, \lambda_2,\lambda_3.$ Then we choose two $OO$ lines $\ell_1^{oo}, \ell_3^{oo}$ through the outside node $O_2,$ which intersect the three maximal lines at $6$ distinct points. Next we get the line $\ell_4^*$ which passes through the points $B$ and $C,$ where $B=\ell_1^{oo}\cap \lambda_3$ and $C=\ell_3^{oo}\cap \lambda_2.$ Now we find the point $A_1^1:=\ell_4^*\cap \lambda_1.$ By intersecting the line through $O_2$ and $A_1^1$ with the maximal line $\lambda_3$ we get the node $A_3^3.$ Then we choose a ``free node" $A_2^2$ on the maximal line $\lambda_2.$ This enables us to determine the two remaining outside nodes: $O_1, O_3.$ Namely we have that $O_1$ is the intersection point of the line through $A_2^2$ and $A_3^3$ with the line $\ell_3^{oo}$ and $O_3$ is the intersection point of the line through $A_2^2$ and $A_1^1$ with the line $\ell_1^{oo}.$ Thus we get the line $\ell_2^{oo}$ which passes through the outside nodes $O_1, O_3.$ Next we get the points of intersection of the line $\ell_2^{oo}$ with the three maximal lines as well as the point of intersection with the line $\ell_4^*$ denoted by $D.$ Now, we choose the maximal line $\lambda_4$ passing through $D$ and intersecting the remaining maximal lines and the three $OO$ lines at $6$ new distinct nodes.
Finally, all the specified intersection points in Fig. \ref{pic11} we declare as the nodes of $\mathcal{X}^*.$\end{remark}
\section{Proof of Conjecture \ref{mainc} \label{ss:conj}}
Let us start the proof with a list of the major cases in which Conjecture \ref{mainc} is true.
\noindent \emph{Step 1.} The Conjecture \ref{mainc} is true in the following cases:
\begin{enumerate} \setlength{\itemsep}{0mm} \item The line $\ell$ is a maximal line.
Indeed, as we have mentioned already, in this case we have $\mathcal{X}_\ell=\mathcal{X}\setminus \ell$ and all the conclusions of Conjecture can be readily verified.
\item The line $\ell$ is an $n$-node line, $n\in \mathbb N$.
In this case Conjecture \ref{mainc} is valid by virtue of Theorem \ref{th:corrected} (for $n\in \mathbb N\setminus \{3\}$) and Proposition \ref{prp:n=3} (for $n=3$).
\item The line $\ell$ is a $2$-node line.
In this case Conjecture \ref{mainc} follows from the statement \eqref{2nodel} and the adjoint statement next to it. \end{enumerate}
Now, let us prove Conjecture by complete induction on $n$ - the degree of the node set $\mathcal{X}.$ Obviously Conjecture is true in the cases $n=1,2.$ Note that this follows also from Step 1 (i) and (ii).
Assume that Conjecture is true for any node set of degree not exceeding $n-1.$ Then let us prove that it is true for the node set $\mathcal{X}$ of degree $n.$ Suppose that we have a $k$-node line $\ell.$
\noindent \emph{Step 2:} Suppose additionally that there is an $\ell$-disjoint maximal line $\lambda.$ Then we get from Lemma \ref{lem:CG1} that \begin{equation}\label{abb}\mathcal{X}_{\ell} = {(\mathcal{X} \setminus \lambda)}_{\ell}. \end{equation} Therefore by using the induction hypothesis for the $GC_{n-1}$ set $\mathcal{X}':=\mathcal{X} \setminus \lambda$ we get the relation \eqref{1bbaa}, where $\sigma':=\sigma(\mathcal{X}',\ell)\le s \le k$ and $\sigma'=2k-(n-1)-1=2k-n=\sigma+1.$ Here we use also Proposition \ref{proper} in checking that $\mathcal{X}_\ell$ is an $\ell$-proper subset of $\mathcal{X}.$
Now let us verify the part "Moreover". Suppose that $\sigma=2k-n-1\ge 3,$ i.e. $2k\ge n+4,$ and $\mu(\mathcal{X})>3.$ For the line $\ell$ in the $GC_{n-1}$ set $\mathcal{X}'$ we have $\sigma'=\sigma+1\ge 4.$ Thus if $\mu(\mathcal{X}')>3$ then, by the induction hypothesis, we have that $(\mathcal{X}')_\ell\neq\emptyset.$ Therefore we get, in view of \eqref{abb}, that $\mathcal{X}_\ell\neq\emptyset.$ It remains to consider the case $\mu(\mathcal{X}')=3.$ In this case, in view of Proposition \ref{prp:CG-1}, we have that $\mu(\mathcal{X})=4,$ which, in view of Theorem \ref{th:CGo10}, implies that $4\in \{n-1,n,n+1,n+2\},$ i.e., $2\le n\le 5.$
The case $n=2$ was verified already. Now, since $2k\ge n+4$ we deduce that either $k\ge 4$ if $n=3, 4,$ or $k\ge 5$ if $n=5.$ All these cases follow from Step 1 (i) or (ii).
The part "Furthermore" follows readily from the relation \eqref{abb}.
\noindent \emph{Step 3:} Suppose additionally that there is a pair of $\ell$-adjacent maximal lines $\lambda', \lambda''.$ Then we get from Lemma \ref{lem:CG2} that \begin{equation}\label{bbac} \mathcal{X}_{\ell} = {(\mathcal{X} \setminus (\lambda' \cup \lambda''))}_{\ell}. \end{equation}
Therefore by using the induction hypothesis for the $GC_{n-2}$ set $\mathcal{X}'':=\mathcal{X} \setminus (\lambda' \cup \lambda'')$ we get the relation \eqref{1bbaa}, where $\sigma'':=\sigma(\mathcal{X}'',\ell)\le s \le k-1$ and $\sigma''=2(k-1)-(n-2)-1=\sigma.$ Here we use also Proposition \ref{proper} to check that $\mathcal{X}_\ell$ is an $\ell$-proper subset of $\mathcal{X}.$
Let us verify the part "Moreover". Suppose that $\sigma=2k-n-1\ge 3$ and $\mu(\mathcal{X})>3.$ The line $\ell$ is a $(k-1)$-node line in the $GC_{n-2}$ set $\mathcal{X}''$ and we have that $\sigma''=\sigma\ge 3.$ Thus if $\mu(\mathcal{X}'')>3$ then, by the induction hypothesis, we have that $(\mathcal{X}'')_\ell\neq\emptyset$ and therefore we get, in view of \eqref{bbac}, that $\mathcal{X}_\ell\neq\emptyset.$ It remains to consider the case $\mu(\mathcal{X}'')=3.$ Then, in view of Proposition \ref{prp:CG-1}, we have that $\mu(\mathcal{X})=4$ or $5,$ which, in view of Theorem \ref{th:CGo10}, implies that $4\ \hbox{or}\ 5\in \{n-1,n,n+1,n+2\},$ i.e., $2\le n\le 6.$
The cases $2\le n\le 5$ were considered in the previous step. Thus suppose that $n=6.$ Then, since $2k\ge n+4,$ we deduce that $k\ge 5.$ In view of Step 1, (ii), we may suppose that $k=5.$
Now the set $\mathcal{X}''$ is a $GC_4$ and the line $\ell$ is a $4$-node line there. Thus, in view of Step 1 (ii) we have that $(\mathcal{X}'')_\ell\neq \emptyset.$ Therefore we get, in view of \eqref{bbac}, that
$\mathcal{X}_\ell\neq \emptyset.$
The part "Furthermore" follows readily from the relation \eqref{bbac}.
\noindent \emph{Step 4.} Now consider any $k$-node line $\ell$ in a $GC_n$ set $\mathcal{X}.$ In view of Step 1 (iii) we may assume that $k\ge 3.$ In view of Theorem \ref{th:CGo10} and Proposition \ref{prop:3max} we may assume also that $\mu(\mathcal{X})\ge n-1.$
Next suppose that $k\le n-2.$ Since then $\mu(\mathcal{X})>k$ we necessarily have either the situation of Step 2 or Step 3.
Thus we may assume that $k\ge n-1.$ Then, in view of Step 1 (i) and (ii), it remains to consider the case $k =n-1,$ i.e., $\ell$ is an $(n-1)$-node line. Again if $\mu(\mathcal{X})\ge n$ then we necessarily have either the situation of Step 2 or Step 3. Therefore we may assume also that $\mu(\mathcal{X})=n-1.$ By the same argument we may assume that each of the $n-1$ nodes of the line $\ell$ is an intersection node with one of the $n-1$ maximal lines.
Therefore the conditions of Proposition \ref{prp:n-1} are satisfied and we arrive to the two cases: $n=4, k=3, \sigma=1$ or $n=5, k=4, \sigma=2.$ In both cases we have that $\mathcal{X}_\ell=\emptyset.$ Thus in this case Conjecture is true.
$\square$
\subsection{The \label{ss:count} characterization of the case $\sigma=2,\ \mu(\mathcal{X}) >3$}
Here we bring, for each $n$ and $k$ with $\sigma=2k-n-1 =2,$ two constructions of a $GC_n$ set and a non-used $k$-node line there. At the end (see forthcoming Proposition \ref{charact}) we prove that these are the only constructions with the mentioned property.
Let us start with a counterexample in the case $n=k=3$ (see \cite{HV}, Section 3.1), i.e., with a $GC_3$ set $\mathcal{Y}^*$ and a $3$-node line $\ell_3^*$ there which is not used.
\begin{figure}
\caption{A non-used $3$-node line $\ell_3^*$ in a $GC_3$ set $\mathcal{Y}^*$}
\label{pic2}
\end{figure}
Consider a $GC_3$ set $\mathcal{Y}^*$ of $10$ nodes with exactly three maximal lines: $\lambda_1,\lambda_2,\lambda_3$ (see Fig. \ref{pic2}). This set has construction \eqref{01O} and satisfies the conditions listed in Proposition \ref{prp:nmax}. Now observe that the $3$-node line $\ell_3^*$ here intersects all the three maximal lines at nodes. Therefore, in view of Proposition \ref{prp:n=3}, (iii), the line $\ell_3^*$ cannot be used by any node in $\mathcal{Y}^*,$ i.e., $$(\mathcal{Y}^*)_{\ell_3^*}=\emptyset.$$
Let us outline how one can get Fig. \ref{pic2}. We start the mentioned figure with the three lines $\ell_1^o, \ell_2^o,\ell_3^o$ through $O,$ i.e., the outside node. Then we choose the maximal lines $\lambda_1,\lambda_2,$ intersecting $\ell_1^o, \ell_2^o$ at $4$ distinct points. Let $A_i$ be the intersection point $\lambda_i\cap\ell_i^o,\ i=1,2.$ We chose the points $A_1$ and $A_2$ such that the line through them: $\ell_3^*$ intersects the line $\ell_3^o$ at a point $A_3.$ Next we choose a third maximal line $\lambda_3$ passing through $A_3.$ Let us mention that we chose the maximal lines such that they are not concurrent and intersect the three lines through $O$ at nine distinct points. Finally, all the specified intersection points in Fig. \ref{pic2} we declare as the nodes of $\mathcal{Y}^*.$
In the general case of $\sigma=2k-n-1=2$ we set $k=m+3$ and obtain $n=2m+3,$ where $m=0,1,2,\ldots$. Let us describe how the previous $GC_3$ node set $\mathcal{Y}^*$ together with the $3$-node line $\ell_3^*$ can be modified to a $GC_n$ node set $\bar{\mathcal{Y}}^*$ with a $k$-node line $\bar{\ell}_3^*$ in a way to fit the general case, that is, to have that $(\bar{\mathcal{Y}}^*)_{\bar{\ell}_3^*}=\emptyset.$ \begin{figure}\label{pic3}
\end{figure}
To this end we just leave the line $\ell_3^*$ unchanged, i.e., $\bar{\ell}_3^*\equiv\ell_3^*$ and extend the set $\mathcal{Y}^*$ to a $GC_n$ set $\bar{\mathcal{Y}}^*$ in the following way. We fix $m$ points: $B_i, i=1,\ldots,m,$ in $\ell_3^*$ different from $A_1,A_2,A_3$ (see Fig. \ref{pic3}). Then we add $m$ pairs of (maximal) lines $\lambda_i',\lambda_i'', i=1,\ldots,m,$ intersecting at these $m$ points respectively: $$\lambda_i'\cap \lambda_i'' = B_i,\ i=1,\ldots,m.$$
We assume that the following conditions are satisfied: \begin{enumerate} \item The $2m$ lines $\lambda_i',\lambda_i'', i=1,\ldots,m,$ together with $\lambda_1,\lambda_2,\lambda_3$ are in general position, i.e., no two lines are parallel and no three lines are concurrent; \item The mentioned $2m+3$ lines intersect the lines $\ell_1^o, \ell_2^o,\ell_3^o$ at distinct $3(2m+3)$ points. \end{enumerate} Now all the points of intersection of the $2m+3$ lines $\lambda_i',\lambda_i'', i=1,\ldots,m,$ together with $\lambda_1,\lambda_2,\lambda_3$ are declared as the nodes of the set $\bar{\mathcal{Y}}^*.$ Next, for each of the lines $\lambda_i',\lambda_i'', i=1,\ldots,m,$ two of the three intersection points with the lines $\ell_1^o,\ell_2^o,\ell_3^o,$ are also declared as (``free'') nodes. After this the lines $\lambda_1,\lambda_2,\lambda_3$ and the lines $\lambda_i',\lambda_i'', i=1,\ldots,m,$ become $(2m+4)$-node lines, i.e., maximal lines.
Now one can verify readily that the set $\bar{\mathcal{Y}}^*$ is a $GC_n$ set since it satisfies the construction \eqref{01O} with $n=2m+3$ maximal lines: $\lambda_i',\lambda_i'', i=1,\ldots,m,$ together with $\lambda_1,\lambda_2,\lambda_3,$ and satisfies the conditions listed in Proposition \ref{prp:nmax}.
Finally, in view of Lemma \ref{lem:CG2}, the relation \eqref{bbb}, applied $m$ times with respect to the pairs $\lambda_i',\lambda_i'', i=1,\ldots,m,$ gives: ${(\bar{\mathcal{Y}}^*)}_{\bar{\ell}_3^*}=(\mathcal{Y}^*)_{\ell_3^*}=\emptyset.$
Let us call the set $\bar{\mathcal{Y}}^*$ an $m$-modification of the set $\mathcal{Y}^*.$ In the same way we could define $\bar{\mathcal{X}}^*$ as an $m$-modification of the set $\mathcal{X}^*$ from Fig. \ref{pic1}, with the $4$-node non-used line $\ell_4^*$ (see Remark \ref{rm}). The only differences from the previous case here are: \begin{enumerate} \item
Now $k=m+4,\ n=2m+5, m=0,1,2,\ldots$ (again $\sigma=2$); \item We have $2m+4$ maximal lines: $\lambda_i',\lambda_i'', i=1,\ldots,m,$ and the lines $\lambda_1,\lambda_2,\lambda_3,\lambda_4;$ \item Instead of the lines $\ell_1^o, \ell_2^o,\ell_3^o$ we have the lines $\ell_{1}^{oo}, \ell_{2}^{oo}, \ell_{3}^{oo};$ \item For each of the lines $\lambda_i',\lambda_i'', i=1,\ldots,m,$ all three intersection points with the lines $\ell_1^{oo},\ell_2^{oo},\ell_3^{oo},$ are declared as (``free'') nodes. \end{enumerate} Now one can verify readily that the set $\bar{\mathcal{X}}^*$ is a $GC_n$ set since it satisfies the construction \eqref{01OOO} with $n-1=2m+4$ maximal lines,
and satisfies the conditions listed in Proposition \ref{prp:n-1max}.
Thus we obtain another construction of non-used $k$-node lines in $GC_n$ sets, with $\sigma=2,$ where $k=m+4, n=2m+5.$
At the end let us prove the following \begin{proposition}\label{prp:sigma2}\label{charact} Let $\mathcal{X}$ be a $GC_n$ set and $\ell$ be a $k$-node line with $\sigma:=2k-n-1=2$ and $\mu(\mathcal{X})>3.$ Suppose that the line $\ell$ is a non-used line. Then we have that either $\mathcal{X}=\bar{\mathcal{X}}^*, \ \ell={\bar{\ell}_4^*},$ or $\mathcal{X}=\bar{\mathcal{Y}}^*, \ \ell={\bar{\ell}_3^*}.$ \end{proposition} \begin{proof} Notice that $n$ is an odd number and $n\ge 3.$ In the case $n=3$ Proposition \ref{prp:sigma2} follows from Proposition \ref{prp:n=3}.
Thus suppose that $n\ge 5.$ Since $\mu(\mathcal{X})>3,$ we get, in view of Theorem \ref{th:CGo10}, that \begin{equation}\label{n-1}\mu(\mathcal{X})\ge n-1. \end{equation}
Next, let us prove that there is no $\ell$-disjoint maximal line $\lambda$ in $\mathcal{X}.$
Suppose conversely that $\lambda$ is a maximal line with $\lambda\cap \ell\notin\mathcal{X}.$ Denote by $\mathcal{X}':=\mathcal{X}\setminus \lambda.$ Since $\mathcal{X}_\ell=\emptyset,$ by virtue of the relation \eqref{rep}, we obtain that $(\mathcal{X}')_\ell=\emptyset.$ Then we have that $\sigma':=\sigma(\mathcal{X}',\ell)=2k-(n-1)-1=3.$ By taking into account the latter two facts, i.e., $(\mathcal{X}')_\ell=\emptyset$ and $\sigma'=3,$ we conclude from Conjecture \ref{mainc}, part ``Moreover'', that $\mu(\mathcal{X}')=3.$ Next, by using \eqref{n-1} and Proposition \ref{prp:CG-1}, we obtain that $\mu(\mathcal{X})=4.$ By applying again \eqref{n-1} we get that $4\ge n-1,$ i.e., $n\le 5.$ Therefore we arrive at the case: $n=5.$ Since $\sigma=2$ we conclude that $k=4.$ Then observe that the line $\ell$ is a $4$-node line in the $GC_4$ set $\mathcal{X}'$. By using Theorem \ref{th:corrected} we get that $(\mathcal{X}')_\ell\ne\emptyset,$ which is a contradiction.
Now let us prove the Proposition in the case $n=5.$ As we mentioned above, in this case $k=4.$ We have that there is no $\ell$-disjoint maximal line. Suppose also that there is no pair of $\ell$-adjacent maximal lines. Then, in view of \eqref{n-1}, we readily get that $\mu(\mathcal{X})= 4$ and through each of the four nodes of the line $\ell$ there passes a maximal line. Therefore, Proposition \ref{prp:n-1max} yields that $\mathcal{X}$ coincides with $\mathcal{X}^*$ (or, in other words, $\mathcal{X}$ is a $0$-modification of $\mathcal{X}^*$) and $\ell$ coincides with $\ell_4^*.$
Next suppose that there is a pair of $\ell$-adjacent maximal lines: $\lambda',\lambda''.$ Denote by $\mathcal{X}'':=\mathcal{X}\setminus (\lambda'\cup\lambda'').$ Then we have that $\ell$ is a $3$-node non-used line in $\mathcal{X}''.$ Thus we conclude readily that $\mathcal{X}$ coincides with $\bar{\mathcal{Y}}^*$ and $\ell$ coincides with ${\bar\ell}_3^*$ (with $m=1$).
Now let us continue by using induction on $n.$ Assume that the Proposition is valid for all degrees up to $n-1.$ Let us prove it in the case of the degree $n.$ We may suppose that $n\ge 7.$ It suffices to prove that there is a pair of $\ell$-adjacent maximal lines: $\lambda',\lambda''.$ Indeed, in this case we can complete the proof just as in the case $n=5.$
Suppose by way of contradiction that there is no pair of $\ell$-adjacent maximal lines. Also we have that there is no $\ell$-disjoint maximal line. Therefore we have that $\mu(\mathcal{X}) \le k.$ Now, by using \eqref{n-1}, we get that $k\ge n-1.$ Therefore $2=\sigma=2k-n-1\ge 2n-2-n-1=n-3.$ This implies that $n-3\le 2,$ i.e., $n\le 5,$ which is a contradiction. \end{proof}
\noindent \emph{Hakop Hakopian}
\noindent{Department of Informatics and Applied Mathematics\\ Yerevan State University\\ A. Manukyan St. 1\\ 0025 Yerevan, Armenia}
\noindent E-mail: $<[email protected]$>$
\noindent \emph{Vahagn Vardanyan}
\noindent{Institute of Mathematics\\ National Academy of Sciences of Armenia\\ 24/5 Marshal Baghramian Ave. \\ 0019 Yerevan, Armenia}
\noindent E-mail:$<[email protected]$>$
\end{document} |
\begin{document}
\title{Topological Protection of Coherence in Noisy Open Quantum Systems} \author{Yu Yao$^{*}$} \affiliation{Department of Physics and Astronomy, University of Southern California, Los Angeles, CA 90089-0484} \author{Henning Schlömer$^{*}$} \affiliation{Institute for Theoretical Solid State Physics, RWTH Aachen University, Otto-Blumenthal-Str. 26, D-52056 Aachen, Germany} \author{Zhengzhi Ma} \affiliation{Department of Physics and Astronomy, University of Southern California, Los Angeles, CA 90089-0484} \author{Lorenzo Campos Venuti} \affiliation{Department of Physics and Astronomy, University of Southern California, Los Angeles, CA 90089-0484} \author{Stephan Haas} \affiliation{Department of Physics and Astronomy, University of Southern California, Los Angeles, CA 90089-0484} \date{\today} \begin{abstract} We consider topological protection mechanisms in dissipative quantum systems in the presence of quenched disorder, with the intent to prolong coherence times of qubits. The physical setting is a network of qubits and dissipative cavities whose coupling parameters are tunable, such that topological edge states can be stabilized. The evolution of a fiducial qubit is entirely determined by a non-Hermitian Hamiltonian which thus emerges from a bona-fide physical process. It is shown how even in the presence of disorder winding numbers can be defined and evaluated in real space, as long as certain symmetries are preserved. Hence we can construct the topological phase diagrams of noisy open quantum models, such as the non-Hermitian disordered Su-Schrieffer-Heeger dimer model and a trimer model that includes longer-range couplings. In the presence of competing disorder parameters, interesting re-entrance phenomena of topologically non-trivial sectors are observed. This means that in certain parameter regions, increasing disorder drastically increases the coherence time of the fiducial qubit.
\end{abstract} \maketitle \global\long\def*{*} \footnotetext{These authors contributed equally to this work}
\section{Introduction} Due to their characteristic protection against environmental noise, topological phases of matter \citep{Fu2007,Fu2007_2, Kobayashi2013} are considered to be promising candidates for the realization of noise-resilient quantum computers \citep{Sankar2008, Sau2010, Sarma2015, Stern2013, Alicea2011, Freedman2006, Freedman2003, Akhmerov2010}. Furthermore, it was shown in \cite{Venuti2017} that the presence of topological edge states can preserve quantum mechanical features, e.g. coherence of a fiducial qubit, in the presence of dissipation (see also \citep{man_cavity-based_2015, campos_venuti_long-distance_2006, campos_venuti_qubit_2007} for other works in a similar spirit). In that work, dissipative one-dimensional (1D) quantum optical qubit-cavity architectures were analyzed, where effective non-Hermitian Hamiltonians of the form of a tight-binding chain with diagonal complex entries were derived. The time evolution of the boundary-qubit coherence, driven by such non-Hermitian Hamiltonians, was extensively studied for choices of hopping parameters that admit symmetry-protected topological states localized at the edges of the system. It was found that (quasi-)dark modes, i.e., boundary states with exponentially small (in system size) imaginary parts, protect the edge qubits from decoherence effects via photon leakage through cavities.
Moreover, disordered as well as non-Hermitian generalizations of 1D topological insulators, such as the Su-Schrieffer-Heeger (SSH) model \citep{Su1980, Heeger1988}, have been studied theoretically \citep{Mondragon2014,Luo2019}, where the real space winding number was analyzed for different disorder strengths on the hopping parameters.
Here, we build on the work presented in \cite{Venuti2017}, addressing the role of quenched disorder in the qubit-cavity arrays. We fully characterize the disordered, non-Hermitian system's topology by computing the winding number in real space in the parameter space spanned by the coupling amplitude and the disorder strength. From this characterization, accurate predictions for the behavior of a fiducial qubit's coherence can be made for long times, therefore expanding the discussion of the quantum optical systems to a broader physical context, considering both quenched disorder and dissipation.
The remainder of this paper is organized as follows. In Sec.~\ref{sec:setup}, we derive the effective non-Hermitian Hamiltonian describing qubit-cavity arrays using the Lindblad formalism. In Sec.~\ref{sec:real_space_method}, the topological characterization of dissipative, disordered systems is illustrated, which is then applied to non-Hermitian dimer and trimer models in Sec.~\ref{sec:main}. Special focus is put on the coherence of the qubit located at the boundary, whose fate can be accurately predicted from the phase diagrams. We then briefly discuss possible applications in quantum computation via dark-state braiding in Sec.~\ref{sec:quantum_computation} and conclude in Sec.~\ref{sec:conclusion}.
\section{The Setup}
\label{sec:setup} We consider a network consisting of qubits coupled to dissipative cavities in a Jaynes-Cumming fashion. Specifically, we study networks of $M$ qubits and $K$ dissipative cavities, as illustrated in Fig. \ref{fig:network} for $M=4$ and $K=5$. The Hamiltonian of the system has the following form \begin{eqnarray} H & = & \sum_{l,m=1}^{K}J_{l,m}(a_{l}^{\dagger}a_{m}+\text{h.c.})\nonumber \\
& + & \sum_{i=1}^{M}\sum_{l=1}^{K}\tilde{J}_{l,i}(a_{l}^{\dagger}\sigma_{i}^{-}+\text{h.c.}),\label{hamiltonian} \end{eqnarray} where $a_{l}^{\dagger}$ and $a_{l}$ are the bosonic creation and annihilation operators for cavity mode $l$, and $\sigma_{i}^{\pm}$ are the ladder operators for qubit $i$. We consider a Lindblad master equation $\dot{\rho}=\mathcal{L}[\rho]$, where $\mathcal{L}=\mathcal{K}+\mathcal{D}$, the coherent part is $\mathcal{K}=-i\left[H,\bullet\right]$, whereas the dissipative part is \begin{equation} \mathcal{D}[\rho]=\sum_{l=1}^{K}\Gamma_{l}\Big[2a_{l}\rho a_{l}^{\dagger}-\{a_{l}^{\dagger}a_{l},\rho\}\Big].\label{lindbladian} \end{equation} Such a Lindbladian description is accurate at sufficiently low temperatures, in particular in circuit QED experiments.
\begin{figure}
\caption{Example of a network of qubits (filled circles) interacting with lossy cavities (hollow circles). Wavy lines indicate coherent hopping matrix elements $\tilde{J}_{i,j}$, connecting cavity $i$ with qubit $j$, and $J_{i,j}$, connecting cavity $i$ with cavity $j$. Arrows indicate incoherent decay $\Gamma_{i}$ in cavity $i$. }
\label{fig:network}
\end{figure}
The Lindbladian can also be written as $\mathcal{L}=\mathcal{K}'+\mathcal{D}'$, where $\mathcal{K}'(\rho)=-i(H'\rho-\rho{H'}^{\dagger})$ defines the non-Hermitian Hamiltonian $H'$: \begin{equation} H'=H-i\sum_{l=1}^{K}\Gamma_{l}a_{l}^{\dagger}a_{l}, \end{equation} and \begin{equation} \mathcal{D}'(\rho)=\sum_{l=1}^{K}2\Gamma_{l}a_{l}\rho a_{l}^{\dagger}. \end{equation}
Consider the Fock space of the system $\mathcal{F}=\oplus_{n=0}^{\infty}\mathcal{H}_{n}$
where $\mathcal{H}_{n}$ is the Hilbert space of $n$ particles (at this level the distinction between spins and bosons is unimportant). In the space of operators on $\mathcal{F}$ we define the Hilbert-Schmidt scalar product $\langle\!\langle x|y\rangle\!\rangle=\operatorname{Tr}(x^{\dagger}y)$. Using the isomorphism $\mathcal{B}_{HS}(\mathcal{F})\simeq\mathcal{F}\otimes\mathcal{F}^{*}$ ($\mathcal{B}_{HS}(\mathcal{F})$ is the space of bounded Hilbert-Schmidt operators on $\mathcal{F}$), the space of operators $\mathcal{B}_{HS}(\mathcal{F})$ can be identified with \begin{equation} \mathcal{B}_{HS}(\mathcal{F})\simeq\bigoplus_{i,j=0}^{\infty}\mathcal{H}_{i}\otimes\mathcal{H}_{j}^{*}. \end{equation} In simpler terms, $\mathcal{B}_{HS}(\mathcal{F})$ has a block structure with two labels $(i,j)$, each label being a particle number. The non-Hermitian Hamiltonian $H'$ preserves the number of particles and correspondingly $\mathcal{K}'$ is block-diagonal in $(i,j)$. Instead, $\mathcal{D}'$ connects the sector $(i,j)$ with the sector $(i-1,j-1)$, i.e.~it decreases the number of particles by one.
In this paper we will be mostly interested in the coherence of a fiducial qubit, that, without loss of generality we place at site 1. In the standard basis, the coherence of a qubit in state $\rho$, can be defined as $\mathcal{C}=2\vert\rho_{\downarrow,\uparrow}\vert$ \citep{Baumgratz2014}. We initialize the system such that all cavities are empty and qubits are in the lowest state ($|\downarrow\rangle$), while on the fiducial qubit the state is $|\psi\rangle=\alpha|\downarrow\rangle+\beta|\uparrow\rangle$. We further fix $|\alpha\beta|=1/2$ which means that at the beginning the coherence assumes its maximal value one. We are interested in the evolution of the coherence as a function of time.
Let $|0\rangle$ be the vacuum state with no excitation on any qubit or cavity, while $|j\rangle$ denotes a single excitation on the $j$th site, describing either an excited qubit or a cavity hosting a photon. We use the notation $|j\rangle\!\rangle\leftrightarrow|0\rangle\langle j|$. It can be shown \cite{Venuti2017} that the evolution of the coherence at later time is given by \begin{eqnarray}
\mathcal{C}(t)=2|\rho_{\downarrow,\uparrow}(t)| & = & 2|\langle0|\rho(t)|1\rangle|=2|\operatorname{Tr}|1\rangle\langle0|\rho(t)|\nonumber \\
& = & 2|\langle\!\langle1|\rho(t)\rangle\!\rangle|=2|\langle\!\langle1|e^{t\mathcal{L}}|\rho(0)\rangle\!\rangle|.\label{coh} \end{eqnarray}
Because of the block structure of the Lindbladian, $\langle\!\langle1|e^{t\mathcal{L}}|\rho(0)\rangle\!\rangle=\langle\!\langle1|e^{t\tilde{\mathcal{L}}}|\tilde{\rho}(0)\rangle\!\rangle$, where $\tilde{X}$ is the operator $X$ restricted to the linear space
$\mathcal{V}_{0,1}=\mathrm{Span}(|0\rangle\langle j|),j=1,2,\ldots,N$. In particular, $\tilde{\rho}(0)=\overline{\alpha}\beta|0\rangle\langle1|=(1/2)|0\rangle\langle1|$. As regards the restriction of the Lindbladian, we have $\tilde{\mathcal{L}}=\tilde{\mathcal{K}'}$. Moreover, in $\mathcal{V}_{0,1}$, \begin{align}
{\mathcal{K}'}_{l,m} & =\operatorname{Tr}\left(|l\rangle\langle0|{\mathcal{K}'}(|0\rangle\langle m|)\right)\\
& =i\langle m|{H'}^{\dagger}|l\rangle\\
& =i\overline{\langle l|H'|m\rangle}. \end{align}
In other words, the evolution of the coherence of the fiducial qubit is entirely determined by the non-Hermitian Hamiltonian $-\overline{H'}$ in the one-particle sector. Calling $\mathsf{H}:=-\left.\overline{H'}\right\vert _{\mathrm{one\ particle}}$ we have finally
\begin{equation}
\mathcal{C}(t)=\left\vert \langle\!\langle1|e^{-it\mathsf{H}}|1\rangle\!\rangle\right\vert .\label{eq:coherence_final} \end{equation}
We would like to stress that, in this setting, a non-Hermitian Hamiltonian emerges from a genuine \emph{bona-fide} quantum evolution, whereas in most current proposals non-Hermitian Hamiltonians are simulated in classical dissipative wave-guides via the analogy between the Helmholtz and Schrödinger equations (see e.g.~\citep{rudner_topological_2009}).
In order to prolong the coherence in Eq.~\eqref{eq:coherence_final}, one seeks a (non-Hermitian) Hamiltonian $\mathsf{H}$ that admits i) long-lived states, i.e.~eigenstates of $\mathsf{H}$ with small (negative) imaginary part; and ii) states that also have a large amplitude on the site
$|1\rangle\!\rangle$ (conventionally placed at the beginning of the chain).
Interestingly, both of these requirements are satisfied to a large degree in one-dimensional topological systems, which admit edge states with the required properties in the non-trivial phase. The classification of such dissipative, non-Hermitian, topological chains has been done in \citep{Levitov2016} and utilized to prolong quantum coherence for the first time in \citep{Venuti2017}. Here we extend the analysis of \citep{Venuti2017} to disordered systems where translational invariance is broken. The phase diagrams of topological dissipative chains will tell us which parameter regions and models can be used to prolong the quantum coherence of the fiducial qubit.
\begin{comment}
Let us focus on the single excitation sector of this model. By initializing the first qubit in a state $\alpha_{0}|\uparrow\rangle+\beta_{0}|\downarrow\rangle$
and restricting all other qubits and cavities to be in the $|\downarrow\rangle$ and empty state, respectively, there exists at most one excitation in the entire system during the Lindbladian evolution. Within this super-one-particle sector, the corresponding Hilbert space $\mathcal{H}$
is spanned by $\{|0\rangle$,$|j\rangle$; $j=1,2,\cdots,N=M+K\}$. Here, the $|0\rangle$ state represents the vacuum with no excitation on any qubit or cavity. The remaining states $|j\rangle$ correspond to a single excitation on the $j$th site, i.e., they describe either an excited qubit or a cavity hosting a photon. As a result, the system's density matrix is of the form \begin{equation}
\rho=\rho_{0,0}|0\rangle\langle0|+\sum_{j=1}^{N}(\rho_{0,j}|0\rangle\langle j|+\text{h.c.})+\sum_{i,j=1}^{N}\rho_{i,j}|i\rangle\langle j|,\label{rho_restricted} \end{equation}
with the matrix basis $\{|i\rangle\langle j|;i,j=0,1,2,\cdots,N\}$ spanning the superoperator Hilbert space $\mathcal{H}^{\otimes2}$. Intending to exploit topological edge state protection, we are interested in the coherence of the qubit located at one of the edges of the system, for which we can find the reduced density matrix by tracing out all other degrees of freedom, i.e., \begin{eqnarray}
\operatorname{Tr}_{\text{rest}}\rho & = & \Big(\rho_{0,0}+\sum_{i=2}^{N}\rho_{i,i}\Big)|\downarrow\rangle\langle\downarrow|+\rho_{1,1}|\uparrow\rangle\langle\uparrow|\nonumber \\
& + & (\rho_{\downarrow,\uparrow}|\downarrow\rangle\langle\uparrow|+\text{h.c.}). \end{eqnarray} A coherence measure can now be defined by the sum over all off-diagonal elements of the qubit density matrix \citep{Baumgratz2014}, such that \begin{eqnarray}
\mathcal{C}(t)=2|\rho_{\downarrow,\uparrow}(t)| & = & |\langle0|\rho(t)|1\rangle|=|\operatorname{Tr}|1\rangle\langle0|\rho(t)|\nonumber \\
& = & |\langle\!\langle1|\rho(t)\rangle\!\rangle|=|\langle\!\langle1|e^{t\mathcal{L}}|\rho(0)\rangle\!\rangle|,\label{coh} \end{eqnarray}
where $|1\rangle\!\rangle\leftrightarrow|0\rangle\langle1|$ and $\langle\!\langle x|y\rangle\!\rangle=\operatorname{Tr}(x^{\dagger}y)$. It is easy to show that the Lindblad operator $\mathcal{L}$ only mixes $|1\rangle\!\rangle\leftrightarrow|0\rangle\langle1|$ with
$|j\rangle\!\rangle\leftrightarrow|0\rangle\langle j|$ in the superoperator Hilbert space $\mathcal{H}^{\otimes2}$, i.e. it has a block-diagonal structure, such that we can restrict our considerations to the representation of $\mathcal{L}$ within the subspace $\mathcal{V}_{0,1}$ spanned by the basis $\{|j\rangle\!\rangle;j=1,2,\cdots,N\}$. Therefore, by restricting $\mathcal{L}$ to $\tilde{\mathcal{L}}=\mathcal{L}|_{\mathcal{V}_{0,1}}$
and projecting the initial density matrix to $\tilde{\rho}(0)=\rho(0)|_{\mathcal{V}_{0,1}}$, the fiducial edge qubit's coherence can be evaluated via \begin{equation}
C(t)=2\big|\langle\!\langle1|e^{t\tilde{\mathcal{L}}}|\tilde{\rho}(0)\rangle\!\rangle\big|. \end{equation}
The problem therefore boils down to studying the spectrum of the restricted Lindblad operator $\tilde{\mathcal{L}}=-i\mathsf{H}$. While the coherent part of $\mathsf{H}$ inherits the real and hermitian tight binding character of the original Hamiltonian \eqref{hamiltonian}, the dissipative part contributes complex diagonal elements, given by $i\mathcal{D}(|0\rangle\langle j|)=-i\frac{\Gamma_{j}}{2}|0\rangle\langle j|$, where $j$ labels a cavity. As the imaginary diagonal elements make $\mathsf{H}$ a non-Hermitian operator, the defined $\ell^{2}$ scalar products do not represent physical probabilities anymore. The coherence measure in Eq.~\eqref{coh}, however, is exactly given by such a non-Hermitian time evolution. \end{comment}
\section{Topological invariant of dissipative systems in real space}
\label{sec:real_space_method}
We now briefly recall the topological classification of non-Hermitian quantum systems provided by Rudner \textit{et al.} in Ref.~\citep{Levitov2016}. For non-Hermitian quantum systems hosting dissipative sites, the topological invariant can be defined as the winding number around the dark-state manifold in the Hamiltonian parameter space. A non-trivial phase in dissipative systems corresponds to long-lived edge modes with infinite or exponentially large lifetimes.
In previous work \citep{Venuti2017}, the topological classification of non-Hermitian models was formulated within the framework of Bloch theory, which we briefly outline here for comparison with the real-space approach to be introduced. Consider a one-dimensional periodic non-Hermitian chain with $n$ sites per unit cell. In the thermodynamic limit, the Hamiltonian is given by $\mathsf{H}=\oint dk/(2\pi)\sum_{\alpha,\beta=1}^{n}H_{\alpha,\beta}(k)|k,\alpha\rangle\langle k,\beta|$. We shall only focus on the cases with one leaky site per unit cell, as the topological characterization is trivial in all other cases if no additional constraints are imposed \citep{Levitov2016}. The Bloch Hamiltonian of any such system is an $n\times n$ matrix, which can be written as \begin{equation} H(k)=\left(\begin{array}{cc} h(k) & v_{k}\\ v_{k}^{\dagger} & \Delta(k)-i\Gamma \end{array}\right), \end{equation} where $h(k)$ is an $(n-1)\times(n-1)$ Hermitian matrix, $v_{k}$ is a $(n-1)$-dimensional vector and $\Delta(k)-i\Gamma$ is a complex number. The Hamiltonian can be further decomposed in the following manner \begin{equation} H(k)=\left(\begin{array}{cc} U(k) & 0\\ 0 & 1 \end{array}\right)\left(\begin{array}{cc} \tilde{h}(k) & \tilde{v}_{k}\\ \tilde{v}_{k}^{\dagger} & \Delta(k)-i\Gamma \end{array}\right)\left(\begin{array}{cc} U(k)^{\dagger} & 0\\ 0 & 1 \end{array}\right),\label{eqn:Hk} \end{equation} where $U(k)$ is a $(n-1)\times(n-1)$ unitary matrix whose columns are the eigenvectors of $h(k)$, and $\tilde{h}(k)$ is the $(n-1)\times(n-1)$ diagonal matrix of the corresponding eigenvalues. The phases of the eigenvectors are fixed by making all entries of the $(n-1)$-dimensional vector $\tilde{v}_{k}$ real and positive. Any $U(k)$ satisfying the above criteria can be chosen without affecting the final result. 
Since $U(k)$ is the only component parametrizing the Hamiltonian that can lead to non-trivial topology \citep{Levitov2016}, the winding number of $H(k)$ reduces to the one of $U(k)$, which is given by \begin{equation} W=\oint\frac{dk}{2\pi i}\partial_{k}\ln\operatorname{det}\{U(k)\}.\label{eqn:Wk} \end{equation} We now construct a real-space representation of the winding number that remains well defined when translation invariance is destroyed by e.g.~the presence of disorder. Consider a chain with $n$ sites in each cell and $M$ number of unit cells. For what we said previously, we consider only one leaky site per unit cell, which, without loss of generality, we place at the final site of the cell.
The one-particle (non-Hermitian) Hamiltonian can be written as
\begin{equation}
\mathsf{H}=\sum_{i,j=1}^{M}\sum_{\alpha,\beta=1}^{n}H_{\alpha,\beta}^{i,j}|i,\alpha\rangle\langle j,\beta|\label{eq:H_one} \end{equation}
Generally one thinks of the chain as being made of $M$ cells with $n$ sites each, but one may as well think of $n$ sections with $M$ sites each. In other words, we rearrange Eq.~\eqref{eq:H_one} according to the following block structure \begin{eqnarray} \mathsf{H}=\left(\begin{array}{ccccc} H_{1,1} & H_{1,2} & H_{1,3} & \dots & H_{1,n}\\ H_{2,1} & H_{2,2} & H_{2,3} & \dots & H_{2,n}\\ \vdots & \vdots & \vdots & & \vdots\\ H_{n,1} & H_{n,2} & H_{n,3} & \dots & H_{n,n} \end{array}\right), \end{eqnarray} where each $H_{\alpha,\beta}$ is a $M\times M$ matrix. The matrices $H_{\alpha,\alpha}$ $\alpha=1,\ldots,(n-1)$ are diagonal with chemical potentials on the diagonal. Since we put the leaky site at position $\alpha=n$, the matrix $H_{n,n}=\epsilon_{n}-i\Gamma\leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I}$, where $\epsilon_{n}$ is a diagonal matrix of chemical potentials and for simplicity we set the leakage to have value $\Gamma$ on each site.
Recalling the approach used in $k$-space, we first write the real-space Hamiltonian as \begin{align} H & =\left(\begin{array}{cc} \Lambda & V\\ V^{\dagger} & \epsilon_{n}-i\Gamma\leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I} \end{array}\right)\nonumber \\
& =\left(\begin{array}{cc} U & 0\\ 0 & \leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I} \end{array}\right)\left(\begin{array}{cc} \tilde{\Lambda} & \tilde{V}\\ \tilde{V}^{\dagger} & \epsilon_{n}-i\Gamma\leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I} \end{array}\right)\left(\begin{array}{cc} U^{\dagger} & 0\\ 0 & \leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I} \end{array}\right).\label{eqn:Hr} \end{align}
$\Lambda$ is a $(n-1)M\times(n-1)M$ Hermitian matrix, while $V$ is a $(n-1)M\times M$ real matrix describing the hopping between decaying and non-decaying sites. $\tilde{\Lambda}$ is a $(n-1)M\times(n-1)M$ diagonal matrix with real eigenvalues of $\Lambda$, and $U$ is a $(n-1)M\times(n-1)M$ unitary matrix that diagonalizes $\Lambda$. The degrees of freedom for the choice of $U$ are fixed by making each $M\times M$ submatrix in $\tilde{V}$ positive-definite, analogous to the procedure in reciprocal space.
With these preparations, the winding number of the unitary matrix $U$ in real space can be evaluated with the prescription of \citep{Kitaev2006} and further elaborations of Refs.~\citep{Mondragon2014,Wang2019,Luo2019}. In particular, $\int_{0}^{2\pi}(dk/2\pi)\times\operatorname{tr}\{\}$ and $\partial_{k}$ become trace per volume and the commutator $-i[X,]$ ($X$ being the position operator), respectively. Note that $X$ is the $M$-sized cell position operator, i.e.~$X=\operatorname{diag}(1,2,\ldots,M,1,2,\dots,M,\ldots,M-1,M)$.
Thus, Eq.~(\ref{eqn:Wk}) in real space can be written as \begin{equation} W=\frac{1}{M^{\prime}}\operatorname{tr}^{\prime}(U^{\dagger}[X,U]).\label{eqn:Wr} \end{equation} Here, $\operatorname{tr}^{\prime}$ stands for trace with truncation. Specifically, we take the trace over the middle interval of length $M^{\prime}$ and leave out $\ell$ sites on each side (total length $M=M^{\prime}+2\ell$).
With Eq.~(\ref{eqn:Wr}), we can explore topological phases in presence of dissipation and disorder.
Note that in the model that we will consider, the matrix $\Lambda$ is not noisy. In general, the model supports a non-trivial topological phase as long as a certain (chiral) symmetry is preserved. Disorder on the elements of $\Lambda$ destroys the symmetry and consequently the system becomes topologically trivial.
\section{Disordered non-Hermitian Systems}
\label{sec:main}
We now apply the real space formalism to investigate topological features in two explicit network geometries, namely the disordered non-Hermitian SSH dimer model and a disordered non-Hermitian trimer model.
\subsection{Disordered non-Hermitian SSH Dimer Model} \label{sec:ssh} \begin{figure}
\caption{Non-Hermitian Su-Schrieffer-Heeger (SSH) model with open boundary conditions. The qubit-cavity and cavity-qubit couplings are given by $J_{1}$ and $J_{2}$ respectively. Off-diagonal disorder is controlled via a uniform distribution function from which the hopping parameters are drawn. }
\label{fig:ssh}
\end{figure}
This model describes an open quantum system of coupled qubits and optical cavities which are arranged in an alternating manner, as shown in Fig.~\ref{fig:ssh}. In the super-one-particle sector, the corresponding restricted Hamiltonian $\mathsf{H}$ in the presence of disorder is given by \begin{eqnarray}
\mathsf{H} & = & \sum_{j=1}^{M}\epsilon_{A,j}|j,A\rangle\langle j,A|+(\epsilon_{B,j}-i\Gamma)|j,B\rangle\langle j,B|\nonumber \\
& + & \sum_{j=1}^{M}(J_{1,j}|j,B\rangle\langle j,A|+\mathrm{h.c.})\nonumber \\
& + & \sum_{j=1}^{M-1}(J_{2,j}|j+1,A\rangle\langle j,B|+\mathrm{h.c.}).\label{2siteH} \end{eqnarray}
Due to the chiral symmetry and the pseudo-anti-hermiticity of the non-dissipative and dissipative model, respectively \citep{Venuti2017,Lieu2018}, the topological states are expected to be robust against the chiral symmetry preserving off-diagonal disorder, i.e., noise in the hopping parameters. In contrast, disorder in the on-site potentials breaks the symmetries and is thus expected to quickly diminish topological features. Indeed, diagonal disorder leads to a unit cell as large as the system, thus having more than one dissipative site per unit cell and hence preventing the existence of topological dark states according to the argument in \citep{Levitov2016}. We therefore restrict the randomness to act on the hopping parameters, i.e., $J_{1,j}\equiv J_{1}+\mu_{1}\omega_{1,j}$ and $J_{2,j}\equiv J_{2}+\mu_{2}\omega_{2,j}$, where $\omega_{\alpha,j}$ are independent random variables with uniform distribution in the range $[-1,+1]$.
\begin{figure}
\caption{Complex density of states of the restricted Hamiltonian $\mathsf{H}$. (a)\&(b) Topologically trivial regime for the clean and disordered ($\mu=1$) case, respectively. (c)\&(d) Topologically non-trivial phase for clean and disordered ($\mu=1$) systems, respectively. Results are averaged over 1000 diagonalizations. Here, $N=20$, $\Gamma=0.5$, $J_{2}=1$ and $J_{1}=1.5$ ($J_{1}=0.5$) for the topologically trivial (non-trivial) configurations.}
\label{fig:dos_offdiag}
\end{figure}
The effect of off-diagonal disorder on the spectrum of the restricted Hamiltonian $\mathsf{H}$ is illustrated in Figure \ref{fig:dos_offdiag}, where the density of states is plotted in the complex plane. In the topologically trivial regime of the clean system, Fig.~\ref{fig:dos_offdiag}~(a), all eigenvalues have imaginary part $-\Gamma/2$. When disorder is introduced, they mainly wash out along the axis $\mathrm{Im}(E)=-\Gamma/2$, as seen in Fig.~\ref{fig:dos_offdiag}~(b). There is, however, a notable non-vanishing density of states emerging in the vicinity of $\operatorname{Re}(E)=0$. In the topologically non-trivial regime, Figs.~\ref{fig:dos_offdiag}~(c)\&(d), a dark state with corresponding $\operatorname{Im}(E)=0$ can be found. Its topological protection against off-diagonal disorder manifests itself in its eigenvalue being left almost unchanged when disorder disturbs the system, while the bulk states featuring eigenvalues with imaginary part $-\Gamma/2$ blur out. The protected dark state corresponds to an edge state having support only on the non-dissipative sites, thus not decaying through the cavities. Another state emerging in the non-trivial phase lives, on the contrary, only on the dissipative sites, with eigenvalue satisfying $\mathrm{Im}(E)=-\Gamma$, as also seen in Fig.~\ref{fig:dos_offdiag}~(c). The mentioned destructive character of on-site potential disorder is discussed in the Appendix, Sec.~\ref{sec:diagonal_disorder}, where the density of states for diagonal disorder is analyzed, see Figs.~\ref{fig:diag_disorder}~(a)-(d).
We now turn to the computation of the winding number. In absence of disorder we can go to reciprocal space and realize that the unitary $U(k)$ in Eq.~\eqref{eqn:Hk} is simply given by the phase of $J_{1}+J_{2}e^{-ik}$. The winding number of the dissipative system is thus the same as the winding number of the closed, Hermitian SSH-chain, resulting in \begin{equation}
W=\Theta(|J_{2}|-|J_{1}|),\label{eqn:Wssh} \end{equation} where $\Theta$ is the Heaviside function ($\Theta(x)=1$ for $x>0$ and $\Theta(x)=0$ for $x<0$). In order to compute the winding number in real space for the non-Hermitian SSH model, we follow the steps described in Sec.~\ref{sec:real_space_method}. First, the Hamiltonian is written in the order of sublattices and divided into four blocks, as in Eq.~\eqref{eqn:Hr}. In this case, $\Lambda=H_{1,1}=\epsilon_{A}\leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I}$, and $V=H_{1,2}$. From Eq.~\eqref{eqn:Hr}, we get $U\tilde{V}=V$, where $U$, $V$ and $\tilde{V}$ are all of dimension $M\times M$. To determine the unitary matrix $U$, we need to fulfill two requirements: i) the columns of $U$ need to be eigenvectors of $\Lambda$ and ii) $\tilde{V}$ needs to be positive definite. Since $\Lambda\propto\leavevmode{\rm 1\ifmmode\mkern-4.8mu \else\kern-0.3em \fi I}$, the first requirement is satisfied for any vector. In order to satisfy the second requirement, we recall that the polar decomposition of an invertible square matrix $V$ is a factorization of the form $V=U\tilde{V}$, where $U$ is a unitary matrix and $\tilde{V}$ is a positive-definite Hermitian matrix. $\tilde{V}$ is uniquely determined by $\tilde{V}=(V^{\dagger}V)^{1/2}$. As a result, $U$ can be written as \begin{equation} U=V(V^{\dagger}V)^{-1/2}. \end{equation}
Finally, the winding number $W$ can be calculated via Eq.~\eqref{eqn:Wr}. From here on, we set the on-site potentials to be zero, i.e., $\epsilon_{A}=\epsilon_{B}=0$.
\begin{figure}
\caption{Phase diagram of the disordered, dissipative SSH model for $N=1000$, $\Gamma=0.5$ and $J_2=1$. Results are averaged over 40 random realizations. (a) isotropic disorder ($\mu_{1}=\mu_{2}=\mu$), (b) anisotropic disorder ($\mu_{1}=2\mu_{2}=\mu$). White lines indicate points of diverging localization length in the thermodynamic limit. }
\label{fig:pd_ssh}
\end{figure} Fig.~\ref{fig:pd_ssh} presents the phase diagrams of the disordered dissipative SSH model as a function of coupling and disorder strength. In Fig.~\ref{fig:pd_ssh}~(a), the disorder is isotropic, i.e.~$\mu_{1}=\mu_{2}=\mu$ and $J_{2}=1$, while in Fig.~\ref{fig:pd_ssh}~(b), we consider anisotropic disorder with $\mu_{1}=2\mu_{2}=\mu$ and $J_{2}=1$. The exact location of the phase transition, illustrated by the white lines in Fig.~\ref{fig:pd_ssh}, can be obtained analytically by studying loci of the divergences in the localization length of the edge modes
\citep{Mondragon2014,book:Palyi2016}, as elucidated in more detail in the Appendix, Sec.~\ref{sec:analytic_contour}. In Fig.~\ref{fig:pd_ssh}~(a), the phase transition occurs at $|J_{2}/J_{1}|=1$ for all disorder strengths as for the clean case.
Fig.~\ref{fig:pd_ssh}~(b) shows a non-trivial \emph{topology by disorder} effect. Namely, for a fixed value of $|J_{1}|>1$ close to one, one enters the topologically non-trivial region by increasing the disorder strength $\mu$, before transitioning into the topologically trivial regime after further increasing the noise. This widening of the topological phase boundary is observed for any kind of anisotropic disorder $\mu_{1}\neq\mu_{2}$.
As already mentioned, the exact phase transition points can be evaluated from the divergence of the localization length. In particular, the phase boundary of the disordered SSH model is given by the equation
$\mathbb{E}(\log|J_{1,j}|)=\mathbb{E}(\log|J_{2,j}|)$, where $\mathbb{E}(\bullet)$
denotes average over disorder (see Eq.~\eqref{eq:loc_ssh}). We first discuss the widening at small disorder strengths observed in Fig.~\ref{fig:pd_ssh}~(b). The second order Taylor expansion of $\mathbb{E}(\log|X|)$ in $\mu_{i}/J_{i}$ reads $\mathbb{E}[\log|X|]\simeq\log(\mathbb{E}[X])-\frac{\mathbb{V}[X]}{2\mathbb{E}[X]^{2}}$ \citep{NIPS2006_3113}, resulting in the following approximation of the phase boundary equation,
\begin{equation}
\log|J_{1}|-\frac{\mu_{1}^{2}}{6J_{1}^{2}}\simeq\log|J_{2}|-\frac{\mu_{2}^{2}}{6J_{2}^{2}}.\label{eq:phaseb_approx} \end{equation}
Fixing $J_{2}$ and $\mu_{2}$ such that the right hand side of Eq.~(\ref{eq:phaseb_approx}) is constant, we see that the function $\log|J_{1}|-\frac{\mu_{1}^{2}}{6J_{1}^{2}}$ is monotonically increasing in $J_{1}$ and decreasing in $\mu_{1}$. Hence, if $\mu_{1}$ increases, $J_{1}$
needs to grow as well in order to compensate. This corresponds to a widening of the topologically non-trivial region for small increasing noise. In the opposite, strong disorder limit, we can expand $\mathbb{E}(\log|J_{\alpha,i}|)$
in $J_{\alpha}/\mu_{\alpha}$, obtaining $\mathbb{E}(\log|J_{\alpha,i}|)=\log|\mu_{\alpha}|-1+O(J_{\alpha}/\mu_{\alpha})$. The phase boundary equation in this regime becomes
\begin{equation}
\log|\mu_{1}|\simeq\log|\mu_{2}|. \end{equation} Hence, for strong disorder, the phase boundary is roughly independent of $J_{1}$ accounting for the horizontal boundary in Fig.~\ref{fig:pd_ssh}~(b).
Similar disorder-induced topological characteristics were also recently discussed in the context of other non-Hermitian models \citep{Luo2019,Zhang2020}. \\
\begin{figure}
\caption{Coherence of the first qubit in the disordered, dissipative SSH model for $N=100$, $\Gamma=0.5$ and $J_2=1$. Results are averaged over 40 random realizations. (a) isotropic disorder ($\mu_{1}=\mu_{2}=\mu$) along the vertical line where $J_1=0$ with $\mu=0.1$ (orange), $\mu=0.5$ (purple), $\mu=1.0$ (red), and in the topologically trivial regime $J_1=1.5$ with $\mu=0.5$ (black). (b) anisotropic disorder ($\mu_{1}=2\mu_{2}=\mu$) along the vertical line where $J_1=1.2$ with $\mu=0.5$ (purple), $\mu=1.5$ (red), $\mu=2.5$ (black), and for $J_1=1.5$ with $\mu=0.5$ (orange). Solid black lines indicate the asymptotic prediction $\mathbb{E}(1-x^2)$ Eq.~(\ref{eq:cinf_approx}) valid for small disorders. }
\label{fig:coh_ssh}
\end{figure}
For each phase diagram, we now fix $J_2=1$ and choose four characteristic parameter configurations in order to get representative coherence time evolutions for the different topological sectors, depicted in Fig.~\ref{fig:coh_ssh}. For isotropic disorder, Fig.~\ref{fig:coh_ssh}~(a), we choose three points along the vertical $J_1=0$ with $\mu=0.1,0.5,1.0$ as well as the configuration $J_1=1.5, \mu=0.5$, representing the disordered topologically non-trivial and trivial regime, respectively. The coherence decays to a non-zero (respectively zero) value at large times in the topologically non-trivial (respectively trivial) sector, thus matching the phase diagram Fig.~\ref{fig:pd_ssh}~(a). In the topologically non-trivial regime, increasing disorder leads to a smaller asymptotic value of the coherence. Similarly, for anisotropic disorder, Fig.~\ref{fig:coh_ssh}~(b), we choose three points along the vertical $J_1=1.2$ with $\mu=0.5,1.5,2.5$ as well as $J_1=0,\mu=0.5$. The former three parameter pairs lie on a vertical line cutting through the broadening of the topologically non-trivial regime, thus representing the reentrance phenomenon into a higher topological phase. It can be seen that a finite coherence of the first qubit is present at large times only for $\mu=1.5$, in agreement with the corresponding phase diagram Fig.~\ref{fig:pd_ssh}~(b). For $J_1=0$ and $\mu=0.5$, a similar behavior as for the isotropic disordered chain can be observed, with a large asymptotic coherence value. In previous work \citep{Venuti2017}, it was shown that for large chains in the topologically non-trivial regime, the coherence saturates to approximately \[ \mathcal{C}(t\rightarrow\infty)\approx 1-x^2, \]
where $x=J_1/J_2$, with $|x|<1$. It is thus natural to assume that the expectation value of the asymptotic coherence including disorder is given by \begin{gather} \mathbb{E}[\mathcal{C}(t\rightarrow\infty)]\approx \mathbb{E}(1-x^2) = \nonumber \\ \frac{1}{4\mu_1 \mu_2} \int_{-\mu_2}^{\mu_2}\int_{-\mu_1}^{\mu_1} \Big[ 1- \Big( \frac{J_1+\omega_1}{J_2+\omega_2} \Big)^2 \Big] \, d\omega_1 \, d\omega_2 = \label{eq:cinf_approx} \\ 1-\frac{3J_1^2 + \mu_1^2}{3J_2^2-3\mu_2^2} \, . \nonumber \end{gather}
Note that this only holds for weak to moderate disorder such that no change of topological phase can be generated randomly, i.e., $\mu_1 + \mu_2 < |J_2| - |J_1|$.
In Fig.~\ref{fig:coh_ssh}, the prediction Eq.~(\ref{eq:cinf_approx}) is illustrated by black solid lines for disorder strengths falling into the discussed regime. For large disorder, random phase changes result in a decrease of the mean coherence in the simulation, and Eq.~(\ref{eq:cinf_approx}) breaks down. \\ \\ It is important to note that, even though the phase diagram is the same as those found in previous works \citep{Mondragon2014,Luo2019}, the physical interpretation is different, as our models include dissipation. Edge states do not correspond to actual electronic states located at one of the boundaries of the chain, but rather describe the physics of the projected density matrix introduced in Sec.~\ref{sec:setup}. A non-trivial topological phase, resulting in quasi-dark states of the restricted Hamiltonian, leads to having an exponentially long (in system size) coherence time of the edge qubit. In the topologically trivial regime, the decoherence of the edge qubit is governed by dissipation, leading to a finite coherence time.
\subsection{Disordered non-Hermitian Trimer Model}
\label{sec:3site}
\begin{figure}
\caption{Non-Hermitian trimer model. Here, the nearest-neighbor couplings $J_{1},J_{2},J_{3}$ alternate cyclically, building a unit cell with three sites. Next-nearest-neighbor couplings $J$ link the first and third site in each unit cell, thus enabling three distinct winding numbers $W=0,1,2$.}
\label{fig:3site}
\end{figure}
Next, we consider a trimer chain with nearest-neighbor as well as next-nearest-neighbor couplings, as depicted in Fig.~\ref{fig:3site}. The corresponding non-Hermitian Hamiltonian, derived from the restricted Lindbladian, is given by \begin{eqnarray}
\mathsf{H} & = & \sum_{j=1}^{M}(J_{1,j}|j,B\rangle\langle j,A|+\text{h.c.})\nonumber \\
& + & \sum_{j=1}^{M-1}(J_{2,j}|j,C\rangle\langle j,B|+\text{h.c.})\nonumber \\
& + & \sum_{j=1}^{M-1}(J_{j}|j,C\rangle\langle j,A|+\text{h.c.}) \\
& + & \sum_{j=1}^{M-1}(J_{3,j}|j+1,A\rangle\langle j,C|+\text{h.c.})\nonumber \\
& + & \sum_{j=1}^{M}\epsilon_{A}|j,A\rangle\langle j,A|+\sum_{j=1}^{N}\epsilon_{B}|j,B\rangle\langle j,B|\nonumber \\
& + & \sum_{j=1}^{M}(\epsilon_{C,j}-i\Gamma)|j,C\rangle\langle j,C|.\label{3site_H}\nonumber \end{eqnarray}
It has been demonstrated that robust chiral edge modes exist in non-dissipative trimer chains, even in the absence of inversion symmetry \citep{Alvarez2019}. It has been argued that their topological character is inherited through a mapping of a higher-dimensional model, namely the commensurate off-diagonal Aubry-André-Harper model, which is topologically equivalent to a two dimensional tight-binding lattice pierced by a magnetic flux \citep{Kraus2012}.
The topological classification by Rudner \textit{et al.}~including dissipation, however, imposes only translational symmetry. In fact, it turns out that the winding number in Eq.~\eqref{eqn:Wr} can be used as a reliable predictor for the number of (quasi)-dark states located on the edge of the trimer chain with open boundary conditions. In previous work \citep{Venuti2017}, it was found that in the clean case, the presence of next-nearest-neighbor couplings enable winding numbers $W=0,1,2$. Concretely, $W$ is given by \begin{align}
W & =\Theta\left(\left|J_{3}\right|-\left|J+J_{2}\tan(\vartheta/2)\right|\right)\nonumber \\
& +\Theta\left(\left|J_{3}\right|-\left|J-J_{2}\cot(\vartheta/2)\right|\right),\label{windingeq_3site} \end{align} where $\vartheta=\arccos\left[\left(\epsilon_{A}-\epsilon_{B}\right)/\sqrt{4J_{1}^{2}+\left(\epsilon_{A}-\epsilon_{B}\right)^{2}}\right].$ We further verify the above equation in the Appendix, Sec.~\ref{sec:appendix}, by solving the system analytically for a convenient system size and counting the number of dark states localized on one edge of the chain. \begin{figure}
\caption{Topology and coherence for the clean, non-disordered trimer model. (a) Winding number and (b)-(d) time dependent coherence for three parameter configurations corresponding to the three topological sectors. The dotted lines indicate the theoretically predicted asymptotic coherence as $t\rightarrow\infty$. Dissipation is set to $\Gamma=0.5$ and a chain with $N=300$, $J_{1}=1,J_{2}=2$ and $J=1$ is considered. The three time evolutions of the coherence in the topological sectors $W=0,1,2$ correspond to parameter choices $J_{3}=0.5,2.0,3.5$, respectively. }
\label{fig:clean_3site}
\end{figure} In order to calculate the winding number using the real-space approach, we first rewrite the Hamiltonian with respect to its sublattices and decompose it as in Eq.~\eqref{eqn:Hr}. In this case, the matrices $\Lambda$ (respectively $V$) with dimensions $2M\times2M$ (respectively $2M\times M$) are given by \begin{equation} \Lambda=\begin{pmatrix}\epsilon_{A}\mathbb{1} & H_{AB}\\ H_{BA} & \epsilon_{B}\mathbb{1} \end{pmatrix} ;\qquad V=\begin{pmatrix}H_{AC}\\ H_{BC} \end{pmatrix}. \end{equation} Here, $H_{AB}=J_{1}\mathbb{1}$. Due to the symmetry of $\Lambda$, $U$ from Eq.~\eqref{eqn:Hr} can be written as \begin{equation} U=\begin{pmatrix}-\cos(\vartheta/2)U_{-} & \sin(\vartheta/2)U_{+}\\ \sin(\vartheta/2)U_{-} & \cos(\vartheta/2)U_{+} \end{pmatrix},\label{eq:U_vsUpm} \end{equation} where $U_{\pm}$ are two $M\times M$ so far unspecified unitaries and $\vartheta$ has been given above. From Eq.~\eqref{eqn:Hr}, we further get $U\tilde{V}=V$, which gives \begin{equation} \begin{pmatrix}-\cos(\vartheta/2)U_{-} & \sin(\vartheta/2)U_{+}\\ \sin(\vartheta/2)U_{-} & \cos(\vartheta/2)U_{+} \end{pmatrix}\begin{pmatrix}\tilde{V}_{-}\\ \tilde{V}_{+} \end{pmatrix}=\begin{pmatrix}H_{AC}\\ H_{BC} \end{pmatrix}, \end{equation} where $\tilde{V}:=(\tilde{V}_{-},\tilde{V}_{+})^{T}$. From the above equation we find \begin{align} U_{+}\tilde{V}_{+} & =\frac{1}{2}(\cos(\vartheta/2)H_{AC}+\sin(\vartheta/2)H_{BC}),\nonumber \\ U_{-}\tilde{V}_{-} & =\frac{1}{2}(-\sin(\vartheta/2)H_{AC}+\cos(\vartheta/2)H_{BC}).\label{eq:3site_decomp} \end{align} Recall that we must fix the gauge freedom in $U$ by requiring the submatrices $\tilde{V}_{\pm}$ to be positive definite. Consequently, $U_{\pm}$ can be determined by polar decomposition of the right hand side of Eq.~\eqref{eq:3site_decomp}, after which the unitary matrix $U$ is obtained using Eq.~\eqref{eq:U_vsUpm}. Finally, the winding number is computed via Eq.~\eqref{eqn:Wr}. 
Interestingly, it can be shown that the winding number of $U$ is nothing more than the sum of the winding numbers of $U_{+}$ and $U_{-}$.
\begin{figure}
\caption{Phase diagram of the disordered, dissipative trimer model for $N=1500$, $\Gamma=0.5$ , $J_1=2$, $J_2=2$, $J_3=3$. Results are averaged over 40 random realizations. In (a), $\mu_{2}=\mu_{J}=\mu_{3}=\mu$, whereas (b) describes disorder with $2\mu_{J}=2\mu_{2}=\mu_{3}=\mu$. White lines indicate the loci of diverging localization lengths in the thermodynamic limit. }
\label{fig:pd_3site}
\end{figure}
For simplicity, we again limit our considerations to the case of vanishing on-site chemical potentials, i.e., $\epsilon_{A}=\epsilon_{B}=\epsilon_{C}=0$. Using the real-space winding number approach for the clean trimer model results in Fig.~\ref{fig:clean_3site}~(a), matching Eq.~\eqref{windingeq_3site}. Figs.~\ref{fig:clean_3site}~(b)-(d) show the typical behavior of the coherence in the three distinct topological sectors $W=0,1,2$ in the clean trimer model, respectively. In the topologically trivial regime, no dark states are present, driving decoherence of the first qubit. For $W=1$, the dark state manifold is one-dimensional, leading to a saturation of the coherence at infinite times. For $W=2$, the existence of two dark states results in Rabi-like oscillations of the first qubit's coherence. The asymptotic solution, Eq.~\eqref{theo_coh}, is also featured in Figs.~\ref{fig:clean_3site}~(b)-(d).
Because of the $J_{1}$ dependence of the dark states, disorder in $J_{1}$ is expected to quickly destroy the topological features of the system. This is further suggested by the degree of freedom of the matrix $U$, Eq.~\eqref{eqn:Hr}, which collapses as soon as $J_{1}$ becomes disordered, leading to an immediate collapse of a well-defined winding number. Therefore, we shall from now on focus on the analysis of the disordered regime where only $J_{2},J_{3},J$ are exposed to noise, which we control via additive random noise drawn from a uniform distribution. Concretely, if $j$ labels the unit cell and $\{\omega_{2}\}$, $\{\omega_{3}\}$, $\{\omega\}$ are sets of independent, uniformly distributed random variables $\in[-1,1]$, $J_{i,j}=J_{i}+\mu_{i}\omega_{i,j}$ for $i=2,3$, $J_{j}=J+\mu_{J}\omega_{j}$, and $J_{1,j}=J_{1}$ for all $j$. The density of states for the different disorder types, depicted in Figure \ref{fig:trimer_dos}, further underlines the selection rules for the type of disorder under which topological dark states are stable.
As for the disordered non-Hermitian SSH model, the full phase diagram for different disorder strengths can be constructed, shown in Fig.~\ref{fig:pd_3site}. Again, the exact phase transition points in the thermodynamic limit are depicted by white lines, which are derived via the dark state localization length considering disorder in the Appendix, Sec.~\ref{sec:appendix}. The phase diagram features rich structures, presenting widenings of topologically non-trivial phases for moderate (high) disorder strengths in the chain with equal (different) disorder amplitudes. Note that the system with different distributions on the disordered parameters, $2\mu_{2}=2\mu_{J}=\mu_{3}=\mu$, is more similar to what we called anisotropic disorder in the SSH model, being due
to the competition between $|J_{2}\pm J|$ and $J_{3}$ deciding the topological phase for the trimer model Eq.~\eqref{windingeq_3site}. When computing the localization length, the disorder amplitudes of
$J_{2}$ and $J$ hence add up, as is explicitly seen in Eq.~\eqref{eq:3siteLL}. Note, however, that the effective disorder on $|J_{2}\pm J|$ is $\mu/\sqrt{2}<\mu$, which results in having a widening of the non-topological phases in the large disorder regime. Analogously, the trimer system having equal disorder on all hopping parameters resembles the case $\mu_{1}>\mu_{2}$ of the SSH-model, featuring a widening of the topologically non-trivial regimes for small disorders. \\ \\ \begin{figure}
\caption{Coherence of the first qubit in the disordered, dissipative trimer model for $N=300$, $\Gamma=0.5$, $J_1=1$, $J_2=2$ and $J_3=3$. Results are averaged over 40 random realizations. (a) For $\mu_{J}=\mu_{2}=\mu_{3}=\mu$, we show the coherence for $(J,\mu)=(0,1)$ (orange), $(J,\mu)=(3,1)$ (purple), and $(J,\mu)=(0,7)$ (black), corresponding to $W=2,1,0$, respectively. (b) For $2\mu_{J}=2\mu_{2}=\mu_{3}=\mu$, we highlight the reentrance into a higher topological phase along the vertical line $J=1.2$, with $\mu=1$ (purple) and $\mu=7$ (orange), corresponding to $W=1,2$, respectively. We further show the trivial regime by evaluating the coherence for $(J,\mu)=(6,1)$ (black). The observable broadening of the curves is due to the error of the mean, pictured by error bars for every data point. }
\label{fig:coh_3site}
\end{figure} We shall again pick three points in each phase diagram and illustrate the corresponding time evolution of the first qubit's coherence, seen in Fig.~\ref{fig:coh_3site}. For $\mu_{J}=\mu_{2}=\mu_{3}=\mu$, Fig.~\ref{fig:coh_3site}~(a), we choose the parameter pairs $(J,\mu) = (0,1), (3,1), (0,7)$, belonging to winding numbers $W=2,1,0$, respectively (cf. Fig.~\ref{fig:pd_3site}). For all configurations, we find that the asymptotic behavior of the coherence matches that of the clean case, namely a decrease to zero for $W=0$, a convergence to a constant larger than zero for $W=1$, and an oscillation for $W=2$. For different disorder strengths $2\mu_{J}=2\mu_{2}=\mu_{3}=\mu$, we focus on the reentrance phenomenon $W=1\rightarrow 2$ by computing the coherence for $(J,\mu)=(1.2,1), (1.2,7)$. Indeed, we find that for large enough disorder, an oscillating behavior emerges, signaling the change of topological phase. For completeness, we also include $(J,\mu)=(6,1)$ representing the trivial sector, where a vanishing coherence can be observed at large times.
\section{Application to Quantum Computation} \label{sec:quantum_computation} Ever since Kitaev's proposal \citep{Kitaev2006} to braid anyons in order to realize non-trivial quantum gates, the field of topological quantum computation has been an exceptionally active field of research \citep{Sankar2008, Sau2010, Sarma2015, Stern2013, Alicea2011, Freedman2006, Freedman2003, Akhmerov2010}. This is mainly due to the promising protection against environmental noise governed by the non-locality of the state manifold used for braiding \citep{Lahtinen2017}. Spinless p-wave superconductor wires hosting non-Abelian Majorana fermions bound to topological defects have been of particular interest \citep{Fisher2010}, as the intrinsic particle-hole symmetry of the BdG-Hamiltonian promises a realizable topological protection. Recently, the SSH model has been analyzed in terms of its applicability to quantum computation \citep{Andras2019}, where it was found that the non-trivial braiding statistics of the topological edge modes can be used to build quantum gates via Y-junctions. However, as for all quantum gates based on symmetry protected topological states, the set of quantum gates is not universal \citep{Lahtinen2017}. Nevertheless, studying the braiding statistics for our concrete open disordered models seems like an exciting and promising work for future projects.
\section{Conclusions} \label{sec:conclusion} We have analyzed and topologically classified disordered dissipative qubit-cavity dimer and trimer architectures, with special focus on topological protection mechanisms of the coherence measure in a fiducial qubit. The evolution of the qubit's coherence is exactly given by a non-Hermitian Hamiltonian which thus emerges from a bona-fide physical system. We demonstrated the use of a real-space topological invariant $W$, which accurately predicts the number of non-trivial (quasi-)dark modes in disordered, non-Hermitian models, as long as certain symmetries are preserved by the disorder operators. We then computed the phase diagrams of dimer and trimer chains in the parameter space spanned by the tunneling amplitude and the disorder strength, predicting the fate of the fiducial qubit's coherence at long times, i.e., decay to zero, a constant value or oscillatory behavior for winding numbers $W=0,1,2$, respectively. For certain choices of disorder strengths or the hopping parameters, reentrance phenomena into topological phases with higher winding numbers were observed, leading to an increase of coherence times (exponentially large in system size) when introducing higher noise levels. Possible applications in topological quantum computing via braiding of dark modes were briefly discussed, opening up interesting questions for future research. Furthermore, generalizations of the classification to larger numbers of sites per unit cell and systems of higher dimension would be of great interest.
\textbf{Acknowledgements:} We would like to thank Hubert Saleur for useful discussions. This work was supported by the US Department of Energy under grant number DE-FG03-01ER45908. L.C.V. acknowledges partial support from the Air Force Research Laboratory award no. FA8750-18-1- 0041. The research is based upon work (partially) supported by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via the U.S. Army Research Office contract W911NF-17-C-0050. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright annotation thereon.
\onecolumngrid
\appendix
\section{Dark states in the dissipative trimer model}
\label{sec:appendix}
\label{ds3site}
We here derive an exact form of the asymptotic coherence dynamics and the topological phase transition in the trimer model by studying the dark states, i.e., by finding all states that obey $\mathsf{H}|\psi\rangle\!\rangle=E|\psi\rangle\!\rangle$ with $E\in\mathbb{R}$. For the sake of convenience, the following considerations assume chain lengths $N\mod3=2$, as the system then hosts exact dark states with vanishing imaginary part. For all other system sizes the states are quasi-dark, as they have an imaginary part exponentially small in the system size. Of course, in the thermodynamic limit, these differences vanish, and the dynamics is exactly described by the result below. The ansatz is to look for possible dark states with energies $E=\pm J_{1}$, i.e., to find the kernel of the matrix \begin{equation} \mathsf{H}\mp\mathbb{1}J_{1}=\begin{pmatrix}\mp J_{1} & J_{1} & J & 0 & 0 & 0\\ J_{1} & \mp J_{1} & J_{2} & 0 & 0 & 0\\ J & J_{2} & \mp J_{1}-i\Gamma & J_{3} & 0 & 0\\ 0 & 0 & J_{3} & \mp J_{1} & J_{1} & J\\ 0 & 0 & 0 & J_{1} & \mp J_{1} & \ddots\\ 0 & 0 & 0 & J & \ddots & \ddots \end{pmatrix}.\label{dseq} \end{equation} For $N\mod3=2$, solutions of \eqref{dseq} are of the form \begin{eqnarray} v_{+} & = & \big(1,1,0,-\delta_{+},-\delta_{+},0,(-\delta_{+})^{2},(-\delta_{+})^{2},0,...,(-\delta_{+})^{\frac{N-2}{3}},(-\delta_{+})^{\frac{N-2}{3}}\big)^{T},\nonumber \\ v_{-} & = & \big(1,-1,0,\delta_{-},-\delta_{-},0,\delta_{-}^{2},-\delta_{-}^{2},0,...,\delta_{-}^{\frac{N-2}{3}},-\delta_{-}^{\frac{N-2}{3}}\big)^{T}.\label{ds} \end{eqnarray}
These solutions are intuitive and analogous to the open SSH model \citep{Venuti2017}, in the sense that they disappear on all dissipative sites. The condition $E=\pm J_{1}$ signals the equivalence of the first two sites of each unit cell up to a sign factor. Eq.~\eqref{ds} leads to \begin{equation}
\delta_{\pm}=\frac{|J\pm J_{2}|}{J_{3}}, \end{equation}
where the sign of the solution is fixed without loss of generality by assuming $\delta_{\pm}$ to be positive. The winding number classification is illustrated in the corresponding vectors, as we find zero, one, or two dark states localized at the outer left qubit for different topological sectors, i.e., $W=\Theta(J_{3}-|J-J_{2}|)+\Theta(J_{3}-|J+J_{2}|)$. Taking into account the normalization factor of the solutions, \begin{equation} A_{\pm}^{-2}=2\sum_{k=0}^{\frac{N-2}{3}}\delta_{\pm}^{2k}=2\frac{1-\delta_{\pm}^{\frac{2N-4}{3}}}{1-\delta_{\pm}^{2}}, \end{equation} the time dependent coherence can be approximated for large times $t\gg1/\Gamma$, \begin{align}
\mathcal{C}(t) & =|\langle\langle1|e^{-i\mathsf{H}t}|1\rangle\rangle|\approx|e^{-iJ_{1}t}A_{+}^{2}+e^{iJ_{1}t}A_{-}^{2}|\nonumber \\
& =|A_{+}^{4}+A_{-}^{4}+2A_{+}^{2}A_{-}^{2}\cos2J_{1}t|.\label{theo_coh} \end{align}
\section{Analytical Determination of Critical Phase Transition Contours} \label{sec:analytic_contour}
In \citep{Mondragon2014}, the critical phase transition surface was derived for the Hermitian SSH model, using the numerical transfer matrix method and level-spacing statistics analysis. The analytical critical phase transition contour for non-Hermitian models can be calculated in a similar manner. To see this, consider the non-Hermitian SSH model. Here, the dark edge state is exactly at zero energy and only lives on the non-decaying sublattice. We consider the critical phase transition in the thermodynamic limit, such that the results for the linear chain of odd length coincide with the results of even length. Now recall that the edge state of the disordered Hermitian SSH model is also supported entirely by one sublattice or the other. Its zero energy edge state on sublattice A ($\psi_{n,B}=0$) can be written as \begin{equation}
\psi_{n,A}=i^{n-1}\prod_{j=1}^{n}\left|\frac{J_{1j}}{J_{2j}}\right|\psi_{1,A},\label{eq:wf_phasetrans} \end{equation} where $J_{1j}$ and $J_{2j}$ are the two perturbed hopping parameters in the $j$th unit cell. The edge states in the two systems share an identical distribution in the clean limit. Consequently, in this case, the non-Hermitian problem follows the same localization length and phase transition as a one-dimensional Hermitian SSH model.
With the exact wave function distribution as in Eq.~\eqref{eq:wf_phasetrans}, the inverse localization length of an edge mode can be obtained by \begin{eqnarray}
\Lambda^{-1} & = & -\lim_{n\rightarrow\infty}\frac{1}{n}\log\left|\psi_{n,A}\right|\nonumber \\
& = & \left|\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{j=1}^{n}\left(\ln\left|J_{1j}\right|-\ln\left|J_{2j}\right|\right)\right| \end{eqnarray} An analytical result can be obtained by taking the ensemble average of the last expression. The limit of the sum turns into an integration for independent and identically distributed disorder, \begin{equation}
\Lambda^{-1}=\frac{1}{4}\left|\int_{-1}^{1}d\omega\int_{-1}^{1}d\omega^{\prime}\left(\ln\left|J_{1}+\mu_{1}\omega\right|-\ln\left|J_{2}+\mu_{2}\omega^{\prime}\right|\right)\right|,\label{eq:loc_ssh} \end{equation} where $J_{1}$ and $J_{2}$ are the unperturbed hopping parameters. $\mu_{1}$ and $\mu_{2}$ control the strength of disorder in $J_{1}$ and $J_{2}$ respectively. The random variables $\omega$ and $\omega^{\prime}$ are both drawn from a uniform distribution in the range $[-1,1]$, leading to a normalization prefactor $1/4$. The analytic solution to this integral has been obtained in \citep{Mondragon2014}, \begin{multline}
\Lambda^{-1}=\frac{1}{4\mu_{1}}\left[\left(J_{1}+\mu_{1}\right)\log\left|J_{1}+\mu_{1}\right|-\left(J_{1}-\mu_{1}\right)\log\left|J_{1}-\mu_{1}\right|\right]\\
-\frac{1}{4\mu_{2}}\left[\left(J_{2}+\mu_{2}\right)\log\left|J_{2}+\mu_{2}\right|-\left(J_{2}-\mu_{2}\right)\log\left|J_{2}-\mu_{2}\right|\right]\label{eq:2siteLL} \end{multline}
For small disorder, $\mu_{1},\mu_{2}\ll J_{2},J_{1}$, the inverse localization length Eq.~\eqref{eq:loc_ssh} can be approximated by \begin{eqnarray}
\Lambda^{-1} & \propto & \int_{-1}^{1}\int_{-1}^{1}d\omega_{1}d\omega_{2}\ln|J_{1}+\omega_{1}\mu_{1}|-\ln|J_{2}+\omega_{2}\mu_{2}|\nonumber \\
& = & \int_{-1}^{1}\int_{-1}^{1}d\omega_{1}d\omega_{2}\ln|J_{1}|+\frac{\omega_{1}\mu_{1}}{J_{1}}-\frac{1}{2}\Big(\frac{\omega_{1}\mu_{1}}{J_{1}}\Big)^{2}-\Big[\ln|J_{2}|+\frac{\omega_{2}\mu_{2}}{J_{2}}-\frac{1}{2}\Big(\frac{\omega_{2}\mu_{2}}{J_{2}}\Big)^{2}\Big]\nonumber \\
& + & \mathcal{O}\Big(\Big(\frac{\mu_{1}}{J_{1}}\Big)^{3}\Big)+\mathcal{O}\Big(\Big(\frac{\mu_{2}}{J_{2}}\Big)^{3}\Big). \end{eqnarray}
Performing the integration up to order $\mathcal{O}\big(\big(\frac{\mu_{1}}{J_{1}}\big)^{3}\big)$ and $\mathcal{O}\big(\big(\frac{\mu_{2}}{J_{2}}\big)^{3}\big)$, one finds that the localization length diverges for \begin{equation}
|J_{1}|(\mu_{1},\mu_{2})=|J_{2}|\exp\Big(\frac{J_{2}^{2}\mu_{1}^{2}-J_{1}^{2}\mu_{2}^{2}}{J_{1}^{2}J_{2}^{2}}\Big), \end{equation} which, up to leading order in the expansion of the exponential function, reduces to \begin{equation}
|J_{1}|(\mu_{1},\mu_{2})=|J_{2}|\exp\Big(\frac{\mu_{1}^{2}-\mu_{2}^{2}}{J_{2}^{2}}\Big). \end{equation} We thus arrive at the conclusion that the value of $J_{1}$ where the non-trivial$\leftrightarrow$trivial transition occurs increases (decreases) compared to the clean case for small disorder strengths if $\mu_{2}<\mu_{1}$ ($\mu_{2}>\mu_{1}$). This corresponds to the \textit{topology by disorder} effect discussed in the main text and can be nicely seen in Fig.~\ref{fig:pd_ssh}(b). For $\mu_{1}=\mu_{2}$, the phase transition always occurs at $J_{1}=J_{2}$, as observed in Fig.~\ref{fig:pd_ssh}(a). Now we continue to generalize the result to the non-Hermitian trimer model. In Sec.~\ref{ds3site}, we have shown that the trimer model of length $N\mod{3}$ can host two dark edge modes with energies $E=\pm J_{1}$. These edge modes are supported purely by non-decaying sublattices. The wave functions of the two dark states for the disordered three-site model are given by \begin{equation}
\psi_{n,ds\pm}=(-1)^{n-1}\prod_{j=1}^{n}\left|\frac{J_{2j}\pm J_{j}}{J_{3j}}\right|\psi_{1,ds\pm}, \end{equation} where $J_{j}$, $J_{2j}$ and $J_{3j}$ are perturbed hopping parameters in the $j$th unit cell. Since there exist two different edge modes, we would expect two distinct localization lengths, \begin{align}
\Lambda_{ds\pm}^{-1} & =-\lim_{n\rightarrow\infty}\frac{1}{n}\log\left|\psi_{n,ds\pm}\right|\\
& =\left|\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{j=1}^{n}\left(\ln\left|J_{2j}\pm J_{j}\right|-\ln\left|J_{3j}\right|\right)\right|. \end{align} Again, we take the ensemble average, and the summation turns into an integration, which gives \begin{equation}
\Lambda_{ds\pm}^{-1}=\frac{1}{8}\left|\int_{-1}^{1}d\omega\int_{-1}^{1}d\omega^{\prime}\int_{-1}^{1}d\omega^{\prime\prime}\left(\ln\left|(J+\mu\omega)\pm(J_{2}+\mu_{2}\omega^{\prime})\right|-\ln\left|J_{3}+\mu_{3}\omega^{\prime\prime}\right|\right)\right|. \end{equation} Here $J$, $J_{2}$ and $J_{3}$ are unperturbed hopping parameters. $\mu$, $\mu_{2}$ and $\mu_{3}$ define the amplitudes of disorder. $\omega$, $\omega^{\prime}$ and $\omega^{\prime\prime}$ are three independent and identically distributed random variables in the range of $[-1,1]$. After performing the integration explicitly, we arrive at \begin{eqnarray}
\Lambda_{ds\pm}^{-1} & = & \frac{1}{2\mu\mu_{2}}\Big\{\left(J\pm J_{2}-\mu-\mu_{2}\right)^{2}\log\left(\left|J\pm J_{2}-\mu-\mu_{2}\right|\right)-\left(J\pm J_{2}+\mu-\mu_{2}\right)^{2}\log\left(\left|J\pm J_{2}+\mu-\mu_{2}\right|\right)\nonumber \\
& - & \left(J\pm J_{2}-\mu+\mu_{2}\right)^{2}\log\left(\left|J\pm J_{2}-\mu+\mu_{2}\right|\right)+\left(J\pm J_{2}+\mu+\mu_{2}\right)^{2}\log\left|J\pm J_{2}+\mu+\mu_{2}\right|\Big\}\nonumber \\
& - & \frac{1}{\mu_{3}}\Big\{\left(J_{3}+\mu_{3}\right)\log\left(\left|J_{3}+\mu_{3}\right|\right)-\left(J_{3}-\mu_{3}\right)\log\left(\left|J_{3}-\mu_{3}\right|\right)\Big\}-4.\label{eq:3siteLL} \end{eqnarray} Eq. \eqref{eq:2siteLL} and Eq. \eqref{eq:3siteLL} allow us to trace the exact critical phase transition contours in the non-Hermitian SSH dimer and trimer models.
\section{Diagonal Disorder} \label{sec:diagonal_disorder} \begin{figure}
\caption{Eigenspectrum density of states of the restricted Hamiltonian $\mathsf{H}$ in the topologically non-trivial regime for (a) the clean system, and diagonal disorder strengths (b) $\mu=0.5$, (c) $\mu=1$, (d) $\mu=1.5$. Results are averaged over 1000 diagonalizations. Here, $N=20$, $\Gamma=0.5$, $J_{1}=0.5$, $J_{2}=1$. (e) Coherence time of the edge qubit as a function of the intra-cell hopping strength $J_{1}$ and diagonal disorder $\mu$. In this setting, $J_{2}=1$, $N=100$, and the time evolution of the coherence is disorder averaged over 50 realizations. }
\label{fig:diag_disorder}
\end{figure}
As argued in the main text, diagonal disorder destroys the protective chiral symmetry of the SSH model, making it collapse to a topologically trivial phase. This effect can be nicely seen when considering the eigenspectrum density of states of the restricted Hamiltonian, as already introduced in the main text for symmetry conserving disorder. In analogy to off-diagonal disorder, the on-site potentials $\epsilon_{A,i}$ and $\epsilon_{B,i}$ are chosen to be uniformly distributed between $[-\mu,\mu]$.
Fig.~\ref{fig:diag_disorder}~(a)-(d) illustrates how the topological dark states appearing in the clean system quickly wash out, joining the non-topological bulk state manifold. This is in stark contrast to a finite symmetry conserving off-diagonal disorder, where the topological dark states were almost unaffected by the noise, cf. Figure \ref{fig:dos_offdiag}.
To underline the destructive effect further, the edge qubit's coherence is inspected. As soon as disorder on the on-site potentials is introduced, the coherence time is not infinite anymore, but it is reduced to a finite value $\tau$. By assuming an exponential decay in time, i.e., $\mathcal{C}(t)=\mathcal{C}(t_{0})e^{-(t-t_{0})/\tau}$ for some $t_{0}\gg1/\Gamma$, we can extract $\tau$ by integrating over the time evolution of the coherence, i.e., \begin{equation} I:=\int_{t_{0}}^{t_{1}}\mathcal{C}(t)dt=\int_{t_{0}}^{t_{1}}\mathcal{C}(t_{0})e^{-(t-t_{0})/\tau}dt=\tau(\mathcal{C}(t_{0})-\mathcal{C}(t_{1})). \end{equation}
Numerical integration leads to the results depicted in Fig.~\ref{fig:diag_disorder}~(e), where a sharp drop of the coherence time away from the fully dimerized, clean limit can be observed (notice the logarithmic scaling on the z-axis). For the trimer model, very similar behavior is being observed, for disorder acting on either on-site potentials or the coupling parameter $J_{1}$, see Fig.~\ref{fig:trimer_dos} for the DOS.
\begin{figure}
\caption{Density of states for the trimer model. (a)-(c) Clean density of states for $W=2,1,0$, respectively. (d)-(f) Diagonal disorder $\mu=1$. (g)-(i) Off diagonal disorder on $J_{2},J_{3},J$ with $\mu=1$. Here, $N=21$, $J_{1}=J_{2}=2$, $J_{3}=3$, and $J=0,3,6$ for the topological phases $W=2,1,0$, respectively. It is seen how $W$ quasi-dark states with energies $E=\pm J_{1}$ exist in the clean system, being unstable (stable) for the considered diagonal (off-diagonal) disorder. }
\label{fig:trimer_dos}
\end{figure}
\end{document} |